diff --git a/build.gradle b/build.gradle
index de33f53a7..5ab63a88a 100644
--- a/build.gradle
+++ b/build.gradle
@@ -283,9 +283,9 @@ tasks.register('deploy') {
     dependsOn 'jar'
     dependsOn 'releaseJDocs'
-    dependsOn 'publishToSonatype'
 }
 // 3:
+// execute dependsOn 'publishToSonatype' then
 // go to https://oss.sonatype.org/#stagingRepositories
 // close and release the repository
diff --git a/docs/coverage/test/html/index.html b/docs/coverage/test/html/index.html
index 9040a6907..8f713a3f7 100644
--- a/docs/coverage/test/html/index.html
+++ b/docs/coverage/test/html/index.html
@@ -1 +1 @@
-neureka

[remainder of the removed minified JaCoCo index.html: per-package coverage table (instructions, branches, complexity, lines, methods, classes) — Total: 13,739 of 91,713 instructions missed (85% instruction coverage), 1,795 of 7,593 branches missed (76% branch coverage), 537 classes]
\ No newline at end of file
+neureka
[remainder of the added minified JaCoCo index.html: regenerated per-package coverage table — Total: 19,972 of 92,388 instructions missed (78% instruction coverage), 2,112 of 7,633 branches missed (72% branch coverage), 539 classes]
\ No newline at end of file
diff --git a/docs/coverage/test/html/jacoco-sessions.html b/docs/coverage/test/html/jacoco-sessions.html
index 55de35950..df7071f28 100644
--- a/docs/coverage/test/html/jacoco-sessions.html
+++ b/docs/coverage/test/html/jacoco-sessions.html
@@ -1 +1 @@
-Sessions

[remainder of the removed minified JaCoCo jacoco-sessions.html: coverage based on session "Aotearoa-f28aa408" (start 29 Jun 2023, 16:30:13, dump 29 Jun 2023, 16:31:38), followed by the list of classes and execution-data ids considered in the report (test specs, Spock report internals, Groovy runtime classes, generated GString template scripts)]
groovy.tmp.templates.GStringTemplateScript2._getTemplate_closure1._closure13._closure27de722ecac72d4a8f
groovy.tmp.templates.GStringTemplateScript2._getTemplate_closure1._closure13._closure29a1c4993a6c2cdb8c
groovy.tmp.templates.GStringTemplateScript2._getTemplate_closure1._closure13._closure29._closure3375533cf1be5db68b
groovy.tmp.templates.GStringTemplateScript2._getTemplate_closure1._closure13._closure30abfa3e2b6b33799d
groovy.tmp.templates.GStringTemplateScript2._getTemplate_closure1._closure13._closure31e659355b2efb193d
groovy.tmp.templates.GStringTemplateScript2._getTemplate_closure1._closure22d7ee1a040fe47dc
groovy.tmp.templates.GStringTemplateScript2._getTemplate_closure1._closure31fa7f3ca78c152a1
groovy.tmp.templates.GStringTemplateScript2._getTemplate_closure1._closure49dc8dce7792a12ab
groovy.tmp.templates.GStringTemplateScript2._getTemplate_closure1._closure53e74877494a32195
groovy.tmp.templates.GStringTemplateScript2._getTemplate_closure1._closure5._closure1449152c4282821509
groovy.tmp.templates.GStringTemplateScript2._getTemplate_closure1._closure6f769f8aab5efdd1d
groovy.tmp.templates.GStringTemplateScript2._getTemplate_closure1._closure7fdce1cc6ab160308
groovy.tmp.templates.GStringTemplateScript2._getTemplate_closure1._closure7._closure15880e7894e2ba081c
groovy.tmp.templates.GStringTemplateScript2._getTemplate_closure1._closure80d4e6c72913dd04d
groovy.tmp.templates.GStringTemplateScript2._getTemplate_closure1._closure9e61d1994225ec019
groovy.tmp.templates.GStringTemplateScript2._getTemplate_closure1._closure9._closure161d069a53d328a332
groovy.tmp.templates.GStringTemplateScript2._getTemplate_closure1._closure9._closure171c001a759bca15ca
groovy.tmp.templates.GStringTemplateScript20d226ec36b70effd8
groovy.tmp.templates.GStringTemplateScript20._getTemplate_closure1f4603168b11f7c8b
groovy.tmp.templates.GStringTemplateScript20._getTemplate_closure1._closure10263d6e99fd3d1645
groovy.tmp.templates.GStringTemplateScript20._getTemplate_closure1._closure10._closure22337fc101a0be1771
groovy.tmp.templates.GStringTemplateScript20._getTemplate_closure1._closure10._closure23322049f464a35be8
groovy.tmp.templates.GStringTemplateScript20._getTemplate_closure1._closure114154e2da2e1192cc
groovy.tmp.templates.GStringTemplateScript20._getTemplate_closure1._closure12e99bb7bd52abc526
groovy.tmp.templates.GStringTemplateScript20._getTemplate_closure1._closure13e05c68e96b6e3348
groovy.tmp.templates.GStringTemplateScript20._getTemplate_closure1._closure13._closure26eb0b0cf8889b4866
groovy.tmp.templates.GStringTemplateScript20._getTemplate_closure1._closure13._closure27eb07a687bb6d8f9e
groovy.tmp.templates.GStringTemplateScript20._getTemplate_closure1._closure13._closure29e96bf81e4e51e37e
groovy.tmp.templates.GStringTemplateScript20._getTemplate_closure1._closure13._closure30ea6f3d2ce5b7a82b
groovy.tmp.templates.GStringTemplateScript20._getTemplate_closure1._closure13._closure31866658fd8e425e01
groovy.tmp.templates.GStringTemplateScript20._getTemplate_closure1._closure268774926b32956d7
groovy.tmp.templates.GStringTemplateScript20._getTemplate_closure1._closure3e037d6dd812fcf23
groovy.tmp.templates.GStringTemplateScript20._getTemplate_closure1._closure480ed0244e4532f53
groovy.tmp.templates.GStringTemplateScript20._getTemplate_closure1._closure5081465889109f652
groovy.tmp.templates.GStringTemplateScript20._getTemplate_closure1._closure5._closure14564aef78d5c11cc2
groovy.tmp.templates.GStringTemplateScript20._getTemplate_closure1._closure6b5d98ad9b22c5258
groovy.tmp.templates.GStringTemplateScript20._getTemplate_closure1._closure7919d258b231133b7
groovy.tmp.templates.GStringTemplateScript20._getTemplate_closure1._closure7._closure15fda28656259d79f0
groovy.tmp.templates.GStringTemplateScript20._getTemplate_closure1._closure84d2b3fd25f416dca
groovy.tmp.templates.GStringTemplateScript20._getTemplate_closure1._closure96da6cb8ccecd4e3d
groovy.tmp.templates.GStringTemplateScript20._getTemplate_closure1._closure9._closure16d6cd9615b504bdf7
groovy.tmp.templates.GStringTemplateScript20._getTemplate_closure1._closure9._closure17d0edeb867c03c723
groovy.tmp.templates.GStringTemplateScript219da333c4502fc5b8
groovy.tmp.templates.GStringTemplateScript21._getTemplate_closure125bb7b91cfd96068
groovy.tmp.templates.GStringTemplateScript21._getTemplate_closure1._closure10bef271dc8c042d1e
groovy.tmp.templates.GStringTemplateScript21._getTemplate_closure1._closure11399c2dc56b60abf7
groovy.tmp.templates.GStringTemplateScript21._getTemplate_closure1._closure12f0b5ac860aafb56b
groovy.tmp.templates.GStringTemplateScript21._getTemplate_closure1._closure1391c0c52f8dc08819
groovy.tmp.templates.GStringTemplateScript21._getTemplate_closure1._closure13._closure26ca485dfd5a05b98e
groovy.tmp.templates.GStringTemplateScript21._getTemplate_closure1._closure13._closure26._closure32c6ca614a04fe189e
groovy.tmp.templates.GStringTemplateScript21._getTemplate_closure1._closure13._closure2710a00c7916142a7e
groovy.tmp.templates.GStringTemplateScript21._getTemplate_closure1._closure13._closure2990b910a13357fe5b
groovy.tmp.templates.GStringTemplateScript21._getTemplate_closure1._closure13._closure29._closure33cbad1fb73e21c556
groovy.tmp.templates.GStringTemplateScript21._getTemplate_closure1._closure13._closure30b28061abbf877ce9
groovy.tmp.templates.GStringTemplateScript21._getTemplate_closure1._closure13._closure318534efee833ecc08
groovy.tmp.templates.GStringTemplateScript21._getTemplate_closure1._closure21296105d229ca31f
groovy.tmp.templates.GStringTemplateScript21._getTemplate_closure1._closure390e51208593b6ffc
groovy.tmp.templates.GStringTemplateScript21._getTemplate_closure1._closure43a8d948a0ada72a9
groovy.tmp.templates.GStringTemplateScript21._getTemplate_closure1._closure53c31f9b1413a4eff
groovy.tmp.templates.GStringTemplateScript21._getTemplate_closure1._closure5._closure14672e92f605566ef4
groovy.tmp.templates.GStringTemplateScript21._getTemplate_closure1._closure6dbbfcf953937fefa
groovy.tmp.templates.GStringTemplateScript21._getTemplate_closure1._closure77e2febacfaadf922
groovy.tmp.templates.GStringTemplateScript21._getTemplate_closure1._closure7._closure15c205e2dab092685f
groovy.tmp.templates.GStringTemplateScript21._getTemplate_closure1._closure8e4f7e5af87db45e2
groovy.tmp.templates.GStringTemplateScript21._getTemplate_closure1._closure98a7f5287c5b7d5e8
groovy.tmp.templates.GStringTemplateScript21._getTemplate_closure1._closure9._closure1692d9e8e1179d61db
groovy.tmp.templates.GStringTemplateScript21._getTemplate_closure1._closure9._closure17438a0dbc8f512c11
groovy.tmp.templates.GStringTemplateScript224d2d53d3794c8b18
groovy.tmp.templates.GStringTemplateScript22._getTemplate_closure1e7d6a49a4c93454c
groovy.tmp.templates.GStringTemplateScript22._getTemplate_closure1._closure10a7a350131f4f60f2
groovy.tmp.templates.GStringTemplateScript22._getTemplate_closure1._closure11b0c57ce4a4f3e0ba
groovy.tmp.templates.GStringTemplateScript22._getTemplate_closure1._closure11._closure241e9d9eff3ded2259
groovy.tmp.templates.GStringTemplateScript22._getTemplate_closure1._closure11._closure2507bd3dbe0661d9a2
groovy.tmp.templates.GStringTemplateScript22._getTemplate_closure1._closure12dbc781cbe2a325bc
groovy.tmp.templates.GStringTemplateScript22._getTemplate_closure1._closure1303653364a63345ea
groovy.tmp.templates.GStringTemplateScript22._getTemplate_closure1._closure13._closure26a98daef32da6abb6
groovy.tmp.templates.GStringTemplateScript22._getTemplate_closure1._closure13._closure26._closure328e958065edcb5139
groovy.tmp.templates.GStringTemplateScript22._getTemplate_closure1._closure13._closure27ac48f37ae19ec45f
groovy.tmp.templates.GStringTemplateScript22._getTemplate_closure1._closure13._closure291ace2960b45dd934
groovy.tmp.templates.GStringTemplateScript22._getTemplate_closure1._closure13._closure29._closure33d085060d92ac28f5
groovy.tmp.templates.GStringTemplateScript22._getTemplate_closure1._closure13._closure305bb1842251d601af
groovy.tmp.templates.GStringTemplateScript22._getTemplate_closure1._closure13._closure3180c336db94bb7a13
groovy.tmp.templates.GStringTemplateScript22._getTemplate_closure1._closure29db5fbd19042bd47
groovy.tmp.templates.GStringTemplateScript22._getTemplate_closure1._closure301925f7631068e9d
groovy.tmp.templates.GStringTemplateScript22._getTemplate_closure1._closure4442c2fd9394194a6
groovy.tmp.templates.GStringTemplateScript22._getTemplate_closure1._closure5605f5dfb316e8708
groovy.tmp.templates.GStringTemplateScript22._getTemplate_closure1._closure5._closure143482146574eff8ae
groovy.tmp.templates.GStringTemplateScript22._getTemplate_closure1._closure669150040a41b0b1c
groovy.tmp.templates.GStringTemplateScript22._getTemplate_closure1._closure7fef8b9c49068a69c
groovy.tmp.templates.GStringTemplateScript22._getTemplate_closure1._closure7._closure1582ec4f4f0f835aae
groovy.tmp.templates.GStringTemplateScript22._getTemplate_closure1._closure8ae928b29ee753d9b
groovy.tmp.templates.GStringTemplateScript22._getTemplate_closure1._closure91215f99ad8387996
groovy.tmp.templates.GStringTemplateScript22._getTemplate_closure1._closure9._closure165ee56bfcf03705af
groovy.tmp.templates.GStringTemplateScript22._getTemplate_closure1._closure9._closure17462227f39aa61146
groovy.tmp.templates.GStringTemplateScript2302a88c219e6db178
groovy.tmp.templates.GStringTemplateScript23._getTemplate_closure1360dee63325559af
groovy.tmp.templates.GStringTemplateScript23._getTemplate_closure1._closure103f6c4f566e765ba9
groovy.tmp.templates.GStringTemplateScript23._getTemplate_closure1._closure11c80db3fbe182d981
groovy.tmp.templates.GStringTemplateScript23._getTemplate_closure1._closure12c2e99af0baa755f1
groovy.tmp.templates.GStringTemplateScript23._getTemplate_closure1._closure1372f99ea2409dfebb
groovy.tmp.templates.GStringTemplateScript23._getTemplate_closure1._closure13._closure2688cefff6ff385a5e
groovy.tmp.templates.GStringTemplateScript23._getTemplate_closure1._closure13._closure26._closure32b6a0df7f4ad869a4
groovy.tmp.templates.GStringTemplateScript23._getTemplate_closure1._closure13._closure2757ef59844ce761bf
groovy.tmp.templates.GStringTemplateScript23._getTemplate_closure1._closure13._closure29631cc1dfc95bc411
groovy.tmp.templates.GStringTemplateScript23._getTemplate_closure1._closure13._closure29._closure33d99d0e9bf6d77394
groovy.tmp.templates.GStringTemplateScript23._getTemplate_closure1._closure13._closure30035ed8a50be6d56d
groovy.tmp.templates.GStringTemplateScript23._getTemplate_closure1._closure13._closure31839181c899c7e81a
groovy.tmp.templates.GStringTemplateScript23._getTemplate_closure1._closure2e754a2aa01f7488f
groovy.tmp.templates.GStringTemplateScript23._getTemplate_closure1._closure371409ba3e9122e42
groovy.tmp.templates.GStringTemplateScript23._getTemplate_closure1._closure4fe4cb917d7c8c95c
groovy.tmp.templates.GStringTemplateScript23._getTemplate_closure1._closure5547ac1c2e15d3fa5
groovy.tmp.templates.GStringTemplateScript23._getTemplate_closure1._closure5._closure1405e669eba4788a98
groovy.tmp.templates.GStringTemplateScript23._getTemplate_closure1._closure60773450c2f00a7be
groovy.tmp.templates.GStringTemplateScript23._getTemplate_closure1._closure7114a77e349d46c09
groovy.tmp.templates.GStringTemplateScript23._getTemplate_closure1._closure7._closure15bd4b2bc39a8c4b01
groovy.tmp.templates.GStringTemplateScript23._getTemplate_closure1._closure8074e515436ef15b3
groovy.tmp.templates.GStringTemplateScript23._getTemplate_closure1._closure9f5cc6091d342e243
groovy.tmp.templates.GStringTemplateScript23._getTemplate_closure1._closure9._closure161af1150852aed983
groovy.tmp.templates.GStringTemplateScript23._getTemplate_closure1._closure9._closure17d545c1c969f4fa74
groovy.tmp.templates.GStringTemplateScript245c3193fd2b8a1659
groovy.tmp.templates.GStringTemplateScript24._getTemplate_closure1d30d1a8d4a070f05
groovy.tmp.templates.GStringTemplateScript24._getTemplate_closure1._closure109501138c39d9fb2a
groovy.tmp.templates.GStringTemplateScript24._getTemplate_closure1._closure10._closure22c2129e96e9f2bab5
groovy.tmp.templates.GStringTemplateScript24._getTemplate_closure1._closure10._closure231cd53b9c08270bf7
groovy.tmp.templates.GStringTemplateScript24._getTemplate_closure1._closure111277dea73bd57621
groovy.tmp.templates.GStringTemplateScript24._getTemplate_closure1._closure128d23db5032ba0412
groovy.tmp.templates.GStringTemplateScript24._getTemplate_closure1._closure13962edff2f1d4de0d
groovy.tmp.templates.GStringTemplateScript24._getTemplate_closure1._closure13._closure266e0648efc2e08fc6
groovy.tmp.templates.GStringTemplateScript24._getTemplate_closure1._closure13._closure26._closure321e2a423a3fa1c277
groovy.tmp.templates.GStringTemplateScript24._getTemplate_closure1._closure13._closure2765990d7d0e8b181c
groovy.tmp.templates.GStringTemplateScript24._getTemplate_closure1._closure13._closure29be205ae3ba4997eb
groovy.tmp.templates.GStringTemplateScript24._getTemplate_closure1._closure13._closure29._closure33e6d53578cbb7f3b3
groovy.tmp.templates.GStringTemplateScript24._getTemplate_closure1._closure13._closure3039d24f318d74fb22
groovy.tmp.templates.GStringTemplateScript24._getTemplate_closure1._closure13._closure318b2c84b1bbb01625
groovy.tmp.templates.GStringTemplateScript24._getTemplate_closure1._closure233f22cc8f5fe81f6
groovy.tmp.templates.GStringTemplateScript24._getTemplate_closure1._closure3937cc58ae17d4c5e
groovy.tmp.templates.GStringTemplateScript24._getTemplate_closure1._closure4b96f597f5e7658b8
groovy.tmp.templates.GStringTemplateScript24._getTemplate_closure1._closure5d882156fd1c714e6
groovy.tmp.templates.GStringTemplateScript24._getTemplate_closure1._closure5._closure1493db1943979cd41a
groovy.tmp.templates.GStringTemplateScript24._getTemplate_closure1._closure6bc409feb9e42e0d1
groovy.tmp.templates.GStringTemplateScript24._getTemplate_closure1._closure74f561d1445e219e1
groovy.tmp.templates.GStringTemplateScript24._getTemplate_closure1._closure7._closure15033f146471a13f4c
groovy.tmp.templates.GStringTemplateScript24._getTemplate_closure1._closure83a5856253d29cd69
groovy.tmp.templates.GStringTemplateScript24._getTemplate_closure1._closure992c0afa0e327216b
groovy.tmp.templates.GStringTemplateScript24._getTemplate_closure1._closure9._closure16769c6dc73f63cd46
groovy.tmp.templates.GStringTemplateScript24._getTemplate_closure1._closure9._closure174d72736db1486be8
groovy.tmp.templates.GStringTemplateScript2513b44c0fccab2c39
groovy.tmp.templates.GStringTemplateScript25._getTemplate_closure102d6507434c113e6
groovy.tmp.templates.GStringTemplateScript25._getTemplate_closure1._closure100dce0cc948e0c071
groovy.tmp.templates.GStringTemplateScript25._getTemplate_closure1._closure116abf11b87ea44f1a
groovy.tmp.templates.GStringTemplateScript25._getTemplate_closure1._closure12940dc06b6abe745f
groovy.tmp.templates.GStringTemplateScript25._getTemplate_closure1._closure13e7b27234177a655c
groovy.tmp.templates.GStringTemplateScript25._getTemplate_closure1._closure13._closure264f4519ea107e7e2e
groovy.tmp.templates.GStringTemplateScript25._getTemplate_closure1._closure13._closure26._closure32261f1d2098b2faea
groovy.tmp.templates.GStringTemplateScript25._getTemplate_closure1._closure13._closure279e3ea783a3f2bdfc
groovy.tmp.templates.GStringTemplateScript25._getTemplate_closure1._closure13._closure29c7f2b25cc74f8ace
groovy.tmp.templates.GStringTemplateScript25._getTemplate_closure1._closure13._closure29._closure33efcd3deeafcca8d2
groovy.tmp.templates.GStringTemplateScript25._getTemplate_closure1._closure13._closure30613d13b6d7442fe0
groovy.tmp.templates.GStringTemplateScript25._getTemplate_closure1._closure13._closure31887e33a2b6cc842c
groovy.tmp.templates.GStringTemplateScript25._getTemplate_closure1._closure2491375b3644b743e
groovy.tmp.templates.GStringTemplateScript25._getTemplate_closure1._closure3e3ae015f3969ec81
groovy.tmp.templates.GStringTemplateScript25._getTemplate_closure1._closure4030fcfb1b0ff0542
groovy.tmp.templates.GStringTemplateScript25._getTemplate_closure1._closure5eca7895601f4ac4b
groovy.tmp.templates.GStringTemplateScript25._getTemplate_closure1._closure5._closure14a2bf64cd470ba62c
groovy.tmp.templates.GStringTemplateScript25._getTemplate_closure1._closure6d226daa715594c73
groovy.tmp.templates.GStringTemplateScript25._getTemplate_closure1._closure7a0e4d3339c5ed374
groovy.tmp.templates.GStringTemplateScript25._getTemplate_closure1._closure7._closure153c9870e8e4ae2ee3
groovy.tmp.templates.GStringTemplateScript25._getTemplate_closure1._closure893848c58e5b3e541
groovy.tmp.templates.GStringTemplateScript25._getTemplate_closure1._closure9751936abe85dbabe
groovy.tmp.templates.GStringTemplateScript25._getTemplate_closure1._closure9._closure16328813339dfa116a
groovy.tmp.templates.GStringTemplateScript25._getTemplate_closure1._closure9._closure17de159557421a80da
groovy.tmp.templates.GStringTemplateScript26c33a2c18e5c86299
groovy.tmp.templates.GStringTemplateScript26._getTemplate_closure1c0bb8f7fb78b36c2
groovy.tmp.templates.GStringTemplateScript26._getTemplate_closure1._closure10149f2d06dbab8d9d
groovy.tmp.templates.GStringTemplateScript26._getTemplate_closure1._closure11e3e64099b1370457
groovy.tmp.templates.GStringTemplateScript26._getTemplate_closure1._closure12bf7fed2682b2e488
groovy.tmp.templates.GStringTemplateScript26._getTemplate_closure1._closure137517847f3c89a8af
groovy.tmp.templates.GStringTemplateScript26._getTemplate_closure1._closure13._closure262c80eae467dd6c16
groovy.tmp.templates.GStringTemplateScript26._getTemplate_closure1._closure13._closure26._closure326e40fc0f7187b34d
groovy.tmp.templates.GStringTemplateScript26._getTemplate_closure1._closure13._closure2722d65880547853dd
groovy.tmp.templates.GStringTemplateScript26._getTemplate_closure1._closure13._closure294d858b9d4045ada1
groovy.tmp.templates.GStringTemplateScript26._getTemplate_closure1._closure13._closure29._closure33f4e5245403414571
groovy.tmp.templates.GStringTemplateScript26._getTemplate_closure1._closure13._closure30880cf63f391552a6
groovy.tmp.templates.GStringTemplateScript26._getTemplate_closure1._closure13._closure318d89ea97a1493237
groovy.tmp.templates.GStringTemplateScript26._getTemplate_closure1._closure2c6309e3fd6956a66
groovy.tmp.templates.GStringTemplateScript26._getTemplate_closure1._closure372d94c2151540de0
groovy.tmp.templates.GStringTemplateScript26._getTemplate_closure1._closure47dae74e28364e34d
groovy.tmp.templates.GStringTemplateScript26._getTemplate_closure1._closure5b0c92d1c71a065bc
groovy.tmp.templates.GStringTemplateScript26._getTemplate_closure1._closure5._closure14f113e25e36b23076
groovy.tmp.templates.GStringTemplateScript26._getTemplate_closure1._closure6608c15728875b995
groovy.tmp.templates.GStringTemplateScript26._getTemplate_closure1._closure72033815bf69b8cca
groovy.tmp.templates.GStringTemplateScript26._getTemplate_closure1._closure7._closure157c71dd7d5bbf1c12
groovy.tmp.templates.GStringTemplateScript26._getTemplate_closure1._closure8d9e1e2de8c1d9d38
groovy.tmp.templates.GStringTemplateScript26._getTemplate_closure1._closure9ed739db6f5d216c0
groovy.tmp.templates.GStringTemplateScript26._getTemplate_closure1._closure9._closure16feb4902e7a50751e
groovy.tmp.templates.GStringTemplateScript26._getTemplate_closure1._closure9._closure17dbbdbf1857edbd8d
groovy.tmp.templates.GStringTemplateScript278cbff3ea02e958f9
groovy.tmp.templates.GStringTemplateScript27._getTemplate_closure11160c586c94d2a21
groovy.tmp.templates.GStringTemplateScript27._getTemplate_closure1._closure108c503243aa92b6c6
groovy.tmp.templates.GStringTemplateScript27._getTemplate_closure1._closure119b2e8f86f4463d6c
groovy.tmp.templates.GStringTemplateScript27._getTemplate_closure1._closure12a651f61ddab694c5
groovy.tmp.templates.GStringTemplateScript27._getTemplate_closure1._closure13048b29b9da2713fe
groovy.tmp.templates.GStringTemplateScript27._getTemplate_closure1._closure13._closure260dc3bbe1b5439dfe
groovy.tmp.templates.GStringTemplateScript27._getTemplate_closure1._closure13._closure27d971f27ef901f63d
groovy.tmp.templates.GStringTemplateScript27._getTemplate_closure1._closure13._closure29345763223d43b084
groovy.tmp.templates.GStringTemplateScript27._getTemplate_closure1._closure13._closure30d0e3aab863258664
groovy.tmp.templates.GStringTemplateScript27._getTemplate_closure1._closure13._closure318edb5d84ac35a03e
groovy.tmp.templates.GStringTemplateScript27._getTemplate_closure1._closure2bcd1c74447209fae
groovy.tmp.templates.GStringTemplateScript27._getTemplate_closure1._closure3020b88f48940ad3f
groovy.tmp.templates.GStringTemplateScript27._getTemplate_closure1._closure4c7cee22c6dedbeb7
groovy.tmp.templates.GStringTemplateScript27._getTemplate_closure1._closure584ecb125a193dd11
groovy.tmp.templates.GStringTemplateScript27._getTemplate_closure1._closure5._closure14c0779fd0e6254240
groovy.tmp.templates.GStringTemplateScript27._getTemplate_closure1._closure60eea503e036e1537
groovy.tmp.templates.GStringTemplateScript27._getTemplate_closure1._closure7cf814f7c2f27465f
groovy.tmp.templates.GStringTemplateScript27._getTemplate_closure1._closure7._closure1543d6b9f1ceb00dbd
groovy.tmp.templates.GStringTemplateScript27._getTemplate_closure1._closure8703d38a35487b510
groovy.tmp.templates.GStringTemplateScript27._getTemplate_closure1._closure90aaa04bdfea88d15
groovy.tmp.templates.GStringTemplateScript27._getTemplate_closure1._closure9._closure16baa0eedad8c9a932
groovy.tmp.templates.GStringTemplateScript27._getTemplate_closure1._closure9._closure1748da5922a4bf56bf
groovy.tmp.templates.GStringTemplateScript287e0813a18e072cdb
groovy.tmp.templates.GStringTemplateScript28._getTemplate_closure1baba66a3472f9b97
groovy.tmp.templates.GStringTemplateScript28._getTemplate_closure1._closure10f04594b274f4cc9a
groovy.tmp.templates.GStringTemplateScript28._getTemplate_closure1._closure11e7129a2005985b16
groovy.tmp.templates.GStringTemplateScript28._getTemplate_closure1._closure1220eb6e679288474e
groovy.tmp.templates.GStringTemplateScript28._getTemplate_closure1._closure130cb906de5e1be9c2
groovy.tmp.templates.GStringTemplateScript28._getTemplate_closure1._closure13._closure26511184d61c6cc727
groovy.tmp.templates.GStringTemplateScript28._getTemplate_closure1._closure13._closure26._closure328f55c6859b74e4ea
groovy.tmp.templates.GStringTemplateScript28._getTemplate_closure1._closure13._closure27463af172d0a0a09b
groovy.tmp.templates.GStringTemplateScript28._getTemplate_closure1._closure13._closure2947fcbde5a6610a54
groovy.tmp.templates.GStringTemplateScript28._getTemplate_closure1._closure13._closure29._closure338a7553927980453f
groovy.tmp.templates.GStringTemplateScript28._getTemplate_closure1._closure13._closure30fd15d91634310e38
groovy.tmp.templates.GStringTemplateScript28._getTemplate_closure1._closure13._closure319cf3e065e5a6ce49
groovy.tmp.templates.GStringTemplateScript28._getTemplate_closure1._closure2df7d82fa3e86f895
groovy.tmp.templates.GStringTemplateScript28._getTemplate_closure1._closure306a1f073418ac9d9
groovy.tmp.templates.GStringTemplateScript28._getTemplate_closure1._closure4f3e9b4339019c085
groovy.tmp.templates.GStringTemplateScript28._getTemplate_closure1._closure5193884461094333b
groovy.tmp.templates.GStringTemplateScript28._getTemplate_closure1._closure5._closure146d69030e517a8d73
groovy.tmp.templates.GStringTemplateScript28._getTemplate_closure1._closure6a6eba0bdeaf1374a
groovy.tmp.templates.GStringTemplateScript28._getTemplate_closure1._closure79c0b54b5eef7671a
groovy.tmp.templates.GStringTemplateScript28._getTemplate_closure1._closure7._closure15b099a2328de5f489
groovy.tmp.templates.GStringTemplateScript28._getTemplate_closure1._closure8a3cdec3c9b902c8c
groovy.tmp.templates.GStringTemplateScript28._getTemplate_closure1._closure9236a03d495199090
groovy.tmp.templates.GStringTemplateScript28._getTemplate_closure1._closure9._closure16266e61b0a1ca5c94
groovy.tmp.templates.GStringTemplateScript28._getTemplate_closure1._closure9._closure175bd2da51e6949eb4
groovy.tmp.templates.GStringTemplateScript29318dcc53692616bb
groovy.tmp.templates.GStringTemplateScript29._getTemplate_closure16b612c5a39e98774
groovy.tmp.templates.GStringTemplateScript29._getTemplate_closure1._closure10688a8bf705cdf7c1
groovy.tmp.templates.GStringTemplateScript29._getTemplate_closure1._closure119fda553f40e9622d
groovy.tmp.templates.GStringTemplateScript29._getTemplate_closure1._closure1239c5755cca8c3703
groovy.tmp.templates.GStringTemplateScript29._getTemplate_closure1._closure137d25ab18b8b55293
groovy.tmp.templates.GStringTemplateScript29._getTemplate_closure1._closure13._closure267052d5d3cef236cf
groovy.tmp.templates.GStringTemplateScript29._getTemplate_closure1._closure13._closure26._closure32b760999f3c67dc77
groovy.tmp.templates.GStringTemplateScript29._getTemplate_closure1._closure13._closure27bd9d5b8c7dd9057b
groovy.tmp.templates.GStringTemplateScript29._getTemplate_closure1._closure13._closure293e2e555adb671771
groovy.tmp.templates.GStringTemplateScript29._getTemplate_closure1._closure13._closure29._closure33836d5b041dfb1e5e
groovy.tmp.templates.GStringTemplateScript29._getTemplate_closure1._closure13._closure30a5fa85916e01dafa
groovy.tmp.templates.GStringTemplateScript29._getTemplate_closure1._closure13._closure319fa15776e8da5c40
groovy.tmp.templates.GStringTemplateScript29._getTemplate_closure1._closure2a59cdb81af330d5d
groovy.tmp.templates.GStringTemplateScript29._getTemplate_closure1._closure3767334a6999e6906
groovy.tmp.templates.GStringTemplateScript29._getTemplate_closure1._closure4498922fd7e909d7f
groovy.tmp.templates.GStringTemplateScript29._getTemplate_closure1._closure52d1d187fc0a78b96
groovy.tmp.templates.GStringTemplateScript29._getTemplate_closure1._closure5._closure145c0d7e8081edff45
groovy.tmp.templates.GStringTemplateScript29._getTemplate_closure1._closure6c88de5f161ea9be8
groovy.tmp.templates.GStringTemplateScript29._getTemplate_closure1._closure773b99a92374bad8f
groovy.tmp.templates.GStringTemplateScript29._getTemplate_closure1._closure7._closure158f3ec6be18eae526
groovy.tmp.templates.GStringTemplateScript29._getTemplate_closure1._closure80a113641430a04a4
groovy.tmp.templates.GStringTemplateScript29._getTemplate_closure1._closure9c4b39adf9e630b45
groovy.tmp.templates.GStringTemplateScript29._getTemplate_closure1._closure9._closure16627a1f44035380b8
groovy.tmp.templates.GStringTemplateScript29._getTemplate_closure1._closure9._closure17c8b53c6b15c67586
groovy.tmp.templates.GStringTemplateScript34f66ea55a16b5feb
groovy.tmp.templates.GStringTemplateScript3._getTemplate_closure1147cd94509b9264b
groovy.tmp.templates.GStringTemplateScript3._getTemplate_closure1._closure1081bf660f3027058d
groovy.tmp.templates.GStringTemplateScript3._getTemplate_closure1._closure1164c7ac40fa6092e7
groovy.tmp.templates.GStringTemplateScript3._getTemplate_closure1._closure12f8ee3193d3ecbe13
groovy.tmp.templates.GStringTemplateScript3._getTemplate_closure1._closure13fb4508085c5649f3
groovy.tmp.templates.GStringTemplateScript3._getTemplate_closure1._closure13._closure26024a33a53d424f36
groovy.tmp.templates.GStringTemplateScript3._getTemplate_closure1._closure13._closure26._closure3284d4027624ec7ced
groovy.tmp.templates.GStringTemplateScript3._getTemplate_closure1._closure13._closure2759185a232ff7c6bd
groovy.tmp.templates.GStringTemplateScript3._getTemplate_closure1._closure13._closure2924b0fb014d76f53b
groovy.tmp.templates.GStringTemplateScript3._getTemplate_closure1._closure13._closure29._closure3327d1fd9fef026578
groovy.tmp.templates.GStringTemplateScript3._getTemplate_closure1._closure13._closure3080e66adbcfd8f1b2
groovy.tmp.templates.GStringTemplateScript3._getTemplate_closure1._closure13._closure31dccb4016ba25632a
groovy.tmp.templates.GStringTemplateScript3._getTemplate_closure1._closure255a95965a24374c6
groovy.tmp.templates.GStringTemplateScript3._getTemplate_closure1._closure30a8ba13836b9d210
groovy.tmp.templates.GStringTemplateScript3._getTemplate_closure1._closure40a74c22ed6ab62c5
groovy.tmp.templates.GStringTemplateScript3._getTemplate_closure1._closure5a468ad80cfc14a34
groovy.tmp.templates.GStringTemplateScript3._getTemplate_closure1._closure5._closure143b7a00ee4b39dbf8
groovy.tmp.templates.GStringTemplateScript3._getTemplate_closure1._closure6261b1c27c951b02f
groovy.tmp.templates.GStringTemplateScript3._getTemplate_closure1._closure7aad06f84cd310054
groovy.tmp.templates.GStringTemplateScript3._getTemplate_closure1._closure7._closure1526fda8614e22129a
groovy.tmp.templates.GStringTemplateScript3._getTemplate_closure1._closure83edc953e975ddb36
groovy.tmp.templates.GStringTemplateScript3._getTemplate_closure1._closure951402350d18f7432
groovy.tmp.templates.GStringTemplateScript3._getTemplate_closure1._closure9._closure16440f3d3d747a6ff0
groovy.tmp.templates.GStringTemplateScript3._getTemplate_closure1._closure9._closure17e4d9c99ee92bdf18
groovy.tmp.templates.GStringTemplateScript30886969e945e9dee2
groovy.tmp.templates.GStringTemplateScript30._getTemplate_closure17461ea224861ba97
groovy.tmp.templates.GStringTemplateScript30._getTemplate_closure1._closure105ef5a186b84c2f7e
groovy.tmp.templates.GStringTemplateScript30._getTemplate_closure1._closure10._closure2273f39a56456c445a
groovy.tmp.templates.GStringTemplateScript30._getTemplate_closure1._closure10._closure23368ff4a8feb87afc
groovy.tmp.templates.GStringTemplateScript30._getTemplate_closure1._closure11637c2a153154e3f5
groovy.tmp.templates.GStringTemplateScript30._getTemplate_closure1._closure128f7299a669f3c156
groovy.tmp.templates.GStringTemplateScript30._getTemplate_closure1._closure13969df444ad889df3
groovy.tmp.templates.GStringTemplateScript30._getTemplate_closure1._closure13._closure2664aa4fa98d49d697
groovy.tmp.templates.GStringTemplateScript30._getTemplate_closure1._closure13._closure26._closure3237370b0fb94a333b
groovy.tmp.templates.GStringTemplateScript30._getTemplate_closure1._closure13._closure2769fc012d45c0f63b
groovy.tmp.templates.GStringTemplateScript30._getTemplate_closure1._closure13._closure29d8622af6f12ce563
groovy.tmp.templates.GStringTemplateScript30._getTemplate_closure1._closure13._closure29._closure33990c0f29cc3ee56c
groovy.tmp.templates.GStringTemplateScript30._getTemplate_closure1._closure13._closure305d57d27062ed98ff
groovy.tmp.templates.GStringTemplateScript30._getTemplate_closure1._closure13._closure318a550a4a9d4f2293
groovy.tmp.templates.GStringTemplateScript30._getTemplate_closure1._closure2d18da87fc8b8e322
groovy.tmp.templates.GStringTemplateScript30._getTemplate_closure1._closure346d7041954f7db83
groovy.tmp.templates.GStringTemplateScript30._getTemplate_closure1._closure417b762d22abda60e
groovy.tmp.templates.GStringTemplateScript30._getTemplate_closure1._closure5ecd04014a8d9c5ea
groovy.tmp.templates.GStringTemplateScript30._getTemplate_closure1._closure5._closure147edb8b055b118bb0
groovy.tmp.templates.GStringTemplateScript30._getTemplate_closure1._closure658d7ec9cfea749f4
groovy.tmp.templates.GStringTemplateScript30._getTemplate_closure1._closure75502974504c88f7d
groovy.tmp.templates.GStringTemplateScript30._getTemplate_closure1._closure7._closure151a0d2132a90876e1
groovy.tmp.templates.GStringTemplateScript30._getTemplate_closure1._closure87602e3082299f7e2
groovy.tmp.templates.GStringTemplateScript30._getTemplate_closure1._closure9c5311215c5c634a6
groovy.tmp.templates.GStringTemplateScript30._getTemplate_closure1._closure9._closure16ebc9826b41a6242b
groovy.tmp.templates.GStringTemplateScript30._getTemplate_closure1._closure9._closure17fe1e8c6046f095c8
groovy.tmp.templates.GStringTemplateScript31c7ecb61ba2c8e482
groovy.tmp.templates.GStringTemplateScript31._getTemplate_closure1a5baa0db36a7a674
groovy.tmp.templates.GStringTemplateScript31._getTemplate_closure1._closure10c63abec3c9751425
groovy.tmp.templates.GStringTemplateScript31._getTemplate_closure1._closure10._closure224fa8cdb3973f6f2b
groovy.tmp.templates.GStringTemplateScript31._getTemplate_closure1._closure10._closure238932a832e5996efb
groovy.tmp.templates.GStringTemplateScript31._getTemplate_closure1._closure111bb4e50a7425dace
groovy.tmp.templates.GStringTemplateScript31._getTemplate_closure1._closure12965c829d31f7b11b
groovy.tmp.templates.GStringTemplateScript31._getTemplate_closure1._closure13e70159824b2626a2
groovy.tmp.templates.GStringTemplateScript31._getTemplate_closure1._closure13._closure2645e91eac5fd7277f
groovy.tmp.templates.GStringTemplateScript31._getTemplate_closure1._closure13._closure27925babd3e8b953db
groovy.tmp.templates.GStringTemplateScript31._getTemplate_closure1._closure13._closure29a1b0c2498c2af846
groovy.tmp.templates.GStringTemplateScript31._getTemplate_closure1._closure13._closure3005b88ef738dd4c3d
groovy.tmp.templates.GStringTemplateScript31._getTemplate_closure1._closure13._closure318907bd599033b09a
groovy.tmp.templates.GStringTemplateScript31._getTemplate_closure1._closure2ab6cf104590d16ea
groovy.tmp.templates.GStringTemplateScript31._getTemplate_closure1._closure33605c0cc8ce37b5c
groovy.tmp.templates.GStringTemplateScript31._getTemplate_closure1._closure4add7f41cc434fbf4
groovy.tmp.templates.GStringTemplateScript31._getTemplate_closure1._closure5d8f5dc2d78ea7d47
groovy.tmp.templates.GStringTemplateScript31._getTemplate_closure1._closure5._closure144fbff68b8b86f986
groovy.tmp.templates.GStringTemplateScript31._getTemplate_closure1._closure636b1a9d075bce556
groovy.tmp.templates.GStringTemplateScript31._getTemplate_closure1._closure7bab05962dd7445e8
groovy.tmp.templates.GStringTemplateScript31._getTemplate_closure1._closure7._closure1525aa45be3c07674e
groovy.tmp.templates.GStringTemplateScript31._getTemplate_closure1._closure8dfde3975fa03dfca
groovy.tmp.templates.GStringTemplateScript31._getTemplate_closure1._closure922e88b1ecebcaf73
groovy.tmp.templates.GStringTemplateScript31._getTemplate_closure1._closure9._closure16afddfc9fe33ff807
groovy.tmp.templates.GStringTemplateScript31._getTemplate_closure1._closure9._closure176d796a5ab5a27efa
groovy.tmp.templates.GStringTemplateScript321762d60c8babaa22
groovy.tmp.templates.GStringTemplateScript32._getTemplate_closure167d77fd0b5ed8350
groovy.tmp.templates.GStringTemplateScript32._getTemplate_closure1._closure10df6b9f0c5a3e59c9
groovy.tmp.templates.GStringTemplateScript32._getTemplate_closure1._closure10._closure220b45359de1ca12b8
groovy.tmp.templates.GStringTemplateScript32._getTemplate_closure1._closure10._closure23f9f54d9cc8fa52f3
groovy.tmp.templates.GStringTemplateScript32._getTemplate_closure1._closure1192edb42bbbb69183
groovy.tmp.templates.GStringTemplateScript32._getTemplate_closure1._closure12bd2eafd0d9fb21cc
groovy.tmp.templates.GStringTemplateScript32._getTemplate_closure1._closure1375a4afc960d5eb51
groovy.tmp.templates.GStringTemplateScript32._getTemplate_closure1._closure13._closure26262ceda228743547
groovy.tmp.templates.GStringTemplateScript32._getTemplate_closure1._closure13._closure26._closure32475db53af76c4201
groovy.tmp.templates.GStringTemplateScript32._getTemplate_closure1._closure13._closure272eb354d01f33bdfa
groovy.tmp.templates.GStringTemplateScript32._getTemplate_closure1._closure13._closure292bc7fb880b20df29
groovy.tmp.templates.GStringTemplateScript32._getTemplate_closure1._closure13._closure29._closure338b3c1e0504c853ae
groovy.tmp.templates.GStringTemplateScript32._getTemplate_closure1._closure13._closure30ec896b7ed68c317b
groovy.tmp.templates.GStringTemplateScript32._getTemplate_closure1._closure13._closure318cf0646c87b60681
groovy.tmp.templates.GStringTemplateScript32._getTemplate_closure1._closure2244f1a88ebd308b2
groovy.tmp.templates.GStringTemplateScript32._getTemplate_closure1._closure3a7728db2e4de9a3d
groovy.tmp.templates.GStringTemplateScript32._getTemplate_closure1._closure4d3764f4ff7af1dfb
groovy.tmp.templates.GStringTemplateScript32._getTemplate_closure1._closure5849b786708beb4b0
groovy.tmp.templates.GStringTemplateScript32._getTemplate_closure1._closure5._closure141c137018fa3f6fdc
groovy.tmp.templates.GStringTemplateScript32._getTemplate_closure1._closure6841b6605e89010b0
groovy.tmp.templates.GStringTemplateScript32._getTemplate_closure1._closure73a670b0ab7b11a56
groovy.tmp.templates.GStringTemplateScript32._getTemplate_closure1._closure7._closure156543e82b831655bf
groovy.tmp.templates.GStringTemplateScript32._getTemplate_closure1._closure895bb57f393ada7b3
groovy.tmp.templates.GStringTemplateScript32._getTemplate_closure1._closure9ba822003d333030d
groovy.tmp.templates.GStringTemplateScript32._getTemplate_closure1._closure9._closure1663e17f8204959c73
groovy.tmp.templates.GStringTemplateScript32._getTemplate_closure1._closure9._closure1768d14015a05543ad
groovy.tmp.templates.GStringTemplateScript3358e709fe6c8a9042
groovy.tmp.templates.GStringTemplateScript33._getTemplate_closure1b60c3529cb2b9fb3
groovy.tmp.templates.GStringTemplateScript33._getTemplate_closure1._closure1047a480492b076292
groovy.tmp.templates.GStringTemplateScript33._getTemplate_closure1._closure11ea257b34fec7a8b8
groovy.tmp.templates.GStringTemplateScript33._getTemplate_closure1._closure12a400b4eb81ff5181
groovy.tmp.templates.GStringTemplateScript33._getTemplate_closure1._closure130438020f867b5000
groovy.tmp.templates.GStringTemplateScript33._getTemplate_closure1._closure13._closure26076fbca7faeac4af
groovy.tmp.templates.GStringTemplateScript33._getTemplate_closure1._closure13._closure27d514fe2eb24a181a
groovy.tmp.templates.GStringTemplateScript33._getTemplate_closure1._closure13._closure29521513377626c20c
groovy.tmp.templates.GStringTemplateScript33._getTemplate_closure1._closure13._closure30b46637f98cbce5b9
groovy.tmp.templates.GStringTemplateScript33._getTemplate_closure1._closure13._closure318fa2d37f8aca9488
groovy.tmp.templates.GStringTemplateScript33._getTemplate_closure1._closure25eae43f37a66fd7a
groovy.tmp.templates.GStringTemplateScript33._getTemplate_closure1._closure3d7a049673cca3ae2
groovy.tmp.templates.GStringTemplateScript33._getTemplate_closure1._closure46916d98119264001
groovy.tmp.templates.GStringTemplateScript33._getTemplate_closure1._closure5b0bee45ed88d0c1d
groovy.tmp.templates.GStringTemplateScript33._getTemplate_closure1._closure5._closure142d770d962aa81dea
groovy.tmp.templates.GStringTemplateScript33._getTemplate_closure1._closure6ea7d2349638bbc12
groovy.tmp.templates.GStringTemplateScript33._getTemplate_closure1._closure7d5d5c52d6e0dd0c3
groovy.tmp.templates.GStringTemplateScript33._getTemplate_closure1._closure7._closure155ae48ca716194410
groovy.tmp.templates.GStringTemplateScript33._getTemplate_closure1._closure83c678d8e4b378f9b
groovy.tmp.templates.GStringTemplateScript33._getTemplate_closure1._closure95d5bb908d84998d8
groovy.tmp.templates.GStringTemplateScript33._getTemplate_closure1._closure9._closure1627f50176a60c405f
groovy.tmp.templates.GStringTemplateScript33._getTemplate_closure1._closure9._closure17fbb6a62f5307a89f
groovy.tmp.templates.GStringTemplateScript34067e1622d96d3763
groovy.tmp.templates.GStringTemplateScript34._getTemplate_closure1530cc1c7b379c919
groovy.tmp.templates.GStringTemplateScript34._getTemplate_closure1._closure10edc9dc937ca8c211
groovy.tmp.templates.GStringTemplateScript34._getTemplate_closure1._closure10._closure22829ec5c10c20e99e
groovy.tmp.templates.GStringTemplateScript34._getTemplate_closure1._closure10._closure23187a86c0923c2ae3
groovy.tmp.templates.GStringTemplateScript34._getTemplate_closure1._closure11305f166824900718
groovy.tmp.templates.GStringTemplateScript34._getTemplate_closure1._closure12ebcaf54b09e20062
groovy.tmp.templates.GStringTemplateScript34._getTemplate_closure1._closure13e0ef435f373270b6
groovy.tmp.templates.GStringTemplateScript34._getTemplate_closure1._closure13._closure26e1a70bbec7321137
groovy.tmp.templates.GStringTemplateScript34._getTemplate_closure1._closure13._closure27e762aad7f02661b9
groovy.tmp.templates.GStringTemplateScript34._getTemplate_closure1._closure13._closure298f29880b053491f6
groovy.tmp.templates.GStringTemplateScript34._getTemplate_closure1._closure13._closure308eeaa06d0a2ecbf6
groovy.tmp.templates.GStringTemplateScript34._getTemplate_closure1._closure13._closure31871fd606a8bd6ab7
groovy.tmp.templates.GStringTemplateScript34._getTemplate_closure1._closure28a08cd918e6f3403
groovy.tmp.templates.GStringTemplateScript34._getTemplate_closure1._closure3359c174e34a558fe
groovy.tmp.templates.GStringTemplateScript34._getTemplate_closure1._closure42e3539e99098d1e5
groovy.tmp.templates.GStringTemplateScript34._getTemplate_closure1._closure53c4630f3e817275e
groovy.tmp.templates.GStringTemplateScript34._getTemplate_closure1._closure5._closure14bb4a7d3e194c4368
groovy.tmp.templates.GStringTemplateScript34._getTemplate_closure1._closure6514ef9aed2c9fb7d
groovy.tmp.templates.GStringTemplateScript34._getTemplate_closure1._closure78bc9afda623ba52b
groovy.tmp.templates.GStringTemplateScript34._getTemplate_closure1._closure7._closure15e490b300fd34305d
groovy.tmp.templates.GStringTemplateScript34._getTemplate_closure1._closure801718aff40f15741
groovy.tmp.templates.GStringTemplateScript34._getTemplate_closure1._closure93a577639e82c5bf0
groovy.tmp.templates.GStringTemplateScript34._getTemplate_closure1._closure9._closure164b9879b9cbc1549a
groovy.tmp.templates.GStringTemplateScript34._getTemplate_closure1._closure9._closure176381148b8bbb3903
groovy.tmp.templates.GStringTemplateScript3549fbc9d03e4c0d03
groovy.tmp.templates.GStringTemplateScript35._getTemplate_closure182d78b3ecdbfd5fa
groovy.tmp.templates.GStringTemplateScript35._getTemplate_closure1._closure107506c3d60d91f94a
groovy.tmp.templates.GStringTemplateScript35._getTemplate_closure1._closure114897d97761e13e23
groovy.tmp.templates.GStringTemplateScript35._getTemplate_closure1._closure12f2e4ee7051e6702f
groovy.tmp.templates.GStringTemplateScript35._getTemplate_closure1._closure139173ee99d19ccbe7
groovy.tmp.templates.GStringTemplateScript35._getTemplate_closure1._closure13._closure26c0e45abb15ace0df
groovy.tmp.templates.GStringTemplateScript35._getTemplate_closure1._closure13._closure26._closure32efd7287f8215e9d2
groovy.tmp.templates.GStringTemplateScript35._getTemplate_closure1._closure13._closure271cc500295d5fc459
groovy.tmp.templates.GStringTemplateScript35._getTemplate_closure1._closure13._closure29f6fb60b478328cd3
groovy.tmp.templates.GStringTemplateScript35._getTemplate_closure1._closure13._closure29._closure33b47425e639a8d389
groovy.tmp.templates.GStringTemplateScript35._getTemplate_closure1._closure13._closure30d605fcea501e1f34
groovy.tmp.templates.GStringTemplateScript35._getTemplate_closure1._closure13._closure31844d6115a5c1f8be
groovy.tmp.templates.GStringTemplateScript35._getTemplate_closure1._closure2f0e994ea1fdac1cb
groovy.tmp.templates.GStringTemplateScript35._getTemplate_closure1._closure3454ed39becb1f821
groovy.tmp.templates.GStringTemplateScript35._getTemplate_closure1._closure49455af277e118c1f
groovy.tmp.templates.GStringTemplateScript35._getTemplate_closure1._closure50863acca38249ff3
groovy.tmp.templates.GStringTemplateScript35._getTemplate_closure1._closure5._closure148a2e00b0c9db315e
groovy.tmp.templates.GStringTemplateScript35._getTemplate_closure1._closure63f28bce259d257df
groovy.tmp.templates.GStringTemplateScript35._getTemplate_closure1._closure7647b61fdbb876fbe
groovy.tmp.templates.GStringTemplateScript35._getTemplate_closure1._closure7._closure15db37d78c683b21f2
groovy.tmp.templates.GStringTemplateScript35._getTemplate_closure1._closure8a8ad5082986b7f69
groovy.tmp.templates.GStringTemplateScript35._getTemplate_closure1._closure9dd8eef32e356c025
groovy.tmp.templates.GStringTemplateScript35._getTemplate_closure1._closure9._closure160f8c074d695888b6
groovy.tmp.templates.GStringTemplateScript35._getTemplate_closure1._closure9._closure17f0e6f2b178e9d231
groovy.tmp.templates.GStringTemplateScript369975a9c7172f43a3
groovy.tmp.templates.GStringTemplateScript36._getTemplate_closure140ba54354ef5f0de
groovy.tmp.templates.GStringTemplateScript36._getTemplate_closure1._closure106c57e2199edab4a6
groovy.tmp.templates.GStringTemplateScript36._getTemplate_closure1._closure11c1ce8856ae72756e
groovy.tmp.templates.GStringTemplateScript36._getTemplate_closure1._closure12d996c33db9eae0f8
groovy.tmp.templates.GStringTemplateScript36._getTemplate_closure1._closure1303d618d2fa6f0614
groovy.tmp.templates.GStringTemplateScript36._getTemplate_closure1._closure13._closure26a321a9b5620ff2e7
groovy.tmp.templates.GStringTemplateScript36._getTemplate_closure1._closure13._closure26._closure32a788c9506b20a075
groovy.tmp.templates.GStringTemplateScript36._getTemplate_closure1._closure13._closure27a02dff2aaad52a78
groovy.tmp.templates.GStringTemplateScript36._getTemplate_closure1._closure13._closure297c8c5975ff38abbc
groovy.tmp.templates.GStringTemplateScript36._getTemplate_closure1._closure13._closure29._closure33af5c3c5c95253e2a
groovy.tmp.templates.GStringTemplateScript36._getTemplate_closure1._closure13._closure303f341963be4f6272
groovy.tmp.templates.GStringTemplateScript36._getTemplate_closure1._closure13._closure3181bab820b2444ea5
groovy.tmp.templates.GStringTemplateScript36._getTemplate_closure1._closure27fca7f66ad04df93
groovy.tmp.templates.GStringTemplateScript36._getTemplate_closure1._closure3d4399ee5848c1940
groovy.tmp.templates.GStringTemplateScript36._getTemplate_closure1._closure4eaf414744d8a6a10
groovy.tmp.templates.GStringTemplateScript36._getTemplate_closure1._closure5540d088048705604
groovy.tmp.templates.GStringTemplateScript36._getTemplate_closure1._closure5._closure14d9828623b862a704
groovy.tmp.templates.GStringTemplateScript36._getTemplate_closure1._closure68d827337c4fea239
groovy.tmp.templates.GStringTemplateScript36._getTemplate_closure1._closure7e4ac3395d1423000
groovy.tmp.templates.GStringTemplateScript36._getTemplate_closure1._closure7._closure159bde7a19d72a1303
groovy.tmp.templates.GStringTemplateScript36._getTemplate_closure1._closure8e2c83e04f1c50710
groovy.tmp.templates.GStringTemplateScript36._getTemplate_closure1._closure945e4442ffed96c5b
groovy.tmp.templates.GStringTemplateScript36._getTemplate_closure1._closure9._closure16c3b084508ef2ecc2
groovy.tmp.templates.GStringTemplateScript36._getTemplate_closure1._closure9._closure17f54ed8fe6d1eef66
groovy.tmp.templates.GStringTemplateScript37d6f07635f00e79c3
groovy.tmp.templates.GStringTemplateScript37._getTemplate_closure191611ecc3033ec3d
groovy.tmp.templates.GStringTemplateScript37._getTemplate_closure1._closure10f498fd5cefe38ffd
groovy.tmp.templates.GStringTemplateScript37._getTemplate_closure1._closure11b9064749eb034c55
groovy.tmp.templates.GStringTemplateScript37._getTemplate_closure1._closure12c0b8d806e1ee90b5
groovy.tmp.templates.GStringTemplateScript37._getTemplate_closure1._closure13724ab5141cc1bd45
groovy.tmp.templates.GStringTemplateScript37._getTemplate_closure1._closure13._closure268262f8b0b091030f
groovy.tmp.templates.GStringTemplateScript37._getTemplate_closure1._closure13._closure26._closure329fbd964acc3398e8
groovy.tmp.templates.GStringTemplateScript37._getTemplate_closure1._closure13._closure275b8a55d407ac8f98
groovy.tmp.templates.GStringTemplateScript37._getTemplate_closure1._closure13._closure29055eb1ca823eb699
groovy.tmp.templates.GStringTemplateScript37._getTemplate_closure1._closure13._closure29._closure33a64434caf15e654b
groovy.tmp.templates.GStringTemplateScript37._getTemplate_closure1._closure13._closure3067db45e4e47fb6b0
groovy.tmp.templates.GStringTemplateScript37._getTemplate_closure1._closure13._closure3182e80f33bf38dcac
groovy.tmp.templates.GStringTemplateScript37._getTemplate_closure1._closure2052b261d3cb12a5b
groovy.tmp.templates.GStringTemplateScript37._getTemplate_closure1._closure3a4eb5a305c98b99f
groovy.tmp.templates.GStringTemplateScript37._getTemplate_closure1._closure4509482baa30337ea
groovy.tmp.templates.GStringTemplateScript37._getTemplate_closure1._closure5602894b99843eea9
groovy.tmp.templates.GStringTemplateScript37._getTemplate_closure1._closure5._closure14e8e6fbad68f5d532
groovy.tmp.templates.GStringTemplateScript37._getTemplate_closure1._closure6e3e4367b4fe50e9b
groovy.tmp.templates.GStringTemplateScript37._getTemplate_closure1._closure70b1efdb208fefa95
groovy.tmp.templates.GStringTemplateScript37._getTemplate_closure1._closure7._closure15a4791e95422502ac
groovy.tmp.templates.GStringTemplateScript37._getTemplate_closure1._closure84b14e479295f2f38
groovy.tmp.templates.GStringTemplateScript37._getTemplate_closure1._closure9a23ddd24f5a3f78e
groovy.tmp.templates.GStringTemplateScript37._getTemplate_closure1._closure9._closure1687a4faa42c6b30ee
groovy.tmp.templates.GStringTemplateScript37._getTemplate_closure1._closure9._closure1766293ec49e4c0454
groovy.tmp.templates.GStringTemplateScript382447967e7ce00de1
groovy.tmp.templates.GStringTemplateScript38._getTemplate_closure13abbbde9be515d8b
groovy.tmp.templates.GStringTemplateScript38._getTemplate_closure1._closure10888d5bad3185f5a1
groovy.tmp.templates.GStringTemplateScript38._getTemplate_closure1._closure10._closure2221292578d7f51fd3
groovy.tmp.templates.GStringTemplateScript38._getTemplate_closure1._closure10._closure236b65107827b0dac2
groovy.tmp.templates.GStringTemplateScript38._getTemplate_closure1._closure11c53a52ef1add2a2f
groovy.tmp.templates.GStringTemplateScript38._getTemplate_closure1._closure124602407ca9d0433e
groovy.tmp.templates.GStringTemplateScript38._getTemplate_closure1._closure137a789a7398fd4779
groovy.tmp.templates.GStringTemplateScript38._getTemplate_closure1._closure13._closure26deb0c78719be59d6
groovy.tmp.templates.GStringTemplateScript38._getTemplate_closure1._closure13._closure26._closure32469df3da81d3f7d2
groovy.tmp.templates.GStringTemplateScript38._getTemplate_closure1._closure13._closure27c4c156d82e0dd93e
groovy.tmp.templates.GStringTemplateScript38._getTemplate_closure1._closure13._closure2976f56f0d191c0c49
groovy.tmp.templates.GStringTemplateScript38._getTemplate_closure1._closure13._closure29._closure33d1cc4b9aefe43e64
groovy.tmp.templates.GStringTemplateScript38._getTemplate_closure1._closure13._closure304a2d364ab36b3eec
[coverage report rows omitted: auto-generated groovy.tmp.templates.GStringTemplateScript classes (scripts 4 through 62) and their nested _getTemplate_closure* closures; per-class ids and coverage metrics were lost in HTML extraction]
groovy.tmp.templates.GStringTemplateScript62._getTemplate_closure1._closure1138665fd8d9e6245e
groovy.tmp.templates.GStringTemplateScript62._getTemplate_closure1._closure12f06339a70fc3347d
groovy.tmp.templates.GStringTemplateScript62._getTemplate_closure1._closure13686341d3bda9ff07
groovy.tmp.templates.GStringTemplateScript62._getTemplate_closure1._closure13._closure264708a3b73aecd071
groovy.tmp.templates.GStringTemplateScript62._getTemplate_closure1._closure13._closure2777a66dd11b2b22c8
groovy.tmp.templates.GStringTemplateScript62._getTemplate_closure1._closure13._closure29dee962c249a9c140
groovy.tmp.templates.GStringTemplateScript62._getTemplate_closure1._closure13._closure30575239504cbec2fc
groovy.tmp.templates.GStringTemplateScript62._getTemplate_closure1._closure13._closure31b00e7c07d88e885b
groovy.tmp.templates.GStringTemplateScript62._getTemplate_closure1._closure2aa5e7eb47e046a90
groovy.tmp.templates.GStringTemplateScript62._getTemplate_closure1._closure34a1114656666dc1e
groovy.tmp.templates.GStringTemplateScript62._getTemplate_closure1._closure4c945ad8202fbb1d1
groovy.tmp.templates.GStringTemplateScript62._getTemplate_closure1._closure5934fcb8bd62e49ea
groovy.tmp.templates.GStringTemplateScript62._getTemplate_closure1._closure5._closure1496c785934fada566
groovy.tmp.templates.GStringTemplateScript62._getTemplate_closure1._closure6bd2c9955963765ae
groovy.tmp.templates.GStringTemplateScript62._getTemplate_closure1._closure78c8672fc0f0e55b6
groovy.tmp.templates.GStringTemplateScript62._getTemplate_closure1._closure7._closure157c52d2dd3dd766e8
groovy.tmp.templates.GStringTemplateScript62._getTemplate_closure1._closure84235f8401917553b
groovy.tmp.templates.GStringTemplateScript62._getTemplate_closure1._closure9604a9ffef41593f9
groovy.tmp.templates.GStringTemplateScript62._getTemplate_closure1._closure9._closure16aaf53a0722bd62df
groovy.tmp.templates.GStringTemplateScript62._getTemplate_closure1._closure9._closure17fdefb86b716b5aea
groovy.tmp.templates.GStringTemplateScript63db969b5e55f13591
groovy.tmp.templates.GStringTemplateScript63._getTemplate_closure1e60a8348d7ae41dc
groovy.tmp.templates.GStringTemplateScript63._getTemplate_closure1._closure106c4f732b7bb2bf44
groovy.tmp.templates.GStringTemplateScript63._getTemplate_closure1._closure10._closure22c5a354704102264f
groovy.tmp.templates.GStringTemplateScript63._getTemplate_closure1._closure10._closure235059592821ace3b0
groovy.tmp.templates.GStringTemplateScript63._getTemplate_closure1._closure1140ae90c79c971d65
groovy.tmp.templates.GStringTemplateScript63._getTemplate_closure1._closure12e94d229c57c74430
groovy.tmp.templates.GStringTemplateScript63._getTemplate_closure1._closure1319ffec155b074456
groovy.tmp.templates.GStringTemplateScript63._getTemplate_closure1._closure13._closure26664bf2b2e8722199
groovy.tmp.templates.GStringTemplateScript63._getTemplate_closure1._closure13._closure26._closure32f1800a0320442546
groovy.tmp.templates.GStringTemplateScript63._getTemplate_closure1._closure13._closure278c01c72fb6528728
groovy.tmp.templates.GStringTemplateScript63._getTemplate_closure1._closure13._closure29a73b8a7d34afdc65
groovy.tmp.templates.GStringTemplateScript63._getTemplate_closure1._closure13._closure29._closure3307796eb9af469ef9
groovy.tmp.templates.GStringTemplateScript63._getTemplate_closure1._closure13._closure300fbd65d7168e163e
groovy.tmp.templates.GStringTemplateScript63._getTemplate_closure1._closure13._closure31b35ccb14d5f21a52
groovy.tmp.templates.GStringTemplateScript63._getTemplate_closure1._closure2d0bf27cfefb19f58
groovy.tmp.templates.GStringTemplateScript63._getTemplate_closure1._closure33ac3d0b0be727cc1
groovy.tmp.templates.GStringTemplateScript63._getTemplate_closure1._closure473253b4cec72ec2b
groovy.tmp.templates.GStringTemplateScript63._getTemplate_closure1._closure5a76a57b2061df147
groovy.tmp.templates.GStringTemplateScript63._getTemplate_closure1._closure5._closure14a7a3f81d9f3ad750
groovy.tmp.templates.GStringTemplateScript63._getTemplate_closure1._closure6d34adc191d2cc90c
groovy.tmp.templates.GStringTemplateScript63._getTemplate_closure1._closure76334bcdbd6b29f23
groovy.tmp.templates.GStringTemplateScript63._getTemplate_closure1._closure7._closure1543f5b651a8d87747
groovy.tmp.templates.GStringTemplateScript63._getTemplate_closure1._closure8ebe9223dc18d7d13
groovy.tmp.templates.GStringTemplateScript63._getTemplate_closure1._closure9879306f5ff6f082c
groovy.tmp.templates.GStringTemplateScript63._getTemplate_closure1._closure9._closure16eee144f38024bef3
groovy.tmp.templates.GStringTemplateScript63._getTemplate_closure1._closure9._closure176e885e518239b1d8
groovy.tmp.templates.GStringTemplateScript64850f8482e01692b0
groovy.tmp.templates.GStringTemplateScript64._getTemplate_closure1030a77a6affc1776
groovy.tmp.templates.GStringTemplateScript64._getTemplate_closure1._closure10c6222ff12c1d1fc7
groovy.tmp.templates.GStringTemplateScript64._getTemplate_closure1._closure10._closure227023f3c97ebbf618
groovy.tmp.templates.GStringTemplateScript64._getTemplate_closure1._closure10._closure230e6bceee604b8fa7
groovy.tmp.templates.GStringTemplateScript64._getTemplate_closure1._closure119ad4fd9b46c0b2c5
groovy.tmp.templates.GStringTemplateScript64._getTemplate_closure1._closure12a687633cdfda15d3
groovy.tmp.templates.GStringTemplateScript64._getTemplate_closure1._closure13fd28ad45ea4e64e0
groovy.tmp.templates.GStringTemplateScript64._getTemplate_closure1._closure13._closure26808345abd5aaf401
groovy.tmp.templates.GStringTemplateScript64._getTemplate_closure1._closure13._closure26._closure32590a9746553d8e95
groovy.tmp.templates.GStringTemplateScript64._getTemplate_closure1._closure13._closure27be7793d6f43efe8b
groovy.tmp.templates.GStringTemplateScript64._getTemplate_closure1._closure13._closure297a07114147bd8f9f
groovy.tmp.templates.GStringTemplateScript64._getTemplate_closure1._closure13._closure29._closure333831555a92261ede
groovy.tmp.templates.GStringTemplateScript64._getTemplate_closure1._closure13._closure303531f243901c3871
groovy.tmp.templates.GStringTemplateScript64._getTemplate_closure1._closure13._closure31bbe1ce6df785e46d
groovy.tmp.templates.GStringTemplateScript64._getTemplate_closure1._closure20419a9ad1bb85621
groovy.tmp.templates.GStringTemplateScript64._getTemplate_closure1._closure3d8ff8e99b61d1edd
groovy.tmp.templates.GStringTemplateScript64._getTemplate_closure1._closure43406db2465cc7dcf
groovy.tmp.templates.GStringTemplateScript64._getTemplate_closure1._closure52b92831f3687da04
groovy.tmp.templates.GStringTemplateScript64._getTemplate_closure1._closure5._closure14319e88b5acde89d2
groovy.tmp.templates.GStringTemplateScript64._getTemplate_closure1._closure6687906feac6e8e63
groovy.tmp.templates.GStringTemplateScript64._getTemplate_closure1._closure73d28d62cda84eacb
groovy.tmp.templates.GStringTemplateScript64._getTemplate_closure1._closure7._closure15fd8189f643f5030a
groovy.tmp.templates.GStringTemplateScript64._getTemplate_closure1._closure8d6ff254cca4ba5c9
groovy.tmp.templates.GStringTemplateScript64._getTemplate_closure1._closure9e09fc9c4cf0acb04
groovy.tmp.templates.GStringTemplateScript64._getTemplate_closure1._closure9._closure16828c3c3cede9aa36
groovy.tmp.templates.GStringTemplateScript64._getTemplate_closure1._closure9._closure17f6bfecf55a852044
groovy.tmp.templates.GStringTemplateScript65ca8a5b700737a8d0
groovy.tmp.templates.GStringTemplateScript65._getTemplate_closure1d2d13d5fd13a0b95
groovy.tmp.templates.GStringTemplateScript65._getTemplate_closure1._closure105eed30b45d24249c
groovy.tmp.templates.GStringTemplateScript65._getTemplate_closure1._closure10._closure224c78a42cace8dd69
groovy.tmp.templates.GStringTemplateScript65._getTemplate_closure1._closure10._closure23b1d692747b6a9ba0
groovy.tmp.templates.GStringTemplateScript65._getTemplate_closure1._closure11e21c328403b18bfe
groovy.tmp.templates.GStringTemplateScript65._getTemplate_closure1._closure12bfa9780787de659e
groovy.tmp.templates.GStringTemplateScript65._getTemplate_closure1._closure138cb400830ce0dfb1
groovy.tmp.templates.GStringTemplateScript65._getTemplate_closure1._closure13._closure26a1c014ae073405e9
groovy.tmp.templates.GStringTemplateScript65._getTemplate_closure1._closure13._closure26._closure32613fc85cf22eb608
groovy.tmp.templates.GStringTemplateScript65._getTemplate_closure1._closure13._closure2745d0392859475b6b
groovy.tmp.templates.GStringTemplateScript65._getTemplate_closure1._closure13._closure2903d5f9fe3abb92ba
groovy.tmp.templates.GStringTemplateScript65._getTemplate_closure1._closure13._closure29._closure3331295dccf65d45bf
groovy.tmp.templates.GStringTemplateScript65._getTemplate_closure1._closure13._closure306ddeaec4ca2cecb3
groovy.tmp.templates.GStringTemplateScript65._getTemplate_closure1._closure13._closure31b8b3797efaf97664
groovy.tmp.templates.GStringTemplateScript65._getTemplate_closure1._closure27ef8f0d68a0da3e9
groovy.tmp.templates.GStringTemplateScript65._getTemplate_closure1._closure3a82d4a4c6e09be02
groovy.tmp.templates.GStringTemplateScript65._getTemplate_closure1._closure48e664dea8b452035
groovy.tmp.templates.GStringTemplateScript65._getTemplate_closure1._closure51fb71f26e6b462a9
groovy.tmp.templates.GStringTemplateScript65._getTemplate_closure1._closure5._closure1400faf53b7c49fbe4
groovy.tmp.templates.GStringTemplateScript65._getTemplate_closure1._closure6061f43b2277522c1
groovy.tmp.templates.GStringTemplateScript65._getTemplate_closure1._closure7d29a180b0338205e
groovy.tmp.templates.GStringTemplateScript65._getTemplate_closure1._closure7._closure15c226ed7ad6fa12a5
groovy.tmp.templates.GStringTemplateScript65._getTemplate_closure1._closure87f23ff3112d18de1
groovy.tmp.templates.GStringTemplateScript65._getTemplate_closure1._closure9074650cfc47050d1
groovy.tmp.templates.GStringTemplateScript65._getTemplate_closure1._closure9._closure16c69842c84f70761a
groovy.tmp.templates.GStringTemplateScript65._getTemplate_closure1._closure9._closure1765d80acfa9d7cb76
groovy.tmp.templates.GStringTemplateScript661a043b672e54e670
groovy.tmp.templates.GStringTemplateScript66._getTemplate_closure110bce25452702eb1
groovy.tmp.templates.GStringTemplateScript66._getTemplate_closure1._closure1047bc117bce6f6970
groovy.tmp.templates.GStringTemplateScript66._getTemplate_closure1._closure10._closure2208955c02da1da0fa
groovy.tmp.templates.GStringTemplateScript66._getTemplate_closure1._closure10._closure23c11177da5609a7a8
groovy.tmp.templates.GStringTemplateScript66._getTemplate_closure1._closure116b4563a5cc22c0b3
groovy.tmp.templates.GStringTemplateScript66._getTemplate_closure1._closure1294db554a6fd2f549
groovy.tmp.templates.GStringTemplateScript66._getTemplate_closure1._closure131e11f6c827131242
groovy.tmp.templates.GStringTemplateScript66._getTemplate_closure1._closure13._closure26c205e7a0709717d1
groovy.tmp.templates.GStringTemplateScript66._getTemplate_closure1._closure13._closure26._closure32296029731b1bffaf
groovy.tmp.templates.GStringTemplateScript66._getTemplate_closure1._closure13._closure27f938c62baecdb54a
groovy.tmp.templates.GStringTemplateScript66._getTemplate_closure1._closure13._closure2989a2c03fbdb1b5d5
groovy.tmp.templates.GStringTemplateScript66._getTemplate_closure1._closure13._closure29._closure332a0144765ad0a81c
groovy.tmp.templates.GStringTemplateScript66._getTemplate_closure1._closure13._closure3084ef4b4d247d91f5
groovy.tmp.templates.GStringTemplateScript66._getTemplate_closure1._closure13._closure31bd44a04bed7cc07f
groovy.tmp.templates.GStringTemplateScript66._getTemplate_closure1._closure2f1db1b5a38d3bdb1
groovy.tmp.templates.GStringTemplateScript66._getTemplate_closure1._closure3395a073206345f63
groovy.tmp.templates.GStringTemplateScript66._getTemplate_closure1._closure4f0c7f6b9b8dec63a
groovy.tmp.templates.GStringTemplateScript66._getTemplate_closure1._closure543d9bb6c96e0ab5e
groovy.tmp.templates.GStringTemplateScript66._getTemplate_closure1._closure5._closure14535673a80df06dbe
groovy.tmp.templates.GStringTemplateScript66._getTemplate_closure1._closure6b4b58c67ba59d727
groovy.tmp.templates.GStringTemplateScript66._getTemplate_closure1._closure7524d4a6369fd7fe0
groovy.tmp.templates.GStringTemplateScript66._getTemplate_closure1._closure7._closure1582cf40ef69eb2054
groovy.tmp.templates.GStringTemplateScript66._getTemplate_closure1._closure8354691b77b7ff598
groovy.tmp.templates.GStringTemplateScript66._getTemplate_closure1._closure99f2cfbd2d9fffcaf
groovy.tmp.templates.GStringTemplateScript66._getTemplate_closure1._closure9._closure160aa4c1d5a8da126e
groovy.tmp.templates.GStringTemplateScript66._getTemplate_closure1._closure9._closure1760702080bc20f621
groovy.tmp.templates.GStringTemplateScript675581e495c975dc10
groovy.tmp.templates.GStringTemplateScript67._getTemplate_closure1c167a8ad2cb63252
groovy.tmp.templates.GStringTemplateScript67._getTemplate_closure1._closure10df730e3ebf56522b
groovy.tmp.templates.GStringTemplateScript67._getTemplate_closure1._closure10._closure2234ce0be7084e8b8b
groovy.tmp.templates.GStringTemplateScript67._getTemplate_closure1._closure10._closure237eac2b404d28b3af
groovy.tmp.templates.GStringTemplateScript67._getTemplate_closure1._closure11138dacba8953f988
groovy.tmp.templates.GStringTemplateScript67._getTemplate_closure1._closure128df54e7137d68504
groovy.tmp.templates.GStringTemplateScript67._getTemplate_closure1._closure136f8d5b0ec1bda913
groovy.tmp.templates.GStringTemplateScript67._getTemplate_closure1._closure13._closure26e346b6a5a209e639
groovy.tmp.templates.GStringTemplateScript67._getTemplate_closure1._closure13._closure26._closure3211557669bc08c732
groovy.tmp.templates.GStringTemplateScript67._getTemplate_closure1._closure13._closure27029f6cd503b410aa
groovy.tmp.templates.GStringTemplateScript67._getTemplate_closure1._closure13._closure29f0702880c0b7a8f0
groovy.tmp.templates.GStringTemplateScript67._getTemplate_closure1._closure13._closure29._closure3323194ce03eabf37d
groovy.tmp.templates.GStringTemplateScript67._getTemplate_closure1._closure13._closure30dc0017ca7e4d4537
groovy.tmp.templates.GStringTemplateScript67._getTemplate_closure1._closure13._closure31be161758e0005276
groovy.tmp.templates.GStringTemplateScript67._getTemplate_closure1._closure28b3a4221a9664879
groovy.tmp.templates.GStringTemplateScript67._getTemplate_closure1._closure34988c3e7de20ffbc
groovy.tmp.templates.GStringTemplateScript67._getTemplate_closure1._closure44aa7607756579bc0
groovy.tmp.templates.GStringTemplateScript67._getTemplate_closure1._closure577fc275546d313f3
groovy.tmp.templates.GStringTemplateScript67._getTemplate_closure1._closure5._closure1462320e26dd671f88
groovy.tmp.templates.GStringTemplateScript67._getTemplate_closure1._closure6dad3c92b31427b85
groovy.tmp.templates.GStringTemplateScript67._getTemplate_closure1._closure7bdff8444b041b575
groovy.tmp.templates.GStringTemplateScript67._getTemplate_closure1._closure7._closure15bd682463fce431fb
groovy.tmp.templates.GStringTemplateScript67._getTemplate_closure1._closure89c9a4bcaa3e5ddb0
groovy.tmp.templates.GStringTemplateScript67._getTemplate_closure1._closure978f562d9d285677a
groovy.tmp.templates.GStringTemplateScript67._getTemplate_closure1._closure9._closure164eb0bf210a43ce42
groovy.tmp.templates.GStringTemplateScript67._getTemplate_closure1._closure9._closure17f317c6ba4f721d13
groovy.tmp.templates.GStringTemplateScript68a73604de459ba832
groovy.tmp.templates.GStringTemplateScript68._getTemplate_closure16abd0b88a2d483e4
groovy.tmp.templates.GStringTemplateScript68._getTemplate_closure1._closure10a366a8cf61302877
groovy.tmp.templates.GStringTemplateScript68._getTemplate_closure1._closure116fb1b91c788d9ff2
groovy.tmp.templates.GStringTemplateScript68._getTemplate_closure1._closure120b4fd60b7fe8568f
groovy.tmp.templates.GStringTemplateScript68._getTemplate_closure1._closure1367bf74694581532f
groovy.tmp.templates.GStringTemplateScript68._getTemplate_closure1._closure13._closure26bf9489920b26bce0
groovy.tmp.templates.GStringTemplateScript68._getTemplate_closure1._closure13._closure26._closure32c87513f9f1e8a808
groovy.tmp.templates.GStringTemplateScript68._getTemplate_closure1._closure13._closure279dd46fd92a15460c
groovy.tmp.templates.GStringTemplateScript68._getTemplate_closure1._closure13._closure2983dbf6475b951220
groovy.tmp.templates.GStringTemplateScript68._getTemplate_closure1._closure13._closure29._closure33549133b02011a852
groovy.tmp.templates.GStringTemplateScript68._getTemplate_closure1._closure13._closure30f1f664642959cd6b
groovy.tmp.templates.GStringTemplateScript68._getTemplate_closure1._closure13._closure31ac3eaab9a9933c01
groovy.tmp.templates.GStringTemplateScript68._getTemplate_closure1._closure2e896079fd0c02f42
groovy.tmp.templates.GStringTemplateScript68._getTemplate_closure1._closure34d22bb6016ea9b5a
groovy.tmp.templates.GStringTemplateScript68._getTemplate_closure1._closure47e803668aba3e5f2
groovy.tmp.templates.GStringTemplateScript68._getTemplate_closure1._closure5ea281236f7d4fdd9
groovy.tmp.templates.GStringTemplateScript68._getTemplate_closure1._closure5._closure14cf2c92f86a38d0bb
groovy.tmp.templates.GStringTemplateScript68._getTemplate_closure1._closure672d239a8d8dd59f8
groovy.tmp.templates.GStringTemplateScript68._getTemplate_closure1._closure7ee759f8d71919430
groovy.tmp.templates.GStringTemplateScript68._getTemplate_closure1._closure7._closure154e273fa0bfb1c8cf
groovy.tmp.templates.GStringTemplateScript68._getTemplate_closure1._closure84f6a9f556cf2442c
groovy.tmp.templates.GStringTemplateScript68._getTemplate_closure1._closure9513565b0b9347aff
groovy.tmp.templates.GStringTemplateScript68._getTemplate_closure1._closure9._closure16d27e304b73403be4
groovy.tmp.templates.GStringTemplateScript68._getTemplate_closure1._closure9._closure17e01f45c90d59d518
groovy.tmp.templates.GStringTemplateScript69e8b3db2ca2ba9252
groovy.tmp.templates.GStringTemplateScript69._getTemplate_closure1bb664171dc129f07
groovy.tmp.templates.GStringTemplateScript69._getTemplate_closure1._closure103ba9b78a1009132c
groovy.tmp.templates.GStringTemplateScript69._getTemplate_closure1._closure11177976033dfca6c9
groovy.tmp.templates.GStringTemplateScript69._getTemplate_closure1._closure121261cd3027ec26c2
groovy.tmp.templates.GStringTemplateScript69._getTemplate_closure1._closure131623d9afa32fe87e
groovy.tmp.templates.GStringTemplateScript69._getTemplate_closure1._closure13._closure269ed7d897d9b84d08
groovy.tmp.templates.GStringTemplateScript69._getTemplate_closure1._closure13._closure276673c527876ce3ec
groovy.tmp.templates.GStringTemplateScript69._getTemplate_closure1._closure13._closure29fa091ef826930f05
groovy.tmp.templates.GStringTemplateScript69._getTemplate_closure1._closure13._closure30a91938e3736919a9
groovy.tmp.templates.GStringTemplateScript69._getTemplate_closure1._closure13._closure31af6c1daaa4efae08
groovy.tmp.templates.GStringTemplateScript69._getTemplate_closure1._closure292775ee44175da8a
groovy.tmp.templates.GStringTemplateScript69._getTemplate_closure1._closure33df07fb5cefe3b85
groovy.tmp.templates.GStringTemplateScript69._getTemplate_closure1._closure4c4e0a0a6452ab808
groovy.tmp.templates.GStringTemplateScript69._getTemplate_closure1._closure5de0d8e0f27e74574
groovy.tmp.templates.GStringTemplateScript69._getTemplate_closure1._closure5._closure14fe48ef76baafa28d
groovy.tmp.templates.GStringTemplateScript69._getTemplate_closure1._closure61cb47ce453c6f55a
groovy.tmp.templates.GStringTemplateScript69._getTemplate_closure1._closure701c751aaa82d5ea5
groovy.tmp.templates.GStringTemplateScript69._getTemplate_closure1._closure7._closure1571805b2c2abed960
groovy.tmp.templates.GStringTemplateScript69._getTemplate_closure1._closure8e6b64528b4686c04
groovy.tmp.templates.GStringTemplateScript69._getTemplate_closure1._closure9b6ecfcbbb24ee12a
groovy.tmp.templates.GStringTemplateScript69._getTemplate_closure1._closure9._closure16966a4ebfd1d9e7c8
groovy.tmp.templates.GStringTemplateScript69._getTemplate_closure1._closure9._closure177378a3f3fe0b3e2a
groovy.tmp.templates.GStringTemplateScript7c8d9aa087d276b11
groovy.tmp.templates.GStringTemplateScript7._getTemplate_closure12f3b2391f3d2bf19
groovy.tmp.templates.GStringTemplateScript7._getTemplate_closure1._closure10f72189dbf1a55207
groovy.tmp.templates.GStringTemplateScript7._getTemplate_closure1._closure11b25132af2ea110b0
groovy.tmp.templates.GStringTemplateScript7._getTemplate_closure1._closure12209f004c662d7b31
groovy.tmp.templates.GStringTemplateScript7._getTemplate_closure1._closure13c0838330943cf6fd
groovy.tmp.templates.GStringTemplateScript7._getTemplate_closure1._closure13._closure26102d27d80bf3a215
groovy.tmp.templates.GStringTemplateScript7._getTemplate_closure1._closure13._closure2794b189848c9df676
groovy.tmp.templates.GStringTemplateScript7._getTemplate_closure1._closure13._closure29e16173edc81e4fe4
groovy.tmp.templates.GStringTemplateScript7._getTemplate_closure1._closure13._closure302c9739195c76d10e
groovy.tmp.templates.GStringTemplateScript7._getTemplate_closure1._closure13._closure3136829520e95c8b76
groovy.tmp.templates.GStringTemplateScript7._getTemplate_closure1._closure206f7ba7228b7b8af
groovy.tmp.templates.GStringTemplateScript7._getTemplate_closure1._closure35e3aeaf10f5bd0d4
groovy.tmp.templates.GStringTemplateScript7._getTemplate_closure1._closure48484b90868aea37e
groovy.tmp.templates.GStringTemplateScript7._getTemplate_closure1._closure51c180651a248e4b3
groovy.tmp.templates.GStringTemplateScript7._getTemplate_closure1._closure5._closure1442c6b25d6dd6e03d
groovy.tmp.templates.GStringTemplateScript7._getTemplate_closure1._closure603d08e123ba804e5
groovy.tmp.templates.GStringTemplateScript7._getTemplate_closure1._closure746a9a28d55ad0d25
groovy.tmp.templates.GStringTemplateScript7._getTemplate_closure1._closure7._closure154d32ebb7fc427881
groovy.tmp.templates.GStringTemplateScript7._getTemplate_closure1._closure8f097700e8eddf6da
groovy.tmp.templates.GStringTemplateScript7._getTemplate_closure1._closure95c34c8431ec9a49d
groovy.tmp.templates.GStringTemplateScript7._getTemplate_closure1._closure9._closure169029a087e9315cf9
groovy.tmp.templates.GStringTemplateScript7._getTemplate_closure1._closure9._closure1767be863322acf452
groovy.tmp.templates.GStringTemplateScript7051577e968e755a0b
groovy.tmp.templates.GStringTemplateScript70._getTemplate_closure1a4668709ad9aa2e4
groovy.tmp.templates.GStringTemplateScript70._getTemplate_closure1._closure100dd69dfbad88cb93
groovy.tmp.templates.GStringTemplateScript70._getTemplate_closure1._closure11ebdf09294c412711
groovy.tmp.templates.GStringTemplateScript70._getTemplate_closure1._closure11._closure249496073cebd06b3d
groovy.tmp.templates.GStringTemplateScript70._getTemplate_closure1._closure11._closure25ded6cca4c25454e9
groovy.tmp.templates.GStringTemplateScript70._getTemplate_closure1._closure12a4d621ca8493d097
groovy.tmp.templates.GStringTemplateScript70._getTemplate_closure1._closure13fd9b86f3b612271e
groovy.tmp.templates.GStringTemplateScript70._getTemplate_closure1._closure13._closure268a2f42ed9a03ad50
groovy.tmp.templates.GStringTemplateScript70._getTemplate_closure1._closure13._closure27b2129f86bf7510ac
groovy.tmp.templates.GStringTemplateScript70._getTemplate_closure1._closure13._closure291c4561540cd8fd17
groovy.tmp.templates.GStringTemplateScript70._getTemplate_closure1._closure13._closure3051b46f027f855bac
groovy.tmp.templates.GStringTemplateScript70._getTemplate_closure1._closure13._closure31ba984096d17ad0db
groovy.tmp.templates.GStringTemplateScript70._getTemplate_closure1._closure2e6662d1a26fe34f5
groovy.tmp.templates.GStringTemplateScript70._getTemplate_closure1._closure30d544f0a03978900
groovy.tmp.templates.GStringTemplateScript70._getTemplate_closure1._closure49adee08911078379
groovy.tmp.templates.GStringTemplateScript70._getTemplate_closure1._closure51fc0d6644f990b08
groovy.tmp.templates.GStringTemplateScript70._getTemplate_closure1._closure5._closure14dc9e1af36053d678
groovy.tmp.templates.GStringTemplateScript70._getTemplate_closure1._closure68cee7589cc8b2746
groovy.tmp.templates.GStringTemplateScript70._getTemplate_closure1._closure7277c5c7d9bae7c57
groovy.tmp.templates.GStringTemplateScript70._getTemplate_closure1._closure7._closure15e4b3bca09b5c4aa7
groovy.tmp.templates.GStringTemplateScript70._getTemplate_closure1._closure89aa59061d5fb9f42
groovy.tmp.templates.GStringTemplateScript70._getTemplate_closure1._closure9b76e7471e9ebdec9
groovy.tmp.templates.GStringTemplateScript70._getTemplate_closure1._closure9._closure161fd9d390932c435b
groovy.tmp.templates.GStringTemplateScript70._getTemplate_closure1._closure9._closure1745d313f8ad3dde64
groovy.tmp.templates.GStringTemplateScript711ed2a1646954606b
groovy.tmp.templates.GStringTemplateScript71._getTemplate_closure175bdcdf0d35cbe07
groovy.tmp.templates.GStringTemplateScript71._getTemplate_closure1._closure10951982bedcb1f0c8
groovy.tmp.templates.GStringTemplateScript71._getTemplate_closure1._closure119317c63609301e2a
groovy.tmp.templates.GStringTemplateScript71._getTemplate_closure1._closure11._closure24a8cd50d93983404c
groovy.tmp.templates.GStringTemplateScript71._getTemplate_closure1._closure11._closure25616b903ed97540ee
groovy.tmp.templates.GStringTemplateScript71._getTemplate_closure1._closure12bdf83af1dc97a0da
groovy.tmp.templates.GStringTemplateScript71._getTemplate_closure1._closure138c072b3550bc9c4f
groovy.tmp.templates.GStringTemplateScript71._getTemplate_closure1._closure13._closure26ab6c13e8489d5cb8
groovy.tmp.templates.GStringTemplateScript71._getTemplate_closure1._closure13._closure26._closure324822816974c54744
groovy.tmp.templates.GStringTemplateScript71._getTemplate_closure1._closure13._closure2749b53578120cb54c
groovy.tmp.templates.GStringTemplateScript71._getTemplate_closure1._closure13._closure29659789eb71dee032
groovy.tmp.templates.GStringTemplateScript71._getTemplate_closure1._closure13._closure29._closure334ef0679df1d45360
groovy.tmp.templates.GStringTemplateScript71._getTemplate_closure1._closure13._closure30095b338525b58f6e
groovy.tmp.templates.GStringTemplateScript71._getTemplate_closure1._closure13._closure31b9caf785dc0642d2
groovy.tmp.templates.GStringTemplateScript71._getTemplate_closure1._closure29c877461b74bc13d
groovy.tmp.templates.GStringTemplateScript71._getTemplate_closure1._closure37d868bdfdb8329df
groovy.tmp.templates.GStringTemplateScript71._getTemplate_closure1._closure420be7647ff8ede83
groovy.tmp.templates.GStringTemplateScript71._getTemplate_closure1._closure52be54a5d9faab3a5
groovy.tmp.templates.GStringTemplateScript71._getTemplate_closure1._closure5._closure14edfa677db0c4a44e
groovy.tmp.templates.GStringTemplateScript71._getTemplate_closure1._closure6e28830c547908be4
groovy.tmp.templates.GStringTemplateScript71._getTemplate_closure1._closure7c8ce925a4212b6c2
groovy.tmp.templates.GStringTemplateScript71._getTemplate_closure1._closure7._closure15db14d82c0e535b08
groovy.tmp.templates.GStringTemplateScript71._getTemplate_closure1._closure833794a1c0d61b76a
groovy.tmp.templates.GStringTemplateScript71._getTemplate_closure1._closure950b7ed7ae291451c
groovy.tmp.templates.GStringTemplateScript71._getTemplate_closure1._closure9._closure165bcdad6431b59f77
groovy.tmp.templates.GStringTemplateScript71._getTemplate_closure1._closure9._closure17d6b4f5c25e6f3556
groovy.tmp.templates.GStringTemplateScript72ce5cc17340372ecb
groovy.tmp.templates.GStringTemplateScript72._getTemplate_closure1b7d012fb50169b23
groovy.tmp.templates.GStringTemplateScript72._getTemplate_closure1._closure108c48a3714ffabd24
groovy.tmp.templates.GStringTemplateScript72._getTemplate_closure1._closure111a4e9717c6a35567
groovy.tmp.templates.GStringTemplateScript72._getTemplate_closure1._closure12968a17bc349b300d
groovy.tmp.templates.GStringTemplateScript72._getTemplate_closure1._closure131ea2dd7e7b4f51bc
groovy.tmp.templates.GStringTemplateScript72._getTemplate_closure1._closure13._closure26c8a9e0e63f3e4e80
groovy.tmp.templates.GStringTemplateScript72._getTemplate_closure1._closure13._closure27f55dca7be5865b6d
groovy.tmp.templates.GStringTemplateScript72._getTemplate_closure1._closure13._closure29efe0b02af6d4c75d
groovy.tmp.templates.GStringTemplateScript72._getTemplate_closure1._closure13._closure30e06ad60ccbe4f228
groovy.tmp.templates.GStringTemplateScript72._getTemplate_closure1._closure13._closure31bc3d2eb0cb83f4c9
groovy.tmp.templates.GStringTemplateScript72._getTemplate_closure1._closure213a49fed0595df65
groovy.tmp.templates.GStringTemplateScript72._getTemplate_closure1._closure3ecf1c6a1b3bec8be
groovy.tmp.templates.GStringTemplateScript72._getTemplate_closure1._closure45e1fcd14cc15388c
groovy.tmp.templates.GStringTemplateScript72._getTemplate_closure1._closure5778bee17effe7a52
groovy.tmp.templates.GStringTemplateScript72._getTemplate_closure1._closure5._closure14be56e1eec17d3214
groovy.tmp.templates.GStringTemplateScript72._getTemplate_closure1._closure65022ff10dabc7e02
groovy.tmp.templates.GStringTemplateScript72._getTemplate_closure1._closure74819c03228d7e97c
groovy.tmp.templates.GStringTemplateScript72._getTemplate_closure1._closure7._closure159bfd75b9b14269f9
groovy.tmp.templates.GStringTemplateScript72._getTemplate_closure1._closure8791c249a64cfcf13
groovy.tmp.templates.GStringTemplateScript72._getTemplate_closure1._closure9c8dd4667ff1ee962
groovy.tmp.templates.GStringTemplateScript72._getTemplate_closure1._closure9._closure1697f12e79d61ffb03
groovy.tmp.templates.GStringTemplateScript72._getTemplate_closure1._closure9._closure17d31cdf8d4b980801
groovy.tmp.templates.GStringTemplateScript7381d91e81a71614ab
groovy.tmp.templates.GStringTemplateScript73._getTemplate_closure1660b58022ed087c0
groovy.tmp.templates.GStringTemplateScript73._getTemplate_closure1._closure101487bc343ec3867f
groovy.tmp.templates.GStringTemplateScript73._getTemplate_closure1._closure116286580883d26c5c
groovy.tmp.templates.GStringTemplateScript73._getTemplate_closure1._closure128fa40c876c9f4040
groovy.tmp.templates.GStringTemplateScript73._getTemplate_closure1._closure136f3e70b89de1eaed
groovy.tmp.templates.GStringTemplateScript73._getTemplate_closure1._closure13._closure26e9eab1e3eda0bf68
groovy.tmp.templates.GStringTemplateScript73._getTemplate_closure1._closure13._closure26._closure3238483f5c3ae3367e
groovy.tmp.templates.GStringTemplateScript73._getTemplate_closure1._closure13._closure270efa608548fffe8d
groovy.tmp.templates.GStringTemplateScript73._getTemplate_closure1._closure13._closure29963258958bd2da78
groovy.tmp.templates.GStringTemplateScript73._getTemplate_closure1._closure13._closure29._closure335cc076b13922e5a2
groovy.tmp.templates.GStringTemplateScript73._getTemplate_closure1._closure13._closure30b8858a8b91d426ea
groovy.tmp.templates.GStringTemplateScript73._getTemplate_closure1._closure13._closure31bf6f99a3c6ff66c0
groovy.tmp.templates.GStringTemplateScript73._getTemplate_closure1._closure26945c69694202aad
groovy.tmp.templates.GStringTemplateScript73._getTemplate_closure1._closure39c2302746baa6861
groovy.tmp.templates.GStringTemplateScript73._getTemplate_closure1._closure4e47f5bda229c6576
groovy.tmp.templates.GStringTemplateScript73._getTemplate_closure1._closure543ae722e3fcdc2ff
groovy.tmp.templates.GStringTemplateScript73._getTemplate_closure1._closure5._closure148f329c6011ea4022
groovy.tmp.templates.GStringTemplateScript73._getTemplate_closure1._closure63e44ba5c51a7d2a0
groovy.tmp.templates.GStringTemplateScript73._getTemplate_closure1._closure7a7ab0e15f16b23e9
groovy.tmp.templates.GStringTemplateScript73._getTemplate_closure1._closure7._closure15a45a1135244d7856
groovy.tmp.templates.GStringTemplateScript73._getTemplate_closure1._closure8d0c0fee7bc55e73b
groovy.tmp.templates.GStringTemplateScript73._getTemplate_closure1._closure92f04df6cf46472b7
groovy.tmp.templates.GStringTemplateScript73._getTemplate_closure1._closure9._closure16d3e5508d7486272f
groovy.tmp.templates.GStringTemplateScript73._getTemplate_closure1._closure9._closure17407b39b7b8cae333
groovy.tmp.templates.GStringTemplateScript74df40015d12f1b38a
groovy.tmp.templates.GStringTemplateScript74._getTemplate_closure1830bacec5682d16a
groovy.tmp.templates.GStringTemplateScript74._getTemplate_closure1._closure10beeae0ee696c26fc
groovy.tmp.templates.GStringTemplateScript74._getTemplate_closure1._closure11b8fc35545985c3fc
groovy.tmp.templates.GStringTemplateScript74._getTemplate_closure1._closure11._closure2465fb58aba29cc6f9
groovy.tmp.templates.GStringTemplateScript74._getTemplate_closure1._closure11._closure25f023beccaed004f6
groovy.tmp.templates.GStringTemplateScript74._getTemplate_closure1._closure12c06e4d27e48211a3
groovy.tmp.templates.GStringTemplateScript74._getTemplate_closure1._closure138be931e82ca8ca5b
groovy.tmp.templates.GStringTemplateScript74._getTemplate_closure1._closure13._closure260f2206fad0786af0
groovy.tmp.templates.GStringTemplateScript74._getTemplate_closure1._closure13._closure273c8c347c0a93872e
groovy.tmp.templates.GStringTemplateScript74._getTemplate_closure1._closure13._closure294b0ec3a9f8c08982
groovy.tmp.templates.GStringTemplateScript74._getTemplate_closure1._closure13._closure3082091d1f174608a5
groovy.tmp.templates.GStringTemplateScript74._getTemplate_closure1._closure13._closure31b7d29cdae48898ff
groovy.tmp.templates.GStringTemplateScript74._getTemplate_closure1._closure2bde348f46029e3d4
groovy.tmp.templates.GStringTemplateScript74._getTemplate_closure1._closure37e1f5c5d63c50a7d
groovy.tmp.templates.GStringTemplateScript74._getTemplate_closure1._closure4a35cbbb2ab22f492
groovy.tmp.templates.GStringTemplateScript74._getTemplate_closure1._closure5cf56a6830f57e9bc
groovy.tmp.templates.GStringTemplateScript74._getTemplate_closure1._closure5._closure14190fecc8220e1ea0
groovy.tmp.templates.GStringTemplateScript74._getTemplate_closure1._closure6857760bbe0e595cf
groovy.tmp.templates.GStringTemplateScript74._getTemplate_closure1._closure7f9b764e2fd5d5601
groovy.tmp.templates.GStringTemplateScript74._getTemplate_closure1._closure8edd6f996b7933fe1
groovy.tmp.templates.GStringTemplateScript74._getTemplate_closure1._closure94808105dc401b19f
groovy.tmp.templates.GStringTemplateScript74._getTemplate_closure1._closure9._closure16bf882842194b33ea
groovy.tmp.templates.GStringTemplateScript74._getTemplate_closure1._closure9._closure17d84c8b13607672af
groovy.tmp.templates.GStringTemplateScript7590c5deaff5d089ea
groovy.tmp.templates.GStringTemplateScript75._getTemplate_closure152d0e6152844cd89
groovy.tmp.templates.GStringTemplateScript75._getTemplate_closure1._closure102625ffab18551da7
groovy.tmp.templates.GStringTemplateScript75._getTemplate_closure1._closure10._closure220cf4ff7b493a8e42
groovy.tmp.templates.GStringTemplateScript75._getTemplate_closure1._closure10._closure23b5792f28e171bab4
groovy.tmp.templates.GStringTemplateScript75._getTemplate_closure1._closure11c034fa4b1cf4fac7
groovy.tmp.templates.GStringTemplateScript75._getTemplate_closure1._closure12d940561cbc8661ee
groovy.tmp.templates.GStringTemplateScript75._getTemplate_closure1._closure13fa759c2eca06710a
groovy.tmp.templates.GStringTemplateScript75._getTemplate_closure1._closure13._closure262e6157ff02e69b18
groovy.tmp.templates.GStringTemplateScript75._getTemplate_closure1._closure13._closure27c72b9e82a7ea22ce
groovy.tmp.templates.GStringTemplateScript75._getTemplate_closure1._closure13._closure2932dc2b1685c694a7
groovy.tmp.templates.GStringTemplateScript75._getTemplate_closure1._closure13._closure30dae641984d76dc67
groovy.tmp.templates.GStringTemplateScript75._getTemplate_closure1._closure13._closure31b4802bc9e9f40af6
groovy.tmp.templates.GStringTemplateScript75._getTemplate_closure1._closure2c702118ff19c161c
groovy.tmp.templates.GStringTemplateScript75._getTemplate_closure1._closure30ecd9888bbd1aaa2
groovy.tmp.templates.GStringTemplateScript75._getTemplate_closure1._closure4193c2d7c45aba968
groovy.tmp.templates.GStringTemplateScript75._getTemplate_closure1._closure5fb733abadf645111
groovy.tmp.templates.GStringTemplateScript75._getTemplate_closure1._closure5._closure14286b9146f2996c96
groovy.tmp.templates.GStringTemplateScript75._getTemplate_closure1._closure6eb1125f76bfe396d
groovy.tmp.templates.GStringTemplateScript75._getTemplate_closure1._closure71605aac524e19c94
groovy.tmp.templates.GStringTemplateScript75._getTemplate_closure1._closure7._closure1525894a1e5a6f1db4
groovy.tmp.templates.GStringTemplateScript75._getTemplate_closure1._closure8440a23eb6f0917c9
groovy.tmp.templates.GStringTemplateScript75._getTemplate_closure1._closure9afd18956cf7b2a4a
groovy.tmp.templates.GStringTemplateScript75._getTemplate_closure1._closure9._closure16fb9c56b6bbd2efc6
groovy.tmp.templates.GStringTemplateScript75._getTemplate_closure1._closure9._closure174b2b6d299324999d
groovy.tmp.templates.GStringTemplateScript76404bbeb8dcb3c74a
groovy.tmp.templates.GStringTemplateScript76._getTemplate_closure190bd391eab0ee8ad
groovy.tmp.templates.GStringTemplateScript76._getTemplate_closure1._closure103f74de648b1e504b
groovy.tmp.templates.GStringTemplateScript76._getTemplate_closure1._closure11496dab6ad367b18a
groovy.tmp.templates.GStringTemplateScript76._getTemplate_closure1._closure11._closure241d4df760063a901b
groovy.tmp.templates.GStringTemplateScript76._getTemplate_closure1._closure11._closure253f5907f898922cf9
groovy.tmp.templates.GStringTemplateScript76._getTemplate_closure1._closure12f2327b51548af139
groovy.tmp.templates.GStringTemplateScript76._getTemplate_closure1._closure1368d06a65e1f5bcf9
groovy.tmp.templates.GStringTemplateScript76._getTemplate_closure1._closure13._closure264da4a4f175458920
groovy.tmp.templates.GStringTemplateScript76._getTemplate_closure1._closure13._closure277bc361815060ccef
groovy.tmp.templates.GStringTemplateScript76._getTemplate_closure1._closure13._closure29b8ab12d702ccb3c8
groovy.tmp.templates.GStringTemplateScript76._getTemplate_closure1._closure13._closure3033d7a411a327a121
groovy.tmp.templates.GStringTemplateScript76._getTemplate_closure1._closure13._closure31b177f2fcfe71bced
groovy.tmp.templates.GStringTemplateScript76._getTemplate_closure1._closure24821fa0343420844
groovy.tmp.templates.GStringTemplateScript76._getTemplate_closure1._closure39fbad5f6d3ec4bc3
groovy.tmp.templates.GStringTemplateScript76._getTemplate_closure1._closure4679d962f76304f67
groovy.tmp.templates.GStringTemplateScript76._getTemplate_closure1._closure5a71d9ef0af3098e6
groovy.tmp.templates.GStringTemplateScript76._getTemplate_closure1._closure5._closure147bc717d58320facc
groovy.tmp.templates.GStringTemplateScript76._getTemplate_closure1._closure659bbea22f6d2cc8b
groovy.tmp.templates.GStringTemplateScript76._getTemplate_closure1._closure796d2f8ad4e24c32a
groovy.tmp.templates.GStringTemplateScript76._getTemplate_closure1._closure7._closure156560e78be57e2f45
groovy.tmp.templates.GStringTemplateScript76._getTemplate_closure1._closure80e6f4d6d06a76fb0
groovy.tmp.templates.GStringTemplateScript76._getTemplate_closure1._closure937bb224bd2f48634
groovy.tmp.templates.GStringTemplateScript76._getTemplate_closure1._closure9._closure1637a0d5ab5c788bb2
groovy.tmp.templates.GStringTemplateScript76._getTemplate_closure1._closure9._closure174e83476686d3a4ca
groovy.tmp.templates.GStringTemplateScript770fce614a3b92fd2a
groovy.tmp.templates.GStringTemplateScript77._getTemplate_closure1416673e7d5c8f44e
groovy.tmp.templates.GStringTemplateScript77._getTemplate_closure1._closure10a7bbc121fa276b10
groovy.tmp.templates.GStringTemplateScript77._getTemplate_closure1._closure1131a56475961688b1
groovy.tmp.templates.GStringTemplateScript77._getTemplate_closure1._closure11._closure242116a085d469bb6a
groovy.tmp.templates.GStringTemplateScript77._getTemplate_closure1._closure11._closure2580e45b6283b338fe
groovy.tmp.templates.GStringTemplateScript77._getTemplate_closure1._closure12eb1c606a0c8e8174
groovy.tmp.templates.GStringTemplateScript77._getTemplate_closure1._closure13194cc7a3075b07a8
groovy.tmp.templates.GStringTemplateScript77._getTemplate_closure1._closure13._closure266ce7f5f4a7db78c8
groovy.tmp.templates.GStringTemplateScript77._getTemplate_closure1._closure13._closure26._closure32d89d4336a6afd40a
groovy.tmp.templates.GStringTemplateScript77._getTemplate_closure1._closure13._closure278064cb7ffd19690f
groovy.tmp.templates.GStringTemplateScript77._getTemplate_closure1._closure13._closure29c179fa687fcaaeed
groovy.tmp.templates.GStringTemplateScript77._getTemplate_closure1._closure13._closure29._closure3378a054e8a8cf8826
groovy.tmp.templates.GStringTemplateScript77._getTemplate_closure1._closure13._closure306b38f896f91775e3
groovy.tmp.templates.GStringTemplateScript77._getTemplate_closure1._closure13._closure31b22545eff30d2ee4
groovy.tmp.templates.GStringTemplateScript77._getTemplate_closure1._closure232c0a378d2f7fd8c
groovy.tmp.templates.GStringTemplateScript77._getTemplate_closure1._closure3ef6811230bf8eb1c
groovy.tmp.templates.GStringTemplateScript77._getTemplate_closure1._closure4ddfd00e198b9129d
groovy.tmp.templates.GStringTemplateScript77._getTemplate_closure1._closure5933802c97f03204b
groovy.tmp.templates.GStringTemplateScript77._getTemplate_closure1._closure5._closure144aa36a5b53b788fa
groovy.tmp.templates.GStringTemplateScript77._getTemplate_closure1._closure637ddaf6e7dc96029
groovy.tmp.templates.GStringTemplateScript77._getTemplate_closure1._closure77960368a979809bf
groovy.tmp.templates.GStringTemplateScript77._getTemplate_closure1._closure7._closure155ac7830770713eea
groovy.tmp.templates.GStringTemplateScript77._getTemplate_closure1._closure8a7b39710de3d4798
groovy.tmp.templates.GStringTemplateScript77._getTemplate_closure1._closure9d062bb40d98e1de1
groovy.tmp.templates.GStringTemplateScript77._getTemplate_closure1._closure9._closure1673b4ab5ffee1579e
groovy.tmp.templates.GStringTemplateScript77._getTemplate_closure1._closure9._closure17dde4a15c75814ff8
groovy.tmp.templates.GStringTemplateScript78fd798101b77c8908
groovy.tmp.templates.GStringTemplateScript78._getTemplate_closure1eabcd0c25baa45f8
groovy.tmp.templates.GStringTemplateScript78._getTemplate_closure1._closure10dbae67d02441114c
groovy.tmp.templates.GStringTemplateScript78._getTemplate_closure1._closure114d9971d367c8eecb
groovy.tmp.templates.GStringTemplateScript78._getTemplate_closure1._closure11._closure24c64cb812794930b4
groovy.tmp.templates.GStringTemplateScript78._getTemplate_closure1._closure11._closure25833c28741b5cf4d7
groovy.tmp.templates.GStringTemplateScript78._getTemplate_closure1._closure126da6f81044b052ff
groovy.tmp.templates.GStringTemplateScript78._getTemplate_closure1._closure13117ee8c48367fd94
groovy.tmp.templates.GStringTemplateScript78._getTemplate_closure1._closure13._closure263035cac30ef42211
groovy.tmp.templates.GStringTemplateScript78._getTemplate_closure1._closure13._closure26._closure3201bd26a6eb4fbb30
groovy.tmp.templates.GStringTemplateScript78._getTemplate_closure1._closure13._closure271f2fc873d4b83fa9
groovy.tmp.templates.GStringTemplateScript78._getTemplate_closure1._closure13._closure29b2d224afe4e8143d
groovy.tmp.templates.GStringTemplateScript78._getTemplate_closure1._closure13._closure29._closure330f282bb8b675d309
groovy.tmp.templates.GStringTemplateScript78._getTemplate_closure1._closure13._closure3046ce8b38ae03fdbf
groovy.tmp.templates.GStringTemplateScript78._getTemplate_closure1._closure13._closure31a00df80eba9e4093
groovy.tmp.templates.GStringTemplateScript78._getTemplate_closure1._closure2516ce6c6ab519ab7
groovy.tmp.templates.GStringTemplateScript78._getTemplate_closure1._closure3ebc269a4c3328ffa
groovy.tmp.templates.GStringTemplateScript78._getTemplate_closure1._closure4e9da56fe654d6caf
groovy.tmp.templates.GStringTemplateScript78._getTemplate_closure1._closure50eec37aace04ce61
groovy.tmp.templates.GStringTemplateScript78._getTemplate_closure1._closure5._closure14e7bdf685e4e847c9
groovy.tmp.templates.GStringTemplateScript78._getTemplate_closure1._closure69fdc5fed94564254
groovy.tmp.templates.GStringTemplateScript78._getTemplate_closure1._closure72aea2d43564828fa
groovy.tmp.templates.GStringTemplateScript78._getTemplate_closure1._closure7._closure15a98898c43324c7de
groovy.tmp.templates.GStringTemplateScript78._getTemplate_closure1._closure87443438f112ade04
groovy.tmp.templates.GStringTemplateScript78._getTemplate_closure1._closure9f9a2bc29b23f0064
groovy.tmp.templates.GStringTemplateScript78._getTemplate_closure1._closure9._closure16ef7a243587e2a238
groovy.tmp.templates.GStringTemplateScript78._getTemplate_closure1._closure9._closure17ceec222f37aa87f3
groovy.tmp.templates.GStringTemplateScript79b2fc5ef3505db368
groovy.tmp.templates.GStringTemplateScript79._getTemplate_closure13b679a3b256c591b
groovy.tmp.templates.GStringTemplateScript79._getTemplate_closure1._closure104361789555782a17
groovy.tmp.templates.GStringTemplateScript79._getTemplate_closure1._closure10._closure22af431fc292ef780f
groovy.tmp.templates.GStringTemplateScript79._getTemplate_closure1._closure10._closure23c666b99054fd4a95
groovy.tmp.templates.GStringTemplateScript79._getTemplate_closure1._closure113551becc22b9d7f0
groovy.tmp.templates.GStringTemplateScript79._getTemplate_closure1._closure127488e32b1cb422b2
groovy.tmp.templates.GStringTemplateScript79._getTemplate_closure1._closure1360e2450265c946c5
groovy.tmp.templates.GStringTemplateScript79._getTemplate_closure1._closure13._closure2611769bc6dc6ad3f9
groovy.tmp.templates.GStringTemplateScript79._getTemplate_closure1._closure13._closure27e488628d79c19a49
groovy.tmp.templates.GStringTemplateScript79._getTemplate_closure1._closure13._closure29cb00cc1099ee0918
groovy.tmp.templates.GStringTemplateScript79._getTemplate_closure1._closure13._closure301e21d7bff433297d
groovy.tmp.templates.GStringTemplateScript79._getTemplate_closure1._closure13._closure31a35f4f1db7e2d29a
groovy.tmp.templates.GStringTemplateScript79._getTemplate_closure1._closure22b8dbfbd3ae46f7f
groovy.tmp.templates.GStringTemplateScript79._getTemplate_closure1._closure39b10ad711b262f25
groovy.tmp.templates.GStringTemplateScript79._getTemplate_closure1._closure453bac0308bc43155
groovy.tmp.templates.GStringTemplateScript79._getTemplate_closure1._closure53ac9ab931e3776cc
groovy.tmp.templates.GStringTemplateScript79._getTemplate_closure1._closure5._closure14d6d98b0b347f35ff
groovy.tmp.templates.GStringTemplateScript79._getTemplate_closure1._closure6f1ba1aa11f4deef6
groovy.tmp.templates.GStringTemplateScript79._getTemplate_closure1._closure7c558e3648ff4e26f
groovy.tmp.templates.GStringTemplateScript79._getTemplate_closure1._closure7._closure15962ffc48a62bd671
groovy.tmp.templates.GStringTemplateScript79._getTemplate_closure1._closure8dd9f99f2c9b0f62c
groovy.tmp.templates.GStringTemplateScript79._getTemplate_closure1._closure91e7b2522b9459bb1
groovy.tmp.templates.GStringTemplateScript79._getTemplate_closure1._closure9._closure16ab6e5ac1257b7e14
groovy.tmp.templates.GStringTemplateScript79._getTemplate_closure1._closure9._closure175d8bc415c4f86cc1
groovy.tmp.templates.GStringTemplateScript84a281ad780c6215d
groovy.tmp.templates.GStringTemplateScript8._getTemplate_closure1a9812f33bec13e12
groovy.tmp.templates.GStringTemplateScript8._getTemplate_closure1._closure10926a75b9e38214be
groovy.tmp.templates.GStringTemplateScript8._getTemplate_closure1._closure114b0479534cb337f6
groovy.tmp.templates.GStringTemplateScript8._getTemplate_closure1._closure127a2886b48fffa78f
groovy.tmp.templates.GStringTemplateScript8._getTemplate_closure1._closure1347daf92b9aacc7ab
groovy.tmp.templates.GStringTemplateScript8._getTemplate_closure1._closure13._closure26472e547e86d518a9
groovy.tmp.templates.GStringTemplateScript8._getTemplate_closure1._closure13._closure274bf56056504c337d
groovy.tmp.templates.GStringTemplateScript8._getTemplate_closure1._closure13._closure29208f4c952449f35c
groovy.tmp.templates.GStringTemplateScript8._getTemplate_closure1._closure13._closure301520304f05b828ba
groovy.tmp.templates.GStringTemplateScript8._getTemplate_closure1._closure13._closure31f7ee75aca1b43daa
groovy.tmp.templates.GStringTemplateScript8._getTemplate_closure1._closure27a6c5604906db93a
groovy.tmp.templates.GStringTemplateScript8._getTemplate_closure1._closure39c9dc1bc97f4564b
groovy.tmp.templates.GStringTemplateScript8._getTemplate_closure1._closure4af5017395a237101
groovy.tmp.templates.GStringTemplateScript8._getTemplate_closure1._closure57aad853ef974abd9
groovy.tmp.templates.GStringTemplateScript8._getTemplate_closure1._closure5._closure145eb2107d5c2bff61
groovy.tmp.templates.GStringTemplateScript8._getTemplate_closure1._closure6ae1b15dba9606eec
groovy.tmp.templates.GStringTemplateScript8._getTemplate_closure1._closure73b01605156601f53
groovy.tmp.templates.GStringTemplateScript8._getTemplate_closure1._closure7._closure15b2775ed2df4ae927
groovy.tmp.templates.GStringTemplateScript8._getTemplate_closure1._closure846fc548aaefd9d62
groovy.tmp.templates.GStringTemplateScript8._getTemplate_closure1._closure9224eba3a5b700910
groovy.tmp.templates.GStringTemplateScript8._getTemplate_closure1._closure9._closure16b758effba71b5ca5
groovy.tmp.templates.GStringTemplateScript8._getTemplate_closure1._closure9._closure17eb7d22f8e907d6fa
groovy.tmp.templates.GStringTemplateScript8064c5c976c5f9b47f
groovy.tmp.templates.GStringTemplateScript80._getTemplate_closure1546d5daa8814c055
groovy.tmp.templates.GStringTemplateScript80._getTemplate_closure1._closure1071ea885d5c56ade9
groovy.tmp.templates.GStringTemplateScript80._getTemplate_closure1._closure11a443353ceab0f977
groovy.tmp.templates.GStringTemplateScript80._getTemplate_closure1._closure11._closure2433515d247c7d4bb6
groovy.tmp.templates.GStringTemplateScript80._getTemplate_closure1._closure11._closure25e4e514d7d4ccbb25
groovy.tmp.templates.GStringTemplateScript80._getTemplate_closure1._closure1273009b52fedbee44
groovy.tmp.templates.GStringTemplateScript80._getTemplate_closure1._closure13dbd3b4dcd1961be4
groovy.tmp.templates.GStringTemplateScript80._getTemplate_closure1._closure13._closure26294390d2adaa820a
groovy.tmp.templates.GStringTemplateScript80._getTemplate_closure1._closure13._closure26._closure32532efe16439b9fb6
[Flattened JaCoCo coverage-session listing: each line is a class name followed by its 16-hex-digit class ID. Covers generated groovy.tmp.templates.GStringTemplateScript* template closures, bundled groovyjarjarantlr4.v4.runtime.* and groovyjarjarasm.asm.* classes, and the it.* Spock integration test specs (Calculus_Stress_Test, Cross_Device_Sliced_Tensor_System_Test, Cross_Device_Spec).]
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure2926c3cd64f8db8fa8
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure306750939ddaf1520a
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure319ff055c2650a7819
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure32f0b02e5cd072820c
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure33447d847c23e79809
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure34ffa5b9d6568a2eb0
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure351b46e91ed7bfdae8
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure36ce2f724ade56665b
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure378370b02d8f47f290
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure382d5a37f1995dce39
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure3952b726fc8f07973d
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure400912735522112bc6
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure41405d991892717dca
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure428f001b755a55e674
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure43293ac30aca733bdf
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure4468bf0001e5ac4cab
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure45772492c38bad02ff
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure46a58311524697579c
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure47fbf4772e7a77b7f5
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure48a310349cb414352c
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure493a3fe37e17c55935
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure5099b18190bf05baba
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure5137ee441023b934cf
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure5215c3c66beee758c1
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure53bb7f221ed2de9a4a
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure54b2f17a3ae3f17ce3
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure55a2fecb803b48b5c3
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure56425e88b287954c56
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure57395721bf46afad23
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure58016463b643a5aef5
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure59c7851045ead5c40a
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure60c9ef6d7f0b6893c8
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure6192ea654d8f6e4972
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure625126d387e4b2fff4
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure633ab31039e0c9698f
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure6471e12c7995c0e65b
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure65165cd9913ddbeb83
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure66305f8db70f998242
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure672e2f80576511df4d
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure68db6fc019afb2ca1a
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure698c3cb24c4c1b7a0d
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure705ed4e885d869ecc1
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure7139289e8e70cffa96
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure72f72aa69451ae3add
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure735194c6572853eb83
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure741756ba13b3483014
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure75702d080c7183e023
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure7652008a77bcdd8c2d
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure778d82642b9ca5dfa9
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure78e8bf8b485f54b1f4
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure79d08ae62815f29aae
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure800e67675316c08a39
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure8129b6391fa35cfa8d
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure820b9bbb646e029683
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure830587a5cb39606aad
it.Cross_Device_Spec._cleanup_closure35ee8a400f338ed03
it.Cross_Device_Spec._setup_closure17c06c3e217973df3
it.Cross_Device_Spec._setup_closure2beb56ac082cabe2c
it.Eleven_Lines_NN_System_Spec3d25b4077871cc1e
it.Eleven_Lines_NN_System_Spec.__spock_feature_0_0_closure14c6c38e7663a592f
it.Eleven_Lines_NN_System_Spec.__spock_feature_0_0_closure2189f354f2c3dc548
it.Eleven_Lines_NN_System_Spec.__spock_feature_0_0_closure36b72ccbcb8cd9d66
it.Eleven_Lines_NN_System_Spec.__spock_feature_0_0_closure4d03c2079564a3076
it.Eleven_Lines_NN_System_Spec.__spock_feature_0_0_closure5a3d1d98ac2ba6858
it.Eleven_Lines_NN_System_Spec.__spock_feature_0_1_closure10a4a36cdf8f0eec48
it.Eleven_Lines_NN_System_Spec.__spock_feature_0_1_closure6700d375691ebec53
it.Eleven_Lines_NN_System_Spec.__spock_feature_0_1_closure737ae91780d29cdac
it.Eleven_Lines_NN_System_Spec.__spock_feature_0_1_closure82340b33e447a8415
it.Eleven_Lines_NN_System_Spec.__spock_feature_0_1_closure9070e40fed2a5a5e0
it.Eleven_Lines_NN_System_Spec.__spock_feature_0_3_closure11399f007a30f8e81e
it.Eleven_Lines_NN_System_Spec.__spock_feature_0_3_closure12f16762e0cdd0d372
it.Eleven_Lines_NN_System_Spec.__spock_feature_0_3_closure1376004276669ee072
it.Eleven_Lines_NN_System_Spec.__spock_feature_0_3_closure148058a6ca37d68070
jdk.nio.zipfs.ZipCoder191f0507a47fea3b
jdk.nio.zipfs.ZipCoder.UTF8f8f119087a982b5f
jdk.nio.zipfs.ZipConstants624503f314e94dd6
jdk.nio.zipfs.ZipDirectoryStream324de2ebec818480
jdk.nio.zipfs.ZipDirectoryStream.1c30d0e2084cdcbf9
jdk.nio.zipfs.ZipFileSystem6eaace325c477fc1
jdk.nio.zipfs.ZipFileSystem.ENDb2e387c50e212c78
jdk.nio.zipfs.ZipFileSystem.Entrya2dba157ef2e2ef0
jdk.nio.zipfs.ZipFileSystem.IndexNodeb4ee3bd988c23360
jdk.nio.zipfs.ZipFileSystem.ParentLookup9bd545d1d73a3787
jdk.nio.zipfs.ZipFileSystemProvidera057a5ebf6539577
jdk.nio.zipfs.ZipPathcc1ad1205c32b235
jdk.nio.zipfs.ZipUtils32eeecbfc1fb3f55
kotlin.Paire100b24a4c325b6d
kotlin.TuplesKte7971603d2212261
kotlin._Assertionsb229726f0965c435
kotlin.annotation.AnnotationRetention5eab1506549a0bb4
kotlin.annotation.AnnotationTarget6e891352593a72d2
kotlin.collections.ArraysKt___ArraysJvmKtd0bba98108b6e8a6
kotlin.collections.ArraysKt___ArraysKt629710fec9be504e
kotlin.collections.ArraysUtilJVM9595b65dba34e5b6
kotlin.collections.CollectionsKt__CollectionsJVMKt9349d63e0c317a38
kotlin.collections.CollectionsKt__CollectionsKtd86eb1665e8d2000
kotlin.collections.CollectionsKt__IterablesKtb9445f45f9493df0
kotlin.collections.CollectionsKt___CollectionsKt26e57119d4f5d539
kotlin.collections.IntIterator398d12bf64b21ae2
kotlin.internal.ProgressionUtilKt4a790089b5d5521e
kotlin.jvm.internal.Intrinsics8aa8b05ed43cfd35
kotlin.jvm.internal.Lambda96797d3f1cb70af4
kotlin.ranges.IntProgressionbb7245ad46d13720
kotlin.ranges.IntProgression.Companion871137d96bea6bb7
kotlin.ranges.IntProgressionIterator1b0a5b9a7cda1519
kotlin.ranges.IntRangeadc68e8bd82461ed
kotlin.ranges.IntRange.Companionb682faf658e8a02e
kotlin.ranges.RangesKt___RangesKtf4369ffaaf16b082
kotlin.sequences.SequencesKt___SequencesKt0f0e219bcfaf0ff4
kotlin.sequences.TransformingSequenceeb560009f8ff634e
kotlin.sequences.TransformingSequence.iterator.16dde59aae7a6f0af
kotlin.text.CharsKt__CharJVMKtb8a074a973cbeee8
kotlin.text.DelimitedRangesSequence7d4527bd1241c14f
kotlin.text.DelimitedRangesSequence.iterator.16ba5542cd0950335
kotlin.text.StringsKt__IndentKtef46628fe5377225
kotlin.text.StringsKt__IndentKt.getIndentFunction.1c957427320bb1200
kotlin.text.StringsKt__StringBuilderKt95e7e8958538d572
kotlin.text.StringsKt__StringsJVMKt2d8bcc47125514cb
kotlin.text.StringsKt__StringsKtd98ed848fcc3161f
kotlin.text.StringsKt__StringsKt.rangesDelimitedBy.4428b7edef7ac764a
kotlin.text.StringsKt__StringsKt.splitToSequence.148df4be90fd2def4
kotlin.text.StringsKt___StringsKtbd4e7bc8194af12f
net.bytebuddy.ByteBuddy33fbc0829b8e2652
net.bytebuddy.ClassFileVersion041e75a4a43bf8ae
net.bytebuddy.ClassFileVersion.VersionLocator.Resolved5a5903eaf399d371
net.bytebuddy.ClassFileVersion.VersionLocator.Resolverffb81456e25e396b
net.bytebuddy.NamingStrategy.AbstractBase77e9d686c976f6e6
net.bytebuddy.NamingStrategy.Suffixing65bfa03c85847dc9
net.bytebuddy.NamingStrategy.Suffixing.BaseNameResolver.ForUnnamedType1fb9c5c929a4a173
net.bytebuddy.NamingStrategy.SuffixingRandomcdbdedcf0cea0a02
net.bytebuddy.TypeCached02df3631a17fa08
net.bytebuddy.TypeCache.LookupKeyb75da15a4577d948
net.bytebuddy.TypeCache.SimpleKey99731a44c3f39c30
net.bytebuddy.TypeCache.Sort3f135d4f310abf3c
net.bytebuddy.TypeCache.Sort.13be4336e35a8cbfd
net.bytebuddy.TypeCache.Sort.25a2bb9e71930a24a
net.bytebuddy.TypeCache.Sort.35792db85826ac4ba
net.bytebuddy.TypeCache.StorageKeyda984e48de27d4a8
net.bytebuddy.TypeCache.WithInlineExpunction5c74d69cd94d649e
net.bytebuddy.asm.AsmVisitorWrapper.NoOpa613c160b15bbc65
net.bytebuddy.description.ByteCodeElement.Token.TokenList1070489264457774
net.bytebuddy.description.ModifierReviewable.AbstractBase0b625f401d945e23
net.bytebuddy.description.NamedElement.WithDescriptor69f25e85d31086f5
net.bytebuddy.description.TypeVariableSource.AbstractBase86aee374842b91be
net.bytebuddy.description.annotation.AnnotationDescription7e080fcc4ab41eb1
net.bytebuddy.description.annotation.AnnotationDescription.AbstractBase55a8b2f7b58a15aa
net.bytebuddy.description.annotation.AnnotationDescription.ForLoadedAnnotationa2b247526c4d26ca
net.bytebuddy.description.annotation.AnnotationList.AbstractBasec3dca45e359b717d
net.bytebuddy.description.annotation.AnnotationList.Empty10e1e01ec4afb6b0
net.bytebuddy.description.annotation.AnnotationList.Explicitb96636e855735fc3
net.bytebuddy.description.annotation.AnnotationList.ForLoadedAnnotationsa6be8b00fa72ab7a
net.bytebuddy.description.annotation.AnnotationSource.Empty034fcbd435657d97
net.bytebuddy.description.annotation.AnnotationValuee46e60f3e4357d8a
net.bytebuddy.description.annotation.AnnotationValue.AbstractBase6b46c288929d794a
net.bytebuddy.description.annotation.AnnotationValue.ForConstant650f7b88da7502df
net.bytebuddy.description.annotation.AnnotationValue.ForConstant.PropertyDelegate.ForNonArrayType8683233734d98d81
net.bytebuddy.description.annotation.AnnotationValue.ForConstant.PropertyDelegate.ForNonArrayType.1ecf694f5c718a013
net.bytebuddy.description.annotation.AnnotationValue.ForConstant.PropertyDelegate.ForNonArrayType.2113fe247f14fdcdd
net.bytebuddy.description.annotation.AnnotationValue.ForConstant.PropertyDelegate.ForNonArrayType.3ad40ce4c8d647d57
net.bytebuddy.description.annotation.AnnotationValue.ForConstant.PropertyDelegate.ForNonArrayType.4649136274570c878
net.bytebuddy.description.annotation.AnnotationValue.ForConstant.PropertyDelegate.ForNonArrayType.525519a3723562b18
net.bytebuddy.description.annotation.AnnotationValue.ForConstant.PropertyDelegate.ForNonArrayType.6d0a4ee1eb78e8925
net.bytebuddy.description.annotation.AnnotationValue.ForConstant.PropertyDelegate.ForNonArrayType.75cc6d38c7688ce9e
net.bytebuddy.description.annotation.AnnotationValue.ForConstant.PropertyDelegate.ForNonArrayType.8542fa217a5fe4c51
net.bytebuddy.description.annotation.AnnotationValue.ForConstant.PropertyDelegate.ForNonArrayType.99adc51229ebb26c9
net.bytebuddy.description.annotation.AnnotationValue.ForEnumerationDescription451401174e8ca82f
net.bytebuddy.description.annotation.AnnotationValue.ForTypeDescription256f9475d7baab5e
net.bytebuddy.description.enumeration.EnumerationDescription.AbstractBase36efae2fe3237ba9
net.bytebuddy.description.enumeration.EnumerationDescription.ForLoadedEnumeration5b47cbeca30adac0
net.bytebuddy.description.field.FieldDescription68bfcf27b64f643e
net.bytebuddy.description.field.FieldDescription.AbstractBase8e18b7d4e1ceddcb
net.bytebuddy.description.field.FieldDescription.InDefinedShape.AbstractBasee1174a0c69da5a57
net.bytebuddy.description.field.FieldDescription.Latentf267c31e54d89fa1
net.bytebuddy.description.field.FieldDescription.SignatureToken3fabeebea84ce146
net.bytebuddy.description.field.FieldDescription.Token3f20efc75bd15e42
net.bytebuddy.description.field.FieldList.AbstractBase78739d279005d8a4
net.bytebuddy.description.field.FieldList.ForTokensea98dba6ef4eb758
net.bytebuddy.description.method.MethodDescriptioncb9472a3dd295bbd
net.bytebuddy.description.method.MethodDescription.AbstractBase909086af904cf59b
net.bytebuddy.description.method.MethodDescription.ForLoadedConstructore3c79dd807083c08
net.bytebuddy.description.method.MethodDescription.ForLoadedMethodd9fe344c56539dc6
net.bytebuddy.description.method.MethodDescription.InDefinedShape.AbstractBase673ca3d2d56a4b0a
net.bytebuddy.description.method.MethodDescription.InDefinedShape.AbstractBase.ForLoadedExecutabledb01999a48adc399
net.bytebuddy.description.method.MethodDescription.Latent20e100c8a3802774
net.bytebuddy.description.method.MethodDescription.Latent.TypeInitializer87bee94b36e1d209
net.bytebuddy.description.method.MethodDescription.SignatureToken5888f2557f6a88e0
net.bytebuddy.description.method.MethodDescription.Tokenb268931f291edf88
net.bytebuddy.description.method.MethodDescription.TypeSubstituting8dc21d2e259d2c0f
net.bytebuddy.description.method.MethodDescription.TypeTokenf7f14b8ac76ebd98
net.bytebuddy.description.method.MethodList.AbstractBaseb054427f9b6a48f1
net.bytebuddy.description.method.MethodList.Explicitb03ab4c21a93dfd0
net.bytebuddy.description.method.MethodList.ForLoadedMethods38bd1bf17eb05676
net.bytebuddy.description.method.MethodList.ForTokens40aa960dc7616ac5
net.bytebuddy.description.method.MethodList.TypeSubstitutingf1f510557a04392e
net.bytebuddy.description.method.ParameterDescription.AbstractBase173e1a83772e6071
net.bytebuddy.description.method.ParameterDescription.ForLoadedParameter8dd9bfdcb695c00c
net.bytebuddy.description.method.ParameterDescription.ForLoadedParameter.OfConstructora18e1a81fc7465d0
net.bytebuddy.description.method.ParameterDescription.ForLoadedParameter.OfMethod811597af8855d53c
net.bytebuddy.description.method.ParameterDescription.InDefinedShape.AbstractBase717f5d8d90c005f1
net.bytebuddy.description.method.ParameterDescription.Latent1aa2e08f2ad0d5c2
net.bytebuddy.description.method.ParameterDescription.Token36549650fa40d54b
net.bytebuddy.description.method.ParameterDescription.Token.TypeList1890975119bdb094
net.bytebuddy.description.method.ParameterDescription.TypeSubstituting6cc95e3ea064743d
net.bytebuddy.description.method.ParameterList.AbstractBase6fe6f7a3a2c191ea
net.bytebuddy.description.method.ParameterList.Empty8f4a45d2f54ed28b
net.bytebuddy.description.method.ParameterList.Explicit.ForTypes75d84e0b4fcd99a9
net.bytebuddy.description.method.ParameterList.ForLoadedExecutable1456c072c3be7105
net.bytebuddy.description.method.ParameterList.ForLoadedExecutable.OfConstructor6d7eaa8911075319
net.bytebuddy.description.method.ParameterList.ForLoadedExecutable.OfMethodf0835708e2d15fb4
net.bytebuddy.description.method.ParameterList.ForTokensb77d0ee711552f0c
net.bytebuddy.description.method.ParameterList.TypeSubstituting293f1f350b97c439
net.bytebuddy.description.modifier.ModifierContributor.Resolver4c37457cc5fe415c
net.bytebuddy.description.modifier.SynchronizationState1ee1e76d573ad75b
net.bytebuddy.description.modifier.SyntheticState0ea0b3d14a159257
net.bytebuddy.description.modifier.TypeManifestation823497b74af56cf0
net.bytebuddy.description.modifier.Visibilityeddec8671a9488f2
net.bytebuddy.description.modifier.Visibility.1d7e383ada6123e01
net.bytebuddy.description.type.PackageDescription.AbstractBase21c62ace537a731c
net.bytebuddy.description.type.PackageDescription.ForLoadedPackagee09e684292c5d837
net.bytebuddy.description.type.PackageDescription.Simple69eda86ed42d2bb9
net.bytebuddy.description.type.RecordComponentList.AbstractBasefa2d664156de0c87
net.bytebuddy.description.type.RecordComponentList.ForTokensb72447d1fcbe18bd
net.bytebuddy.description.type.TypeDefinition.Sorte252ac8a021f4082
net.bytebuddy.description.type.TypeDefinition.SuperClassIteratordcc41092c6176f54
net.bytebuddy.description.type.TypeDescription556ed0842dcd3465
net.bytebuddy.description.type.TypeDescription.AbstractBase4c6c19763839e81a
net.bytebuddy.description.type.TypeDescription.AbstractBase.OfSimpleType69db0326a7bad734
net.bytebuddy.description.type.TypeDescription.ArrayProjection6b7a5c44df03385e
net.bytebuddy.description.type.TypeDescription.ForLoadedType1d01f9c5e8968a62
net.bytebuddy.description.type.TypeDescription.Generic56c70a1f7051116d
net.bytebuddy.description.type.TypeDescription.Generic.AbstractBasefd0c51f51c279532
net.bytebuddy.description.type.TypeDescription.Generic.AnnotationReader.Delegatorde36d9ad20e2b0f0
net.bytebuddy.description.type.TypeDescription.Generic.AnnotationReader.Delegator.Chained7558098cf910a484
net.bytebuddy.description.type.TypeDescription.Generic.AnnotationReader.Delegator.ForLoadedExecutableExceptionTyped5fff8bf03378188
net.bytebuddy.description.type.TypeDescription.Generic.AnnotationReader.Delegator.ForLoadedExecutableParameterType4996d253c41df4e9
net.bytebuddy.description.type.TypeDescription.Generic.AnnotationReader.Delegator.ForLoadedInterface4bff1f5d8d784c94
net.bytebuddy.description.type.TypeDescription.Generic.AnnotationReader.Delegator.ForLoadedMethodReturnType01b498011a8b99b9
net.bytebuddy.description.type.TypeDescription.Generic.AnnotationReader.Delegator.ForLoadedSuperClass437ce60855df41c8
net.bytebuddy.description.type.TypeDescription.Generic.AnnotationReader.Delegator.ForLoadedTypeVariable9eb4905762cd4d43
net.bytebuddy.description.type.TypeDescription.Generic.AnnotationReader.Delegator.Simple64d7d8c1c95ccbbb
net.bytebuddy.description.type.TypeDescription.Generic.AnnotationReader.ForComponentType8fcd1b7048059fc4
net.bytebuddy.description.type.TypeDescription.Generic.AnnotationReader.ForOwnerTypee1af0c43565cac6a
net.bytebuddy.description.type.TypeDescription.Generic.AnnotationReader.ForTypeArgument6058e72d25275a6e
net.bytebuddy.description.type.TypeDescription.Generic.AnnotationReader.ForTypeVariableBoundType7abf9451d0a396a7
net.bytebuddy.description.type.TypeDescription.Generic.AnnotationReader.ForTypeVariableBoundType.OfFormalTypeVariablea3a21029de090e64
net.bytebuddy.description.type.TypeDescription.Generic.AnnotationReader.ForWildcardUpperBoundType54ede85d3cdd8d27
net.bytebuddy.description.type.TypeDescription.Generic.AnnotationReader.NoOp1e053fccf208e232
net.bytebuddy.description.type.TypeDescription.Generic.LazyProjection9e800f4de9f8e67a
net.bytebuddy.description.type.TypeDescription.Generic.LazyProjection.ForLoadedReturnTyped1cbaafc701825f7
net.bytebuddy.description.type.TypeDescription.Generic.LazyProjection.ForLoadedSuperClasse8bb65841de31236
net.bytebuddy.description.type.TypeDescription.Generic.LazyProjection.OfConstructorParameter5458ef91ab867211
net.bytebuddy.description.type.TypeDescription.Generic.LazyProjection.OfMethodParameter166cd6a7c9bbe561
net.bytebuddy.description.type.TypeDescription.Generic.LazyProjection.WithEagerNavigationf8c6ed45e722570c
net.bytebuddy.description.type.TypeDescription.Generic.LazyProjection.WithEagerNavigation.OfAnnotatedElementf66f265575aff87f
net.bytebuddy.description.type.TypeDescription.Generic.LazyProjection.WithLazyNavigationce47793fa872e3e3
net.bytebuddy.description.type.TypeDescription.Generic.LazyProjection.WithLazyNavigation.OfAnnotatedElement3539fe966936ddc7
net.bytebuddy.description.type.TypeDescription.Generic.LazyProjection.WithResolvedErasure8c44b0d0f77a173e
net.bytebuddy.description.type.TypeDescription.Generic.OfGenericArray8c17323c9acd37d3
net.bytebuddy.description.type.TypeDescription.Generic.OfGenericArray.ForLoadedType062f3d6b0874a5c9
net.bytebuddy.description.type.TypeDescription.Generic.OfGenericArray.Latentfbae4dabdc6cfa54
net.bytebuddy.description.type.TypeDescription.Generic.OfNonGenericTypea4504dc6edd68cd6
net.bytebuddy.description.type.TypeDescription.Generic.OfNonGenericType.ForErasure3db1821ca68ddf5b
net.bytebuddy.description.type.TypeDescription.Generic.OfNonGenericType.ForLoadedType986e223380b52659
net.bytebuddy.description.type.TypeDescription.Generic.OfNonGenericType.ForReifiedErasure697dfa2b063e4765
net.bytebuddy.description.type.TypeDescription.Generic.OfNonGenericType.Latent37ef0e5fa19941b1
net.bytebuddy.description.type.TypeDescription.Generic.OfParameterizedTypef7f22a9eb23d0358
net.bytebuddy.description.type.TypeDescription.Generic.OfParameterizedType.ForGenerifiedErasuree0b46fdf5031ad5f
net.bytebuddy.description.type.TypeDescription.Generic.OfParameterizedType.ForLoadedType23a5bcdf87d9f76a
net.bytebuddy.description.type.TypeDescription.Generic.OfParameterizedType.ForLoadedType.ParameterArgumentTypeLista0644f6a2bfa2302
net.bytebuddy.description.type.TypeDescription.Generic.OfParameterizedType.ForReifiedTypedb27116a99bedbbc
net.bytebuddy.description.type.TypeDescription.Generic.OfParameterizedType.Latent16bfeb4720d944cd
net.bytebuddy.description.type.TypeDescription.Generic.OfTypeVariablea5c2d5f7fa973e0e
net.bytebuddy.description.type.TypeDescription.Generic.OfTypeVariable.ForLoadedTypecc1d9926ab18067d
net.bytebuddy.description.type.TypeDescription.Generic.OfTypeVariable.ForLoadedType.TypeVariableBoundListc5b1a0f68bc48e88
net.bytebuddy.description.type.TypeDescription.Generic.OfTypeVariable.Symbolic0bd5958a8dbdf6f1
net.bytebuddy.description.type.TypeDescription.Generic.OfTypeVariable.WithAnnotationOverlay4f1a3b7626a1e4a3
net.bytebuddy.description.type.TypeDescription.Generic.OfWildcardTypea01bd5a6a70766e4
net.bytebuddy.description.type.TypeDescription.Generic.OfWildcardType.ForLoadedType1112963cd5495a63
net.bytebuddy.description.type.TypeDescription.Generic.OfWildcardType.ForLoadedType.WildcardLowerBoundTypeListbf9a39c834687055
net.bytebuddy.description.type.TypeDescription.Generic.OfWildcardType.ForLoadedType.WildcardUpperBoundTypeListb8d9619983ce411c
net.bytebuddy.description.type.TypeDescription.Generic.OfWildcardType.Latentac2dd5836f375954
net.bytebuddy.description.type.TypeDescription.Generic.Visitor.ForRawType81cdbc3e9e323aa7
net.bytebuddy.description.type.TypeDescription.Generic.Visitor.ForSignatureVisitor87943f76596da78f
net.bytebuddy.description.type.TypeDescription.Generic.Visitor.ForSignatureVisitor.OfTypeArgumentcf8c08548af1937e
net.bytebuddy.description.type.TypeDescription.Generic.Visitor.Reducing6ba7675a4c6f9e76
net.bytebuddy.description.type.TypeDescription.Generic.Visitor.Reifying42a0cb6efaa79250
net.bytebuddy.description.type.TypeDescription.Generic.Visitor.Reifying.14d33792e587387fc
net.bytebuddy.description.type.TypeDescription.Generic.Visitor.Reifying.2b6f2d04f33382efe
net.bytebuddy.description.type.TypeDescription.Generic.Visitor.Substitutorc89ede7a99bb5588
net.bytebuddy.description.type.TypeDescription.Generic.Visitor.Substitutor.ForAttachmentb59a8a86fd7eb87c
net.bytebuddy.description.type.TypeDescription.Generic.Visitor.Substitutor.ForDetachmenteed7781949d0b181
net.bytebuddy.description.type.TypeDescription.Generic.Visitor.Substitutor.ForTypeVariableBinding847d85e296b6129b
net.bytebuddy.description.type.TypeDescription.Generic.Visitor.Substitutor.ForTypeVariableBinding.RetainedMethodTypeVariable79fdd4bceff9a4bb
net.bytebuddy.description.type.TypeDescription.Generic.Visitor.Substitutor.ForTypeVariableBinding.TypeVariableSubstitutorf0ccee83b552d2da
net.bytebuddy.description.type.TypeDescription.Generic.Visitor.Substitutor.WithoutTypeSubstitution7cf629e1c38332c8
net.bytebuddy.description.type.TypeDescription.Generic.Visitor.TypeErasingf24b138643bc0d75
net.bytebuddy.description.type.TypeDescription.Latentf096a156f0f28049
net.bytebuddy.description.type.TypeListda60a7cfb717d0a8
net.bytebuddy.description.type.TypeList.AbstractBase4700315364477234
net.bytebuddy.description.type.TypeList.Empty59d00ad7b53c811a
net.bytebuddy.description.type.TypeList.Explicit81495dfc3a359dfe
net.bytebuddy.description.type.TypeList.ForLoadedTypes4356a7471aec6f20
net.bytebuddy.description.type.TypeList.Generic.AbstractBase5376e1d2298a6512
net.bytebuddy.description.type.TypeList.Generic.Emptydf9431d33e66dbb4
net.bytebuddy.description.type.TypeList.Generic.Explicit1ab8c93e54ee2ac6
net.bytebuddy.description.type.TypeList.Generic.ForDetachedTypes1b6544725fdb45a6
net.bytebuddy.description.type.TypeList.Generic.ForDetachedTypes.OfTypeVariables05b85732c40f12b7
net.bytebuddy.description.type.TypeList.Generic.ForDetachedTypes.OfTypeVariables.AttachedTypeVariable8133514c5d90955c
net.bytebuddy.description.type.TypeList.Generic.ForDetachedTypes.WithResolvedErasure3ae7efc80de7c3db
net.bytebuddy.description.type.TypeList.Generic.ForLoadedTypesc603bfa8790b860c
net.bytebuddy.description.type.TypeList.Generic.ForLoadedTypes.OfTypeVariablesd713fc161a8b3c83
net.bytebuddy.description.type.TypeList.Generic.OfConstructorExceptionTypes41a985dd07ed867c
net.bytebuddy.description.type.TypeList.Generic.OfConstructorExceptionTypes.TypeProjectiona9a42d16f46764ff
net.bytebuddy.description.type.TypeList.Generic.OfLoadedInterfaceTypes99d4f3faf0ed1337
net.bytebuddy.description.type.TypeList.Generic.OfLoadedInterfaceTypes.TypeProjection7f6f3c7654719119
net.bytebuddy.description.type.TypeList.Generic.OfMethodExceptionTypes74966b175ac75ab9
net.bytebuddy.description.type.TypeList.Generic.OfMethodExceptionTypes.TypeProjection2d651d381fd3d0a8
net.bytebuddy.description.type.TypeVariableToken0b904605bce2d673
net.bytebuddy.dynamic.DynamicType.Builder.AbstractBase75270e145cd63906
net.bytebuddy.dynamic.DynamicType.Builder.AbstractBase.Adapter07bd203beeec5267
net.bytebuddy.dynamic.DynamicType.Builder.AbstractBase.Adapter.FieldDefinitionAdapter9f5304401d876ae6
net.bytebuddy.dynamic.DynamicType.Builder.AbstractBase.Adapter.MethodDefinitionAdaptere3a71944fa583d5d
net.bytebuddy.dynamic.DynamicType.Builder.AbstractBase.Adapter.MethodDefinitionAdapter.AnnotationAdaptere7b8fe4351f0538b
net.bytebuddy.dynamic.DynamicType.Builder.AbstractBase.Adapter.MethodDefinitionAdapter.SimpleParameterAnnotationAdapterceb79ec5f6c81c06
net.bytebuddy.dynamic.DynamicType.Builder.AbstractBase.Adapter.MethodMatchAdapterb8cc50e40a151a71
net.bytebuddy.dynamic.DynamicType.Builder.AbstractBase.Adapter.MethodMatchAdapter.AnnotationAdapter94485cf54fc4bcd7
net.bytebuddy.dynamic.DynamicType.Builder.AbstractBase.Adapter.OptionalMethodMatchAdaptere422d5f0c3871df1
net.bytebuddy.dynamic.DynamicType.Builder.AbstractBase.Delegatora05019b4f9f0052c
net.bytebuddy.dynamic.DynamicType.Builder.FieldDefinition.Optional.AbstractBasea32270443a37f43d
net.bytebuddy.dynamic.DynamicType.Builder.FieldDefinition.Optional.Valuable.AbstractBase8d99c467e26879f0
net.bytebuddy.dynamic.DynamicType.Builder.FieldDefinition.Optional.Valuable.AbstractBase.Adapterd9f7441f5ac2676d
net.bytebuddy.dynamic.DynamicType.Builder.MethodDefinition.AbstractBasecccc417ee29a5050
net.bytebuddy.dynamic.DynamicType.Builder.MethodDefinition.AbstractBase.Adaptere54b40849750fde9
net.bytebuddy.dynamic.DynamicType.Builder.MethodDefinition.ExceptionDefinition.AbstractBased5887f98f1e6e4b8
net.bytebuddy.dynamic.DynamicType.Builder.MethodDefinition.ImplementationDefinition.AbstractBase8dc07cfd239f0cc9
net.bytebuddy.dynamic.DynamicType.Builder.MethodDefinition.ParameterDefinition.AbstractBaseb54aa1953aca6e53
net.bytebuddy.dynamic.DynamicType.Builder.MethodDefinition.ParameterDefinition.Initial.AbstractBaseadcc1447655c94c5
net.bytebuddy.dynamic.DynamicType.Builder.MethodDefinition.ParameterDefinition.Simple.AbstractBasece697a0a4ca85f68
net.bytebuddy.dynamic.DynamicType.Builder.MethodDefinition.ParameterDefinition.Simple.Annotatable.AbstractBase5cb69ac9bda4bbd9
net.bytebuddy.dynamic.DynamicType.Builder.MethodDefinition.ParameterDefinition.Simple.Annotatable.AbstractBase.Adapterc234394f562d67d3
net.bytebuddy.dynamic.DynamicType.Builder.MethodDefinition.ReceiverTypeDefinition.AbstractBasef3a488041b9994c6
net.bytebuddy.dynamic.DynamicType.Builder.MethodDefinition.TypeVariableDefinition.AbstractBase1e9f7d7e33a9b032
net.bytebuddy.dynamic.DynamicType.Default1735fd7e9316f797
net.bytebuddy.dynamic.DynamicType.Default.Loaded3ce15da51999a8ca
net.bytebuddy.dynamic.DynamicType.Default.Unloaded7dc216f75a73e9b5
net.bytebuddy.dynamic.TargetType26c139b5f2f58862
net.bytebuddy.dynamic.Transformer.Compounda5a52522b43091ef
net.bytebuddy.dynamic.Transformer.ForMethod22ab387d59f6c970
net.bytebuddy.dynamic.Transformer.ForMethod.MethodModifierTransformer829c18ff395159ba
net.bytebuddy.dynamic.Transformer.ForMethod.TransformedMethod083bfd5734c4504d
net.bytebuddy.dynamic.Transformer.ForMethod.TransformedMethod.AttachmentVisitor43014c50e1310fbf
net.bytebuddy.dynamic.Transformer.ForMethod.TransformedMethod.TransformedParameter84642c4a6f0d1bdc
net.bytebuddy.dynamic.Transformer.ForMethod.TransformedMethod.TransformedParameterList54d561afbee57f99
net.bytebuddy.dynamic.Transformer.NoOp49cd89a2b3b975a3
net.bytebuddy.dynamic.TypeResolutionStrategy.Passived5784ee7fb36ce53
net.bytebuddy.dynamic.VisibilityBridgeStrategy.Defaultae8d9f7fd85c6aad
net.bytebuddy.dynamic.VisibilityBridgeStrategy.Default.163c0d42260c7599e
net.bytebuddy.dynamic.VisibilityBridgeStrategy.Default.2a8389e9d32c4ecd7
net.bytebuddy.dynamic.VisibilityBridgeStrategy.Default.330f7afc5a8be245c
net.bytebuddy.dynamic.loading.ClassInjector.AbstractBase331215a38873f162
net.bytebuddy.dynamic.loading.ClassInjector.UsingLookup68987d870211e579
net.bytebuddy.dynamic.loading.ClassLoadingStrategy17fb081ccc92f99c
net.bytebuddy.dynamic.loading.ClassLoadingStrategy.UsingLookup2907954eb970dda6
net.bytebuddy.dynamic.scaffold.ClassWriterStrategy.Defaultf0774d4bbe85a809
net.bytebuddy.dynamic.scaffold.ClassWriterStrategy.Default.109a3c2cfe88a5ae4
net.bytebuddy.dynamic.scaffold.ClassWriterStrategy.Default.276afb59bd5abdd5f
net.bytebuddy.dynamic.scaffold.ClassWriterStrategy.FrameComputingClassWriter6dcf362306ddc5d0
net.bytebuddy.dynamic.scaffold.FieldLocator.AbstractBasedb8c5004661a0bd8
net.bytebuddy.dynamic.scaffold.FieldLocator.ForClassHierarchy0e8431af1152b965
net.bytebuddy.dynamic.scaffold.FieldLocator.ForClassHierarchy.Factoryd97235dbbc3871e9
net.bytebuddy.dynamic.scaffold.FieldLocator.Resolution.Simple7e3dca01a01498d1
net.bytebuddy.dynamic.scaffold.FieldRegistry.Defaultcc5265630d0906f2
net.bytebuddy.dynamic.scaffold.FieldRegistry.Default.Compiled00933225bc77b175
net.bytebuddy.dynamic.scaffold.FieldRegistry.Default.Compiled.Entry0ec1361a69a955fd
net.bytebuddy.dynamic.scaffold.FieldRegistry.Default.Entrya7413622fd851aa9
net.bytebuddy.dynamic.scaffold.InstrumentedType.Default23fe149436dbe425
net.bytebuddy.dynamic.scaffold.InstrumentedType.Factory.Defaultcd900ae01efd903f
net.bytebuddy.dynamic.scaffold.InstrumentedType.Factory.Default.1a7ce85bb2f37ff77
net.bytebuddy.dynamic.scaffold.InstrumentedType.Factory.Default.2ad157a47dace4f55
net.bytebuddy.dynamic.scaffold.MethodGraph.Compilerfc88be698cc4a50f
net.bytebuddy.dynamic.scaffold.MethodGraph.Compiler.AbstractBasead55505e167100d9
net.bytebuddy.dynamic.scaffold.MethodGraph.Compiler.Defaulta37bac0e0eceb0c9
net.bytebuddy.dynamic.scaffold.MethodGraph.Compiler.Default.Harmonizer.ForJavaMethod4b92bfc82ab49b25
net.bytebuddy.dynamic.scaffold.MethodGraph.Compiler.Default.Harmonizer.ForJavaMethod.Tokene2da236960e0a189
net.bytebuddy.dynamic.scaffold.MethodGraph.Compiler.Default.Key421619c0f44567f3
net.bytebuddy.dynamic.scaffold.MethodGraph.Compiler.Default.Key.Detached82540bbf94c15922
net.bytebuddy.dynamic.scaffold.MethodGraph.Compiler.Default.Key.Harmonized5d9ad1d55d82a355
net.bytebuddy.dynamic.scaffold.MethodGraph.Compiler.Default.Key.Storef948e4de58324a0f
net.bytebuddy.dynamic.scaffold.MethodGraph.Compiler.Default.Key.Store.Entry.Ambiguous9e2928a385a525ac
net.bytebuddy.dynamic.scaffold.MethodGraph.Compiler.Default.Key.Store.Entry.Initial1fc852958287c36a
net.bytebuddy.dynamic.scaffold.MethodGraph.Compiler.Default.Key.Store.Entry.Resolved6672a261c5f5dd2e
net.bytebuddy.dynamic.scaffold.MethodGraph.Compiler.Default.Key.Store.Entry.Resolved.Node0f0b18948cce4159
net.bytebuddy.dynamic.scaffold.MethodGraph.Compiler.Default.Key.Store.Graphf50e2614e64a132c
net.bytebuddy.dynamic.scaffold.MethodGraph.Compiler.Default.Merger.Directional0ba0f74ab7d66be7
net.bytebuddy.dynamic.scaffold.MethodGraph.Linked.Delegation7341085250d5f338
net.bytebuddy.dynamic.scaffold.MethodGraph.Node.Sort8e20af4bf9dad8a0
net.bytebuddy.dynamic.scaffold.MethodGraph.Node.Unresolvedc42332646fb3e771
net.bytebuddy.dynamic.scaffold.MethodGraph.NodeList3f435ec381113f00
net.bytebuddy.dynamic.scaffold.MethodRegistry.Default35ae92274e85ac88
net.bytebuddy.dynamic.scaffold.MethodRegistry.Default.Compileddd840dc4ea29fc06
net.bytebuddy.dynamic.scaffold.MethodRegistry.Default.Compiled.Entry827864e42dc177c2
net.bytebuddy.dynamic.scaffold.MethodRegistry.Default.Entry66b9b2c39c4a08ee
net.bytebuddy.dynamic.scaffold.MethodRegistry.Default.Prepared3c270a20a21353d7
net.bytebuddy.dynamic.scaffold.MethodRegistry.Default.Prepared.Entrye96586202cb119f0
net.bytebuddy.dynamic.scaffold.MethodRegistry.Handler.ForImplementationea77701fcbc47e2c
net.bytebuddy.dynamic.scaffold.MethodRegistry.Handler.ForImplementation.Compiled7b000ab44a4af2cc
net.bytebuddy.dynamic.scaffold.RecordComponentRegistry.Defaulteec49897d441dcbe
net.bytebuddy.dynamic.scaffold.RecordComponentRegistry.Default.Compiled1d64a300c478cbd4
net.bytebuddy.dynamic.scaffold.TypeInitializer.Drain.Defaulta3bc2736d5ad95f5
net.bytebuddy.dynamic.scaffold.TypeInitializer.Noned062b02ed3f4d342
net.bytebuddy.dynamic.scaffold.TypeInitializer.Simple3429322f4d42e2d4
net.bytebuddy.dynamic.scaffold.TypeValidationb9ab70dc0d5e3c60
net.bytebuddy.dynamic.scaffold.TypeWriter.Defaultb1fd1390cd945339
net.bytebuddy.dynamic.scaffold.TypeWriter.Default.ClassDumpAction.Dispatcher.Disabledc24d1fbb13ce4662
net.bytebuddy.dynamic.scaffold.TypeWriter.Default.ForCreatione49a97cbadf48f53
net.bytebuddy.dynamic.scaffold.TypeWriter.Default.UnresolvedTypecaecc137e21b50cd
net.bytebuddy.dynamic.scaffold.TypeWriter.Default.ValidatingClassVisitor020d1828495e6c79
net.bytebuddy.dynamic.scaffold.TypeWriter.FieldPool.Record.ForExplicitField6db88b0864fac583
net.bytebuddy.dynamic.scaffold.TypeWriter.FieldPool.Record.ForImplicitField4d4ebb611a56f520
net.bytebuddy.dynamic.scaffold.TypeWriter.MethodPool.Record.AccessBridgeWrapperd430962a4ae98a53
net.bytebuddy.dynamic.scaffold.TypeWriter.MethodPool.Record.AccessBridgeWrapper.AccessorBridge0aa41f269546d97e
net.bytebuddy.dynamic.scaffold.TypeWriter.MethodPool.Record.AccessBridgeWrapper.BridgeTarget8583bd798234d430
net.bytebuddy.dynamic.scaffold.TypeWriter.MethodPool.Record.ForDefinedMethod676c34a7d131152e
net.bytebuddy.dynamic.scaffold.TypeWriter.MethodPool.Record.ForDefinedMethod.WithBody175949ac2c965a4e
net.bytebuddy.dynamic.scaffold.TypeWriter.MethodPool.Record.ForNonImplementedMethod1c0608714c9bf7eb
net.bytebuddy.dynamic.scaffold.TypeWriter.MethodPool.Record.Sortce35c6847ba06f22
net.bytebuddy.dynamic.scaffold.subclass.ConstructorStrategy.Default0d114e09a2faac83
net.bytebuddy.dynamic.scaffold.subclass.ConstructorStrategy.Default.116fc5c99e02d7f9f
net.bytebuddy.dynamic.scaffold.subclass.ConstructorStrategy.Default.2dd199479878d5739
net.bytebuddy.dynamic.scaffold.subclass.ConstructorStrategy.Default.3792ea5ce51475037
net.bytebuddy.dynamic.scaffold.subclass.ConstructorStrategy.Default.498fceb895a262b45
net.bytebuddy.dynamic.scaffold.subclass.ConstructorStrategy.Default.5f0898605f9020c16
net.bytebuddy.dynamic.scaffold.subclass.SubclassDynamicTypeBuilder0426764fb123b82d
net.bytebuddy.dynamic.scaffold.subclass.SubclassDynamicTypeBuilder.InstrumentableMatcher7bbde13577295432
net.bytebuddy.dynamic.scaffold.subclass.SubclassImplementationTarget17f509a8b52b39f3
net.bytebuddy.dynamic.scaffold.subclass.SubclassImplementationTarget.Factoryf6c0a700d93e9d10
net.bytebuddy.dynamic.scaffold.subclass.SubclassImplementationTarget.OriginTypeResolver282c73cc811d5b71
net.bytebuddy.dynamic.scaffold.subclass.SubclassImplementationTarget.OriginTypeResolver.12eb773d398b87160
net.bytebuddy.dynamic.scaffold.subclass.SubclassImplementationTarget.OriginTypeResolver.2903a99da03746eb8
net.bytebuddy.implementation.FieldAccessor0174e94238af9d2f
net.bytebuddy.implementation.FieldAccessor.FieldLocation.Relativee3f1a92ea73df3a5
net.bytebuddy.implementation.FieldAccessor.FieldLocation.Relative.Preparedc55029896988613b
net.bytebuddy.implementation.FieldAccessor.FieldNameExtractor.ForFixedValue37f6e575b29ba057
net.bytebuddy.implementation.FieldAccessor.ForImplicitProperty623c50de803e8dff
net.bytebuddy.implementation.FieldAccessor.ForImplicitProperty.Appenderdb2e4aeceee38d5f
net.bytebuddy.implementation.Implementation.Context.Default9436fa8001a9c80c
net.bytebuddy.implementation.Implementation.Context.Default.AbstractPropertyAccessorMethod64d4d27f3bf59427
net.bytebuddy.implementation.Implementation.Context.Default.AccessorMethod0644ef9990e5aa98
net.bytebuddy.implementation.Implementation.Context.Default.AccessorMethodDelegationea3233ec172d7765
net.bytebuddy.implementation.Implementation.Context.Default.CacheValueField0a79d6bd2bd7e867
net.bytebuddy.implementation.Implementation.Context.Default.DelegationRecord490687e4353099fe
net.bytebuddy.implementation.Implementation.Context.Default.Factory655386a029730f91
net.bytebuddy.implementation.Implementation.Context.Default.FieldCacheEntry9b63ee13339c36d9
net.bytebuddy.implementation.Implementation.Context.ExtractableView.AbstractBase072a87c3ee3b1c13
net.bytebuddy.implementation.Implementation.SpecialMethodInvocation.AbstractBase99ac1d4463895d3f
net.bytebuddy.implementation.Implementation.SpecialMethodInvocation.Illegalfe05bdf1b81d2463
net.bytebuddy.implementation.Implementation.SpecialMethodInvocation.Simple7916d516ba029853
net.bytebuddy.implementation.Implementation.Target.AbstractBase891cf9f2a321fafd
net.bytebuddy.implementation.Implementation.Target.AbstractBase.DefaultMethodInvocation29b19b204be139f3
net.bytebuddy.implementation.Implementation.Target.AbstractBase.DefaultMethodInvocation.13ba9a760aa49a971
net.bytebuddy.implementation.Implementation.Target.AbstractBase.DefaultMethodInvocation.28279f38afb254f72
net.bytebuddy.implementation.LoadedTypeInitializer.NoOp1af8ca0d9b7adbe8
net.bytebuddy.implementation.MethodAccessorFactory.AccessTypea8b1b417256441f1
net.bytebuddy.implementation.MethodDelegationa34026f28347c757
net.bytebuddy.implementation.MethodDelegation.Appenderc0bd24965973bfa5
net.bytebuddy.implementation.MethodDelegation.ImplementationDelegate.Compiled.ForStaticCallf75269107418d3a6
net.bytebuddy.implementation.MethodDelegation.ImplementationDelegate.ForStaticMethod44d456b1507ffbbe
net.bytebuddy.implementation.MethodDelegation.WithCustomPropertiesf108031992acdb03
net.bytebuddy.implementation.SuperMethodCall48a9709638c71f00
net.bytebuddy.implementation.SuperMethodCall.Appender1278488d60ed8e86
net.bytebuddy.implementation.SuperMethodCall.Appender.TerminationHandler35d2e0ef6d7f630d
net.bytebuddy.implementation.SuperMethodCall.Appender.TerminationHandler.105664af3a3b6738b
net.bytebuddy.implementation.SuperMethodCall.Appender.TerminationHandler.2be670f96c6d93831
net.bytebuddy.implementation.attribute.AnnotationAppender.Default7787cf7f483d6685
net.bytebuddy.implementation.attribute.AnnotationAppender.ForTypeAnnotations040d5aab72de4582
net.bytebuddy.implementation.attribute.AnnotationAppender.Target.OnField52ad3ce83f52621f
net.bytebuddy.implementation.attribute.AnnotationAppender.Target.OnMethodb2534f024a4880dd
net.bytebuddy.implementation.attribute.AnnotationAppender.Target.OnMethodParameterc9f39d80b694c092
net.bytebuddy.implementation.attribute.AnnotationAppender.Target.OnTypedb8f4f1dbbcf3c3e
net.bytebuddy.implementation.attribute.AnnotationRetention6dca59a58d56874f
net.bytebuddy.implementation.attribute.AnnotationValueFilter.Default190882f8828de18a
net.bytebuddy.implementation.attribute.AnnotationValueFilter.Default.1593737e47cc84848
net.bytebuddy.implementation.attribute.AnnotationValueFilter.Default.2a61861baa0bc96ee
net.bytebuddy.implementation.attribute.FieldAttributeAppender.ForInstrumentedFieldca19f51ae14fb7b4
net.bytebuddy.implementation.attribute.MethodAttributeAppender.ForInstrumentedMethod4e40a53e08d4cbbb
net.bytebuddy.implementation.attribute.MethodAttributeAppender.ForInstrumentedMethod.1a3b87b1a75d290fd
net.bytebuddy.implementation.attribute.MethodAttributeAppender.ForInstrumentedMethod.210e734a991eea3bf
net.bytebuddy.implementation.attribute.MethodAttributeAppender.NoOpaa6841038c96aed0
net.bytebuddy.implementation.attribute.TypeAttributeAppender.ForInstrumentedType537a1dac83c99ae9
net.bytebuddy.implementation.auxiliary.AuxiliaryType577555a7861b5701
net.bytebuddy.implementation.auxiliary.AuxiliaryType.NamingStrategy.SuffixingRandom9ff4d19573d987f3
net.bytebuddy.implementation.bind.ArgumentTypeResolver74973272be85ce17
net.bytebuddy.implementation.bind.ArgumentTypeResolver.ParameterIndexTokena8052b758f0a0361
net.bytebuddy.implementation.bind.DeclaringTypeResolverd1000b5d5bf7bd79
net.bytebuddy.implementation.bind.MethodDelegationBinder.154de841f73ee4eae
net.bytebuddy.implementation.bind.MethodDelegationBinder.AmbiguityResolver7d40b5a2d5d69397
net.bytebuddy.implementation.bind.MethodDelegationBinder.AmbiguityResolver.Compoundeab4a548d2693cd2
net.bytebuddy.implementation.bind.MethodDelegationBinder.AmbiguityResolver.Resolutione8ca39d95b4ade42
net.bytebuddy.implementation.bind.MethodDelegationBinder.BindingResolver.Defaulted3f9e212bdf4696
net.bytebuddy.implementation.bind.MethodDelegationBinder.MethodBinding.Builderffaacecf2e1956bd
net.bytebuddy.implementation.bind.MethodDelegationBinder.MethodBinding.Builder.Buildfbe15ed2c0b7c26f
net.bytebuddy.implementation.bind.MethodDelegationBinder.MethodBinding.Illegalca301be97fe35cde
net.bytebuddy.implementation.bind.MethodDelegationBinder.MethodInvoker.Simpledafea2ba3b2f164b
net.bytebuddy.implementation.bind.MethodDelegationBinder.ParameterBinding.Anonymous30b0f734840f8b2c
net.bytebuddy.implementation.bind.MethodDelegationBinder.ParameterBinding.Illegal470dc52d77c3898e
net.bytebuddy.implementation.bind.MethodDelegationBinder.Processor1dd9238ba412581f
net.bytebuddy.implementation.bind.MethodDelegationBinder.TerminationHandler.Default946265fda2ca27e8
net.bytebuddy.implementation.bind.MethodDelegationBinder.TerminationHandler.Default.1db109132d7373fda
net.bytebuddy.implementation.bind.MethodDelegationBinder.TerminationHandler.Default.2cb3895b610bd15d5
net.bytebuddy.implementation.bind.MethodNameEqualityResolver65a8d1431b34fdcd
net.bytebuddy.implementation.bind.ParameterLengthResolver58a025cd0f10dff1
net.bytebuddy.implementation.bind.annotation.AllArguments.Assignmentbfcd0244baa95f1b
net.bytebuddy.implementation.bind.annotation.AllArguments.Binder7ed5bf64ac194c84
net.bytebuddy.implementation.bind.annotation.Argument.Binder9d613cfc7a8f0cd6
net.bytebuddy.implementation.bind.annotation.BindingPriority.Resolver2fd170c18c979895
net.bytebuddy.implementation.bind.annotation.Default.Binderfdd8dd2baa86d3db
net.bytebuddy.implementation.bind.annotation.DefaultCall.Binderd7e4b58cec267a0e
net.bytebuddy.implementation.bind.annotation.DefaultMethod.Binder03d209c7b50b3b07
net.bytebuddy.implementation.bind.annotation.Empty.Binder6af2e8e3cdad25b3
net.bytebuddy.implementation.bind.annotation.FieldValue.Binderffe1f66fdf57240f
net.bytebuddy.implementation.bind.annotation.FieldValue.Binder.Delegateb16d4f0b5def41e9
net.bytebuddy.implementation.bind.annotation.IgnoreForBinding.Verifierf6eaa0a37f2ce769
net.bytebuddy.implementation.bind.annotation.Morph.Binder221cfe9babd6b9a8
net.bytebuddy.implementation.bind.annotation.Morph.Binder.RedirectionProxyf26adba7c4fb3aad
net.bytebuddy.implementation.bind.annotation.Morph.Binder.RedirectionProxy.InstanceFieldConstructor82810ce8d8d3c7f6
net.bytebuddy.implementation.bind.annotation.Morph.Binder.RedirectionProxy.InstanceFieldConstructor.Appender5a9bd62b26738c1e
net.bytebuddy.implementation.bind.annotation.Morph.Binder.RedirectionProxy.MethodCall90fc1233517a7f78
net.bytebuddy.implementation.bind.annotation.Morph.Binder.RedirectionProxy.MethodCall.Appender30ecfa36670ca8d7
net.bytebuddy.implementation.bind.annotation.Morph.Binder.RedirectionProxy.StaticFieldConstructor4944bebd8a49b603
net.bytebuddy.implementation.bind.annotation.Origin.Binder58bfe04015269f97
net.bytebuddy.implementation.bind.annotation.RuntimeType.Verifier79ef98193cf36f83
net.bytebuddy.implementation.bind.annotation.StubValue.Binderc5dcbbaafc956a20
net.bytebuddy.implementation.bind.annotation.Super.Binder159db3adf8f80917
net.bytebuddy.implementation.bind.annotation.SuperCall.Binderd504027b57aeebbe
net.bytebuddy.implementation.bind.annotation.SuperMethod.Binder787b81ea7c3cf9d1
net.bytebuddy.implementation.bind.annotation.TargetMethodAnnotationDrivenBindera9644f0a487b56f8
net.bytebuddy.implementation.bind.annotation.TargetMethodAnnotationDrivenBinder.DelegationProcessor08e777de45b651f6
net.bytebuddy.implementation.bind.annotation.TargetMethodAnnotationDrivenBinder.DelegationProcessor.Handler.Boundfe4b74c6469cb373
net.bytebuddy.implementation.bind.annotation.TargetMethodAnnotationDrivenBinder.DelegationProcessor.Handler.Unbound53b08d554175038c
net.bytebuddy.implementation.bind.annotation.TargetMethodAnnotationDrivenBinder.ParameterBinder6f273cd5a9428c36
net.bytebuddy.implementation.bind.annotation.TargetMethodAnnotationDrivenBinder.ParameterBinder.ForFieldBinding49c4acf91fc87123
net.bytebuddy.implementation.bind.annotation.TargetMethodAnnotationDrivenBinder.Recordf5597b43768b5a7b
net.bytebuddy.implementation.bind.annotation.This.Binderb3e837fb5b95fa04
net.bytebuddy.implementation.bytecode.ByteCodeAppender.Compound0f6ce72d7ea48338
net.bytebuddy.implementation.bytecode.ByteCodeAppender.Simple3d7cd79d87926f75
net.bytebuddy.implementation.bytecode.ByteCodeAppender.Size897030ac0b46252c
net.bytebuddy.implementation.bytecode.Duplication87726ed8bb6e39de
net.bytebuddy.implementation.bytecode.Duplication.16cbf4aae44bb9c6a
net.bytebuddy.implementation.bytecode.Duplication.2204abf23cbf37c68
net.bytebuddy.implementation.bytecode.Duplication.30631976e078609bd
net.bytebuddy.implementation.bytecode.Removal6d539a300caa5092
net.bytebuddy.implementation.bytecode.Removal.1ab763f3b743f79a5
net.bytebuddy.implementation.bytecode.Removal.2fd766afb93ac2a09
net.bytebuddy.implementation.bytecode.StackManipulation.AbstractBase31ac4a0904ac3e09
net.bytebuddy.implementation.bytecode.StackManipulation.Compound96939a22aac4c91b
net.bytebuddy.implementation.bytecode.StackManipulation.Illegald75e2eb0d394f6c3
net.bytebuddy.implementation.bytecode.StackManipulation.Sizee69b15cd3e8d4461
net.bytebuddy.implementation.bytecode.StackManipulation.Trivial56f2787cdbce4d40
net.bytebuddy.implementation.bytecode.StackSize80f94e8effa2f7bb
net.bytebuddy.implementation.bytecode.TypeCreation4865d2e454028bc1
net.bytebuddy.implementation.bytecode.assign.Assigner7e67d52e9390b000
net.bytebuddy.implementation.bytecode.assign.Assigner.Typingb09adf7fa17d04b8
net.bytebuddy.implementation.bytecode.assign.TypeCasting1a445bd188e2931d
net.bytebuddy.implementation.bytecode.assign.primitive.PrimitiveBoxingDelegatedac9a66a711d1bdb
net.bytebuddy.implementation.bytecode.assign.primitive.PrimitiveBoxingDelegate.BoxingStackManipulation96e0379915a5a251
net.bytebuddy.implementation.bytecode.assign.primitive.PrimitiveTypeAwareAssignerc888a19b998b7769
net.bytebuddy.implementation.bytecode.assign.primitive.PrimitiveUnboxingDelegate14e47d44e5cebb1d
net.bytebuddy.implementation.bytecode.assign.primitive.PrimitiveUnboxingDelegate.ImplicitlyTypedUnboxingResponsibleadf7d49661fe0566
net.bytebuddy.implementation.bytecode.assign.primitive.VoidAwareAssigner3df36760b29d387a
net.bytebuddy.implementation.bytecode.assign.reference.GenericTypeAwareAssigner3623cb487284bb53
net.bytebuddy.implementation.bytecode.assign.reference.ReferenceTypeAwareAssigner59b5f6f8641c87f2
net.bytebuddy.implementation.bytecode.collection.ArrayAccessee5ac198c6726a54
net.bytebuddy.implementation.bytecode.collection.ArrayAccess.Loader4998ef35821cb2a3
net.bytebuddy.implementation.bytecode.collection.ArrayFactoryf2dcfb1430649b3e
net.bytebuddy.implementation.bytecode.collection.ArrayFactory.ArrayCreator7ff584cc516e3f40
net.bytebuddy.implementation.bytecode.collection.ArrayFactory.ArrayCreator.ForReferenceType2ffee25860dde2e1
net.bytebuddy.implementation.bytecode.collection.ArrayFactory.ArrayStackManipulation2420354f9fdfb502
net.bytebuddy.implementation.bytecode.constant.ClassConstant8c2c8e360f844ad5
net.bytebuddy.implementation.bytecode.constant.ClassConstant.ForReferenceTypea779a54b4d7fcd6c
net.bytebuddy.implementation.bytecode.constant.DefaultValue56544d5987e5a6d8
net.bytebuddy.implementation.bytecode.constant.DoubleConstant829c95b7b67e95cf
net.bytebuddy.implementation.bytecode.constant.FloatConstantbdee038754940fff
net.bytebuddy.implementation.bytecode.constant.IntegerConstant58a28f871a6a0499
net.bytebuddy.implementation.bytecode.constant.LongConstant113f925135fa3020
net.bytebuddy.implementation.bytecode.constant.MethodConstant55d1fac9a2312bd2
net.bytebuddy.implementation.bytecode.constant.MethodConstant.CachedMethod927dce16203d5f6c
net.bytebuddy.implementation.bytecode.constant.MethodConstant.ForMethod5c66dba4a8bfbcea
net.bytebuddy.implementation.bytecode.constant.NullConstant9cf4bfc5c52a2517
net.bytebuddy.implementation.bytecode.constant.TextConstant76b9599de59f2aeb
net.bytebuddy.implementation.bytecode.member.FieldAccesse098860a4703e90a
net.bytebuddy.implementation.bytecode.member.FieldAccess.AccessDispatcher20c90535a547e3cd
net.bytebuddy.implementation.bytecode.member.FieldAccess.AccessDispatcher.AbstractFieldInstruction75724b7b6b2e4a66
net.bytebuddy.implementation.bytecode.member.FieldAccess.AccessDispatcher.FieldGetInstructionadcac7724ac0272c
net.bytebuddy.implementation.bytecode.member.FieldAccess.AccessDispatcher.FieldPutInstructionaeaedb775e139b65
net.bytebuddy.implementation.bytecode.member.MethodInvocationccdb8e0f61d03f72
net.bytebuddy.implementation.bytecode.member.MethodInvocation.Invocation7edd2eb29addcb20
net.bytebuddy.implementation.bytecode.member.MethodInvocation.OfGenericMethod5254ba2d92c92d3b
net.bytebuddy.implementation.bytecode.member.MethodReturn3cbfd6833fda70dd
net.bytebuddy.implementation.bytecode.member.MethodVariableAccess7ec211e72c6c3719
[Generated JaCoCo coverage-report class-id listing (each entry is a class or method name with its hex class id fused on) covering the net.bytebuddy.*, neureka.*, and org.apache.* packages of the committed coverage HTML; retained here only as this placeholder.]
org.apache.groovy.json.internal.BaseJsonParserf5bbeb8a01f53f98
org.apache.groovy.json.internal.ByteScanner9f19b62afc96c6ee
org.apache.groovy.json.internal.CacheType13486b0dfc74c4b2
org.apache.groovy.json.internal.CharBuf0d194836b0727cfd
org.apache.groovy.json.internal.CharScannerfbf9a08251df7791
org.apache.groovy.json.internal.Chr57bd2788ec22e6d3
org.apache.groovy.json.internal.FastStringUtils497e9aa587de930f
org.apache.groovy.json.internal.FastStringUtils.ServiceHolderc8ae267a17e78e24
org.apache.groovy.json.internal.JsonParserCharArray66c048f50e64723f
org.apache.groovy.json.internal.LazyMapf3b9cd56252f0cb7
org.apache.groovy.json.internal.SimpleCache7290c462f2c145e2
org.apache.groovy.json.internal.Sys68c4db56d93e3bcd
org.apache.groovy.parser.antlr4.AbstractLexerae13a169aea8d237
org.apache.groovy.parser.antlr4.AbstractParser97a0e933c249392d
org.apache.groovy.parser.antlr4.Antlr4ParserPlugin38e6808ae0a2bab7
org.apache.groovy.parser.antlr4.Antlr4PluginFactoryfabc9c16749f4887
org.apache.groovy.parser.antlr4.AstBuilder3d65336586a7aac2
org.apache.groovy.parser.antlr4.AstBuilder.17784df756aa1d268
org.apache.groovy.parser.antlr4.AstBuilder.DeclarationListStatementb01936d54b561002
org.apache.groovy.parser.antlr4.GroovyLangLexer20e861043914f40e
org.apache.groovy.parser.antlr4.GroovyLangLexer.PositionAdjustingLexerATNSimulator84d506555ce4b8c8
org.apache.groovy.parser.antlr4.GroovyLangParser557c60358e21afce
org.apache.groovy.parser.antlr4.GroovyLexerdf900b993d8abc8f
org.apache.groovy.parser.antlr4.GroovyLexer.Paren6e4371045f6215eb
org.apache.groovy.parser.antlr4.GroovyParserdc9101a5deab4fcc
org.apache.groovy.parser.antlr4.GroovyParser.AdditiveExprAltContextdd064c701d5b26a0
org.apache.groovy.parser.antlr4.GroovyParser.AnnotationContext618fb64839055a49
org.apache.groovy.parser.antlr4.GroovyParser.AnnotationNameContextc3cec23e121113b2
org.apache.groovy.parser.antlr4.GroovyParser.AnnotationsOptContext5c5ed34f7876dacb
org.apache.groovy.parser.antlr4.GroovyParser.AnonymousInnerClassDeclarationContext820bf9a41f6e1ce3
org.apache.groovy.parser.antlr4.GroovyParser.ArgumentsContextfd7964d29500d661
org.apache.groovy.parser.antlr4.GroovyParser.ArrayInitializerContextefcdcb3ed8bc2b79
org.apache.groovy.parser.antlr4.GroovyParser.AssertStatementContextbdeec491a55bb527
org.apache.groovy.parser.antlr4.GroovyParser.AssertStmtAltContext9a042b6f35da72c0
org.apache.groovy.parser.antlr4.GroovyParser.AssignmentExprAltContext5de9d7a0612ebe48
org.apache.groovy.parser.antlr4.GroovyParser.BlockContext567aa072ab5f9ea5
org.apache.groovy.parser.antlr4.GroovyParser.BlockStatementContextb59c6c011781e332
org.apache.groovy.parser.antlr4.GroovyParser.BlockStatementsContextd1cb076979703c83
org.apache.groovy.parser.antlr4.GroovyParser.BlockStatementsOptContextbead028b5085367f
org.apache.groovy.parser.antlr4.GroovyParser.BlockStmtAltContext58722788acb3f4d4
org.apache.groovy.parser.antlr4.GroovyParser.BooleanLiteralAltContext8fefae6dbd15bf32
org.apache.groovy.parser.antlr4.GroovyParser.BreakStatementContext72342e8230ad8e3f
org.apache.groovy.parser.antlr4.GroovyParser.BreakStmtAltContext095701d6f53cae8d
org.apache.groovy.parser.antlr4.GroovyParser.BuiltInTypeContextb9e88bc39c876264
org.apache.groovy.parser.antlr4.GroovyParser.BuiltInTypePrmrAltContext462287c1ba8e01ef
org.apache.groovy.parser.antlr4.GroovyParser.CastExprAltContext30ebbef185e54289
org.apache.groovy.parser.antlr4.GroovyParser.CastParExpressionContext2bb268e3ab5e8f86
org.apache.groovy.parser.antlr4.GroovyParser.CatchClauseContext2dbf16419c77570c
org.apache.groovy.parser.antlr4.GroovyParser.CatchTypeContext55125162fbe94059
org.apache.groovy.parser.antlr4.GroovyParser.ClassBodyContext84a3e66238bb58f5
org.apache.groovy.parser.antlr4.GroovyParser.ClassBodyDeclarationContext99e60b66df45e4f3
org.apache.groovy.parser.antlr4.GroovyParser.ClassDeclarationContextee58053883f09f53
org.apache.groovy.parser.antlr4.GroovyParser.ClassNameContextf7154bc5b00814ea
org.apache.groovy.parser.antlr4.GroovyParser.ClassOrInterfaceModifierContexte9d1a280d8ad0514
org.apache.groovy.parser.antlr4.GroovyParser.ClassOrInterfaceModifiersContext609678f0d21f9eda
org.apache.groovy.parser.antlr4.GroovyParser.ClassOrInterfaceModifiersOptContextfe4209fba627fe9b
org.apache.groovy.parser.antlr4.GroovyParser.ClassOrInterfaceTypeContext66fdfce74cce0742
org.apache.groovy.parser.antlr4.GroovyParser.ClassicalForControlContext7831be20593971a1
org.apache.groovy.parser.antlr4.GroovyParser.ClosureContextfe4964aec9557837
org.apache.groovy.parser.antlr4.GroovyParser.ClosureOrLambdaExpressionContext3c0ea287705cfd79
org.apache.groovy.parser.antlr4.GroovyParser.ClosureOrLambdaExpressionPrmrAltContexte69bc698135daca8
org.apache.groovy.parser.antlr4.GroovyParser.CommandArgumentContext2aef0dedee746b95
org.apache.groovy.parser.antlr4.GroovyParser.CommandExprAltContext3e531d005fc90c26
org.apache.groovy.parser.antlr4.GroovyParser.CommandExpressionContext6c5ab69f829d7362
org.apache.groovy.parser.antlr4.GroovyParser.CompilationUnitContext00e549aadc8c8827
org.apache.groovy.parser.antlr4.GroovyParser.ConditionalExprAltContext081e6b3b2a19964e
org.apache.groovy.parser.antlr4.GroovyParser.ConditionalStatementContext5f1061a3096f9f5d
org.apache.groovy.parser.antlr4.GroovyParser.ConditionalStmtAltContext883c9582524419c1
org.apache.groovy.parser.antlr4.GroovyParser.CreatedNameContextfeeed34772f081c0
org.apache.groovy.parser.antlr4.GroovyParser.CreatorContext54f9a69fec30927c
org.apache.groovy.parser.antlr4.GroovyParser.DimContext53b521dc1593935e
org.apache.groovy.parser.antlr4.GroovyParser.ElementValueArrayInitializerContexte6b559e9fe0e0541
org.apache.groovy.parser.antlr4.GroovyParser.ElementValueContext8c6b9c9caf27d9d4
org.apache.groovy.parser.antlr4.GroovyParser.ElementValuesContextb2707110308075eb
org.apache.groovy.parser.antlr4.GroovyParser.EmptyDimsContext192ecdc6b17d4cf5
org.apache.groovy.parser.antlr4.GroovyParser.EmptyDimsOptContext32a47d238865d595
org.apache.groovy.parser.antlr4.GroovyParser.EmptyStmtAltContext519e86814dca8d0b
org.apache.groovy.parser.antlr4.GroovyParser.EnhancedArgumentListElementContextd037c17e71f9e10a
org.apache.groovy.parser.antlr4.GroovyParser.EnhancedArgumentListInParContexte8027fd6d1f3318a
org.apache.groovy.parser.antlr4.GroovyParser.EnhancedForControlContext7ddd3685d8cad0a3
org.apache.groovy.parser.antlr4.GroovyParser.EnhancedStatementExpressionContext69394e832cab5c26
org.apache.groovy.parser.antlr4.GroovyParser.EqualityExprAltContextdc175ac1079c7386
org.apache.groovy.parser.antlr4.GroovyParser.ExpressionContext5b32f683b31e7341
org.apache.groovy.parser.antlr4.GroovyParser.ExpressionInParContext2684ca30045b847e
org.apache.groovy.parser.antlr4.GroovyParser.ExpressionListContext75c11df39825996b
org.apache.groovy.parser.antlr4.GroovyParser.ExpressionListElementContext5c9329f1b4e1f415
org.apache.groovy.parser.antlr4.GroovyParser.ExpressionStmtAltContext862cffc51eeb24d9
org.apache.groovy.parser.antlr4.GroovyParser.FieldDeclarationContext6d97b7d65e5c4ba4
org.apache.groovy.parser.antlr4.GroovyParser.FloatingPointLiteralAltContext5f552dde11490e93
org.apache.groovy.parser.antlr4.GroovyParser.ForControlContextc4cdebae38d5ad77
org.apache.groovy.parser.antlr4.GroovyParser.ForInitContextc4251aa11caa2a16
org.apache.groovy.parser.antlr4.GroovyParser.ForStmtAltContext9fb9df7cb52d57ec
org.apache.groovy.parser.antlr4.GroovyParser.ForUpdateContextceec8f9489de377a
org.apache.groovy.parser.antlr4.GroovyParser.FormalParameterContext7e57384bd07f3d15
org.apache.groovy.parser.antlr4.GroovyParser.FormalParameterListContextb51a3b3397aaad12
org.apache.groovy.parser.antlr4.GroovyParser.FormalParametersContextfbf344facdf9549f
org.apache.groovy.parser.antlr4.GroovyParser.GroovyParserRuleContextf3e995e5003d2ac5
org.apache.groovy.parser.antlr4.GroovyParser.GstringContext61e9d8ad3d5dbeff
org.apache.groovy.parser.antlr4.GroovyParser.GstringPathContext9d718359c8305e68
org.apache.groovy.parser.antlr4.GroovyParser.GstringPrmrAltContext14e16fb84d08a435
org.apache.groovy.parser.antlr4.GroovyParser.GstringValueContexte33f36e25ad71bbb
org.apache.groovy.parser.antlr4.GroovyParser.IdentifierContextc4594def67401ba0
org.apache.groovy.parser.antlr4.GroovyParser.IdentifierPrmrAltContextf82cf08675801539
org.apache.groovy.parser.antlr4.GroovyParser.IfElseStatementContext94f4e3d08737ef37
org.apache.groovy.parser.antlr4.GroovyParser.ImportDeclarationContext7dab86c240a56e24
org.apache.groovy.parser.antlr4.GroovyParser.InclusiveOrExprAltContextf97b960e0a63b6dd
org.apache.groovy.parser.antlr4.GroovyParser.IndexPropertyArgsContextdb84112b6c449738
org.apache.groovy.parser.antlr4.GroovyParser.IntegerLiteralAltContextfd22750ede87b184
org.apache.groovy.parser.antlr4.GroovyParser.KeywordsContext2faeb70c1a46a88f
org.apache.groovy.parser.antlr4.GroovyParser.LabeledStmtAltContexta7177a95f3d91f1f
org.apache.groovy.parser.antlr4.GroovyParser.LambdaBodyContextea67fffa66b997b7
org.apache.groovy.parser.antlr4.GroovyParser.ListContext26a409fd65e92f4a
org.apache.groovy.parser.antlr4.GroovyParser.ListPrmrAltContextca0fef9c1495d478
org.apache.groovy.parser.antlr4.GroovyParser.LiteralContext40b0d24274198e72
org.apache.groovy.parser.antlr4.GroovyParser.LiteralPrmrAltContextc7f209a899f67463
org.apache.groovy.parser.antlr4.GroovyParser.LocalVariableDeclarationContext1fbe999a4de3f73c
org.apache.groovy.parser.antlr4.GroovyParser.LocalVariableDeclarationStmtAltContext867d792fa004215b
org.apache.groovy.parser.antlr4.GroovyParser.LogicalAndExprAltContexteed02043579f8a11
org.apache.groovy.parser.antlr4.GroovyParser.LogicalOrExprAltContext8c84e6744c8a4121
org.apache.groovy.parser.antlr4.GroovyParser.LoopStatementContext8c99c2d0b5662a75
org.apache.groovy.parser.antlr4.GroovyParser.LoopStmtAltContext300d4f80877721c1
org.apache.groovy.parser.antlr4.GroovyParser.MapContextc5eb2b9ffdc775c0
org.apache.groovy.parser.antlr4.GroovyParser.MapEntryContext73840403044b1615
org.apache.groovy.parser.antlr4.GroovyParser.MapEntryLabelContexte552f7dd07405488
org.apache.groovy.parser.antlr4.GroovyParser.MapEntryListContext476ddc8e1570cbcf
org.apache.groovy.parser.antlr4.GroovyParser.MapPrmrAltContexta0c21325e7f94f92
org.apache.groovy.parser.antlr4.GroovyParser.MemberDeclarationContext76476bcad5f9cf3a
org.apache.groovy.parser.antlr4.GroovyParser.MethodBodyContext874b40b3df80507a
org.apache.groovy.parser.antlr4.GroovyParser.MethodDeclarationContext2fb2610e48c382cc
org.apache.groovy.parser.antlr4.GroovyParser.MethodNameContexta629435a16ab8b45
org.apache.groovy.parser.antlr4.GroovyParser.ModifierContext3aa64aa7abbd3889
org.apache.groovy.parser.antlr4.GroovyParser.ModifiersContextd08dbc27b83d8f40
org.apache.groovy.parser.antlr4.GroovyParser.ModifiersOptContext6d56331eed5d9a66
org.apache.groovy.parser.antlr4.GroovyParser.MultiplicativeExprAltContext7922f811e15eaf09
org.apache.groovy.parser.antlr4.GroovyParser.NamePartContextd9f6f8fb14b76b09
org.apache.groovy.parser.antlr4.GroovyParser.NewPrmrAltContextb597bd50eba6e0e6
org.apache.groovy.parser.antlr4.GroovyParser.NlsContext2fdf643b79718533
org.apache.groovy.parser.antlr4.GroovyParser.NullLiteralAltContext149aa79a7d54c21c
org.apache.groovy.parser.antlr4.GroovyParser.PackageDeclarationContexta2b70d9781f4c63b
org.apache.groovy.parser.antlr4.GroovyParser.ParExpressionContext5473d836fa689fee
org.apache.groovy.parser.antlr4.GroovyParser.ParenPrmrAltContexte996e4f684ee7979
org.apache.groovy.parser.antlr4.GroovyParser.PathElementContext15ea18bff6a158cf
org.apache.groovy.parser.antlr4.GroovyParser.PathExpressionContexte8baf19bafeb6da2
org.apache.groovy.parser.antlr4.GroovyParser.PostfixExprAltContext4beba501d0d5f4b7
org.apache.groovy.parser.antlr4.GroovyParser.PostfixExpressionContext85c649d09d64009d
org.apache.groovy.parser.antlr4.GroovyParser.PowerExprAltContextb4ac5998ca284bff
org.apache.groovy.parser.antlr4.GroovyParser.PrimaryContexta571b69ac1761d21
org.apache.groovy.parser.antlr4.GroovyParser.PrimitiveTypeContext427c5b610d8e852b
org.apache.groovy.parser.antlr4.GroovyParser.QualifiedClassNameContext3518ec262c5d852c
org.apache.groovy.parser.antlr4.GroovyParser.QualifiedNameContext2258f8a31564cdb1
org.apache.groovy.parser.antlr4.GroovyParser.QualifiedNameElementContextbe9d0b2ee38f7e38
org.apache.groovy.parser.antlr4.GroovyParser.QualifiedNameElementsContextda0a5e35978bf057
org.apache.groovy.parser.antlr4.GroovyParser.QualifiedStandardClassNameContext8c45f87071f8f7e7
org.apache.groovy.parser.antlr4.GroovyParser.RelationalExprAltContext19defcbe6fb2a548
org.apache.groovy.parser.antlr4.GroovyParser.ReturnStmtAltContext39e8e34a052524ca
org.apache.groovy.parser.antlr4.GroovyParser.ReturnTypeContext9574f414c9cfd0e8
org.apache.groovy.parser.antlr4.GroovyParser.RparenContext0d9a41d13b53082a
org.apache.groovy.parser.antlr4.GroovyParser.ScriptStatementContext8228e6eaa73fff6d
org.apache.groovy.parser.antlr4.GroovyParser.ScriptStatementsContext8f23d0fa1f21e231
org.apache.groovy.parser.antlr4.GroovyParser.SepContextfbbb2be5eeddc7e4
org.apache.groovy.parser.antlr4.GroovyParser.ShiftExprAltContextbc78a5ef993a51cf
org.apache.groovy.parser.antlr4.GroovyParser.StandardLambdaExpressionContext56cda7a28e539498
org.apache.groovy.parser.antlr4.GroovyParser.StandardLambdaParametersContext8d619fdb6ecb8377
org.apache.groovy.parser.antlr4.GroovyParser.StatementContextb1942dcdd995c90a
org.apache.groovy.parser.antlr4.GroovyParser.StatementExpressionContextc34f25d808e8f3f7
org.apache.groovy.parser.antlr4.GroovyParser.StringLiteralAltContext72482e52285b7f89
org.apache.groovy.parser.antlr4.GroovyParser.StringLiteralContext2b3200619be272ad
org.apache.groovy.parser.antlr4.GroovyParser.SuperPrmrAltContext2792d8d6d2039741
org.apache.groovy.parser.antlr4.GroovyParser.SwitchBlockStatementGroupContext241ccd0d2e82c42e
org.apache.groovy.parser.antlr4.GroovyParser.SwitchLabelContextdeca17a6a1f01ce7
org.apache.groovy.parser.antlr4.GroovyParser.SwitchStatementContext5947957d8b512d32
org.apache.groovy.parser.antlr4.GroovyParser.ThisPrmrAltContext99c2818d9f389200
org.apache.groovy.parser.antlr4.GroovyParser.ThrowStmtAltContext51aa36fdd2560545
org.apache.groovy.parser.antlr4.GroovyParser.TryCatchStatementContext535985fcb82fa41f
org.apache.groovy.parser.antlr4.GroovyParser.TryCatchStmtAltContext8b0a01c96eac70ce
org.apache.groovy.parser.antlr4.GroovyParser.TypeArgumentContext106032999f84b5b7
org.apache.groovy.parser.antlr4.GroovyParser.TypeArgumentsContext44765f3fbcb15f5f
org.apache.groovy.parser.antlr4.GroovyParser.TypeArgumentsOrDiamondContextd038c853c6d54218
org.apache.groovy.parser.antlr4.GroovyParser.TypeContextb484767f76403f63
org.apache.groovy.parser.antlr4.GroovyParser.TypeDeclarationContext886f32b1a31eb33c
org.apache.groovy.parser.antlr4.GroovyParser.TypeListContext48afd3a8b2f507c5
org.apache.groovy.parser.antlr4.GroovyParser.UnaryAddExprAltContext2e470cc253d0ca83
org.apache.groovy.parser.antlr4.GroovyParser.UnaryNotExprAltContext89466ef48d352a38
org.apache.groovy.parser.antlr4.GroovyParser.VariableDeclarationContext1cda641957b2180f
org.apache.groovy.parser.antlr4.GroovyParser.VariableDeclaratorContextd8c3ae73b246cf79
org.apache.groovy.parser.antlr4.GroovyParser.VariableDeclaratorIdContextc427b0aed1a24e29
org.apache.groovy.parser.antlr4.GroovyParser.VariableDeclaratorsContext66790e458eee74c1
org.apache.groovy.parser.antlr4.GroovyParser.VariableInitializerContext9410c14e1c6363b1
org.apache.groovy.parser.antlr4.GroovyParser.VariableInitializersContext43d6c6ae724e6fef
org.apache.groovy.parser.antlr4.GroovyParser.VariableModifierContext1cf944b86bee1287
org.apache.groovy.parser.antlr4.GroovyParser.VariableModifiersContext426155b3ae026a12
org.apache.groovy.parser.antlr4.GroovyParser.VariableModifiersOptContext738d91d410a226da
org.apache.groovy.parser.antlr4.GroovyParser.WhileStmtAltContext8217c8a44d2d2e41
org.apache.groovy.parser.antlr4.GroovyParserBaseVisitorc29ec8ad968078b7
org.apache.groovy.parser.antlr4.GroovydocManager5c79668f1e038375
org.apache.groovy.parser.antlr4.ModifierManagerbcc4dc53f9c3fa43
org.apache.groovy.parser.antlr4.SemanticPredicates89a445e35a437ebc
org.apache.groovy.parser.antlr4.SyntaxErrorReportableab8e9dbcabb575de
org.apache.groovy.parser.antlr4.TryWithResourcesASTTransformationcbd3ba5914c5ad39
org.apache.groovy.parser.antlr4.internal.DescriptiveErrorStrategye17e7667b1be7f29
org.apache.groovy.parser.antlr4.internal.atnmanager.AtnManager4ac1975cb383c354
org.apache.groovy.parser.antlr4.internal.atnmanager.AtnManager.AtnWrappercd9fef829e2748c1
org.apache.groovy.parser.antlr4.internal.atnmanager.LexerAtnManagerd194f146035c9f3c
org.apache.groovy.parser.antlr4.internal.atnmanager.ParserAtnManagerd9a08733f4727cd3
org.apache.groovy.parser.antlr4.util.PositionConfigureUtilsb05beaf68c052ec9
org.apache.groovy.parser.antlr4.util.StringUtils61af71578248b8fd
org.apache.groovy.parser.antlr4.util.StringUtils.1a48a2cd05db61523
org.apache.groovy.parser.antlr4.util.StringUtils.23be5fedabcf240e0
org.apache.groovy.parser.antlr4.util.StringUtils.3726ba31c43296187
org.apache.groovy.parser.antlr4.util.StringUtils.4ef59ce86e1873bec
org.apache.groovy.plugin.GroovyRunnerRegistry3ee0fd6ba999de37
org.apache.groovy.util.BeanUtils0417dea30775a32c
org.apache.groovy.util.Maps576f8db5d5e792ce
org.apache.groovy.util.SystemUtilc4542e5ae25149fa
org.codehaus.groovy.ast.ASTNode27d80a7d248ea427
org.codehaus.groovy.ast.AnnotatedNoded335f069daf8ad09
org.codehaus.groovy.ast.AnnotationNode26b19abb5bbcb185
org.codehaus.groovy.ast.AstToTextHelper74db57350e12d9c5
org.codehaus.groovy.ast.ClassCodeExpressionTransformerb8ce561c97df707d
org.codehaus.groovy.ast.ClassCodeVisitorSupportd405454b32a24513
org.codehaus.groovy.ast.ClassHelpera8f16bc507d7aa73
org.codehaus.groovy.ast.ClassHelper.ClassHelperCachee43309eceb1168a9
org.codehaus.groovy.ast.ClassNode4f5262ead6885cf8
org.codehaus.groovy.ast.ClassNode.MapOfListsf82fd0a95c2ed603
org.codehaus.groovy.ast.CodeVisitorSupportf562d0f23a0beb54
org.codehaus.groovy.ast.CompileUnit0ffddc45e71eb003
org.codehaus.groovy.ast.ConstructorNoded62ced7529f24e9b
org.codehaus.groovy.ast.DynamicVariable98984c61b44e848f
org.codehaus.groovy.ast.FieldNodecc02132719179867
org.codehaus.groovy.ast.GenericsType71dca4e557620440
org.codehaus.groovy.ast.GenericsType.GenericsTypeName1c63b6280d40380c
org.codehaus.groovy.ast.GroovyCodeVisitor1d82306bcf9aa598
org.codehaus.groovy.ast.ImportNodea56e6bc57b129e1d
org.codehaus.groovy.ast.InnerClassNode9fc3b6b24132debb
org.codehaus.groovy.ast.MethodNode14971edba915349b
org.codehaus.groovy.ast.MixinNode96e8e19d33b34b42
org.codehaus.groovy.ast.ModifierNode32c00f0a21e97088
org.codehaus.groovy.ast.ModuleNode3bda5c5baf925e47
org.codehaus.groovy.ast.NodeMetaDataHandlerf37500102f069967
org.codehaus.groovy.ast.PackageNode6dd38b8164cb7033
org.codehaus.groovy.ast.Parameterd61abfe440effc8c
org.codehaus.groovy.ast.PropertyNodebfca402b9b75df24
org.codehaus.groovy.ast.VariableScopeb7f8f462469b81b4
org.codehaus.groovy.ast.decompiled.AnnotationStub5bac1d03230f4745
org.codehaus.groovy.ast.decompiled.AsmDecompiler49e8cb7580e6c5b7
org.codehaus.groovy.ast.decompiled.AsmDecompiler.1ee284242e6a21a83
org.codehaus.groovy.ast.decompiled.AsmDecompiler.AnnotationReadereb0a44818bf5d80e
org.codehaus.groovy.ast.decompiled.AsmDecompiler.AnnotationReader.182213f80c244a78d
org.codehaus.groovy.ast.decompiled.AsmDecompiler.DecompilingVisitor38d2263caca46eb6
org.codehaus.groovy.ast.decompiled.AsmDecompiler.DecompilingVisitor.16982321f308d3999
org.codehaus.groovy.ast.decompiled.AsmDecompiler.DecompilingVisitor.1.18f41620f0457cff5
org.codehaus.groovy.ast.decompiled.AsmDecompiler.DecompilingVisitor.2e8bfbbdd3f431304
org.codehaus.groovy.ast.decompiled.AsmDecompiler.StubCache9f0575fc119a5680
org.codehaus.groovy.ast.decompiled.AsmReferenceResolvera86d6367b31ad908
org.codehaus.groovy.ast.decompiled.ClassSignatureParser18bca80ef1ee150f
org.codehaus.groovy.ast.decompiled.ClassSignatureParser.1540ba0e961b3149a
org.codehaus.groovy.ast.decompiled.ClassSignatureParser.1.1c08245ff90515c17
org.codehaus.groovy.ast.decompiled.ClassSignatureParser.1.22da4a797f464a755
org.codehaus.groovy.ast.decompiled.ClassStub1f7a82608b266819
org.codehaus.groovy.ast.decompiled.DecompiledClassNode716d7b1116bdecef
org.codehaus.groovy.ast.decompiled.EnumConstantWrapper3bdb9e225b37f388
org.codehaus.groovy.ast.decompiled.FieldStub183fbc81f00404e8
org.codehaus.groovy.ast.decompiled.FormalParameterParserd49495eadf0d522a
org.codehaus.groovy.ast.decompiled.FormalParameterParser.1507ea4ccbfe91a0d
org.codehaus.groovy.ast.decompiled.LazyFieldNodead1b0c58a5821f4e
org.codehaus.groovy.ast.decompiled.MemberSignatureParserd173f4fa8aacb393
org.codehaus.groovy.ast.decompiled.MemberStub9e5f606d39f542e6
org.codehaus.groovy.ast.decompiled.MethodStub2f083ce5b5b87552
org.codehaus.groovy.ast.decompiled.TypeSignatureParser215d572ca55d3403
org.codehaus.groovy.ast.decompiled.TypeSignatureParser.15f8735a9022fadbc
org.codehaus.groovy.ast.decompiled.TypeSignatureParser.28a8c8bade718f28f
org.codehaus.groovy.ast.decompiled.TypeWrapperd090d3490cb28772
org.codehaus.groovy.ast.expr.ArgumentListExpression3d531923823d5b30
org.codehaus.groovy.ast.expr.ArrayExpressione6a32944f25ba588
org.codehaus.groovy.ast.expr.BinaryExpression2c0292f137a27d91
org.codehaus.groovy.ast.expr.BitwiseNegationExpression185550b0cf4d0399
org.codehaus.groovy.ast.expr.BooleanExpression1f8db1269310081a
org.codehaus.groovy.ast.expr.CastExpression2e3a52f2f423acff
org.codehaus.groovy.ast.expr.ClassExpression73344b9a1ef4581a
org.codehaus.groovy.ast.expr.ClosureExpressiond70baffc6b922489
org.codehaus.groovy.ast.expr.ClosureListExpression114c13cb211c18c8
org.codehaus.groovy.ast.expr.ConstantExpression10b8d113e6cd1668
org.codehaus.groovy.ast.expr.ConstructorCallExpression71ebdd1979a85fb4
org.codehaus.groovy.ast.expr.DeclarationExpression55c5949f6d91ef49
org.codehaus.groovy.ast.expr.ElvisOperatorExpressionf4bbe9664ff27c5b
org.codehaus.groovy.ast.expr.EmptyExpression1d11452f5922029f
org.codehaus.groovy.ast.expr.EmptyExpression.1b9e1531a39fea5f1
org.codehaus.groovy.ast.expr.Expression1df3a6d59fc14fe0
org.codehaus.groovy.ast.expr.FieldExpression0432cda38eeb0d67
org.codehaus.groovy.ast.expr.GStringExpression50dac6fdac936913
org.codehaus.groovy.ast.expr.LambdaExpression636293f93c040de4
org.codehaus.groovy.ast.expr.ListExpression69fcdbd2fea53809
org.codehaus.groovy.ast.expr.MapEntryExpression007935b4f50e67c1
org.codehaus.groovy.ast.expr.MapExpression8c2b8b2843d394b7
org.codehaus.groovy.ast.expr.MethodCallExpression0e12ee5c74e158d1
org.codehaus.groovy.ast.expr.MethodCallExpression.1fdb3cd64606a87eb
org.codehaus.groovy.ast.expr.MethodPointerExpressioncbe178af7a24e2ab
org.codehaus.groovy.ast.expr.MethodReferenceExpressionb9fe359596872382
org.codehaus.groovy.ast.expr.NamedArgumentListExpression978bd0964b5e29be
org.codehaus.groovy.ast.expr.NotExpressiond6e0791f05a477c4
org.codehaus.groovy.ast.expr.PostfixExpression04af6ff54d8f0f8e
org.codehaus.groovy.ast.expr.PropertyExpressione268dc87eb28b623
org.codehaus.groovy.ast.expr.RangeExpression6c7cf8004640f124
org.codehaus.groovy.ast.expr.StaticMethodCallExpressionb4cfc9d785f2dfea
org.codehaus.groovy.ast.expr.TernaryExpression36d5e2792d81504a
org.codehaus.groovy.ast.expr.TupleExpression69b1236c3606063c
org.codehaus.groovy.ast.expr.UnaryMinusExpression186c93076f1def49
org.codehaus.groovy.ast.expr.VariableExpressiondc5fbdf497156b3a
org.codehaus.groovy.ast.stmt.AssertStatementa7195e5672abbc86
org.codehaus.groovy.ast.stmt.BlockStatementaeb641e965d73c1f
org.codehaus.groovy.ast.stmt.BreakStatement752c36d936f72bc7
org.codehaus.groovy.ast.stmt.CaseStatementcbe95b158cfe94b0
org.codehaus.groovy.ast.stmt.CatchStatementb5e9464f40d5b651
org.codehaus.groovy.ast.stmt.EmptyStatement62c778d68fae0b06
org.codehaus.groovy.ast.stmt.EmptyStatement.14450ab0173a753b7
org.codehaus.groovy.ast.stmt.ExpressionStatement5691196fe2475158
org.codehaus.groovy.ast.stmt.ForStatement0f97748e98a9ffe8
org.codehaus.groovy.ast.stmt.IfStatement3ac00abf2412dfe8
org.codehaus.groovy.ast.stmt.ReturnStatementf4379361135efd0c
org.codehaus.groovy.ast.stmt.Statementf9fa1507607f86b4
org.codehaus.groovy.ast.stmt.SwitchStatementd940bdb1748bee4f
org.codehaus.groovy.ast.stmt.ThrowStatementc23aa272b17359e4
org.codehaus.groovy.ast.stmt.TryCatchStatement090143ad55911add
org.codehaus.groovy.ast.stmt.WhileStatementb6b230c40b8a4000
org.codehaus.groovy.ast.tools.ClosureUtils1cb5fae2e2e628ae
org.codehaus.groovy.ast.tools.GeneralUtils4b6ca7027e57255f
org.codehaus.groovy.ast.tools.GenericsUtilscbe95d78127b7c35
org.codehaus.groovy.ast.tools.ParameterUtils8c66a32f3e34e743
org.codehaus.groovy.ast.tools.WideningCategories92c6c4552ddcfc45
org.codehaus.groovy.ast.tools.WideningCategories.LowestUpperBoundClassNodeb2bc042c5c6e3de4
org.codehaus.groovy.classgen.AnnotationVisitord54210ca6929cbe6
org.codehaus.groovy.classgen.AsmClassGenerator0d4e1065c7ba68d7
org.codehaus.groovy.classgen.BytecodeExpressionb6c655e090cad821
org.codehaus.groovy.classgen.BytecodeExpression.1e480d6489d70e81a
org.codehaus.groovy.classgen.BytecodeInstruction42479d559ae0946f
org.codehaus.groovy.classgen.BytecodeSequencee93183df5baf087f
org.codehaus.groovy.classgen.ClassCompletionVerifiereddd81ec063dfa20
org.codehaus.groovy.classgen.ClassGenerator85bb0b9b19553bf0
org.codehaus.groovy.classgen.EnumCompletionVisitora8de6ba10c2036a2
org.codehaus.groovy.classgen.EnumVisitor8fbe41edc0eff021
org.codehaus.groovy.classgen.ExtendedVerifier9c576671bd52cfd0
org.codehaus.groovy.classgen.FinalVariableAnalyzer8d904efe78ec5a06
org.codehaus.groovy.classgen.FinalVariableAnalyzer.1316606381ec5cfc3
org.codehaus.groovy.classgen.FinalVariableAnalyzer.StateMapbbc6bb9b243eb527
org.codehaus.groovy.classgen.FinalVariableAnalyzer.VariableState4e34c073edcc818e
org.codehaus.groovy.classgen.GeneratorContexte10e733e7c19a303
org.codehaus.groovy.classgen.InnerClassCompletionVisitor145fa30c4eb9aa86
org.codehaus.groovy.classgen.InnerClassVisitorba2be29ad82a0393
org.codehaus.groovy.classgen.InnerClassVisitorHelper0a5a972653fda9f7
org.codehaus.groovy.classgen.ReturnAdder2df8bbe375ab1c9d
org.codehaus.groovy.classgen.VariableScopeVisitordfc559d2ad392b66
org.codehaus.groovy.classgen.VariableScopeVisitor.StateStackElementcbf5b05681520497
org.codehaus.groovy.classgen.Verifier1dc628bf3a53885d
org.codehaus.groovy.classgen.Verifier.1528f1354adb12b51
org.codehaus.groovy.classgen.Verifier.11281db149369c519a
org.codehaus.groovy.classgen.Verifier.23b31616ba195d918
org.codehaus.groovy.classgen.Verifier.5733db0e33998eccf
org.codehaus.groovy.classgen.Verifier.6a3dd9618bbc95c5a
org.codehaus.groovy.classgen.Verifier.78568eeb05e35b387
org.codehaus.groovy.classgen.Verifier.837b6e0cce2c0efd8
org.codehaus.groovy.classgen.Verifier.SwapInitStatement25080b877b208ebd
org.codehaus.groovy.classgen.Verifier.SwapInitStatement.SwapInitInstructionc62ea0af18ce75f5
org.codehaus.groovy.classgen.VerifierCodeVisitor3863c08c0d0bde34
org.codehaus.groovy.classgen.asm.AssertionWriterca1bddb297191464
org.codehaus.groovy.classgen.asm.BinaryBooleanExpressionHelper3f5ec154c6e46d4c
org.codehaus.groovy.classgen.asm.BinaryDoubleExpressionHelper01026d8f29444918
org.codehaus.groovy.classgen.asm.BinaryExpressionHelper191471bc1e228ae6
org.codehaus.groovy.classgen.asm.BinaryExpressionHelper.1b03c6681c8240e9b
org.codehaus.groovy.classgen.asm.BinaryExpressionMultiTypeDispatchera82c5bb5386fb2de
org.codehaus.groovy.classgen.asm.BinaryExpressionMultiTypeDispatcher.BinaryByteExpressionHelper71a13f668d616e28
org.codehaus.groovy.classgen.asm.BinaryExpressionMultiTypeDispatcher.BinaryCharExpressionHelperc058fc9112a4e925
org.codehaus.groovy.classgen.asm.BinaryExpressionMultiTypeDispatcher.BinaryShortExpressionHelper7982aa460eac34f8
org.codehaus.groovy.classgen.asm.BinaryExpressionWriter23c425b6d93f454b
org.codehaus.groovy.classgen.asm.BinaryFloatExpressionHelper293d8cd9bc9b45fe
org.codehaus.groovy.classgen.asm.BinaryIntExpressionHelperdad6c59f3a2cf3a6
org.codehaus.groovy.classgen.asm.BinaryLongExpressionHelper6ee685e1d5ef5b4f
org.codehaus.groovy.classgen.asm.BinaryObjectExpressionHelperf4a43b17f1c65597
org.codehaus.groovy.classgen.asm.BytecodeHelperfef61a72a580c719
org.codehaus.groovy.classgen.asm.BytecodeHelper.LoadVarHandlera42b43a4d228ebff
org.codehaus.groovy.classgen.asm.BytecodeHelper.PrimitiveTypeHandler9730d9d9e5577caa
org.codehaus.groovy.classgen.asm.BytecodeHelper.ReturnVarHandler22c00f4b3fc88e1d
org.codehaus.groovy.classgen.asm.BytecodeHelper.StoreVarHandler6885d370a768b756
org.codehaus.groovy.classgen.asm.BytecodeVariable7a47bacc3628d145
org.codehaus.groovy.classgen.asm.CallSiteWritera1a60bd746fbf4c9
org.codehaus.groovy.classgen.asm.ClosureWriterf674562272bbcf7c
org.codehaus.groovy.classgen.asm.ClosureWriter.CorrectAccessedVariableVisitor49caaf3e4022c1cd
org.codehaus.groovy.classgen.asm.CompileStack3ed56a286666ee7f
org.codehaus.groovy.classgen.asm.CompileStack.BlockRecordere45c51ccd346cbb4
org.codehaus.groovy.classgen.asm.CompileStack.LabelRange8071ba169c1acf28
org.codehaus.groovy.classgen.asm.CompileStack.StateStackElement2ba0e3c3a19c614e
org.codehaus.groovy.classgen.asm.ExpressionAsVariableSlot6b83220df23eff5e
org.codehaus.groovy.classgen.asm.InvocationWriter914bfd02c1c8fbc3
org.codehaus.groovy.classgen.asm.LambdaWriterdbd0cee4cedcd829
org.codehaus.groovy.classgen.asm.MethodCaller3c994913c1a01342
org.codehaus.groovy.classgen.asm.MethodCallerMultiAdapter97a709291ed8983c
org.codehaus.groovy.classgen.asm.MethodPointerExpressionWriter4130d23cb53d8b84
org.codehaus.groovy.classgen.asm.MethodReferenceExpressionWritere36bf4bbb9064b82
org.codehaus.groovy.classgen.asm.MopWriter283bd1034d6ee360
org.codehaus.groovy.classgen.asm.MopWriter.MopKey82eba74ba42189ac
org.codehaus.groovy.classgen.asm.OperandStack514ffa14413ba264
org.codehaus.groovy.classgen.asm.OptimizingStatementWriter4f3c4e61d9aa3b41
org.codehaus.groovy.classgen.asm.OptimizingStatementWriter.FastPathData31f4ceee0634cb53
org.codehaus.groovy.classgen.asm.OptimizingStatementWriter.OptVisitore4d0ae6f7e3ce1fa
org.codehaus.groovy.classgen.asm.OptimizingStatementWriter.OptimizeFlagsCollectore4e54efbb840ebf9
org.codehaus.groovy.classgen.asm.OptimizingStatementWriter.OptimizeFlagsCollector.OptimizeFlagsEntryd0f4f77a14a36210
org.codehaus.groovy.classgen.asm.OptimizingStatementWriter.StatementMetae64007c09d4866da
org.codehaus.groovy.classgen.asm.StatementMetaTypeChooserdf2afbf9bab39cfd
org.codehaus.groovy.classgen.asm.StatementWritere30f1ae52e3c880d
org.codehaus.groovy.classgen.asm.UnaryExpressionHelperadf2399d560555f8
org.codehaus.groovy.classgen.asm.VariableSlotLoader261bba1b4e9323cd
org.codehaus.groovy.classgen.asm.WriterControllerb2250c41ebdc6f03
org.codehaus.groovy.classgen.asm.util.TypeUtil0739a6f08a244ee5
org.codehaus.groovy.control.ASTTransformationsContextbe817020f6427fbc
org.codehaus.groovy.control.AnnotationConstantsVisitorcebd46142ba04d9c
org.codehaus.groovy.control.ClassNodeResolver7f5f74ad3a0a37e0
org.codehaus.groovy.control.ClassNodeResolver.1cb69783c4658c503
org.codehaus.groovy.control.ClassNodeResolver.LookupResult44003b5e329f1074
org.codehaus.groovy.control.CompilationUnit0e7ced7ed4ea4a50
org.codehaus.groovy.control.CompilationUnit.1a6bf753ea21a0862
org.codehaus.groovy.control.CompilationUnit.3c9a24646ceb937b4
org.codehaus.groovy.control.CompilationUnit.3.1256a97fae8feb575
org.codehaus.groovy.control.CompilationUnit.4875c52fea0052fac
org.codehaus.groovy.control.CompilationUnit.IPrimaryClassNodeOperation0fd303e48e6c5b5c
org.codehaus.groovy.control.CompilationUnit.ISourceUnitOperation1051bdc0aecae446
org.codehaus.groovy.control.CompilationUnit.SourceUnitOperation0f420c8629f70565
org.codehaus.groovy.control.CompilePhaseb22dbd477dfa9d38
org.codehaus.groovy.control.CompilerConfiguration770b870123cc770f
org.codehaus.groovy.control.CompilerConfiguration.114071beb61aee11d
org.codehaus.groovy.control.ErrorCollector107ebd86303489b6
org.codehaus.groovy.control.GenericsVisitorc6118ff45a5aba15
org.codehaus.groovy.control.InstanceOfVerifier4d66b8f40c3a4443
org.codehaus.groovy.control.Janitor861eebc754e71b0d
org.codehaus.groovy.control.LabelVerifier36aad9e09af640eb
org.codehaus.groovy.control.OptimizerVisitor8a99aa043255701c
org.codehaus.groovy.control.ParserPluginFactoryf5a307a9b33e4bf1
org.codehaus.groovy.control.ProcessingUnit9d4f9db3f247ca3e
org.codehaus.groovy.control.ResolveVisitor4346a70f38e3a2eb
org.codehaus.groovy.control.ResolveVisitor.ConstructedClassWithPackagea75415b1280f79bf
org.codehaus.groovy.control.ResolveVisitor.ConstructedNestedClass7f387bc2e8cd2e07
org.codehaus.groovy.control.ResolveVisitor.LowerCaseClassb65c5cb28c8950c2
org.codehaus.groovy.control.SourceExtensionHandler093f828e954a0cff
org.codehaus.groovy.control.SourceUnitf22ad65ecfe6277e
org.codehaus.groovy.control.StaticImportVisitor1585dea6f87dec71
org.codehaus.groovy.control.StaticVerifier583f75923bb31941
org.codehaus.groovy.control.io.AbstractReaderSource5ee327700dc39af5
org.codehaus.groovy.control.io.FileReaderSource0f0dcf456472d71b
org.codehaus.groovy.control.io.StringReaderSource3627cc80051e76a2
org.codehaus.groovy.control.io.URLReaderSourcebec494ccc4cbd1d4
org.codehaus.groovy.reflection.AccessPermissionChecker47507cada23adb0d
org.codehaus.groovy.reflection.CachedClass61a90628b1cef6ff
org.codehaus.groovy.reflection.CachedClass.19ecb1ea7568e0fef
org.codehaus.groovy.reflection.CachedClass.21b391b495c1cd22f
org.codehaus.groovy.reflection.CachedClass.3c92fb7acefe4b304
org.codehaus.groovy.reflection.CachedClass.40a4f655a29280e63
org.codehaus.groovy.reflection.CachedClass.5337047025769aa08
org.codehaus.groovy.reflection.CachedClass.60c00e7252b9f9993
org.codehaus.groovy.reflection.CachedClass.7f10dd3af723972ef
org.codehaus.groovy.reflection.CachedClass.876ed05eee8a25740
org.codehaus.groovy.reflection.CachedClass.CachedMethodComparatorByName1a73bb81d4844d9e
org.codehaus.groovy.reflection.CachedClass.CachedMethodComparatorWithStringf6a81ff615338ecf
org.codehaus.groovy.reflection.CachedConstructor5ea02358fb6319d6
org.codehaus.groovy.reflection.CachedFieldd824fac0350768a5
org.codehaus.groovy.reflection.CachedMethod0684f6c8638a4c5f
org.codehaus.groovy.reflection.CachedMethod.MyComparator426e0eb9b70cd6a2
org.codehaus.groovy.reflection.ClassInfo93b97a7835d710c4
org.codehaus.groovy.reflection.ClassInfo.15ff26d83668d78ea
org.codehaus.groovy.reflection.ClassInfo.GlobalClassSet0029626bb1baa5c0
org.codehaus.groovy.reflection.ClassInfo.LazyCachedClassReff566d559278b110a
org.codehaus.groovy.reflection.ClassInfo.LazyClassLoaderRef9391bd953eeeeb92
org.codehaus.groovy.reflection.ClassLoaderForClassArtifactscd3f47f254c7a3c6
org.codehaus.groovy.reflection.GeneratedMetaMethodf40c4c5d47696223
org.codehaus.groovy.reflection.GeneratedMetaMethod.DgmMethodRecord27846f6a0e596eda
org.codehaus.groovy.reflection.GeneratedMetaMethod.Proxy56ff849414de56d0
org.codehaus.groovy.reflection.GroovyClassValueFactoryb0126e453bd47f2f
org.codehaus.groovy.reflection.ParameterTypesfad1e4e254deb425
org.codehaus.groovy.reflection.ReflectionCache9c1e41e9e6f2631d
org.codehaus.groovy.reflection.ReflectionUtils1e1a1912a20226e2
org.codehaus.groovy.reflection.ReflectionUtils.ClassContextHelper94fe2e71c3f62572
org.codehaus.groovy.reflection.SunClassLoadera94fb403a11319f5
org.codehaus.groovy.reflection.android.AndroidSupportb2243773f41245ef
org.codehaus.groovy.reflection.stdclasses.ArrayCachedClass954a419728b08882
org.codehaus.groovy.reflection.stdclasses.BigDecimalCachedClass9e2647d746281bfe
org.codehaus.groovy.reflection.stdclasses.BigIntegerCachedClass5cc37ad17fc9d744
org.codehaus.groovy.reflection.stdclasses.BooleanCachedClass5486f72e95af237a
org.codehaus.groovy.reflection.stdclasses.ByteCachedClass7bb8867b4405ae3c
org.codehaus.groovy.reflection.stdclasses.CachedClosureClass7e1d8a2e9e189769
org.codehaus.groovy.reflection.stdclasses.CachedSAMClassfa772c9882a61d68
org.codehaus.groovy.reflection.stdclasses.CharacterCachedClassa0b4ca5f909c822d
org.codehaus.groovy.reflection.stdclasses.DoubleCachedClass5069f6885874f35e
org.codehaus.groovy.reflection.stdclasses.FloatCachedClass2a55ba3a1c744ee6
org.codehaus.groovy.reflection.stdclasses.IntegerCachedClassdab928d8ffb5f6db
org.codehaus.groovy.reflection.stdclasses.LongCachedClassc08fc5d792034051
org.codehaus.groovy.reflection.stdclasses.NumberCachedClass595fbbfe9e61d31c
org.codehaus.groovy.reflection.stdclasses.ObjectCachedClass92a5c3a91e64bfa8
org.codehaus.groovy.reflection.stdclasses.ShortCachedClass5d594a1dc66ded63
org.codehaus.groovy.reflection.stdclasses.StringCachedClass277cd4a861bac922
org.codehaus.groovy.reflection.v7.GroovyClassValueJava703cc9bee1dab9073
org.codehaus.groovy.runtime.ArrayUtil2c9e50ea5cac5ca7
org.codehaus.groovy.runtime.BytecodeInterface8c6733de4688f853d
org.codehaus.groovy.runtime.ConversionHandlerc1b2d9adf763863b
org.codehaus.groovy.runtime.ConvertedClosureb6649d2846f5bc9f
org.codehaus.groovy.runtime.DefaultCachedMethodKey0f1201491e668f78
org.codehaus.groovy.runtime.DefaultGroovyMethods980e68507ed2cd21
org.codehaus.groovy.runtime.DefaultGroovyMethods.26670b2fed3b9d78c
org.codehaus.groovy.runtime.DefaultGroovyMethodsSupport89df9a79db1a181f
org.codehaus.groovy.runtime.EncodingGroovyMethods14aef86bb73de01f
org.codehaus.groovy.runtime.EncodingGroovyMethods.24e790ab842ee6591
org.codehaus.groovy.runtime.GStringImpldfc50ebbe4374df0
org.codehaus.groovy.runtime.GStringImpl.replacea2530ec2bfafb4b9
org.codehaus.groovy.runtime.GroovyCategorySupporte4edc6dd0e000793
org.codehaus.groovy.runtime.GroovyCategorySupport.MyThreadLocal11fe1eca11a212c6
org.codehaus.groovy.runtime.HandleMetaClassd8e14ad6b525cb6a
org.codehaus.groovy.runtime.IOGroovyMethods381a4cb21cd1463c
org.codehaus.groovy.runtime.IOGroovyMethods.23f018f921c984594
org.codehaus.groovy.runtime.InvokerHelper4bfcb02f4a08345e
org.codehaus.groovy.runtime.InvokerHelper.toStringa7a3098708e9186f
org.codehaus.groovy.runtime.InvokerInvocationExceptiondc253c9e96b47eeb
org.codehaus.groovy.runtime.MetaClassHelper8184c22e7162e61d
org.codehaus.groovy.runtime.MethodClosure6af03ae852dcc4ff
org.codehaus.groovy.runtime.MethodKey646443d8e812045b
org.codehaus.groovy.runtime.NullObjecta907daf5264984b5
org.codehaus.groovy.runtime.NumberAwareComparator880ffcc95477bd3a
org.codehaus.groovy.runtime.RangeInfo09cf4f1f9d0de986
org.codehaus.groovy.runtime.ResourceGroovyMethods547029ee9863d695
org.codehaus.groovy.runtime.ScriptBytecodeAdapter908d24de4a0d35bc
org.codehaus.groovy.runtime.StringGroovyMethods6dc27ca396c034d0
org.codehaus.groovy.runtime.StringGroovyMethods.LineIterable80e3d67071a3dc54
org.codehaus.groovy.runtime.callsite.AbstractCallSitea26885994536a798
org.codehaus.groovy.runtime.callsite.BooleanClosureWrapperaf827dfd42d54386
org.codehaus.groovy.runtime.callsite.BooleanReturningMethodInvokerc75a55dbb9a427bc
org.codehaus.groovy.runtime.callsite.CallSiteArray5d00e3c37cb1859d
org.codehaus.groovy.runtime.callsite.CallSiteAwareMetaMethod44a7e7fe87ffbde3
org.codehaus.groovy.runtime.callsite.CallSiteClassLoaderd639b22e44e8ccc8
org.codehaus.groovy.runtime.callsite.CallSiteGenerator3ee0eae40c8423ed
org.codehaus.groovy.runtime.callsite.ClassMetaClassGetPropertySite5a4beb7bf7d11262
org.codehaus.groovy.runtime.callsite.ConstructorSite2f01aacbbd81be50
org.codehaus.groovy.runtime.callsite.ConstructorSite.ConstructorSiteNoUnwrapd705adefbb477094
org.codehaus.groovy.runtime.callsite.ConstructorSite.ConstructorSiteNoUnwrapNoCoerce2b1437cd786e9030
org.codehaus.groovy.runtime.callsite.GetEffectivePogoFieldSite14534893ec96e802
org.codehaus.groovy.runtime.callsite.GetEffectivePogoPropertySitec0c8cd986bf6e687
org.codehaus.groovy.runtime.callsite.GetEffectivePojoFieldSite8d92faec4ed9a2d5
org.codehaus.groovy.runtime.callsite.GetEffectivePojoPropertySite5adb025c6b8753fb
org.codehaus.groovy.runtime.callsite.GroovySunClassLoader66022e8bc078b7c3
org.codehaus.groovy.runtime.callsite.MetaClassSite92b8cbe717b1f3af
org.codehaus.groovy.runtime.callsite.MetaMethodSiteef94cbbe9b298ce8
org.codehaus.groovy.runtime.callsite.PlainObjectMetaMethodSitefc75d9e561279087
org.codehaus.groovy.runtime.callsite.PogoGetPropertySite162334dc230e07d8
org.codehaus.groovy.runtime.callsite.PogoMetaClassSited022342e1ee2d663
org.codehaus.groovy.runtime.callsite.PogoMetaMethodSiteb0d8693e198882d3
org.codehaus.groovy.runtime.callsite.PogoMetaMethodSite.PogoCachedMethodSitef887b6fb60c96e58
org.codehaus.groovy.runtime.callsite.PogoMetaMethodSite.PogoCachedMethodSiteNoUnwrap9094f6df6381b584
org.codehaus.groovy.runtime.callsite.PogoMetaMethodSite.PogoCachedMethodSiteNoUnwrapNoCoerce6f2fd547b8555413
org.codehaus.groovy.runtime.callsite.PogoMetaMethodSite.PogoMetaMethodSiteNoUnwrapNoCoercefd056df73741e893
org.codehaus.groovy.runtime.callsite.PojoMetaClassGetPropertySitecf5ae136543ef9ab
org.codehaus.groovy.runtime.callsite.PojoMetaClassSite59e5025857399318
org.codehaus.groovy.runtime.callsite.PojoMetaMethodSite79244850cbbad8c3
org.codehaus.groovy.runtime.callsite.PojoMetaMethodSite.PojoCachedMethodSite5d2b3fafaea53fd1
org.codehaus.groovy.runtime.callsite.PojoMetaMethodSite.PojoCachedMethodSiteNoUnwrap2078d2585fd99fd8
org.codehaus.groovy.runtime.callsite.PojoMetaMethodSite.PojoCachedMethodSiteNoUnwrapNoCoerce9f962f940a47def8
org.codehaus.groovy.runtime.callsite.PojoMetaMethodSite.PojoMetaMethodSiteNoUnwrap1ef6952de4b98e96
org.codehaus.groovy.runtime.callsite.PojoMetaMethodSite.PojoMetaMethodSiteNoUnwrapNoCoerce4927c9983c03780b
org.codehaus.groovy.runtime.callsite.StaticMetaMethodSite18dea215649715ff
org.codehaus.groovy.runtime.callsite.StaticMetaMethodSite.StaticMetaMethodSiteNoUnwrapd66943d3b273901c
org.codehaus.groovy.runtime.callsite.StaticMetaMethodSite.StaticMetaMethodSiteNoUnwrapNoCoercee02871e980c88480
org.codehaus.groovy.runtime.dgm.100ea7785f458462a9c
org.codehaus.groovy.runtime.dgm.1013207461d2fc64200
org.codehaus.groovy.runtime.dgm.10261493a7bdbdd7ac3
org.codehaus.groovy.runtime.dgm.1045d11f7c466b34e30
org.codehaus.groovy.runtime.dgm.1068833144f3ad2ced11
org.codehaus.groovy.runtime.dgm.1128d0469d66d2a2b560
org.codehaus.groovy.runtime.dgm.1143dc09b2bbb276c8b6
org.codehaus.groovy.runtime.dgm.1144896fcf04e2f0fa70
org.codehaus.groovy.runtime.dgm.11458b294462a331d40c
org.codehaus.groovy.runtime.dgm.11461f6302e1e3c56094
org.codehaus.groovy.runtime.dgm.115303a2fa25f9302af5
org.codehaus.groovy.runtime.dgm.1154a5ce1d013c3344fb
org.codehaus.groovy.runtime.dgm.115867fa96f8a0eea100
org.codehaus.groovy.runtime.dgm.12f257e7b30db360e5
org.codehaus.groovy.runtime.dgm.120c115e1f7f624286d
org.codehaus.groovy.runtime.dgm.1203f35dd22aba836049
org.codehaus.groovy.runtime.dgm.120495e59aba522c4068
org.codehaus.groovy.runtime.dgm.1205f43856706ccd983c
org.codehaus.groovy.runtime.dgm.1206f761799dbe2b86b6
org.codehaus.groovy.runtime.dgm.120776e8901f89e68851
org.codehaus.groovy.runtime.dgm.121704557523771932f7
org.codehaus.groovy.runtime.dgm.122987bfe144bc1c4538
org.codehaus.groovy.runtime.dgm.12332ad733c1629bd757
org.codehaus.groovy.runtime.dgm.12344e74a51030d29dac
org.codehaus.groovy.runtime.dgm.1243897fa32a7a039f7
org.codehaus.groovy.runtime.dgm.124088677dacaf77cc33
org.codehaus.groovy.runtime.dgm.12415c70981143ac43df
org.codehaus.groovy.runtime.dgm.1242cb4abfc44a37b96a
org.codehaus.groovy.runtime.dgm.1248c012392ed4d97023
org.codehaus.groovy.runtime.dgm.1249082faa4473b20073
org.codehaus.groovy.runtime.dgm.1250f3a9d77cd6ccf8ca
org.codehaus.groovy.runtime.dgm.125193822ab9be170079
org.codehaus.groovy.runtime.dgm.1252c0fb534d05104b53
org.codehaus.groovy.runtime.dgm.1253799adec188f98305
org.codehaus.groovy.runtime.dgm.1260df18452641fa5542
org.codehaus.groovy.runtime.dgm.1262d691300c622786a8
org.codehaus.groovy.runtime.dgm.137651aa53b58f09de
org.codehaus.groovy.runtime.dgm.132628de27c83f17d3b
org.codehaus.groovy.runtime.dgm.133a5ed1776b3fda765
org.codehaus.groovy.runtime.dgm.14fe5e860e173f570a
org.codehaus.groovy.runtime.dgm.1454e4f5ed4fd8ee679
org.codehaus.groovy.runtime.dgm.15700b7d7b0daf4386e
org.codehaus.groovy.runtime.dgm.15836b55aebba46794f
org.codehaus.groovy.runtime.dgm.1692e8f1f2b62361519
org.codehaus.groovy.runtime.dgm.20fbcbf9184b3027a9
org.codehaus.groovy.runtime.dgm.2004a8d6132446a2796
org.codehaus.groovy.runtime.dgm.2029cca6e056d1ee8a6
org.codehaus.groovy.runtime.dgm.203d299daa8b9baac6d
org.codehaus.groovy.runtime.dgm.21ad722b9784833903
org.codehaus.groovy.runtime.dgm.216bb2bff3a01af21bd
org.codehaus.groovy.runtime.dgm.2267af059fee90ea87
org.codehaus.groovy.runtime.dgm.221e38f5ba39eac02e7
org.codehaus.groovy.runtime.dgm.222ef1518ec9d492350
org.codehaus.groovy.runtime.dgm.223c2c90c64fea14249
org.codehaus.groovy.runtime.dgm.22463cb13796620db67
org.codehaus.groovy.runtime.dgm.227add8982e2a5c3979
org.codehaus.groovy.runtime.dgm.228b816a8734e861458
org.codehaus.groovy.runtime.dgm.229e5c8acd3d67e69e1
org.codehaus.groovy.runtime.dgm.23379c81c94e9ee2ccc
org.codehaus.groovy.runtime.dgm.234346f1382fba7c29b
org.codehaus.groovy.runtime.dgm.2361bca1d0949c79094
org.codehaus.groovy.runtime.dgm.237aec2b7fa89708a15
org.codehaus.groovy.runtime.dgm.24b37b2fdb1e481434
org.codehaus.groovy.runtime.dgm.240a37b8bc67a395ffa
org.codehaus.groovy.runtime.dgm.245cab62fdf06ce2ff3
org.codehaus.groovy.runtime.dgm.246cb27870b74da0a5d
org.codehaus.groovy.runtime.dgm.247da58aa2eb8b5c590
org.codehaus.groovy.runtime.dgm.289bcd7f13d45405fe6
org.codehaus.groovy.runtime.dgm.3032bf1eca14295905f
org.codehaus.groovy.runtime.dgm.3063565bb55c5bf9b9e
org.codehaus.groovy.runtime.dgm.30914cbdc1c795e9041
org.codehaus.groovy.runtime.dgm.31232a54a63daffe8b
org.codehaus.groovy.runtime.dgm.311feba9bfe6a020958
org.codehaus.groovy.runtime.dgm.31294e9ef32d5a78aaa
org.codehaus.groovy.runtime.dgm.31363699e37b373d385
org.codehaus.groovy.runtime.dgm.31534212eb153b57fbc
org.codehaus.groovy.runtime.dgm.316c8381a221cb761e3
org.codehaus.groovy.runtime.dgm.317930b7a880868c18b
org.codehaus.groovy.runtime.dgm.318727cb899cf1a8633
org.codehaus.groovy.runtime.dgm.319fd5867fd5a43c58b
org.codehaus.groovy.runtime.dgm.320a038e194f989905
org.codehaus.groovy.runtime.dgm.320a63b8f7c2261e9b9
org.codehaus.groovy.runtime.dgm.3210e6dde072412b561
org.codehaus.groovy.runtime.dgm.3225348a39d703880d5
org.codehaus.groovy.runtime.dgm.3234ff656f8687ddfd7
org.codehaus.groovy.runtime.dgm.324c6b5d4a7b6ca4962
org.codehaus.groovy.runtime.dgm.325ea69e715ff09ca33
org.codehaus.groovy.runtime.dgm.32648d7b9f22c8fa65a
org.codehaus.groovy.runtime.dgm.3278e3e73967f58ebbf
org.codehaus.groovy.runtime.dgm.32899373d8a0ab9588e
org.codehaus.groovy.runtime.dgm.3290904afaf9ee25d5d
org.codehaus.groovy.runtime.dgm.33e0978eff1864f4c4
org.codehaus.groovy.runtime.dgm.3304124ad5ba9e6f982
org.codehaus.groovy.runtime.dgm.331b8423fc35d8c7726
org.codehaus.groovy.runtime.dgm.3326e29bf6d36730114
org.codehaus.groovy.runtime.dgm.3335b0b41b7060f2f00
org.codehaus.groovy.runtime.dgm.334bf63d0c9197e0eab
org.codehaus.groovy.runtime.dgm.335d1311652f72cb97a
org.codehaus.groovy.runtime.dgm.33666ac71eaa23be193
org.codehaus.groovy.runtime.dgm.337c9d3793563399397
org.codehaus.groovy.runtime.dgm.338976c807a6b8340f5
org.codehaus.groovy.runtime.dgm.339dabe69e874f49e1f
org.codehaus.groovy.runtime.dgm.340a863095b3f43bd2e
org.codehaus.groovy.runtime.dgm.34189361fa3163a3215
org.codehaus.groovy.runtime.dgm.3421b08eb25758df0c7
org.codehaus.groovy.runtime.dgm.343336df6828c45cde3
org.codehaus.groovy.runtime.dgm.34482f1f322d853b3b5
org.codehaus.groovy.runtime.dgm.345c2b240bcd1331d01
org.codehaus.groovy.runtime.dgm.3460cbc33d37faeb4e8
org.codehaus.groovy.runtime.dgm.3476d313ef146986a39
org.codehaus.groovy.runtime.dgm.348ae50dd933d8ad824
org.codehaus.groovy.runtime.dgm.3496320acd06ccffc6c
org.codehaus.groovy.runtime.dgm.350cb0294e13885921e
org.codehaus.groovy.runtime.dgm.351c8d758a5e950ea1a
org.codehaus.groovy.runtime.dgm.3559e735503fc4e9aa0
org.codehaus.groovy.runtime.dgm.35689163520614cfe8f
org.codehaus.groovy.runtime.dgm.35720b0719cccfcd3dc
org.codehaus.groovy.runtime.dgm.3588c36a237ac74e4db
org.codehaus.groovy.runtime.dgm.362094b7b33c4920be
org.codehaus.groovy.runtime.dgm.3718f035d5beade1a0c
org.codehaus.groovy.runtime.dgm.37269e102d204f77039
org.codehaus.groovy.runtime.dgm.390ea68cef5b0b04193
org.codehaus.groovy.runtime.dgm.3910d5fa41581cecba8
org.codehaus.groovy.runtime.dgm.392fb9efb1bbe9c73c5
org.codehaus.groovy.runtime.dgm.4791dd82d5ace02de
org.codehaus.groovy.runtime.dgm.420bc25e601821b3934
org.codehaus.groovy.runtime.dgm.4217d3faa173a1bd4ca
org.codehaus.groovy.runtime.dgm.424fb40d18f895979bf
org.codehaus.groovy.runtime.dgm.426c383cb1e5602cabb
org.codehaus.groovy.runtime.dgm.447490443198fec378d
org.codehaus.groovy.runtime.dgm.449e9ffdff66986742f
org.codehaus.groovy.runtime.dgm.4608704dc94ad88c251
org.codehaus.groovy.runtime.dgm.461386c80d1b480e3a7
org.codehaus.groovy.runtime.dgm.472e7bc73b79f1a8b58
org.codehaus.groovy.runtime.dgm.480ac011cbf3cd679a6
org.codehaus.groovy.runtime.dgm.5ebe5bdd9fb9a6a68
org.codehaus.groovy.runtime.dgm.519125e8a918bcf2497
org.codehaus.groovy.runtime.dgm.52f4698b4bff5028ab
org.codehaus.groovy.runtime.dgm.5207a016504fa4c4d43
org.codehaus.groovy.runtime.dgm.53d869994a0211921a
org.codehaus.groovy.runtime.dgm.53991cc3ab5cb0e3d9c
org.codehaus.groovy.runtime.dgm.54523214743f886e56
org.codehaus.groovy.runtime.dgm.542daa3eb2b6a4da9c2
org.codehaus.groovy.runtime.dgm.543151779651eac0b24
org.codehaus.groovy.runtime.dgm.544e730a71cba5688cf
org.codehaus.groovy.runtime.dgm.5450a88bcd4a2d9285d
org.codehaus.groovy.runtime.dgm.55683d52e82f34bdc1
org.codehaus.groovy.runtime.dgm.550b5847707b22267e9
org.codehaus.groovy.runtime.dgm.56e237b7fe6acdc1eb
org.codehaus.groovy.runtime.dgm.563718410c86b70bbcc
org.codehaus.groovy.runtime.dgm.5647501f9a8a9a8eec4
org.codehaus.groovy.runtime.dgm.5697e22717b7e1a4948
org.codehaus.groovy.runtime.dgm.572b8ae23bb6a50e62
org.codehaus.groovy.runtime.dgm.5703e724f7b5e05cf8c
org.codehaus.groovy.runtime.dgm.57174feae43f6c40d89
org.codehaus.groovy.runtime.dgm.57213aa780f2e8d4701
org.codehaus.groovy.runtime.dgm.57388f2d479d46bd02c
org.codehaus.groovy.runtime.dgm.574e0537143fb7c0395
org.codehaus.groovy.runtime.dgm.58e1b634626065810a
org.codehaus.groovy.runtime.dgm.584212cf85e02f21d05
org.codehaus.groovy.runtime.dgm.586bde01056c0f72841
org.codehaus.groovy.runtime.dgm.58780ee5d6b52da6fa2
org.codehaus.groovy.runtime.dgm.591c95d6efe443a32c0
[Regenerated JaCoCo coverage report: class listing from the test session, where each row pairs a fully qualified class name with its JaCoCo class id. The listing spans the org.codehaus.groovy, org.gradle, org.jocl, org.junit, org.objenesis, org.slf4j, and org.spockframework packages; the flattened table rows are elided here.]
org.spockframework.runtime.IterationNode6804b212be47cfaa
org.spockframework.runtime.MasterRunListener08c7d4f58bdfd8da
org.spockframework.runtime.MasterRunSupervisor241a7e67a13990c0
org.spockframework.runtime.MethodSelectorResolver0ff0ed76993b56bb
org.spockframework.runtime.ParameterizedFeatureChildExecutor52161fc5493bc16b
org.spockframework.runtime.ParameterizedFeatureChildExecutor.10b3d9d3d7d917b10
org.spockframework.runtime.ParameterizedFeatureChildExecutor.3ab332244ee3237d6
org.spockframework.runtime.ParameterizedFeatureNode1b7bbd568b15f65f
org.spockframework.runtime.PlatformParameterizedSpecRunner9db753a1b1e33a49
org.spockframework.runtime.PlatformParameterizedSpecRunner.1dcc9af332e2b516f
org.spockframework.runtime.PlatformSpecRunnera46aed0815264244
org.spockframework.runtime.RunContext2ed2e4fdc5326bd8
org.spockframework.runtime.SafeIterationNameProviderb19cf500b6e5d264
org.spockframework.runtime.SafeIterationNameProvider.getName0c21d8d749da4835
org.spockframework.runtime.SimpleFeatureNodee74729828070c703
org.spockframework.runtime.SpecInfoBuildereb9677f06891fa06
org.spockframework.runtime.SpecNode797d05fa782844cf
org.spockframework.runtime.SpecUtilfc686272c2fabd9c
org.spockframework.runtime.SpecificationContext989cde1557d1b040
org.spockframework.runtime.SpockEngine3e09e72fffe2a6d5
org.spockframework.runtime.SpockEngineDescriptor7001a366a8c6a957
org.spockframework.runtime.SpockEngineDiscoveryPostProcessore0069892ddb75f57
org.spockframework.runtime.SpockException7fbb382a0c11eb15
org.spockframework.runtime.SpockExecutionContext2fb61928a8877dde
org.spockframework.runtime.SpockNode07082026a213167e
org.spockframework.runtime.SpockRuntimec4b53f11e998d765
org.spockframework.runtime.SpockRuntime.CollectionConditionb44858f0fb7f5c06
org.spockframework.runtime.SpockRuntime.MatcherConditiona1ccecdb385042f4
org.spockframework.runtime.StackTraceFilter2662fced060b1872
org.spockframework.runtime.ValueRecorder51810fc1676009e3
org.spockframework.runtime.condition.DiffedArrayRenderer36d8b8d58df43c2e
org.spockframework.runtime.condition.DiffedClassRendererb5494e05b24145d0
org.spockframework.runtime.condition.DiffedCollectionRenderer06c79a32e52233d1
org.spockframework.runtime.condition.DiffedMapRenderer47db8c34d0939e31
org.spockframework.runtime.condition.DiffedObjectAsBeanRenderer7b9014adfee4f84c
org.spockframework.runtime.condition.DiffedObjectAsStringRenderer734e7469c635c62c
org.spockframework.runtime.condition.DiffedSetRenderer0ca2d6c8fc7c9ba9
org.spockframework.runtime.condition.ObjectRendererService105fa14596625a7c
org.spockframework.runtime.extension.ExtensionExceptione9d43fcf919e317d
org.spockframework.runtime.extension.IAnnotationDrivenExtensionda083713f41de3b8
org.spockframework.runtime.extension.IDataDriver814f405afb0fd7d5
org.spockframework.runtime.extension.IGlobalExtensionde1246442ed0f42e
org.spockframework.runtime.extension.MethodInvocation3993301291a47dd2
org.spockframework.runtime.extension.builtin.ConditionalExtension5f06692b83010697
org.spockframework.runtime.extension.builtin.ConditionalExtension.ConditionInterceptor11a71f1c7a73eb18
org.spockframework.runtime.extension.builtin.ConditionalExtension.IterationConditionbae02784b4343abf
org.spockframework.runtime.extension.builtin.IgnoreExtensionec4bf8e0da10525d
org.spockframework.runtime.extension.builtin.IgnoreIfExtensione0ef9d502e6e1906
org.spockframework.runtime.extension.builtin.IncludeExcludeExtensionf2792789f803d11c
org.spockframework.runtime.extension.builtin.NarrativeExtension7ac35e087a9cc30e
org.spockframework.runtime.extension.builtin.OptimizeRunOrderExtension0493eed8dd6e0a2d
org.spockframework.runtime.extension.builtin.PreconditionContext448b764ac98da01f
org.spockframework.runtime.extension.builtin.PreconditionContext.DataVariableContextException48d60ed1d170ea44
org.spockframework.runtime.extension.builtin.PreconditionContext.PreconditionContextExceptionfca8e059c05d2d7d
org.spockframework.runtime.extension.builtin.PreconditionContext.StrictHashMapfbe2bc182cd73c57
org.spockframework.runtime.extension.builtin.TitleExtensioneccd433ef68a07e5
org.spockframework.runtime.extension.builtin.UnrollConfiguration7b4951b8776d057e
org.spockframework.runtime.extension.builtin.UnrollExtension2d35a2b93d53efee
org.spockframework.runtime.extension.builtin.UnrollIterationNameProviderd7f9a774e60b500e
org.spockframework.runtime.model.BlockInfod049bfafbd4f913c
org.spockframework.runtime.model.BlockKinde34ac40d5aa02fd5
org.spockframework.runtime.model.DataProviderInfoc778353b6cd45c98
org.spockframework.runtime.model.ExecutionResultb9ad2e146ffa0e33
org.spockframework.runtime.model.ExpressionInfodb7f7892fa83cc13
org.spockframework.runtime.model.ExpressionInfo.1b98a7c8bd72c0d43
org.spockframework.runtime.model.FeatureInfo52b003d99f8d3035
org.spockframework.runtime.model.FieldInfob524cdd69cec6be8
org.spockframework.runtime.model.IterationFilter7643dcd8fed88412
org.spockframework.runtime.model.IterationFilter.Mode09f35e287319784d
org.spockframework.runtime.model.IterationInfo3c7d04ef92330358
org.spockframework.runtime.model.MethodInfoe89ad50ee30f9bd4
org.spockframework.runtime.model.MethodKind92d6a841415e94d7
org.spockframework.runtime.model.NodeInfoa475da096cbf6c72
org.spockframework.runtime.model.NodeInfo.getAnnotation.0fc5783eeaf9d0ada
org.spockframework.runtime.model.NodeInfo.isAnnotationPresentcb1c5cb7af5b3118
org.spockframework.runtime.model.ParameterInfod360170e55191df7
org.spockframework.runtime.model.SpecElementInfo74804a9c0c501d2f
org.spockframework.runtime.model.SpecInfo1b46a9ea27e37b51
org.spockframework.runtime.model.SpecInfo.addListener90f644d85ed44ed6
org.spockframework.runtime.model.parallel.ExecutionMode980b87177b0b04ee
org.spockframework.tempdir.TempDirConfigurationb9212a2844c16220
org.spockframework.util.Assert4488a80988d72918
org.spockframework.util.CollectionUtileff814d62d8e7451
org.spockframework.util.DataVariableMap40d1013c985bb8f9
org.spockframework.util.DataVariableMap.EntrySet5da69698aedf0c20
org.spockframework.util.DataVariableMap.EntrySet.1ef9e579484ea6b3e
org.spockframework.util.ExceptionUtila5cf2d594cf055e3
org.spockframework.util.GroovyReleaseInfo3b9b7488e1adbab3
org.spockframework.util.Identifiers5cef89bc25bb327e
org.spockframework.util.InternalIdentifiers5c69bb1b2e50cd00
org.spockframework.util.MopUtil20ae12de4399a19d
org.spockframework.util.ReflectionUtil12ffbe8d8aa88a90
org.spockframework.util.RenderUtil3cd9c5fb6bb65eff
org.spockframework.util.SpockReleaseInfoe4df0c7b2676536a
org.spockframework.util.SpockUserHomeUtild56d47bede8c8650
org.spockframework.util.VersionCheckerf8fa9235b608e9ed
org.spockframework.util.VersionNumber3459768b13b6ad20
spock.config.IncludeExcludeCriteria17d2b6ac720a18d7
spock.config.ParallelConfigurationdc005a2536d487d0
spock.config.RunnerConfiguration35f230d4dce09c30
spock.lang.Specification598807043edbc59b
spock.lang.Specification.noExceptionThrown5229da1f3b402a42
spock.lang.Subject.valueb627439edd6e712f
spock.lang.Title.value43e0a155f7c776d8
spock.mock.MockingApi73f1186c71e822fe
st.Benchmark_System_Test50716aecf5eb4813
st.Benchmark_System_Test.__spock_feature_0_1_closure36533ab2cc57bfbaf
st.Benchmark_System_Test.__spock_feature_0_1_closure40b18831cd252e16f
st.Benchmark_System_Test.__spock_feature_0_1_closure5e6b546760c62acb2
st.Benchmark_System_Test.__spock_feature_0_1_closure62d5de52e8337cb96
st.Benchmark_System_Test.__spock_feature_0_1_closure6._closure88553549d68c0075b
st.Benchmark_System_Test.__spock_feature_0_1_closure766fda5426a0a31bc
st.Benchmark_System_Test._setup_closure17b0d958ffebc023c
st.Benchmark_System_Test._setup_closure2b9a8d83fb649b06b
st.Broad_System_Testcb59377a4b6d03d3
st.Broad_System_Test._setupSpec_closure1d1e7082272558eb6
st.NN_Concepts_Spec2d04d661cfc6a118
st.NN_Concepts_Spec.__spock_feature_0_0_closure18eea042aad3efdce
st.NN_Concepts_Spec.__spock_feature_0_0_closure29985bb998db42e79
st.NN_Concepts_Spec.__spock_feature_0_0_closure339af14f61d86df35
st.Training_NNs_Spec20093977416cbc3a
st.Training_NNs_Spec.__spock_feature_0_0_closure12eea2aedd176804b
st.Training_NNs_Spec.__spock_feature_0_1_closure2d8dc101221138edc
st.Training_NNs_Spec.__spock_feature_0_2_closure3bd3937525d850ff8
st.Training_NNs_Spec.__spock_feature_0_2prov0_closure450ea2cafeb40adbc
st.Training_NNs_Spec.__spock_feature_0_2prov0_closure5231ca7c688e2e3cd
st.Training_NNs_Spec.__spock_feature_0_2prov0_closure622653cddfb3be733
st.attention.QuasiMultiHeadAttention9f9520acecea1161
st.attention.QuasiMultiHeadAttention.runfcefd16deed56ab2
st.attention.QuasiMultiHeadAttention.train.02699e9aca52a7114
st.attention.ReductiveAttentionHead765a374d5beff8e4
st.tests.BroadSystemTest045148e6655f1a74
st.tests.CrossDeviceSystemTestcd266b538f1852b3
st.tests.SimpleNNSystemTest98a93358caa38d83
st.tests.SimpleNNSystemTest.Mode337b5d4b85367b3c
st.tests.SimpleNNSystemTest._closure13106a9e8a712934f
st.tests.SimpleNNSystemTest._closure21fe1f3f506a1f671
st.tests.SimpleNNSystemTest._closure326fef0294ee6f6d6
st.tests.SimpleNNSystemTest._closure4efafd6e7c22c2b64
st.tests.SimpleNNSystemTest._closure549614cf35f1e8aac
st.tests.SimpleNNSystemTest._closure67980c7bd5c7da224
st.tests.SimpleNNSystemTest.backprop.1cf996b1207af2046
st.tests.SimpleNNSystemTest.feedforward0ec97c18b77d7a8b
st.tests.SimpleNNSystemTest.on.3cae6d878a184f3f3
st.tests.SimpleNNSystemTest.sigmoid.031b4a1bb9d51a5d7
st.tests.SimpleNNSystemTest.sigmoid_derivative.2c38c055e7cf7c6e0
sun.text.resources.cldr.ext.FormatData_en_00122e898e372dfd5b8
sun.text.resources.cldr.ext.FormatData_en_GBfad409c2b9ea4375
sun.util.resources.cldr.provider.CLDRLocaleDataMetaInfo9ed83010eeaa402e
sun.util.resources.provider.LocaleDataProvider090384bcacb31f21
sun.util.resources.provider.NonBaseLocaleDataMetaInfo3286ba296d343b25
testutility.Loadbb6e5a5db249f307
testutility.Load.resourceAt939e23ef80a3c35c
testutility.Measured0a8504c2dc5693d
testutility.Measure.secondsdb4e45e87745a451
testutility.Sleep62fd22b85a31d651
testutility.Statisticsa7a09b019cdde552
testutility.UnitTestercb5f879a189300d5
testutility.UnitTester_Tensor5e2764f9eea88693
testutility.UnitTester_Tensor.TestBroadcast9a941242bed67588
testutility.UnitTester_Tensor.TestBroadcast.1bc352be590cf544a
testutility.mock.DummyDeviced6ac7580334fe8e5
testutility.nns.SimpleFeedForwardNN78ff99cc846a43e2
testutility.nns.SimpleFeedForwardNN.forwardfcd54c7a6c95710a
testutility.nns.SimpleFeedForwardNN.train.0200fc109de72cb2c
testutility.opencl.DispatchUtility6539ab5276397d51
testutility.opencl.DispatchUtility.findBestParamsf38b94a7a22f7aa7
ut.autograd.AD_And_Computation_Graph_Spec08745f15caef3c98
ut.autograd.AD_And_Computation_Graph_Spec.__spock_feature_0_1_closure2e782be81c9aa40dc
ut.autograd.AD_And_Computation_Graph_Spec.__spock_feature_0_1_closure30a28aeddf93ed880
ut.autograd.AD_And_Computation_Graph_Spec.__spock_feature_0_1_closure48386bd48acbdd8b5
ut.autograd.AD_And_Computation_Graph_Spec.__spock_feature_0_1_closure56282d93c9c128d9f
ut.autograd.AD_And_Computation_Graph_Spec.__spock_feature_0_1_closure5._closure7acb0968050936cb5
ut.autograd.AD_And_Computation_Graph_Spec.__spock_feature_0_1_closure64268d509d4df1df7
ut.autograd.AD_And_Computation_Graph_Spec.__spock_feature_0_1_closure6._closure891697ff1b68b4899
ut.autograd.AD_And_Computation_Graph_Spec._setup_closure1e5a650169736e3cc
ut.autograd.Autograd_Explained0cf35b2ea5628086
ut.autograd.Autograd_Explained._setup_closure11d502b0134cd1a26
ut.autograd.Autograd_Flags_Explained6312bfee41502d33
ut.autograd.Autograd_Flags_Explained._setup_closure13f03a162a8089e3f
ut.autograd.Autograd_NN_Spec7e08bdaec2eeb621
ut.autograd.Autograd_NN_Spec.__spock_feature_0_0_closure2011095658562c5a8
ut.autograd.Autograd_NN_Spec.__spock_feature_0_0_closure373c97802daf6a498
ut.autograd.Autograd_NN_Spec.__spock_feature_0_1_closure4b944ea52322e6a3a
ut.autograd.Autograd_NN_Spec.__spock_feature_0_1_closure508e7004f2a18d37e
ut.autograd.Autograd_NN_Spec.__spock_feature_0_2_closure6a8ea350a7af3103d
ut.autograd.Autograd_NN_Spec.__spock_feature_0_2_closure7e38f12e8193c636b
ut.autograd.Autograd_NN_Spec.__spock_feature_0_4_closure8cb1c2fc9563ab9b8
ut.autograd.Autograd_NN_Spec.__spock_feature_0_4_closure9a4145108a2d51f7b
ut.autograd.Autograd_NN_Spec._setup_closure1244cb4dceaab5cfb
ut.autograd.Autograd_Tensor_Spec730525644be5244a
ut.autograd.Autograd_Tensor_Spec._setup_closure132549c12cad73a0b
ut.autograd.JITProp_Autograd_Tensor_Spec619d8a74d4db2730
ut.autograd.JITProp_Autograd_Tensor_Spec._setup_closure1e7e2f9deaabfba0d
ut.autograd.internal.GraphNode_Instantiation_Exception_Unit_Tests8e77d813241278ae
ut.autograd.internal.GraphNode_Instantiation_Exception_Unit_Tests.__spock_feature_0_0_closure2be74e5b6cab619c5
ut.autograd.internal.GraphNode_Instantiation_Exception_Unit_Tests.__spock_feature_0_1_closure300daa2b23fa6fdcc
ut.autograd.internal.GraphNode_Instantiation_Exception_Unit_Tests._setup_closure1b1d9911405708041
ut.autograd.internal.GraphNode_Tensor_Exception_Unit_Tests0f49e5a15cf5023d
ut.autograd.internal.GraphNode_Tensor_Exception_Unit_Tests.__spock_feature_0_0_closure1f5524b59f55855c8
ut.backend.Backend_Extension_Spec41e2082297d5b10e
ut.backend.Backend_MatMul_Extension_Spec6715224c039dad57
ut.backend.Backend_MatMul_Extension_Spec.__spock_feature_0_0_closure1be35cdec9da36e72
ut.backend.Backend_MatMul_Extension_Spec.__spock_feature_0_0_closure2c68b7a82d5d72c70
ut.backend.Backend_MatMul_Extension_Spec.__spock_feature_0_0_closure2._closure1065479a8c4d3adfca
ut.backend.Backend_MatMul_Extension_Spec.__spock_feature_0_0_closure2._closure58ae418a84b15c3a9
ut.backend.Backend_MatMul_Extension_Spec.__spock_feature_0_0_closure2._closure6c6515e76c8b08b8b
ut.backend.Backend_MatMul_Extension_Spec.__spock_feature_0_0_closure2._closure71c00825c5ab60102
ut.backend.Backend_MatMul_Extension_Spec.__spock_feature_0_0_closure2._closure815c3f1ec341fd815
ut.backend.Backend_MatMul_Extension_Spec.__spock_feature_0_0_closure2._closure8._closure11f5db63934878e3ab
ut.backend.Backend_MatMul_Extension_Spec.__spock_feature_0_0_closure2._closure8._closure129baecad6efb8ca11
ut.backend.Backend_MatMul_Extension_Spec.__spock_feature_0_0_closure2._closure929bee642ce671099
ut.backend.Backend_MatMul_Extension_Spec.__spock_feature_0_0_closure3cfec2d5a9cbe7af6
ut.backend.Backend_MatMul_Extension_Spec.__spock_feature_0_0_closure484523ea43d263b05
ut.backend.Matrix_Multiplication_Specfc8ab65d5e66c8cf
ut.backend.core.Backend_Algorithm_AD_Spec4abab00c9b8a74fc
ut.backend.core.Backend_Algorithm_AD_Spec.__spock_feature_0_0prov0_closure17b24b722c8e81b3d
ut.backend.core.Backend_Algorithm_AD_Spec.__spock_feature_0_0prov0_closure2837b3e1bc77e6951
ut.backend.core.Backend_Algorithm_AD_Spec.__spock_feature_0_1prov0_closure33b1d751f71df64ba
ut.backend.core.Backend_Algorithm_AD_Spec.__spock_feature_0_1prov0_closure4ad92f1d7617d9df1
ut.backend.core.Backend_Algorithm_AD_Spec.__spock_feature_0_2prov0_closure51a4076016972a39c
ut.backend.core.Backend_Algorithm_AD_Spec.__spock_feature_0_2prov0_closure6332416b457d1400d
ut.backend.core.Backend_Algorithm_AD_Spec.__spock_feature_0_3prov0_closure79efda12899d4ae03
ut.backend.core.Backend_Algorithm_AD_Spec.__spock_feature_0_3prov0_closure85b051f4fea2b3f33
ut.backend.core.Backend_Algorithm_Implementation_Spec5e61fe08498bc7f4
ut.backend.core.Backend_Algorithm_Implementation_Spec.__spock_feature_0_0prov0_closure147e34fcc713b47fd
ut.backend.core.Backend_Algorithm_Implementation_Spec.__spock_feature_0_0prov0_closure273911c4d02e371a4
ut.backend.core.Backend_Algorithm_Implementation_Spec.__spock_feature_0_1prov0_closure316f3ad7bea7b9d03
ut.backend.core.Backend_Algorithm_Implementation_Spec.__spock_feature_0_1prov0_closure4470f985596bf1d53
ut.backend.core.Backend_Algorithm_Implementation_Spec.__spock_feature_0_2_closure58d8b3f2618cba792
ut.backend.core.Backend_Algorithm_Implementation_Spec.__spock_feature_0_2prov0_closure6937b612febe08093
ut.backend.core.Backend_Algorithm_Implementation_Spec.__spock_feature_0_2prov0_closure79a6643bb62120678
ut.backend.core.Backend_Functional_Algorithm_Spec1157df299fe82de6
ut.backend.core.Backend_Functional_Algorithm_Spec.TestAlgorithm2dd5774aab8d6d73
ut.backend.core.Backend_Functional_Algorithm_Spec.__spock_feature_0_0prov0_closure15b8e9f84669a85ab
ut.backend.core.Backend_Functional_Algorithm_Spec.__spock_feature_0_0prov0_closure21f14a5f216a7eb26
ut.backend.core.Backend_Functional_Algorithm_Spec.__spock_feature_0_0prov0_closure36210fd1e6acf690a
ut.backend.core.Backend_Functional_Algorithm_Spec.__spock_feature_0_1_closure1042e0103946f60d30
ut.backend.core.Backend_Functional_Algorithm_Spec.__spock_feature_0_1_closure4a5bbdf3d41bce061
ut.backend.core.Backend_Functional_Algorithm_Spec.__spock_feature_0_1_closure57e2f9d85ae798879
ut.backend.core.Backend_Functional_Algorithm_Spec.__spock_feature_0_1_closure6ddcfa0d82123061d
ut.backend.core.Backend_Functional_Algorithm_Spec.__spock_feature_0_1_closure73c51b7936c67bb22
ut.backend.core.Backend_Functional_Algorithm_Spec.__spock_feature_0_1_closure813e7fd3297879f3e
ut.backend.core.Backend_Functional_Algorithm_Spec.__spock_feature_0_1_closure9598b50ff81169ec4
ut.backend.core.Backend_Functional_Algorithm_Spec.__spock_feature_0_2_closure11feb110fea316a164
ut.backend.core.Backend_Functional_Algorithm_Spec.__spock_feature_0_2_closure123861ee1cb29a7423
ut.backend.core.Backend_Functional_Algorithm_Spec.__spock_feature_0_2_closure130a67114608e0160a
ut.backend.core.Backend_Functional_Algorithm_Spec.__spock_feature_0_2_closure143e480e1851aaa826
ut.backend.core.Backend_Functional_Algorithm_Spec.__spock_feature_0_2prov1_closure15c42f3057eab07986
ut.backend.core.Backend_Functional_Algorithm_Spec.__spock_feature_0_2prov1_closure15._closure18fb1ce9c34af851cf
ut.backend.core.Backend_Functional_Algorithm_Spec.__spock_feature_0_2prov1_closure16e1dd86c93cd9b084
ut.backend.core.Backend_Functional_Algorithm_Spec.__spock_feature_0_2prov1_closure16._closure1905fed407d48679a8
ut.backend.core.Backend_Functional_Algorithm_Spec.__spock_feature_0_2prov1_closure17cbb23385255f73b2
ut.backend.core.Backend_Functional_Algorithm_Spec.__spock_feature_0_2prov1_closure17._closure204286c5ac894bf37c
ut.backend.core.InternalMatMulTestebe35f24cd4aba16
ut.backend.core.InternalMatMulTest.Type58c059330c556d42
ut.backend.core.InternalMatMulTest.__fillIt32_closure2d4b6a2e99dc91bc5
ut.backend.core.InternalMatMulTest.__fillIt64_closure1da7b0110f8cf5163
ut.backend.core.InternalMatMulTest.__fillItI32_closure41b33181a560fb4a8
ut.backend.core.InternalMatMulTest.__fillItI64_closure3d99380fc097a28ee
ut.backend.core.Matrix_Multiplication_Spec6baeb3ce66f77e3d
ut.backend.core.OpenCL_Backend_Speccd0baee520755e6f
ut.backend.core.OpenCL_Backend_Spec.__spock_feature_0_0_closure1e8dbc261f90cd0fc
ut.backend.core.OpenCL_Backend_Spec.__spock_feature_0_0_closure2dd5bfd68a6a8274e
ut.backend.core.OpenCL_Backend_Spec.__spock_feature_0_0_closure34289627106d92ccb
ut.backend.core.OpenCL_Backend_Spec.__spock_feature_0_0_closure4154d466407d2b59a
ut.backend.core.Randomization_Spec97471c5562a15713
ut.backend.mocks.CLContext8cb709831e3f179d
ut.device.CPU_Spec7afd24ee49292f66
ut.device.CPU_Spec.__spock_feature_0_0_closure2e5d7956ea1e7f21c
ut.device.CPU_Spec.__spock_feature_0_3_closure38f9b855bd4f3341c
ut.device.CPU_Spec.__spock_feature_0_3_closure43bb0ae9d53605f15
ut.device.CPU_Spec._setup_closure1724ff49d2c7af5df
ut.device.Cross_Device_IO_Spece2f2bdf6cf316a91
ut.device.Cross_Device_IO_Spec.__spock_feature_0_0_closure174205112a8cd403b
ut.device.Cross_Device_IO_Spec.__spock_feature_0_1_closure2fd668633b57cb66e
ut.device.Cross_Device_Type_Spec3dfcb7a77bbb0db8
ut.device.Cross_Device_Type_Spec.__spock_feature_0_0_closure2567c51306a34e863
ut.device.Cross_Device_Type_Spec.__spock_feature_0_10_closure13b4e2ad5367643240
ut.device.Cross_Device_Type_Spec.__spock_feature_0_3_closure36fb6601783cfd1a9
ut.device.Cross_Device_Type_Spec.__spock_feature_0_4_closure413271ee1751f1b20
ut.device.Cross_Device_Type_Spec.__spock_feature_0_4_closure54be6673f460a5104
ut.device.Cross_Device_Type_Spec.__spock_feature_0_5_closure66485c5d8ce99cad7
ut.device.Cross_Device_Type_Spec.__spock_feature_0_7_closure77ee837b49c82ae12
ut.device.Cross_Device_Type_Spec.__spock_feature_0_8_closure1039b72a365fb50589
ut.device.Cross_Device_Type_Spec.__spock_feature_0_8_closure8ac252829bb8c4aef
ut.device.Cross_Device_Type_Spec.__spock_feature_0_8_closure9c2e4b3952686ca47
ut.device.Cross_Device_Type_Spec._setup_closure11f9863706106f885
ut.device.FileDevice_Speccff64b35ab00f412
ut.device.FileDevice_Spec.__spock_feature_0_1_closure22ede1d66a92bc5fe
ut.device.FileDevice_Spec.__spock_feature_0_1_closure3cf80fc25461d1fec
ut.device.FileDevice_Spec.__spock_feature_0_1_closure43c38f39b7ea83c85
ut.device.FileDevice_Spec.__spock_feature_0_2_closure594e6263bf26db987
ut.device.FileDevice_Spec.__spock_feature_0_2_closure671be37adfa564b97
ut.device.FileDevice_Spec._setup_closure1b120cec7479b9e37
ut.device.OpenCLDevice_Exception_Spec60c4e63292f036c5
ut.device.OpenCLDevice_Exception_Spec.__spock_feature_0_0_closure109bed10d6c6dfcde
ut.device.OpenCLDevice_Exception_Spec.__spock_feature_0_1_closure2324fdc9ee7328199
ut.device.OpenCLDevice_Exception_Spec.__spock_feature_0_2_closure3fb68ac1926e4db6b
ut.device.OpenCLDevice_Exception_Spec.__spock_feature_0_3_closure446769772a29c7acc
ut.device.OpenCLDevice_Spec1656c2c8ea5b5a31
ut.device.OpenCLDevice_Spec.__spock_feature_0_0_closure239ea6dc7d5eb3b45
ut.device.OpenCLDevice_Spec.__spock_feature_0_0_closure3ef065d3f8ed59dcf
ut.device.OpenCLDevice_Spec.__spock_feature_0_1_closure4173a3993442c71a1
ut.device.OpenCLDevice_Spec.__spock_feature_0_1_closure59d60d448ee149d22
ut.device.OpenCLDevice_Spec.__spock_feature_0_2_closure62a0c1c1c5b0fc41e
ut.device.OpenCLDevice_Spec.__spock_feature_0_3_closure7c2434462168e5647
ut.device.OpenCLDevice_Spec.__spock_feature_0_4_closure88843ab436f588349
ut.device.OpenCLDevice_Spec.__spock_feature_0_4_closure9248ab0c98830c20f
ut.device.OpenCLDevice_Spec.__spock_feature_0_5_closure10115c3c55c705834f
ut.device.OpenCLDevice_Spec.__spock_feature_0_5_closure117e6e3506ee58cb20
ut.device.OpenCLDevice_Spec.__spock_feature_0_5_closure12150ae3f242505e6a
ut.device.OpenCLDevice_Spec.__spock_feature_0_5_closure13fd3fb80e05fd1db4
ut.device.OpenCLDevice_Spec.__spock_feature_0_5_closure14df49e54205cd3e3c
ut.device.OpenCLDevice_Spec.__spock_feature_0_6_closure15a8ae03645101eac2
ut.device.OpenCLDevice_Spec.__spock_feature_0_6_closure16dc7548cc54f26012
ut.device.OpenCLDevice_Spec.__spock_feature_0_6_closure17e5b80ee2054295bb
ut.device.OpenCLDevice_Spec.__spock_feature_0_6_closure184ac8579982b406fb
ut.device.OpenCLDevice_Spec.__spock_feature_0_6_closure196749e58d0ea37f10
ut.device.OpenCLDevice_Spec.__spock_feature_0_7_closure206ad34198fdda24a9
ut.device.OpenCLDevice_Spec.__spock_feature_0_7_closure2101debea6cc0b5617
ut.device.OpenCLDevice_Spec.__spock_feature_0_7_closure2255c424bf92e2e482
ut.device.OpenCLDevice_Spec.__spock_feature_0_7_closure23950e623ae5332150
ut.device.OpenCLDevice_Spec.__spock_feature_0_7_closure24e6531ba218ddb902
ut.device.OpenCLDevice_Spec._setupSpec_closure1c833959446518ce2
ut.device.OpenCL_Specad6e0c2d6e32260c
ut.device.OpenCL_Spec.1d39116b5e41c6b41
ut.device.OpenCL_Spec.__spock_feature_0_0_closure1ade65a5ed10467e9
ut.device.OpenCL_Spec.__spock_feature_0_1_closure218b1bd51585ac442
ut.device.OpenCL_Spec.__spock_feature_0_2_closure319803471879b0665
ut.device.OpenCL_Spec.__spock_feature_0_3_closure4e171f9a1e367907d
ut.device.OpenCL_Spec.__spock_feature_0_3_closure5e548b9cbba6d194e
ut.device.OpenCL_Spec.__spock_feature_0_4_closure6c9f45d54086b2e92
ut.device.OpenCL_Spec.__spock_feature_0_4_closure72ae37fcaf5b5cdaf
ut.device.OpenCL_Spec.__spock_feature_0_4_closure7._closure9a82dd8ce32bb40f9
ut.device.OpenCL_Spec.__spock_feature_0_4_closure89bfcafdfaf499cd8
ut.device.internal.CLFunctionCompiler_Spec357b641cdd616c79
ut.device.internal.CLFunctionCompiler_Spec.__spock_feature_0_0_closure2102678a8963a5ce2
ut.device.internal.CLFunctionCompiler_Spec.__spock_feature_0_1_closure322b05a280cfb341a
ut.device.internal.CLFunctionCompiler_Spec.__spock_feature_0_2_closure4502bfd2781b9df9d
ut.device.internal.CLFunctionCompiler_Spec.__spock_feature_0_2_closure5f8d8e7a3e63dc547
ut.device.internal.CLFunctionCompiler_Spec.__spock_feature_0_2_closure649a21f713ff697fd
ut.device.internal.CLFunctionCompiler_Spec._setup_closure15ce553c099cacadd
ut.device.internal.CPU_Kernel_Spec4090d8903ff57e4c
ut.device.internal.CPU_Kernel_Spec.__spock_feature_0_0_closure1302a707f332dfd91
ut.device.internal.CPU_Kernel_Spec.__spock_feature_0_1_closure271518a4a2e37fb9d
ut.device.internal.CPU_Kernel_Spec.__spock_feature_0_1_closure32efde2598a015034
ut.device.internal.OpenCL_Data_Spec28f54e83295a371b
ut.device.internal.OpenCL_Data_Spec.__spock_feature_0_0_closure3a3e7ebacbb396531
ut.device.internal.OpenCL_Data_Spec.__spock_feature_0_0_closure432e92112d3b086fd
ut.device.internal.OpenCL_Data_Spec.__spock_feature_0_1_closure580ecc0b91c58310e
ut.device.internal.OpenCL_Data_Spec.__spock_feature_0_1_closure68a6da546510f6ceb
ut.device.internal.OpenCL_Data_Spec.__spock_feature_0_1_closure71c1286136a3da7b7
ut.device.internal.OpenCL_Data_Spec._cleanup_closure28d5c9d295097c6eb
ut.device.internal.OpenCL_Data_Spec._setup_closure19cb60d701e71497e
ut.device.internal.OpenCL_Kernel_Unit_Specbf29d937adccf9fd
ut.device.internal.OpenCL_Kernel_Unit_Spec.__spock_feature_0_1_closure1ed0389fbcd195f66
ut.device.internal.OpenCL_Kernel_Unit_Spec.__spock_feature_0_2_closure214868cf7ee176fac
ut.device.internal.OpenCL_Kernel_Unit_Spec.__spock_feature_0_3_closure3015a5fde5c15af8d
ut.device.internal.OpenCL_Kernel_Unit_Spec.__spock_feature_0_4_closure408e8177f6be51fd5
ut.device.internal.OpenCL_Kernel_Unit_Spec.__spock_feature_0_4_closure5561b355bde44c50b
ut.dtype.DataType_Spec939dd890ab7d71a9
ut.dtype.NumericType_Specb5ab0a5f2e3a66fe
ut.dtype.NumericType_Spec.__spock_feature_0_2_closure16c51d42c2169ce02
ut.dtype.NumericType_Spec.__spock_feature_0_2_closure101f59c369df70b6b9
ut.dtype.NumericType_Spec.__spock_feature_0_2_closure26e575526c248cb2d
ut.dtype.NumericType_Spec.__spock_feature_0_2_closure32766d7cb4094b375
ut.dtype.NumericType_Spec.__spock_feature_0_2_closure417e4bf5ddd32cb5a
ut.dtype.NumericType_Spec.__spock_feature_0_2_closure517212fedcc1bb0e5
ut.dtype.NumericType_Spec.__spock_feature_0_2_closure6eb128c5902f0a917
ut.dtype.NumericType_Spec.__spock_feature_0_2_closure717c835a31cbf3699
ut.dtype.NumericType_Spec.__spock_feature_0_2_closure89c2ebb1fe80cc073
ut.dtype.NumericType_Spec.__spock_feature_0_2_closure91f1e2f75fa30321b
ut.framing.Tensor_Framing_Spec7caf62b35fa9c407
ut.framing.Tensor_Framing_Spec.__spock_feature_0_1_closure2b292d593b4cd18b4
ut.framing.Tensor_Framing_Spec.__spock_feature_0_2_closure352af17a9fd70683f
ut.framing.Tensor_Framing_Spec.__spock_feature_0_2_closure4b10d2c7f62745e88
ut.framing.Tensor_Framing_Spec.__spock_feature_0_2_closure5e4ba97a355ee62c7
ut.framing.Tensor_Framing_Spec.__spock_feature_0_2_closure6549dfb91557bff19
ut.framing.Tensor_Framing_Spec.__spock_feature_0_2_closure77c7b9f9c327c56a8
ut.framing.Tensor_Framing_Spec.__spock_feature_0_3_closure825797d17670e2597
ut.framing.Tensor_Framing_Spec._setupSpec_closure156c353235b44bd23
ut.introductions.Tensor_NDArray_Spec3c705131045860b1
ut.math.BackendContext_Specc444f46ed252654a
ut.math.BackendContext_Spec.__spock_feature_0_1_closure18b209a67561795f2
ut.math.BackendContext_Spec.__spock_feature_0_2_closure2a21e6ab896f916e8
ut.math.BackendContext_Spec.__spock_feature_0_2prov0_closure39011d2320bf01432
ut.math.BackendContext_Spec.__spock_feature_0_2prov0_closure3._closure6c4a2081a59af8327
ut.math.BackendContext_Spec.__spock_feature_0_2prov0_closure4a49e01ac61c92b83
ut.math.BackendContext_Spec.__spock_feature_0_2prov0_closure4._closure72da19b7fc175eb37
ut.math.BackendContext_Spec.__spock_feature_0_2prov0_closure5f9870d598dbc2bc1
ut.math.BackendContext_Spec.__spock_feature_0_2prov0_closure5._closure83acd0ccd20dd1188
ut.math.ConCat_Specb6f99a1ad315340b
ut.math.ConCat_Spec.__spock_feature_0_0_closure11d5958c88d8447f3
ut.math.ConCat_Spec.__spock_feature_0_1_closure218ccf52603c7f15b
ut.math.ConCat_Spec.__spock_feature_0_1_closure35ea8cc27e97cdbeb
ut.math.ConCat_Spec.__spock_feature_0_2_closure4048866c755cfe936
ut.math.ConCat_Spec.__spock_feature_0_2_closure5df7c8724fe71387e
ut.math.ConCat_Spec.__spock_feature_0_3_closure689205826f1e66d3a
ut.math.ConCat_Spec.__spock_feature_0_4_closure7d8eb8f936f71af1f
ut.math.DummyFunction2cf2109f08dc2e94
ut.math.Function_Exception_Spec1294ef72b9d05e60
ut.math.Function_Parsing_Spec01ab92ce0ac23d4d
ut.math.Function_Scalar_Specc4066dbd807c2a47
ut.math.Function_Specbacd01b6b33ced6f
ut.math.Function_Spec.__spock_feature_0_0_closure144ee6a2e8726f07a
ut.math.Function_Spec.__spock_feature_0_0_closure1._closure45ebc75b17a4a02622
ut.math.Function_Spec.__spock_feature_0_0_closure2487c27a93f5092dc
ut.math.Function_Spec.__spock_feature_0_0_closure2._closure46b8359f6bd450e37b
ut.math.Function_Spec.__spock_feature_0_0prov0_closure3157999d5ee7969db
ut.math.Function_Spec.__spock_feature_0_0prov0_closure4d8102786accbfc4f
ut.math.Function_Spec.__spock_feature_0_0prov0_closure537a0419a6a210729
ut.math.Function_Spec.__spock_feature_0_1_closure6d314d4c48f64c9ec
ut.math.Function_Spec.__spock_feature_0_1_closure7930f46d055507ccd
ut.math.Function_Spec.__spock_feature_0_1prov0_closure1032b8c438116c5598
ut.math.Function_Spec.__spock_feature_0_1prov0_closure892ac2c34ae9bf3dc
ut.math.Function_Spec.__spock_feature_0_1prov0_closure936bc767c071d8d75
ut.math.Function_Spec.__spock_feature_0_2_closure118b678a5ec660182e
ut.math.Function_Spec.__spock_feature_0_3prov1_closure1228575b41bca4f2e5
ut.math.Function_Spec.__spock_feature_0_3prov1_closure137230ddd9c93e7ea1
ut.math.Function_Spec.__spock_feature_0_3prov1_closure143cb014420b67a797
ut.math.Function_Spec.__spock_feature_0_3prov1_closure15c3c839f6474dcd8e
ut.math.Function_Spec.__spock_feature_0_3prov1_closure163bad208486f6baec
ut.math.Function_Spec.__spock_feature_0_3prov1_closure17e122352a7bb63220
ut.math.Function_Spec.__spock_feature_0_3prov1_closure18a45e7027e5e7b274
ut.math.Function_Spec.__spock_feature_0_3prov1_closure1997e50cc604d2fc8f
ut.math.Function_Spec.__spock_feature_0_3prov1_closure2051e4d5cf28adcec3
ut.math.Function_Spec.__spock_feature_0_3prov1_closure213227159e420b2432
ut.math.Function_Spec.__spock_feature_0_3prov1_closure22f2db7854ed88bbf7
ut.math.Function_Spec.__spock_feature_0_3prov1_closure23cb03509d27274660
ut.math.Function_Spec.__spock_feature_0_3prov1_closure2469f8064a69b7c180
ut.math.Function_Spec.__spock_feature_0_3prov1_closure25fb24c5fab61b323c
ut.math.Function_Spec.__spock_feature_0_3prov1_closure265a6dd6d3208b7f80
ut.math.Function_Spec.__spock_feature_0_3prov1_closure278ab1754eb02eb7d3
ut.math.Function_Spec.__spock_feature_0_3prov1_closure2814df7dee6e2c94ae
ut.math.Function_Spec.__spock_feature_0_3prov1_closure29b9ac17f2d3c12e3b
ut.math.Function_Spec.__spock_feature_0_3prov1_closure307e1532e83cfa40bf
ut.math.Function_Spec.__spock_feature_0_3prov1_closure3134389a40ebe8d866
ut.math.Function_Spec.__spock_feature_0_3prov1_closure327d11c67addbb6fe4
ut.math.Function_Spec.__spock_feature_0_3prov1_closure336cd6411675ea59e1
ut.math.Function_Spec.__spock_feature_0_3prov1_closure344825ce1fede5d7bb
ut.math.Function_Spec.__spock_feature_0_3prov1_closure354e686ac5e7b8b5f9
ut.math.Function_Spec.__spock_feature_0_3prov1_closure366f9b617cb22c5e1d
ut.math.Function_Spec.__spock_feature_0_3prov1_closure37fb479248997c4012
ut.math.Function_Spec.__spock_feature_0_3prov1_closure38352ea7b7bc154aba
ut.math.Function_Spec.__spock_feature_0_3prov1_closure3962cfa347e81f17f6
ut.math.Function_Spec.__spock_feature_0_3prov1_closure407222f907964fb1e3
ut.math.Function_Spec.__spock_feature_0_3prov1_closure41658e4846c45e9f3b
ut.math.Function_Spec.__spock_feature_0_3prov1_closure4236c6032432ec8a28
ut.math.Function_Spec.__spock_feature_0_3prov1_closure43c196d2db25f5c5b9
ut.math.Function_Spec.__spock_feature_0_3prov1_closure443033436ae0e98c93
ut.math.Tensor_Function_Specc56f110d8d85b363
ut.math.Tensor_Function_Spec.__spock_feature_0_2_closure209dd0ebca69ba396
ut.math.Tensor_Function_Spec.__spock_feature_0_2_closure331154db009fa2989
ut.math.Tensor_Function_Spec.__spock_feature_0_3_closure46b485b3cfa7f6bcc
ut.math.Tensor_Function_Spec.__spock_feature_0_5_closure5fd44e5d621505d9e
ut.math.Tensor_Function_Spec.__spock_feature_0_5_closure6f9a0ce785a171f83
ut.math.Tensor_Function_Spec.__spock_feature_0_5_closure799e9a44f96c62b47
ut.math.Tensor_Function_Spec.__spock_feature_0_5_closure8e27823de24386c2c
ut.math.Tensor_Function_Spec._setup_closure1dcbd8bf574c0ebb6
ut.miscellaneous.Weired_NN_Spec86578dc4e7bffaee
ut.miscellaneous.Weired_NN_Spec.__spock_feature_0_0_closure2ed18a5a6c97f57de
ut.miscellaneous.Weired_NN_Spec.__spock_feature_0_0_closure343a97ea8e483199c
ut.miscellaneous.Weired_NN_Spec.__spock_feature_0_0_closure4c894782a24e617e6
ut.miscellaneous.Weired_NN_Spec._setup_closure1f314a107d5b9f347
ut.ndas.Nda_Assign_Spec50c02d895f6cb8f3
ut.ndas.Nda_Framing_Spec612ec6f770f33acf
ut.ndas.Nda_Inplace_Framing_Spec8b1bda01b858846b
ut.ndas.Nda_Instantiation_Spec6f33398e5cdbb497
ut.ndas.Nda_Instantiation_Spec.__spock_feature_0_1_closure10c1c471ff8051360
ut.ndas.Nda_Items_Spec19ef0f7c013cb875
ut.ndas.Nda_Mutation_Spec9fdf871133bde363
ut.ndas.Nda_Reshape_Spec2a045b8382db6990
ut.ndim.NDConfiguration_Speca536dc736112eef7
ut.ndim.NDConfiguration_Spec.__spock_feature_0_0_closure1c71d7aef87e8b166
ut.ndim.NDConfiguration_Spec.__spock_feature_0_0_closure2e621832f57038170
ut.ndim.NDConfiguration_Spec.__spock_feature_0_0_closure365e34d6756b581b3
ut.ndim.NDConfiguration_Spec.__spock_feature_0_0_closure4ba8c670e4f18e245
ut.ndim.NDConfiguration_Spec.__spock_feature_0_0_closure562e194464572793f
ut.ndim.Nda_Permute_Speccb5c46511a3a3970
ut.ndim.Shape_Spec41d5a91bda09f64b
ut.ndim.Shape_Spec.__spock_feature_0_3_closure134496b1cf5ad92d9
ut.ndim.Shape_Spec.__spock_feature_0_5_closure2b7facbf7c7910bca
ut.ndim.Shape_Spec.__spock_feature_0_5_closure313c2eb5ec5b6e7b2
ut.ndim.Shape_Spec.__spock_feature_0_6_closure463f41ebeea9b634d
ut.ndim.Tensor_NDConfiguration_Spece1aaa4b09a190394
ut.ndim.Tensor_NDConfiguration_Spec._setupSpec_closure18fa02757ef9c952d
ut.ndim.Tensor_Permute_Spece047ad990d77cdfe
ut.ndim.Tensor_Slice_Permute_Spec0931dc61a8ec8f5a
ut.ndim.Tensor_Slice_Permute_Spec._setup_closure12e4607dcb598c7a0
ut.neureka.Neureka_Spec21b921cbc2341905
ut.neureka.Neureka_Spec.__spock_feature_0_1_closure3af9eae889a211b2b
ut.neureka.Neureka_Spec.__spock_feature_0_1_closure494a9d9a23186d315
ut.neureka.Neureka_Spec.__spock_feature_0_1prov1_closure10b176483cbb2f258d
ut.neureka.Neureka_Spec.__spock_feature_0_1prov1_closure11fdf894ca4ce2b9df
ut.neureka.Neureka_Spec.__spock_feature_0_1prov1_closure122f510eff0dce79e6
ut.neureka.Neureka_Spec.__spock_feature_0_1prov1_closure13d5e966b9fe89e9f2
ut.neureka.Neureka_Spec.__spock_feature_0_1prov1_closure147827de83cb403aad
ut.neureka.Neureka_Spec.__spock_feature_0_1prov1_closure59adee60f99d70f35
ut.neureka.Neureka_Spec.__spock_feature_0_1prov1_closure681d78608abbd3117
ut.neureka.Neureka_Spec.__spock_feature_0_1prov1_closure77fe8c0a369db0b3b
ut.neureka.Neureka_Spec.__spock_feature_0_1prov1_closure88a744f82882906c3
ut.neureka.Neureka_Spec.__spock_feature_0_1prov1_closure992e6af731b509b3d
ut.neureka.Neureka_Spec.__spock_feature_0_1prov2_closure15de5942a68ed07c34
ut.neureka.Neureka_Spec.__spock_feature_0_1prov2_closure16dcc15456e227b74b
ut.neureka.Neureka_Spec.__spock_feature_0_1prov2_closure1733f164e890918e84
ut.neureka.Neureka_Spec.__spock_feature_0_1prov2_closure18378b0107b95cf2b3
ut.neureka.Neureka_Spec.__spock_feature_0_1prov2_closure198a37462304ad205c
ut.neureka.Neureka_Spec.__spock_feature_0_1prov2_closure2066f48c004f083859
ut.neureka.Neureka_Spec.__spock_feature_0_1prov2_closure2194b63ae1664d6544
ut.neureka.Neureka_Spec.__spock_feature_0_1prov2_closure229851b7baf2a8221a
ut.neureka.Neureka_Spec.__spock_feature_0_1prov2_closure230c3b223cc74bce75
ut.neureka.Neureka_Spec.__spock_feature_0_1prov2_closure24060e18fd78c543f6
ut.neureka.Neureka_Spec.__spock_feature_0_2_closure25642393b96a50ca66
ut.neureka.Neureka_Spec.__spock_feature_0_2_closure26def5dce6a507ebb6
ut.neureka.Neureka_Spec.__spock_feature_0_3_closure27e5996b45525d8bad
ut.neureka.Neureka_Spec.__spock_feature_0_4_closure28fbae4a20159584ae
ut.neureka.Neureka_Spec.__spock_feature_0_5_closure29298f35db71ee2693
ut.neureka.Neureka_Spec._setup_closure1d2a65d8f78648950
ut.neureka.Neureka_Spec._setup_closure261a7ac937705793d
ut.optimization.ADAM_Spec06ba96bfd70e10d4
ut.optimization.ADAM_Spec.__spock_feature_0_1_closure244772d89ac3ef3d1
ut.optimization.ADAM_Spec.__spock_feature_0_1_closure3eaf99076992b2c19
ut.optimization.ADAM_Spec.__spock_feature_0_1_closure496d69ad07c498db8
ut.optimization.ADAM_Spec._setup_closure158d018cd807cb1e0
ut.optimization.AdaGrad_Spec701a7b11d2ec5ccc
ut.optimization.AdaGrad_Spec._setup_closure157255791cca1efe7
ut.optimization.Momentum_Speceb39b3ecb78e56f2
ut.optimization.Momentum_Spec._setup_closure15142f649440b82da
ut.optimization.RMSProp_Spec113c92ea585460a4
ut.optimization.RMSProp_Spec._setup_closure18c7dd5e0cc11b8ad
ut.tensors.Copy_Spec13c21e0267f858e1
ut.tensors.Copy_Spec.__spock_feature_0_0_closure15722d393bbba2138
ut.tensors.Copy_Spec.__spock_feature_0_1_closure21be2e6eabfaf4d88
ut.tensors.Copy_Spec.__spock_feature_0_2_closure30b2cf927d420c6bd
ut.tensors.Copy_Spec.__spock_feature_0_3_closure43051119a1c8c4730
ut.tensors.Copy_Spec.__spock_feature_0_3prov0_closure5ff34a13e13baeac8
ut.tensors.Copy_Spec.__spock_feature_0_3prov0_closure690d7802e14490660
ut.tensors.DimTrim_Specd52d7c0c5866a111
ut.tensors.Expression_Based_Tensor_Instantiation_Speca07932c922f2b8c9
ut.tensors.Expression_Based_Tensor_Instantiation_Spec._setup_closure11eb5e93b258c86fc
ut.tensors.Fluent_Tensor_Creation_Spec37c0af7f1a80aa45
ut.tensors.Fluent_Tensor_Creation_Spec.__spock_feature_0_4prov1_closure16783f791dc781b37
ut.tensors.Fluent_Tensor_Creation_Spec.__spock_feature_0_4prov1_closure2d9a652d4ceccb201
ut.tensors.Fluent_Tensor_Creation_Spec.__spock_feature_0_4prov1_closure33daf8888e0608c9b
ut.tensors.Fluent_Tensor_Creation_Spec.__spock_feature_0_4prov1_closure403d71e1403b71579
ut.tensors.Fluent_Tensor_Creation_Spec.__spock_feature_0_4prov1_closure529b52829019768f0
ut.tensors.Fluent_Tensor_Creation_Spec.__spock_feature_0_4prov1_closure6f825f875ea3e50dd
ut.tensors.Fluent_Tensor_Creation_Spec.__spock_feature_0_4prov1_closure72b5bf16c9bd1cba3
ut.tensors.Fluent_Tensor_Creation_Spec.__spock_feature_0_4prov1_closure857560f26f088ef92
ut.tensors.Functional_Nda_Spec800366f91a8d2e4d
ut.tensors.Functional_Nda_Spec.__spock_feature_0_0_closure31bfb8af5a1b40db6
ut.tensors.Functional_Nda_Spec.__spock_feature_0_1_closure4b2f034e9b864a538
ut.tensors.Functional_Nda_Spec.__spock_feature_0_1_closure585cc69cc5bd953d4
ut.tensors.Functional_Nda_Spec.__spock_feature_0_1_closure681619790bf9e3c64
ut.tensors.Functional_Nda_Spec.__spock_feature_0_2_closure10372282db46129a21
ut.tensors.Functional_Nda_Spec.__spock_feature_0_2_closure1173c44718e5a3892a
ut.tensors.Functional_Nda_Spec.__spock_feature_0_2_closure128d0745b00fb39757
ut.tensors.Functional_Nda_Spec.__spock_feature_0_2_closure134f5ca64851eedeae
ut.tensors.Functional_Nda_Spec.__spock_feature_0_2_closure14e94d622ed91393bc
ut.tensors.Functional_Nda_Spec.__spock_feature_0_2_closure151f01f43fc620f158
ut.tensors.Functional_Nda_Spec.__spock_feature_0_2_closure16ea2aaeb5c3e5cfea
ut.tensors.Functional_Nda_Spec.__spock_feature_0_2_closure173c66838aa5302335
ut.tensors.Functional_Nda_Spec.__spock_feature_0_2_closure18e7f9d36b994926df
ut.tensors.Functional_Nda_Spec.__spock_feature_0_2_closure19c57b03e91c86546f
ut.tensors.Functional_Nda_Spec.__spock_feature_0_2_closure75f71f48bfe33bd3c
ut.tensors.Functional_Nda_Spec.__spock_feature_0_2_closure83317594d75bebe5a
ut.tensors.Functional_Nda_Spec.__spock_feature_0_2_closure9bd4f88e860d86012
ut.tensors.Functional_Nda_Spec.__spock_feature_0_3_closure2006cb0517b61af8aa
ut.tensors.Functional_Nda_Spec.__spock_feature_0_4_closure211f6c9d13bd9ab42b
ut.tensors.Functional_Nda_Spec.__spock_feature_0_5_closure2269ed28c31e6c4154
ut.tensors.Functional_Nda_Spec.__spock_feature_0_5_closure23c2860e618dcd6f92
ut.tensors.Functional_Nda_Spec.__spock_feature_0_6_closure2467ffc2bb8ba089b8
ut.tensors.Functional_Nda_Spec.__spock_feature_0_7_closure25ecc5aab77179d9c0
ut.tensors.Functional_Nda_Spec.__spock_feature_0_7_closure265ec624cad1dc75eb
ut.tensors.Functional_Nda_Spec.__spock_feature_0_7_closure276d8bb25a1e9dacae
ut.tensors.Functional_Nda_Spec._setup_closure1bd3120a2fbb64398
ut.tensors.Functional_Nda_Spec._setup_closure2e7950288b1033a74
ut.tensors.Functional_Tensor_Specc975d884d11eeb1a
ut.tensors.Functional_Tensor_Spec.__spock_feature_0_0_closure5cdfad54e8ea29353
ut.tensors.Functional_Tensor_Spec.__spock_feature_0_1_closure107e22bb0fa57c8570
ut.tensors.Functional_Tensor_Spec.__spock_feature_0_1_closure1105bc7004d66ecde3
ut.tensors.Functional_Tensor_Spec.__spock_feature_0_1_closure123439c5c4ed612555
ut.tensors.Functional_Tensor_Spec.__spock_feature_0_1_closure132654e2e277b04f4a
ut.tensors.Functional_Tensor_Spec.__spock_feature_0_1_closure14648cfb1b898ec421
ut.tensors.Functional_Tensor_Spec.__spock_feature_0_1_closure15d07302d717f2c20c
ut.tensors.Functional_Tensor_Spec.__spock_feature_0_1_closure167786be3d1a60dfba
ut.tensors.Functional_Tensor_Spec.__spock_feature_0_1_closure1771fc57dcb8391083
ut.tensors.Functional_Tensor_Spec.__spock_feature_0_1_closure18ac707e4e845d085b
ut.tensors.Functional_Tensor_Spec.__spock_feature_0_1_closure1918d7aee546eb3068
ut.tensors.Functional_Tensor_Spec.__spock_feature_0_1_closure6ca77484268170628
ut.tensors.Functional_Tensor_Spec.__spock_feature_0_1_closure74cbc32631e79090b
ut.tensors.Functional_Tensor_Spec.__spock_feature_0_1_closure8dd84a4754faab769
ut.tensors.Functional_Tensor_Spec.__spock_feature_0_1_closure9f250d80f532e4cff
ut.tensors.Functional_Tensor_Spec.__spock_feature_0_2_closure2060bb0fa63dca2125
ut.tensors.Functional_Tensor_Spec.__spock_feature_0_3_closure219bb5cce81ecb8b6d
ut.tensors.Functional_Tensor_Spec.__spock_feature_0_4_closure225845b6d920d9f2c5
ut.tensors.Functional_Tensor_Spec.__spock_feature_0_4_closure234a6eb063fab4ad9a
ut.tensors.Functional_Tensor_Spec.__spock_feature_0_4_closure2460a3a76541636a40
ut.tensors.Functional_Tensor_Spec.__spock_feature_0_5_closure25d64f0325136d89cd
ut.tensors.Functional_Tensor_Spec.__spock_feature_0_5_closure269658cebc63fccd2f
ut.tensors.Functional_Tensor_Spec.__spock_feature_0_5_closure27b5daa5daaa2b353f
ut.tensors.Functional_Tensor_Spec.__spock_feature_0_6_closure287790d64fc73d45b9
ut.tensors.Functional_Tensor_Spec.__spock_feature_0_6_closure29f8f2b2fcbd0aaeb4
ut.tensors.Functional_Tensor_Spec.__spock_feature_0_6_closure30195071a6f43ac7f9
ut.tensors.Functional_Tensor_Spec._cleanup_closure4b6f7bfdd93d0ff71
ut.tensors.Functional_Tensor_Spec._setup_closure132b0d2cbaa2c5769
ut.tensors.Functional_Tensor_Spec._setup_closure2e27a9bdc931d1630
ut.tensors.Functional_Tensor_Spec._setup_closure35d37d8a895c82f7f
ut.tensors.Reshape_Spec7f067d1cb90bd284
ut.tensors.Tensor_As_Container_Specf3ae6bcb3b859a38
ut.tensors.Tensor_As_Container_Spec.ComplexNumberd6d3fdcadac2bc4e
ut.tensors.Tensor_As_Container_Spec.__spock_feature_0_2_closure254ffda3c8237efad
ut.tensors.Tensor_As_Container_Spec.__spock_feature_0_2_closure3b1fc0f68421221f6
ut.tensors.Tensor_As_Container_Spec.__spock_feature_0_3_closure10952d8424e22c4707
ut.tensors.Tensor_As_Container_Spec.__spock_feature_0_3_closure11cfc6616746acd24b
ut.tensors.Tensor_As_Container_Spec.__spock_feature_0_3_closure12444877b1334194e3
ut.tensors.Tensor_As_Container_Spec.__spock_feature_0_3_closure13d677bf5594a5f6c6
ut.tensors.Tensor_As_Container_Spec.__spock_feature_0_3_closure14f4d4c776e2c63785
ut.tensors.Tensor_As_Container_Spec.__spock_feature_0_3_closure1588c6970790fde2ea
ut.tensors.Tensor_As_Container_Spec.__spock_feature_0_3_closure49f4d1b3921125361
ut.tensors.Tensor_As_Container_Spec.__spock_feature_0_3_closure53ff37c6ab12f7d23
ut.tensors.Tensor_As_Container_Spec.__spock_feature_0_3_closure685d982bc9db6108c
ut.tensors.Tensor_As_Container_Spec.__spock_feature_0_3_closure7d5a37c96fb8cadc0
ut.tensors.Tensor_As_Container_Spec.__spock_feature_0_3_closure805d6f853cd226ae6
ut.tensors.Tensor_As_Container_Spec.__spock_feature_0_3_closure926a9186c2cdff4fa
ut.tensors.Tensor_As_Container_Spec._setupSpec_closure18b960042dd04195f
ut.tensors.Tensor_Assign_Spec6c8637b869f2f2ea
ut.tensors.Tensor_Conversion_Spec55fb38ce3339d2f4
ut.tensors.Tensor_Conversion_Spec.__spock_feature_0_2_closure1484993f0bcb06ba6
ut.tensors.Tensor_Convolution_Specaca7b5793dd10257
ut.tensors.Tensor_Convolution_Spec.__spock_feature_0_0_closure347d8eea41bdefc66
ut.tensors.Tensor_Convolution_Spec.__spock_feature_0_1_closure4d7bf47e71c6cb60e
ut.tensors.Tensor_Convolution_Spec.__spock_feature_0_2_closure52f048fd0f149f4f7
ut.tensors.Tensor_Convolution_Spec.__spock_feature_0_3_closure6b1ebe48bbd6236f8
ut.tensors.Tensor_Convolution_Spec.__spock_feature_0_6_closure70b1c6c73b95a456d
ut.tensors.Tensor_Convolution_Spec.__spock_feature_0_6_closure8a0659011c202672c
ut.tensors.Tensor_Convolution_Spec._setup_closure127f7a86cbfbacadd
ut.tensors.Tensor_Convolution_Spec._setup_closure2c74984a0d7e99ca5
ut.tensors.Tensor_Device_Spec65bd3f394c6f2976
ut.tensors.Tensor_Device_Spec.__spock_feature_0_0_closure27cedecd60da90553
ut.tensors.Tensor_Device_Spec.__spock_feature_0_1_closure34e5d0910ab61d436
ut.tensors.Tensor_Device_Spec.__spock_feature_0_2_closure4b1eaae847a764ef0
ut.tensors.Tensor_Device_Spec._setup_closure196aac794a8744561
ut.tensors.Tensor_Dot_Product_Spec5510a47a6b1d94d0
ut.tensors.Tensor_Dot_Product_Spec.__spock_feature_0_3_closure179e5f20befefa227
ut.tensors.Tensor_Dot_Product_Spec.__spock_feature_0_4_closure215e3c286c1ed5b5f
ut.tensors.Tensor_Dot_Product_Spec.__spock_feature_0_5_closure383c58a9a5a4bbed8
ut.tensors.Tensor_Dot_Product_Spec.__spock_feature_0_6_closure4e1a4f7e6b392a958
ut.tensors.Tensor_Generics_Specc4b085b020c65334
ut.tensors.Tensor_Generics_Spec._setup_closure127870fe6d430b9ff
ut.tensors.Tensor_Gradient_Speca7512cd8002bf68d
ut.tensors.Tensor_Gradient_Spec._setupSpec_closure137f61b2422961367
ut.tensors.Tensor_IO_Spec7981edf538b9522c
ut.tensors.Tensor_IO_Spec.__spock_feature_0_2_closure22ad13155b491afdc
ut.tensors.Tensor_IO_Spec.__spock_feature_0_3_closure344ce1c5273430536
ut.tensors.Tensor_IO_Spec.__spock_feature_0_4_closure4a0f2cf911acb4f44
ut.tensors.Tensor_IO_Spec._setup_closure104a9bae9cfaeb18d
ut.tensors.Tensor_Instantiation_Specb4a7884545bc33f3
ut.tensors.Tensor_Instantiation_Spec._setup_closure1550e1e9842927fd0
ut.tensors.Tensor_Interop_Specfda7235f30fee448
ut.tensors.Tensor_Layout_Spec713ab0fa6c78f9c9
ut.tensors.Tensor_Layout_Spec.__spock_feature_0_1_closure38fdece6ba2f0be27
ut.tensors.Tensor_Layout_Spec.__spock_feature_0_1_closure40d80c115b73f2a34
ut.tensors.Tensor_Layout_Spec.__spock_feature_0_1_closure51b4f01822a16ece3
ut.tensors.Tensor_Layout_Spec.__spock_feature_0_1_closure6e9ec0040f1b7918a
ut.tensors.Tensor_Layout_Spec.__spock_feature_0_1_closure75fce2458f615ee52
ut.tensors.Tensor_Layout_Spec.__spock_feature_0_1_closure8104610feaf47241e
ut.tensors.Tensor_Layout_Spec._setup_closure14b57775b22d2499a
ut.tensors.Tensor_Layout_Spec._setup_closure2654fff6c8e9bbc18
ut.tensors.Tensor_Operation_Spece63a622730bc7130
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_10_closure73e99488d9812a245
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_10_closure8feaf834344b88fd6
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_10_closure9f08ffe708a26516b
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_11_closure10b1726fb884afa38d
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_11_closure114b6f4946c8409873
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_11_closure12c231b35643385e73
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_11_closure130c0b0b4b97de7015
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_11_closure14cc92f88d09a1829f
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_11_closure15684e59b3bbcc3fce
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_11_closure168afff7c1ff0b43b3
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_11_closure17e3f76b6a5ff5ea83
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_11prov4_closure18dc949f29e1871214
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_11prov4_closure1912b7f23ed8433dfd
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_11prov4_closure202781bb0a97180889
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_11prov4_closure21e9a2d61daedc2760
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_11prov4_closure220a0a16f005001004
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_11prov4_closure2342b709d078cbd943
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_11prov4_closure2445ae79b2b7625d68
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_11prov4_closure25bf2c5dd0ab1e13af
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_11prov4_closure26467f655a8d81d1c3
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_11prov4_closure275ac6d05cf34562b5
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_11prov4_closure28e30a80c2f18f55e5
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_11prov4_closure2991b1dce336438acb
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_11prov4_closure30524652e6d8323fe1
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_11prov4_closure313b6fb4910e40aeb7
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_11prov4_closure32ac9a50d9ecbafcad
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_11prov4_closure3362b93dced57ed344
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_11prov4_closure342644286aa74f61a7
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_11prov4_closure35e867457d9e8b4e4e
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_11prov4_closure3633b88cb6c2abe6ab
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_11prov4_closure375a916ac114d977fd
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_11prov4_closure384457b13ff9a5fe12
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_11prov4_closure39d36b4189f0ba123b
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_11prov4_closure401779ffb9e2ceaccd
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_11prov4_closure416c191c21325bece2
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_11prov4_closure42cbd358cd5fe65eed
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_11prov4_closure43b0b3bb558f731ec2
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_11prov4_closure44150142ee142ffc2e
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_11prov4_closure45377e3cd7f4617fc1
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_11prov4_closure461dd5931e6a68010f
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_11prov4_closure47c1bffbe6554bff9f
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_11prov4_closure48ac61e369903b5273
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_11prov4_closure49d70100f140ae125c
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_11prov4_closure50bdb6bcea6d5a5956
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_11prov4_closure51c6d65f72bdcf1979
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_11prov4_closure523ee728aa64c6fb28
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_11prov4_closure53e28d40525be505b8
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_11prov4_closure546bb0773958d406b4
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_11prov4_closure551d19c98a809e61a7
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_12_closure56a2923a01c9765aa2
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_12prov2_closure57819eab7092547523
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_12prov2_closure58c1c78e7807f49fe7
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_12prov2_closure59cf07b7514891f3bc
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_12prov2_closure60dc2bf53a3c187412
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_12prov2_closure61a77b9f62dbcb3a69
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_4_closure3e2072d779386fd06
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_4_closure4d8addf6198fd1884
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_4_closure5b6cf58f951cc2efc
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_8_closure654bd844b28966da4
ut.tensors.Tensor_Operation_Spec._setup_closure1cdb656bd4e1fe62b
ut.tensors.Tensor_Operation_Spec._setup_closure264f514eb0a2e5398
ut.tensors.Tensor_Slicing_Spec535cd4f808cd6b94
ut.tensors.Tensor_State_Spec90ad8e8b6a49d646
ut.tensors.Tensor_State_Spec.__spock_feature_0_1_closure294702d101e207973
ut.tensors.Tensor_State_Spec.__spock_feature_0_1_closure3852cd5a7ce79c797
ut.tensors.Tensor_State_Spec.__spock_feature_0_1_closure4441506016081d1f7
ut.tensors.Tensor_State_Spec.__spock_feature_0_1_closure5e328eabfdf2d67b9
ut.tensors.Tensor_State_Spec.__spock_feature_0_1_closure6133af9301ca9518e
ut.tensors.Tensor_State_Spec.__spock_feature_0_2_closure7a58196962c6f3a66
ut.tensors.Tensor_State_Spec.__spock_feature_0_2_closure88e3115ce5799245d
ut.tensors.Tensor_State_Spec.__spock_feature_0_2_closure969fe2738e35980d2
ut.tensors.Tensor_State_Spec.__spock_feature_0_6_closure1094da049f1ea3a907
ut.tensors.Tensor_State_Spec._setup_closure1a43cfcb3baf414c3
ut.tensors.Tensor_Stats_Spec0df53dfaecb28f69
ut.tensors.Tensor_Stats_Spec.__spock_feature_0_0_closure120ccabb9c01c3a14
ut.tensors.Tensor_Stats_Spec.__spock_feature_0_0_closure24cb37e2bf4a4af5d
ut.tensors.Tensor_Stats_Spec.__spock_feature_0_2_closure3ea30c6ef73472c71
ut.tensors.Tensor_Stats_Spec.__spock_feature_0_2_closure49e1fc81c4cf0e68b
ut.tensors.Tensor_Stats_Spec.__spock_feature_0_4_closure5d42bf00ed0aa5eb5
ut.tensors.Tensor_Stats_Spec.__spock_feature_0_4_closure679f0b7aac74060fc
ut.tensors.Tensor_Version_Specbc4cc8e2cc8ab0ac
ut.tensors.Tensor_Version_Spec.__spock_feature_0_3_closure262dfd0df80e6cd9a
ut.tensors.Tensor_Version_Spec._setup_closure10769975e11b19d6e
ut.tensors.exceptions.Tensor_Delete_Exception_Spec633c153c6f43657f
ut.tensors.exceptions.Tensor_Exception_Spec63650038e6b9961f
ut.tensors.exceptions.Tensor_Exception_Spec.__spock_feature_0_1prov1_closure13eaee5caf6238121
ut.tensors.exceptions.Tensor_Exception_Spec.__spock_feature_0_1prov1_closure107b36b7d219da63b4
ut.tensors.exceptions.Tensor_Exception_Spec.__spock_feature_0_1prov1_closure115c2cd8714e98b254
ut.tensors.exceptions.Tensor_Exception_Spec.__spock_feature_0_1prov1_closure124d14787763e29aea
ut.tensors.exceptions.Tensor_Exception_Spec.__spock_feature_0_1prov1_closure1342526cb2fddc4d3f
ut.tensors.exceptions.Tensor_Exception_Spec.__spock_feature_0_1prov1_closure143811eb00a53b0098
ut.tensors.exceptions.Tensor_Exception_Spec.__spock_feature_0_1prov1_closure1515a511a04e21fdc8
ut.tensors.exceptions.Tensor_Exception_Spec.__spock_feature_0_1prov1_closure16af8a0e0b671a09ab
ut.tensors.exceptions.Tensor_Exception_Spec.__spock_feature_0_1prov1_closure170759d3b475b3fb01
ut.tensors.exceptions.Tensor_Exception_Spec.__spock_feature_0_1prov1_closure187aaf2ed5748b646c
ut.tensors.exceptions.Tensor_Exception_Spec.__spock_feature_0_1prov1_closure268ffbb66b734da25
ut.tensors.exceptions.Tensor_Exception_Spec.__spock_feature_0_1prov1_closure39b741d10916fca70
ut.tensors.exceptions.Tensor_Exception_Spec.__spock_feature_0_1prov1_closure4b8d9461d72e701b3
ut.tensors.exceptions.Tensor_Exception_Spec.__spock_feature_0_1prov1_closure50cd849f497fa7fe6
ut.tensors.exceptions.Tensor_Exception_Spec.__spock_feature_0_1prov1_closure6b55bb5397f4d8853
ut.tensors.exceptions.Tensor_Exception_Spec.__spock_feature_0_1prov1_closure7aa53dfc3a3d89b37
ut.tensors.exceptions.Tensor_Exception_Spec.__spock_feature_0_1prov1_closure8ea3c64ac048486ee
ut.tensors.exceptions.Tensor_Exception_Spec.__spock_feature_0_1prov1_closure9b9486b5dc9825d31
ut.utility.Cleaner_Testinga2d3ac288d3084de
ut.utility.Cleaner_Testing.__spock_feature_0_0_closure19a4ded5259539abe
ut.utility.Cleaner_Testing.__spock_feature_0_0_closure105f4397342e38973f
ut.utility.Cleaner_Testing.__spock_feature_0_0_closure11fe3988c19fa6ac39
ut.utility.Cleaner_Testing.__spock_feature_0_0_closure12a14a6c8b8e9e51fc
ut.utility.Cleaner_Testing.__spock_feature_0_0_closure290eda09500b21a32
ut.utility.Cleaner_Testing.__spock_feature_0_0_closure3ce59e563d0a34fd8
ut.utility.Cleaner_Testing.__spock_feature_0_0_closure49446d18a5893f472
ut.utility.Cleaner_Testing.__spock_feature_0_0_closure5caf2947c8882a198
ut.utility.Cleaner_Testing.__spock_feature_0_0_closure65a3cb74e40f74d9b
ut.utility.Cleaner_Testing.__spock_feature_0_0_closure70488f2b890e61871
ut.utility.Cleaner_Testing.__spock_feature_0_0_closure89d1033b4e8d028f2
ut.utility.Cleaner_Testing.__spock_feature_0_0_closure9c3a4764238c17d18
ut.utility.DataConverter_Spec6a1100bea63dda3e
ut.utility.FileHandle_Spec7a78b96680dc4770
ut.utility.FileHandle_Spec.__spock_feature_0_0_closure2350e5719c3f9df3d
ut.utility.FileHandle_Spec.__spock_feature_0_1_closure37b275ae8ce845eb6
ut.utility.FileHandle_Spec.__spock_feature_0_2_closure43119a9569e5c0d6c
ut.utility.FileHandle_Spec.__spock_feature_0_2_closure5fc2ba97c678acc89
ut.utility.FileHandle_Spec.__spock_feature_0_2_closure67f478a4bab195789
ut.utility.FileHandle_Spec.__spock_feature_0_2_closure7382615ab06fb0656
ut.utility.FileHandle_Spec.__spock_feature_0_3_closure8220488988dea9da7
ut.utility.FileHandle_Spec.__spock_feature_0_3_closure91dd0b540d620b2ae
ut.utility.FileHandle_Spec._setup_closure153ceff20553bb24a
ut.utility.ListReader_Exception_Spec125a46eede0db13a
ut.utility.ListReader_Exception_Spec.__spock_feature_0_0_closure179d18378eca8d81e
ut.utility.ListReader_Exception_Spec.__spock_feature_0_1_closure201e9d90bfc798495
ut.utility.ListReader_Spec2073b77dcf90f357
ut.utility.ListReader_Spec.__spock_feature_0_0_closure103c3ddf619f9a94b
ut.utility.ListReader_Spec.__spock_feature_0_1_closure2c259b2af2f7617ad
ut.utility.ListReader_Spec.__spock_feature_0_2_closure3d76faa725f3d84b8
ut.utility.Utility_Spec3ce8f5b3d0986ba0
ut.utility.Utility_Spec.__spock_feature_0_0prov1_closure14c6e2fde75fb4213
ut.utility.Utility_Spec.__spock_feature_0_0prov1_closure2b6f15d39621394d2
ut.utility.Utility_Spec.__spock_feature_0_0prov1_closure356977822adfdce11
ut.utility.Utility_Spec.__spock_feature_0_0prov1_closure48e83bd28804795ae
ut.utility.Utility_Spec.__spock_feature_0_0prov1_closure53e85ef1e042eecdb
ut.utility.Utility_Spec.__spock_feature_0_0prov1_closure6da8d2369c4a2926d
ut.utility.Utility_Spec.__spock_feature_0_0prov1_closure7a39dde6cafacc4ee
ut.utility.Utility_Spec.__spock_feature_0_0prov1_closure833bc5823131ab299
worker.org.gradle.api.JavaVersionf90f9a6989f59cec
worker.org.gradle.internal.classloader.ClassLoaderSpeccb374b01ccbebc0b
worker.org.gradle.internal.classloader.ClassLoaderUtils8203100709821636
worker.org.gradle.internal.classloader.ClassLoaderUtils.AbstractClassLoaderLookuperc285dc94ede87ba6
worker.org.gradle.internal.classloader.ClassLoaderUtils.Java9PackagesFetcher66503273ab6df058
worker.org.gradle.internal.classloader.ClassLoaderUtils.LookupClassDefiner101fed03f270a39f
worker.org.gradle.internal.classloader.FilteringClassLoader14e598cae38422c5
worker.org.gradle.internal.classloader.FilteringClassLoader.RetrieveSystemPackagesClassLoader130153a9bef1ed82
worker.org.gradle.internal.classloader.FilteringClassLoader.Spec5e483fa929e0db0b
worker.org.gradle.internal.classloader.FilteringClassLoader.TrieSete5b7cbe5af5f7d99
worker.org.gradle.internal.stream.EncodedStream.EncodedInput6e5f5782b741154c
worker.org.gradle.internal.util.Trie19fbee069a29feb3
worker.org.gradle.internal.util.Trie.Builder3ff89b3303eddda1
worker.org.gradle.process.internal.worker.GradleWorkerMain232767ef46e8d7ca
\ No newline at end of file
+Sessions

This coverage report is based on execution data from the following sessions:

Session          Start Time                 Dump Time
danpad-7975620a  Dec 16, 2024 2:36:05 PM    Dec 16, 2024 2:38:28 PM

Execution data for the following classes is considered in this report:

Class    Id
ComplexNumber8fb0818c6ddd95ba
Example_Spec91dc0e81b596d72d
Kotlin_Compatibility_Unit_Testing2cd12523ca0371a6
Kotlin_Compatibility_Unit_Testing.convenience_methods_in_function_API_are_consistent.14cda834c54637e45
Kotlin_Compatibility_Unit_Testing.convenience_methods_in_function_API_are_consistent.107f656f0aa07894d9
Kotlin_Compatibility_Unit_Testing.convenience_methods_in_function_API_are_consistent.117ffed6f01d0727e7
Kotlin_Compatibility_Unit_Testing.convenience_methods_in_function_API_are_consistent.2ae6694b1a0018cd8
Kotlin_Compatibility_Unit_Testing.convenience_methods_in_function_API_are_consistent.3c3ddbc65c549d80a
Kotlin_Compatibility_Unit_Testing.convenience_methods_in_function_API_are_consistent.496b6bc8ee90c1f92
Kotlin_Compatibility_Unit_Testing.convenience_methods_in_function_API_are_consistent.5c5fdd2e736565fb6
Kotlin_Compatibility_Unit_Testing.convenience_methods_in_function_API_are_consistent.6f490b84c02030302
Kotlin_Compatibility_Unit_Testing.convenience_methods_in_function_API_are_consistent.7ad99204a5577ed7c
Kotlin_Compatibility_Unit_Testing.convenience_methods_in_function_API_are_consistent.81ae2c828cbc70e59
Kotlin_Compatibility_Unit_Testing.convenience_methods_in_function_API_are_consistent.95ca314e3cde39674
Kotlin_Compatibility_Unit_Testing.operator_overloading_works_for_scalars_in_kotlin.127253ef4b1241521
Kotlin_Compatibility_Unit_Testing.operator_overloading_works_for_scalars_in_kotlin.2bdc75261359abed0
Kotlin_Compatibility_Unit_Testing.operator_overloading_works_for_scalars_in_kotlin.3cc1f08545342d995
Kotlin_Compatibility_Unit_Testing.operator_overloading_works_for_scalars_in_kotlin.4d56086f1d7ec7a07
Kotlin_Compatibility_Unit_Testing.optimization_is_being_called.163ca0ee4ddcc49bb
Kotlin_Compatibility_Unit_Testing.optimization_is_being_called.2faff7d967f33d2ab
Kotlin_Compatibility_Unit_Testing.optimization_is_being_called.3e83d0386ebe7454c
Kotlin_Compatibility_Unit_Testing.optimization_is_being_called.4f59beea5c338d265
Kotlin_Compatibility_Unit_Testing.optimization_is_being_called.5c511e88900e0d948
Script19c54b91c17577725
Script1de0949e15f654f73
Script1914a85c2e875f4b1
Script1205a691e98d4dcff
Script195d9320a69ce0392
Script18d1e4263e4dea4fe
Script19c6ba707b646de54
Script1101d68a3170f7bdf
Script191aa2ce9c5206e06
Script1174bd15076dce958
Script11677070ea8b3e34d
Script181fd4397fc83e204
Script1b3dd695308938d7d
Script1fcad623da0efe112
Script1f22f8615c2d06975
Script179f745bbdef5c6e7
Script14a3273f286e01a98
Script1b2c1828c4a45d365
Script17fac691323fe9f8d
Script1fbd06b75c8a1f5b4
Script1a7681d979742028e
Script1486805d71fb5e139
Script1e6b1229453ee2f97
Script1a97c9b9c30f5badc
Script184be6bd5ca29dc47
Script17909eca7bd7913be
Script138472e3a0bede2a5
Script1._run_closure13b8ed65a524a2076
Script1._run_closure1ed5ed203393e2083
Script1._run_closure13b3c8c58ae8ea2c3
Script1._run_closure1._closure2a90afe75eb673bd5
Script1._run_closure1._closure20c46879dd7510abb
Script1._run_closure1._closure2._closure10fb2753db29297b2e
Script1._run_closure1._closure2._closure10._closure1658d554bdb4731069
Script1._run_closure1._closure2._closure111165daf974e46b3f
Script1._run_closure1._closure2._closure11._closure17fbc94ad69e4ad07f
Script1._run_closure1._closure2._closure12b6ec297f10c00e0c
Script1._run_closure1._closure2._closure12._closure186e26856bc65b08a5
Script1._run_closure1._closure2._closure13a7e46b6a001beee1
Script1._run_closure1._closure2._closure13._closure198dfe633a3fd197aa
Script1._run_closure1._closure2._closure3c170dd1bb41311ac
Script1._run_closure1._closure2._closure4ca5455fe981eea1c
Script1._run_closure1._closure2._closure5c56907b9f756ee1a
Script1._run_closure1._closure2._closure5._closure8b2c116fb6f730e4d
Script1._run_closure1._closure2._closure69cbf0571a9c6a2fa
Script1._run_closure1._closure2._closure6a834ac58239b50e5
Script1._run_closure1._closure2._closure75a7fca5cde9e2a24
Script1._run_closure1._closure2._closure81b670d749df82bed
Script1._run_closure1._closure2._closure96a166c285d2d66a1
Script1._run_closure1._closure2._closure9._closure153e9bf2ddf67218da
Script1._run_closure1._closure3d95219ef5d99d873
Script1._run_closure10784c15499a0cd7de
Script1._run_closure115f6e81c6173237ff
Script1._run_closure1236093c568071179c
Script1._run_closure13a07880d0f4f2bd7e
Script1._run_closure145422680f94a3630d
Script1._run_closure157300fc80199d832c
Script1._run_closure161bd046614d37dddb
Script1._run_closure173cf2d2eec0093dfa
Script1._run_closure1842d290ae4e7d1fa8
Script1._run_closure196ee038dd2b850b1d
Script1._run_closure2400146925cdf16ad
Script1._run_closure2097dac21d11796684
Script1._run_closure216d276f5146001c94
Script1._run_closure22d3969ff47c62ec31
Script1._run_closure23296b32b82b1b9621
Script1._run_closure241c2c772c4c9c8ec6
Script1._run_closure258f9d2edb18425006
Script1._run_closure26e6fa934b8f017065
Script1._run_closure27c1d807c4023f9044
Script1._run_closure28be4f42f54fa2cc82
Script1._run_closure29996dd67ac29c2ca3
Script1._run_closure3f97bc92a5953fb1b
Script1._run_closure30ea092487138e4b56
Script1._run_closure31baf514e726c495f0
Script1._run_closure32d392a977b187b593
Script1._run_closure33f4b03df83cb955b2
Script1._run_closure34015dd2569f01f555
Script1._run_closure35267f46d9123f1574
Script1._run_closure364f18fb49857c3517
Script1._run_closure3727c9bc41375353b6
Script1._run_closure3809a7c487470e3112
Script1._run_closure3954b9f32c573c1a8d
Script1._run_closure4b71e670241f57b1b
Script1._run_closure4020d045696a8d2f76
Script1._run_closure41eb8c0f8b3d1288be
Script1._run_closure50e64e8ba447996ad
Script1._run_closure675eb78724aeca076
Script1._run_closure7aaa905c37390d529
Script1._run_closure88dc1dfb9036abb23
Script1._run_closure934bb500106e65695
SpockConfig34d3db763b893a80
SpockConfig._run_closure172e3954bbfea85cb
com.athaydes.spockframework.report.SpecInfoListenera97cab13a8b6e037
com.athaydes.spockframework.report.SpecInfoListener._afterIteration_closure31638c56580500a23
com.athaydes.spockframework.report.SpecInfoListener._beforeFeature_closure15217cb8233ac2577
com.athaydes.spockframework.report.SpecInfoListener._beforeIteration_closure2eb71f2f057e152ba
com.athaydes.spockframework.report.SpecInfoListener._featureRunFor_closure6a7bed7b242c1b6ab
com.athaydes.spockframework.report.SpecInfoListener._featureRunFor_closure6._closure7e35f34384cfdf6e4
com.athaydes.spockframework.report.SpockReportExtension451c440f58245380
com.athaydes.spockframework.report.SpockReportExtension._start_closure179ab9f845c86cbb0
com.athaydes.spockframework.report.SpockReportExtension._start_closure2a72f7d158a65670d
com.athaydes.spockframework.report.SpockReportExtension._start_closure316d8b989d501c6f0
com.athaydes.spockframework.report.extension.InfoContainera5417b8e69a18b16
com.athaydes.spockframework.report.extension.SpockReportsSpecificationExtension3d09ca1e043e8803
com.athaydes.spockframework.report.internal.ConfigLoader58d7ac8d666e8758
com.athaydes.spockframework.report.internal.ConfigLoader._apply_closure1aded72b609de44b9
com.athaydes.spockframework.report.internal.ConfigLoader._loadDefaultProperties_closure6f84ac8d284fe02aa
com.athaydes.spockframework.report.internal.ConfigLoader._loadSystemProperties_closure273cfabd7d8647905
com.athaydes.spockframework.report.internal.ConfigLoader._loadSystemProperties_closure3d002c3f186b2826d
com.athaydes.spockframework.report.internal.ConfigLoader._loadSystemProperties_closure413928658e6184cd5
com.athaydes.spockframework.report.internal.ConfigLoader._loadSystemProperties_closure5f4790a6201280059
com.athaydes.spockframework.report.internal.FeatureRun51074b916a417d56
com.athaydes.spockframework.report.internal.ReportDataAggregatore6a94896a42b63fb
com.athaydes.spockframework.report.internal.ReportDataAggregator._getAllAggregatedDataAndPersistLocalData_closure191f640252ee6b4e5
com.athaydes.spockframework.report.internal.SpecDatab50b5bacd37c2ffe
com.athaydes.spockframework.report.internal.SpockReportsConfigurationadb41142397182ff
com.athaydes.spockframework.report.internal.SpockReportsConfiguration._addSet_closure17c2ac9e4c4ed6c91
com.athaydes.spockframework.report.internal.StringFormatHelper66ff3303f6b4cabe
com.athaydes.spockframework.report.internal.StringTemplateProcessorf0cfb2685fad28dc
com.athaydes.spockframework.report.template.TemplateReportAggregator7286274121fbaca0
com.athaydes.spockframework.report.template.TemplateReportAggregator._addData_closure1eeca57a159cabf52
com.athaydes.spockframework.report.template.TemplateReportCreator856fa7a706cf6cf8
com.athaydes.spockframework.report.template.TemplateReportCreator._createFeaturesCallback_closure14c9e7bde12d13ab8
com.athaydes.spockframework.report.template.TemplateReportCreator._createFeaturesCallback_closure1._closure73f177ab92b240a3c
com.athaydes.spockframework.report.template.TemplateReportCreator._createFeaturesCallback_closure1._closure7._closure8d675ffeeafe48220
com.athaydes.spockframework.report.template.TemplateReportCreator._handleUnrolledFeature_closure2ea25f43acc23f265
com.athaydes.spockframework.report.template.TemplateReportCreator._processedBlocksFromCode_closure53e5d7b6eb74781d8
com.athaydes.spockframework.report.template.TemplateReportCreator._processedBlocksFromCode_closure67105aa609a805ff7
com.athaydes.spockframework.report.template.TemplateReportCreator._processedBlocks_closure3ca71dc293acf3be0
com.athaydes.spockframework.report.template.TemplateReportCreator._processedBlocks_closure3._closure9cd05bbd17c347cdd
com.athaydes.spockframework.report.template.TemplateReportCreator._processedBlocks_closure4d4100d4d72e788b1
com.athaydes.spockframework.report.util.Utilsff6e3757be5f2f86
com.athaydes.spockframework.report.util.Utils._aggregateStats_closure5a48a8f40ca58cda3
com.athaydes.spockframework.report.util.Utils._computeErrorCount_closure47d12ef12ffa27f58
com.athaydes.spockframework.report.util.Utils._countFeatures_closure7e52a9a4a6a8e4e7b
com.athaydes.spockframework.report.util.Utils._countProblems_closure86edd78436bf73998
com.athaydes.spockframework.report.util.Utils._countProblems_closure8._closure157add3d758c87105e
com.athaydes.spockframework.report.util.Utils._getSpecFile_closure124733f46b25da05a1
com.athaydes.spockframework.report.util.Utils._getSpecFile_closure12._closure178349efeaa2d05b28
com.athaydes.spockframework.report.util.Utils._getSpecFile_closure12._closure1837f07bfd8c2027c0
com.athaydes.spockframework.report.util.Utils._isEmptyOrContainsOnlyEmptyStrings_closure605a731b33cf608b5
com.athaydes.spockframework.report.util.Utils._iterationData_closure109683306c0c97c3e1
com.athaydes.spockframework.report.util.Utils._iterationData_closure10._closure16372c79afd5855183
com.athaydes.spockframework.report.util.Utils._stats_closure18ba3b6dd56b5688b
com.athaydes.spockframework.report.util.Utils._stats_closure2c8ee9199e4aaf5d5
com.athaydes.spockframework.report.util.Utils._stats_closure3076db7a47801e1f6
com.athaydes.spockframework.report.util.Utils._stats_closure3._closure1437a376a72ad18a94
com.athaydes.spockframework.report.vivid.BlockCode4903eb17e34cdf86
com.athaydes.spockframework.report.vivid.FeatureSourceCode420ef5b98e0f55a2
com.athaydes.spockframework.report.vivid.FeatureSourceCode._addStatement_closure1f59e983fad473139
com.athaydes.spockframework.report.vivid.SpecSourceCodeb27a96a0b0601f71
com.athaydes.spockframework.report.vivid.SpecSourceCode._removeIndent_closure14a5dd54117a4c2e5
com.athaydes.spockframework.report.vivid.SpecSourceCode._removeIndent_closure1._closure4edf174f2b4dc4931
com.athaydes.spockframework.report.vivid.SpecSourceCode._removeIndent_closure23e762e7fbab0cca0
com.athaydes.spockframework.report.vivid.SpecSourceCode._trimLine_closure37845e6025c4a4078
com.athaydes.spockframework.report.vivid.SpecSourceCodeCollector6baf9d5606f1fa59
com.athaydes.spockframework.report.vivid.SpecSourceCodeReader4be68533b0b609d6
com.athaydes.spockframework.report.vivid.VividASTVisitor11479562245187ee
com.athaydes.spockframework.report.vivid.VividAstInspector1e357ada09e43a1c
com.athaydes.spockframework.report.vivid.VividAstInspector.AstSuccessfullyCaptured53f3648574ff645c
com.athaydes.spockframework.report.vivid.VividAstInspector.VividClassLoader6b7806ff38174ee0
com.athaydes.spockframework.report.vivid.VividAstInspector.VividClassLoader.1920e75a17cf3fcff
com.athaydes.spockframework.report.vivid.VividAstInspector._getSpecSource_closure1e2f113088ded10e0
com.esotericsoftware.kryo.io.Input82caa4ac8d2c9ad6
com.esotericsoftware.kryo.io.Output2e152e7951e62ecf
groovy.grape.GrabAnnotationTransformation06796280a35873c0
groovy.json.DefaultJsonGeneratore310e3b21a27d220
groovy.json.JsonGenerator.Options8e762222c38f67f9
groovy.json.JsonOutput16e22b9d1d296b2c
groovy.json.JsonParserType8b827f5f4c58f015
groovy.json.JsonSlurper87260cf2eea752c3
groovy.json.JsonSlurper.1f6bbc9ad480ad23f
groovy.lang.Binding1367f4e721f3d286
groovy.lang.Closureffa0c34a3dd1790f
groovy.lang.Closure.1d9cc3f30a2cf7d19
groovy.lang.Closure.WritableClosure906f8bb5c7cc0375
groovy.lang.DelegatingMetaClass1b481f94e54a4dd1
groovy.lang.EmptyRange76528e83d9d6fe09
groovy.lang.ExpandoMetaClass8e8832cb157de2e7
groovy.lang.GString17a41621d3a57cb5
groovy.lang.GString.1a7261e7d0e6e9e25
groovy.lang.GroovyClassLoader8218a0f70f71a9bd
groovy.lang.GroovyClassLoader.18ab9c388566e14c0
groovy.lang.GroovyClassLoader.277228909db19706b
groovy.lang.GroovyClassLoader.ClassCollectorce5b42ddb916a10c
groovy.lang.GroovyClassLoader.InnerLoader11a6c498a4442789
groovy.lang.GroovyCodeSourced6ab11532d9f9e34
groovy.lang.GroovyObject94784ca717e7c56e
groovy.lang.GroovyObjectSupport5624a8c57ac25a8c
groovy.lang.GroovyRuntimeException6c0faa82b3d52a5c
groovy.lang.GroovyShell1f96590c4a211a25
groovy.lang.GroovySystemb619a6cfadf1fc00
groovy.lang.IntRange36aa08f713abf2bb
groovy.lang.IntRange.IntRangeIteratorb77f13ed4b2eb3fa
groovy.lang.MetaArrayLengthProperty264e49ff1d00bf68
groovy.lang.MetaBeanProperty9d0a93a5fe25026b
groovy.lang.MetaClassImpl7cb475e0086c4e83
groovy.lang.MetaClassImpl.1dfdf5a824f0c25b5
groovy.lang.MetaClassImpl.1MOPIter1125bb7770baef14
groovy.lang.MetaClassImpl.2086955e0ad777889
groovy.lang.MetaClassImpl.3e973e9187c106590
groovy.lang.MetaClassImpl.549b4baebd4cd375b
groovy.lang.MetaClassImpl.DummyMetaMethodc3ccac497763b4ac
groovy.lang.MetaClassImpl.InvokeMethodResult8076d10812958eb4
groovy.lang.MetaClassImpl.MetaConstructorf1151e8c4d019afb
groovy.lang.MetaClassImpl.MethodIndexAction3055051edf4f50cf
groovy.lang.MetaClassRegistry.MetaClassCreationHandle059d54d5a5548aa0
groovy.lang.MetaClassRegistryChangeEvent584c183048bcaf59
groovy.lang.MetaMethodd54d9b8a3857884d
groovy.lang.MetaProperty87d1274d60089a0b
groovy.lang.MissingMethodException9dc9109b80636a36
groovy.lang.MissingPropertyException0d653f9fe50a1eab
groovy.lang.NumberRangea9de643f8efa69fc
groovy.lang.NumberRange.StepIteratorec0242b8a4dd9e93
groovy.lang.ObjectRangeb7da6d406bf22b62
groovy.lang.ObjectRange.StepIteratoracf0183bc46dec3a
groovy.lang.Reference37bb11c9d2954d63
groovy.lang.Script1636eb8f32e75226
groovy.lang.Tuple8bf53120b8d6587c
groovy.lang.Tuple236cf8a0f3d1c10f1
groovy.lang.Tuple330860412ec292fb3
groovy.text.GStringTemplateEnginef1758cba15ce0799
groovy.text.GStringTemplateEngine.GStringTemplatedea9760c1a24e134
groovy.text.TemplateEnginedfba0ddb64710583
groovy.time.BaseDuration177788ced2134410
groovy.time.Duration932bbe63739525c0
groovy.time.TimeDurationbd041132690b28f8
groovy.tmp.templates.GStringTemplateScript1d13b8c2cf4958472
groovy.tmp.templates.GStringTemplateScript1._getTemplate_closure107a6f62690bdcb55
groovy.tmp.templates.GStringTemplateScript1._getTemplate_closure1._closure10c814f656c7ec9e5b
groovy.tmp.templates.GStringTemplateScript1._getTemplate_closure1._closure1149b8e62f135e0a47
groovy.tmp.templates.GStringTemplateScript1._getTemplate_closure1._closure12bbe26d5ca15edde4
groovy.tmp.templates.GStringTemplateScript1._getTemplate_closure1._closure13421a339058f30946
groovy.tmp.templates.GStringTemplateScript1._getTemplate_closure1._closure13._closure2623e6be6685b4be73
groovy.tmp.templates.GStringTemplateScript1._getTemplate_closure1._closure13._closure26._closure32bf828a031900d615
groovy.tmp.templates.GStringTemplateScript1._getTemplate_closure1._closure13._closure2763e491cb0a78c204
groovy.tmp.templates.GStringTemplateScript1._getTemplate_closure1._closure13._closure28f760883390801cc8
groovy.tmp.templates.GStringTemplateScript1._getTemplate_closure1._closure13._closure29f8b601629290b0ed
groovy.tmp.templates.GStringTemplateScript1._getTemplate_closure1._closure13._closure29._closure33bb843338f91c3ecc
groovy.tmp.templates.GStringTemplateScript1._getTemplate_closure1._closure13._closure30b984ebafa25ad074
groovy.tmp.templates.GStringTemplateScript1._getTemplate_closure1._closure13._closure310a165cc24bb818f4
groovy.tmp.templates.GStringTemplateScript1._getTemplate_closure1._closure22b90e5f3c4c7a4c0
groovy.tmp.templates.GStringTemplateScript1._getTemplate_closure1._closure3427e435036020221
groovy.tmp.templates.GStringTemplateScript1._getTemplate_closure1._closure4960fd2eeb3dc20b0
groovy.tmp.templates.GStringTemplateScript1._getTemplate_closure1._closure509ad8860caee372a
groovy.tmp.templates.GStringTemplateScript1._getTemplate_closure1._closure5._closure14351e5d84c3dcf450
groovy.tmp.templates.GStringTemplateScript1._getTemplate_closure1._closure6e9e2b1b6f6a8dbfa
groovy.tmp.templates.GStringTemplateScript1._getTemplate_closure1._closure715c7b46be358b404
groovy.tmp.templates.GStringTemplateScript1._getTemplate_closure1._closure885b217a8edaac048
groovy.tmp.templates.GStringTemplateScript1._getTemplate_closure1._closure9441cfffef47c2cf7
groovy.tmp.templates.GStringTemplateScript1._getTemplate_closure1._closure9._closure16ae1e36fdcde828df
groovy.tmp.templates.GStringTemplateScript1._getTemplate_closure1._closure9._closure174978e767fb277e43
groovy.tmp.templates.GStringTemplateScript10f8214c74a6b06230
groovy.tmp.templates.GStringTemplateScript10._getTemplate_closure1c8b6987ca4396403
groovy.tmp.templates.GStringTemplateScript10._getTemplate_closure1._closure1071c772dcf7973162
groovy.tmp.templates.GStringTemplateScript10._getTemplate_closure1._closure11b4ab7cb1a78e6c32
groovy.tmp.templates.GStringTemplateScript10._getTemplate_closure1._closure1207ce84ddb556b127
groovy.tmp.templates.GStringTemplateScript10._getTemplate_closure1._closure13e6a724b1175c0c27
groovy.tmp.templates.GStringTemplateScript10._getTemplate_closure1._closure13._closure263d2b1d112d0a980f
groovy.tmp.templates.GStringTemplateScript10._getTemplate_closure1._closure13._closure2748ab1b51b2ef6254
groovy.tmp.templates.GStringTemplateScript10._getTemplate_closure1._closure13._closure280376d59faeb5b698
groovy.tmp.templates.GStringTemplateScript10._getTemplate_closure1._closure13._closure29b3267f0f51103b30
groovy.tmp.templates.GStringTemplateScript10._getTemplate_closure1._closure13._closure30604763ef0f5fb295
groovy.tmp.templates.GStringTemplateScript10._getTemplate_closure1._closure13._closure31065cb4e3c4b87cd5
groovy.tmp.templates.GStringTemplateScript10._getTemplate_closure1._closure287e64f53d46bb8d6
groovy.tmp.templates.GStringTemplateScript10._getTemplate_closure1._closure38cb1cc6d13aeff58
groovy.tmp.templates.GStringTemplateScript10._getTemplate_closure1._closure4403b6d0927b93772
groovy.tmp.templates.GStringTemplateScript10._getTemplate_closure1._closure52455ee3cde79f138
groovy.tmp.templates.GStringTemplateScript10._getTemplate_closure1._closure5._closure14d952fc7a22f03ca2
groovy.tmp.templates.GStringTemplateScript10._getTemplate_closure1._closure604693c1a890ddb5b
groovy.tmp.templates.GStringTemplateScript10._getTemplate_closure1._closure70e7bb48a907adb6b
groovy.tmp.templates.GStringTemplateScript10._getTemplate_closure1._closure7._closure158d5339de943a4f5c
groovy.tmp.templates.GStringTemplateScript10._getTemplate_closure1._closure8b0332b17d57a9e1b
groovy.tmp.templates.GStringTemplateScript10._getTemplate_closure1._closure971097672c5ce2066
groovy.tmp.templates.GStringTemplateScript10._getTemplate_closure1._closure9._closure16dac7f33ab14ea6c7
groovy.tmp.templates.GStringTemplateScript10._getTemplate_closure1._closure9._closure17bc022852c37abc48
groovy.tmp.templates.GStringTemplateScript100762e711c99bbb55c
groovy.tmp.templates.GStringTemplateScript100._getTemplate_closure14e6d39946c690105
groovy.tmp.templates.GStringTemplateScript100._getTemplate_closure1._closure21476d4a7e4c59fd5
groovy.tmp.templates.GStringTemplateScript100._getTemplate_closure1._closure38c63ced73601d7fe
groovy.tmp.templates.GStringTemplateScript100._getTemplate_closure1._closure3._closure75bda58a2b5d61201
groovy.tmp.templates.GStringTemplateScript100._getTemplate_closure1._closure4c8ff4c7e54693642
groovy.tmp.templates.GStringTemplateScript100._getTemplate_closure1._closure58b756e2c7bd90334
groovy.tmp.templates.GStringTemplateScript100._getTemplate_closure1._closure5._closure8c6482b254569a1cb
groovy.tmp.templates.GStringTemplateScript100._getTemplate_closure1._closure5._closure99623bfaca106762a
groovy.tmp.templates.GStringTemplateScript100._getTemplate_closure1._closure6b2f6394a28f6823f
groovy.tmp.templates.GStringTemplateScript11aad5096aff4fe1ce
groovy.tmp.templates.GStringTemplateScript11._getTemplate_closure1bbeab0e9264a6f5d
groovy.tmp.templates.GStringTemplateScript11._getTemplate_closure1._closure107e12e7ed403634f8
groovy.tmp.templates.GStringTemplateScript11._getTemplate_closure1._closure10._closure228d53ecc9c89d5615
groovy.tmp.templates.GStringTemplateScript11._getTemplate_closure1._closure10._closure23c3eb5ba2371d7840
groovy.tmp.templates.GStringTemplateScript11._getTemplate_closure1._closure117944a9249639cd37
groovy.tmp.templates.GStringTemplateScript11._getTemplate_closure1._closure11._closure24ebddd11d22a9acfc
groovy.tmp.templates.GStringTemplateScript11._getTemplate_closure1._closure11._closure253fbae380e98beb70
groovy.tmp.templates.GStringTemplateScript11._getTemplate_closure1._closure12215a34084c9ee980
groovy.tmp.templates.GStringTemplateScript11._getTemplate_closure1._closure131054beed3fc7920b
groovy.tmp.templates.GStringTemplateScript11._getTemplate_closure1._closure13._closure26c18b82ae95d0a23c
groovy.tmp.templates.GStringTemplateScript11._getTemplate_closure1._closure13._closure27caedbadb307c2458
groovy.tmp.templates.GStringTemplateScript11._getTemplate_closure1._closure13._closure284846787d0d8ef587
groovy.tmp.templates.GStringTemplateScript11._getTemplate_closure1._closure13._closure29173844bcc826adfa
groovy.tmp.templates.GStringTemplateScript11._getTemplate_closure1._closure13._closure307cae08980a23f6bc
groovy.tmp.templates.GStringTemplateScript11._getTemplate_closure1._closure13._closure3151840ed50f71d793
groovy.tmp.templates.GStringTemplateScript11._getTemplate_closure1._closure2f0f3548a38e37185
groovy.tmp.templates.GStringTemplateScript11._getTemplate_closure1._closure3fde74e1004fa632e
groovy.tmp.templates.GStringTemplateScript11._getTemplate_closure1._closure46c5979c8371c8b87
groovy.tmp.templates.GStringTemplateScript11._getTemplate_closure1._closure539776de0fb95e473
groovy.tmp.templates.GStringTemplateScript11._getTemplate_closure1._closure5._closure14f9b00a5b32f97a6a
groovy.tmp.templates.GStringTemplateScript11._getTemplate_closure1._closure65982579a3d31b90b
groovy.tmp.templates.GStringTemplateScript11._getTemplate_closure1._closure73d176b8560159513
groovy.tmp.templates.GStringTemplateScript11._getTemplate_closure1._closure7._closure1507e287096b06367d
groovy.tmp.templates.GStringTemplateScript11._getTemplate_closure1._closure80a7115ce13e350c5
groovy.tmp.templates.GStringTemplateScript11._getTemplate_closure1._closure99ee3b01b21414cf4
groovy.tmp.templates.GStringTemplateScript11._getTemplate_closure1._closure9._closure1616e0e0fbae9849dc
groovy.tmp.templates.GStringTemplateScript11._getTemplate_closure1._closure9._closure1720d88dcc080bd496
groovy.tmp.templates.GStringTemplateScript125dc9c648154f65cc
groovy.tmp.templates.GStringTemplateScript12._getTemplate_closure12e0ec957a0df72bf
groovy.tmp.templates.GStringTemplateScript12._getTemplate_closure1._closure106e6c58bf98d53a56
groovy.tmp.templates.GStringTemplateScript12._getTemplate_closure1._closure10._closure2202e0a06384b06791
groovy.tmp.templates.GStringTemplateScript12._getTemplate_closure1._closure10._closure23806062ffa91ca1b8
groovy.tmp.templates.GStringTemplateScript12._getTemplate_closure1._closure119f74d79bc4e12e39
groovy.tmp.templates.GStringTemplateScript12._getTemplate_closure1._closure11._closure24646e9db76e849d78
groovy.tmp.templates.GStringTemplateScript12._getTemplate_closure1._closure11._closure257c31dadd778a3288
groovy.tmp.templates.GStringTemplateScript12._getTemplate_closure1._closure124ae7e57646c60069
groovy.tmp.templates.GStringTemplateScript12._getTemplate_closure1._closure13bb401009466b307e
groovy.tmp.templates.GStringTemplateScript12._getTemplate_closure1._closure13._closure26746a226e5cbeec68
groovy.tmp.templates.GStringTemplateScript12._getTemplate_closure1._closure13._closure26._closure32a6a2f187e76ee02f
groovy.tmp.templates.GStringTemplateScript12._getTemplate_closure1._closure13._closure27fc265844b7c9ee4d
groovy.tmp.templates.GStringTemplateScript12._getTemplate_closure1._closure13._closure2895178e5ae8c330a6
groovy.tmp.templates.GStringTemplateScript12._getTemplate_closure1._closure13._closure294b1a0868637d16a5
groovy.tmp.templates.GStringTemplateScript12._getTemplate_closure1._closure13._closure29._closure338bbdb9b909758790
groovy.tmp.templates.GStringTemplateScript12._getTemplate_closure1._closure13._closure305995b50105a73ac7
groovy.tmp.templates.GStringTemplateScript12._getTemplate_closure1._closure13._closure31a9edc08e532b2a59
groovy.tmp.templates.GStringTemplateScript12._getTemplate_closure1._closure269cc78e00d7a2a70
groovy.tmp.templates.GStringTemplateScript12._getTemplate_closure1._closure36e1cc8973d07c7b4
groovy.tmp.templates.GStringTemplateScript12._getTemplate_closure1._closure418ff448b06f24e98
groovy.tmp.templates.GStringTemplateScript12._getTemplate_closure1._closure51e10e98495a1dbae
groovy.tmp.templates.GStringTemplateScript12._getTemplate_closure1._closure5._closure149897103802e2b132
groovy.tmp.templates.GStringTemplateScript12._getTemplate_closure1._closure6bfbfeb1be1751ffb
groovy.tmp.templates.GStringTemplateScript12._getTemplate_closure1._closure768a20a9570a4479b
groovy.tmp.templates.GStringTemplateScript12._getTemplate_closure1._closure7._closure15283044716a42bd1f
groovy.tmp.templates.GStringTemplateScript12._getTemplate_closure1._closure874b756a4584903a6
groovy.tmp.templates.GStringTemplateScript12._getTemplate_closure1._closure91edcfaa10cd0f943
groovy.tmp.templates.GStringTemplateScript12._getTemplate_closure1._closure9._closure16f289d4b88ee378f0
groovy.tmp.templates.GStringTemplateScript12._getTemplate_closure1._closure9._closure1735b7636f55986df5
groovy.tmp.templates.GStringTemplateScript130f3d83564cb0e632
groovy.tmp.templates.GStringTemplateScript13._getTemplate_closure15d52e1c222ac79e1
groovy.tmp.templates.GStringTemplateScript13._getTemplate_closure1._closure1061b9cd8e2f743fcc
groovy.tmp.templates.GStringTemplateScript13._getTemplate_closure1._closure10._closure22e8719bfa40ab7712
groovy.tmp.templates.GStringTemplateScript13._getTemplate_closure1._closure10._closure23bee68a34dce3e910
groovy.tmp.templates.GStringTemplateScript13._getTemplate_closure1._closure11529b020ef5568f3c
groovy.tmp.templates.GStringTemplateScript13._getTemplate_closure1._closure126c7355a3bf0e58ce
groovy.tmp.templates.GStringTemplateScript13._getTemplate_closure1._closure134db38a556ef0ae52
groovy.tmp.templates.GStringTemplateScript13._getTemplate_closure1._closure13._closure2688cabdd1e464d65b
groovy.tmp.templates.GStringTemplateScript13._getTemplate_closure1._closure13._closure26._closure321ae9fae42549d105
groovy.tmp.templates.GStringTemplateScript13._getTemplate_closure1._closure13._closure277e60f9ce355aa841
groovy.tmp.templates.GStringTemplateScript13._getTemplate_closure1._closure13._closure28de2723b84bf873b9
groovy.tmp.templates.GStringTemplateScript13._getTemplate_closure1._closure13._closure29ef0433dbfa4b806f
groovy.tmp.templates.GStringTemplateScript13._getTemplate_closure1._closure13._closure29._closure334838e2191cecdb80
groovy.tmp.templates.GStringTemplateScript13._getTemplate_closure1._closure13._closure30457cde7600db7eee
groovy.tmp.templates.GStringTemplateScript13._getTemplate_closure1._closure13._closure31fe357ab898e2811f
groovy.tmp.templates.GStringTemplateScript13._getTemplate_closure1._closure21ed96339e1f2e323
groovy.tmp.templates.GStringTemplateScript13._getTemplate_closure1._closure31f4a4aea2a535bc2
groovy.tmp.templates.GStringTemplateScript13._getTemplate_closure1._closure4349d504a1657f26d
groovy.tmp.templates.GStringTemplateScript13._getTemplate_closure1._closure503326a58b04dcee5
groovy.tmp.templates.GStringTemplateScript13._getTemplate_closure1._closure5._closure14b875e61912ebf7fa
groovy.tmp.templates.GStringTemplateScript13._getTemplate_closure1._closure6e254809b55497dab
groovy.tmp.templates.GStringTemplateScript13._getTemplate_closure1._closure75bced59a80cb09e3
groovy.tmp.templates.GStringTemplateScript13._getTemplate_closure1._closure7._closure15a281faa6957ec43e
groovy.tmp.templates.GStringTemplateScript13._getTemplate_closure1._closure8cef5687d9ed0cd78
groovy.tmp.templates.GStringTemplateScript13._getTemplate_closure1._closure9f1363cc8e85f95d1
groovy.tmp.templates.GStringTemplateScript13._getTemplate_closure1._closure9._closure163eaec779913597eb
groovy.tmp.templates.GStringTemplateScript13._getTemplate_closure1._closure9._closure17a96dc6f19ee9052b
groovy.tmp.templates.GStringTemplateScript1403f0580dc14e6dc9
groovy.tmp.templates.GStringTemplateScript14._getTemplate_closure1b5c63a2aadf5497a
groovy.tmp.templates.GStringTemplateScript14._getTemplate_closure1._closure104e91261a2913270a
groovy.tmp.templates.GStringTemplateScript14._getTemplate_closure1._closure10._closure22ad8639371cea0498
groovy.tmp.templates.GStringTemplateScript14._getTemplate_closure1._closure10._closure2307761044951f1248
groovy.tmp.templates.GStringTemplateScript14._getTemplate_closure1._closure11e3142ae56150e824
groovy.tmp.templates.GStringTemplateScript14._getTemplate_closure1._closure129d9c478a5277d3bb
groovy.tmp.templates.GStringTemplateScript14._getTemplate_closure1._closure135d694dc1b5327495
groovy.tmp.templates.GStringTemplateScript14._getTemplate_closure1._closure13._closure26afa963efce6270c1
groovy.tmp.templates.GStringTemplateScript14._getTemplate_closure1._closure13._closure26._closure324f18cacf6bbc46d1
groovy.tmp.templates.GStringTemplateScript14._getTemplate_closure1._closure13._closure2791b19d7bb8a27a67
groovy.tmp.templates.GStringTemplateScript14._getTemplate_closure1._closure13._closure289fb462152258bae5
groovy.tmp.templates.GStringTemplateScript14._getTemplate_closure1._closure13._closure29f35e91c135ca601b
groovy.tmp.templates.GStringTemplateScript14._getTemplate_closure1._closure13._closure29._closure33d2a2607974224ff3
groovy.tmp.templates.GStringTemplateScript14._getTemplate_closure1._closure13._closure3013e2ce331aaea231
groovy.tmp.templates.GStringTemplateScript14._getTemplate_closure1._closure13._closure31e93e5c38eb9ed1cc
groovy.tmp.templates.GStringTemplateScript14._getTemplate_closure1._closure2ebb2203466489d9b
groovy.tmp.templates.GStringTemplateScript14._getTemplate_closure1._closure3f9ebc5994efc8e81
groovy.tmp.templates.GStringTemplateScript14._getTemplate_closure1._closure4f1b33e0d652fc4a6
groovy.tmp.templates.GStringTemplateScript14._getTemplate_closure1._closure550dfe14c49c9a414
groovy.tmp.templates.GStringTemplateScript14._getTemplate_closure1._closure5._closure145ad924fe62d52782
groovy.tmp.templates.GStringTemplateScript14._getTemplate_closure1._closure6c3c4921859fc521a
groovy.tmp.templates.GStringTemplateScript14._getTemplate_closure1._closure7c3c8c8b551c7e28b
groovy.tmp.templates.GStringTemplateScript14._getTemplate_closure1._closure7._closure157795c28168cbabdb
groovy.tmp.templates.GStringTemplateScript14._getTemplate_closure1._closure8893bd070cf1da560
groovy.tmp.templates.GStringTemplateScript14._getTemplate_closure1._closure9aea26fd557f3922c
groovy.tmp.templates.GStringTemplateScript14._getTemplate_closure1._closure9._closure168a5bbc3ece151aa9
groovy.tmp.templates.GStringTemplateScript14._getTemplate_closure1._closure9._closure171f68be29eebf1f33
groovy.tmp.templates.GStringTemplateScript1551041d1398b1ee37
groovy.tmp.templates.GStringTemplateScript15._getTemplate_closure1c69a12bf2f864224
groovy.tmp.templates.GStringTemplateScript15._getTemplate_closure1._closure104144b32b9eb22290
groovy.tmp.templates.GStringTemplateScript15._getTemplate_closure1._closure10._closure22471702aed8f1141b
groovy.tmp.templates.GStringTemplateScript15._getTemplate_closure1._closure10._closure2339f0f88fe0e05ae0
groovy.tmp.templates.GStringTemplateScript15._getTemplate_closure1._closure112efbff7050e74921
groovy.tmp.templates.GStringTemplateScript15._getTemplate_closure1._closure12bb08f75fabbf8b1c
groovy.tmp.templates.GStringTemplateScript15._getTemplate_closure1._closure13ab9ad79d9da9eab9
groovy.tmp.templates.GStringTemplateScript15._getTemplate_closure1._closure13._closure265309fc5076b84af2
groovy.tmp.templates.GStringTemplateScript15._getTemplate_closure1._closure13._closure2713f73cf13a313c6b
groovy.tmp.templates.GStringTemplateScript15._getTemplate_closure1._closure13._closure28d484cff78163f9fa
groovy.tmp.templates.GStringTemplateScript15._getTemplate_closure1._closure13._closure295740aa72acfcf6d1
groovy.tmp.templates.GStringTemplateScript15._getTemplate_closure1._closure13._closure300f0ba5441fd2e618
groovy.tmp.templates.GStringTemplateScript15._getTemplate_closure1._closure13._closure31bee6e60e20577a8a
groovy.tmp.templates.GStringTemplateScript15._getTemplate_closure1._closure29ca73bed8ac054c8
groovy.tmp.templates.GStringTemplateScript15._getTemplate_closure1._closure388bd47e459a812f7
groovy.tmp.templates.GStringTemplateScript15._getTemplate_closure1._closure4ddd12acc758a7853
groovy.tmp.templates.GStringTemplateScript15._getTemplate_closure1._closure54dfd62906c25b15f
groovy.tmp.templates.GStringTemplateScript15._getTemplate_closure1._closure5._closure147a3bd2df72dc614a
groovy.tmp.templates.GStringTemplateScript15._getTemplate_closure1._closure69e2ff998edc0304a
groovy.tmp.templates.GStringTemplateScript15._getTemplate_closure1._closure7f0a417baa1a8acf3
groovy.tmp.templates.GStringTemplateScript15._getTemplate_closure1._closure7._closure15fd247c5697f7d2fa
groovy.tmp.templates.GStringTemplateScript15._getTemplate_closure1._closure83379eea909846bbe
groovy.tmp.templates.GStringTemplateScript15._getTemplate_closure1._closure94148a9bcb37cfebe
groovy.tmp.templates.GStringTemplateScript15._getTemplate_closure1._closure9._closure16467cafffd1c3f5b2
groovy.tmp.templates.GStringTemplateScript15._getTemplate_closure1._closure9._closure1783b21bb725ce77ed
groovy.tmp.templates.GStringTemplateScript16a618d23172b16a35
groovy.tmp.templates.GStringTemplateScript16._getTemplate_closure1537e6b01a9135fc6
groovy.tmp.templates.GStringTemplateScript16._getTemplate_closure1._closure10513a0c7946512c3e
groovy.tmp.templates.GStringTemplateScript16._getTemplate_closure1._closure10._closure22c8a44e0494dc259f
groovy.tmp.templates.GStringTemplateScript16._getTemplate_closure1._closure10._closure237a7bc1d27ee18318
groovy.tmp.templates.GStringTemplateScript16._getTemplate_closure1._closure11c8cb81cf023faa2f
groovy.tmp.templates.GStringTemplateScript16._getTemplate_closure1._closure12d0b52621a1e762f5
groovy.tmp.templates.GStringTemplateScript16._getTemplate_closure1._closure13008e7979e40548cc
groovy.tmp.templates.GStringTemplateScript16._getTemplate_closure1._closure13._closure26e6e85c90bfd604a6
groovy.tmp.templates.GStringTemplateScript16._getTemplate_closure1._closure13._closure27253cde6ebd84f67e
groovy.tmp.templates.GStringTemplateScript16._getTemplate_closure1._closure13._closure2809d539d0642e3cdb
groovy.tmp.templates.GStringTemplateScript16._getTemplate_closure1._closure13._closure290b62e6a607a74d8e
groovy.tmp.templates.GStringTemplateScript16._getTemplate_closure1._closure13._closure302a3018dd10562a63
groovy.tmp.templates.GStringTemplateScript16._getTemplate_closure1._closure13._closure31468f28557c0d8740
groovy.tmp.templates.GStringTemplateScript16._getTemplate_closure1._closure205981787bf590f3d
groovy.tmp.templates.GStringTemplateScript16._getTemplate_closure1._closure31b46c1636055b66d
groovy.tmp.templates.GStringTemplateScript16._getTemplate_closure1._closure4a977178f4464bd4c
groovy.tmp.templates.GStringTemplateScript16._getTemplate_closure1._closure56a9ae6f402118e82
groovy.tmp.templates.GStringTemplateScript16._getTemplate_closure1._closure5._closure141b1cc8bc42c7aa12
groovy.tmp.templates.GStringTemplateScript16._getTemplate_closure1._closure678124519318496ba
groovy.tmp.templates.GStringTemplateScript16._getTemplate_closure1._closure7a51176aab1197e7b
groovy.tmp.templates.GStringTemplateScript16._getTemplate_closure1._closure7._closure15d2f6bf2e96b35998
groovy.tmp.templates.GStringTemplateScript16._getTemplate_closure1._closure84dbfadc3422e38dd
groovy.tmp.templates.GStringTemplateScript16._getTemplate_closure1._closure9c177e3069eed4b09
groovy.tmp.templates.GStringTemplateScript16._getTemplate_closure1._closure9._closure16a2159bbcf1b8c49e
groovy.tmp.templates.GStringTemplateScript16._getTemplate_closure1._closure9._closure1796ddf514785dce8e
groovy.tmp.templates.GStringTemplateScript17f4ec972f2b4ee9cb
groovy.tmp.templates.GStringTemplateScript17._getTemplate_closure1202243942b605498
groovy.tmp.templates.GStringTemplateScript17._getTemplate_closure1._closure105eef9948f1f029a4
groovy.tmp.templates.GStringTemplateScript17._getTemplate_closure1._closure10._closure222235759d50c7351c
groovy.tmp.templates.GStringTemplateScript17._getTemplate_closure1._closure10._closure2344fd29190b1ecbb0
groovy.tmp.templates.GStringTemplateScript17._getTemplate_closure1._closure110524545a33880b2a
groovy.tmp.templates.GStringTemplateScript17._getTemplate_closure1._closure12f62196f4582f3a52
groovy.tmp.templates.GStringTemplateScript17._getTemplate_closure1._closure13f67de325cc9ed6e0
groovy.tmp.templates.GStringTemplateScript17._getTemplate_closure1._closure13._closure261a48c32f070c3e95
groovy.tmp.templates.GStringTemplateScript17._getTemplate_closure1._closure13._closure27a77a7fe43f17b072
groovy.tmp.templates.GStringTemplateScript17._getTemplate_closure1._closure13._closure2842e59432c7157fc4
groovy.tmp.templates.GStringTemplateScript17._getTemplate_closure1._closure13._closure29af7cdd159e91db44
groovy.tmp.templates.GStringTemplateScript17._getTemplate_closure1._closure13._closure3036d973aa152a6e4a
groovy.tmp.templates.GStringTemplateScript17._getTemplate_closure1._closure13._closure3111579263b7c42c06
groovy.tmp.templates.GStringTemplateScript17._getTemplate_closure1._closure2728d0c5e53d1c66e
groovy.tmp.templates.GStringTemplateScript17._getTemplate_closure1._closure36a10431e77012a1b
groovy.tmp.templates.GStringTemplateScript17._getTemplate_closure1._closure48515034e54c101b9
groovy.tmp.templates.GStringTemplateScript17._getTemplate_closure1._closure577b8652827fd9bc9
groovy.tmp.templates.GStringTemplateScript17._getTemplate_closure1._closure5._closure143bfe3e9d52ceecda
groovy.tmp.templates.GStringTemplateScript17._getTemplate_closure1._closure625f92e9985b8f4ea
groovy.tmp.templates.GStringTemplateScript17._getTemplate_closure1._closure7967da9a541763003
groovy.tmp.templates.GStringTemplateScript17._getTemplate_closure1._closure7._closure15584701f9698f20b9
groovy.tmp.templates.GStringTemplateScript17._getTemplate_closure1._closure8f7fd931a84b7f603
groovy.tmp.templates.GStringTemplateScript17._getTemplate_closure1._closure92e9d256f7a62279b
groovy.tmp.templates.GStringTemplateScript17._getTemplate_closure1._closure9._closure166e32887dee6e2b85
groovy.tmp.templates.GStringTemplateScript17._getTemplate_closure1._closure9._closure170a07508ab32ca650
groovy.tmp.templates.GStringTemplateScript18bf836486694c7dc3
groovy.tmp.templates.GStringTemplateScript18._getTemplate_closure13257dcd0b7a13ef1
groovy.tmp.templates.GStringTemplateScript18._getTemplate_closure1._closure100f6bdb514a9f1db2
groovy.tmp.templates.GStringTemplateScript18._getTemplate_closure1._closure10._closure22434b0b9e2c5ec28b
groovy.tmp.templates.GStringTemplateScript18._getTemplate_closure1._closure10._closure23b95af532ed1875a9
groovy.tmp.templates.GStringTemplateScript18._getTemplate_closure1._closure111bd5d0182a33641e
groovy.tmp.templates.GStringTemplateScript18._getTemplate_closure1._closure12836b02727b14741e
groovy.tmp.templates.GStringTemplateScript18._getTemplate_closure1._closure13213bf6505380fd42
groovy.tmp.templates.GStringTemplateScript18._getTemplate_closure1._closure13._closure26a82fe0ecebdb4992
groovy.tmp.templates.GStringTemplateScript18._getTemplate_closure1._closure13._closure274a9e1705a6755233
groovy.tmp.templates.GStringTemplateScript18._getTemplate_closure1._closure13._closure288af3ba8ab76fae63
groovy.tmp.templates.GStringTemplateScript18._getTemplate_closure1._closure13._closure2933d7a29398a48d66
groovy.tmp.templates.GStringTemplateScript18._getTemplate_closure1._closure13._closure30870c385724bd93dd
groovy.tmp.templates.GStringTemplateScript18._getTemplate_closure1._closure13._closure31689965559af526e6
groovy.tmp.templates.GStringTemplateScript18._getTemplate_closure1._closure25f4e919cb02df24c
groovy.tmp.templates.GStringTemplateScript18._getTemplate_closure1._closure36605df85a90a1cea
groovy.tmp.templates.GStringTemplateScript18._getTemplate_closure1._closure4932bcb01a294d0db
groovy.tmp.templates.GStringTemplateScript18._getTemplate_closure1._closure5cd41f0ddf1195b60
groovy.tmp.templates.GStringTemplateScript18._getTemplate_closure1._closure5._closure146e454d72a2ba0ae3
groovy.tmp.templates.GStringTemplateScript18._getTemplate_closure1._closure63b32601f28eec9d8
groovy.tmp.templates.GStringTemplateScript18._getTemplate_closure1._closure7251d4cf51300a8aa
groovy.tmp.templates.GStringTemplateScript18._getTemplate_closure1._closure7._closure15c8decf616dd98653
groovy.tmp.templates.GStringTemplateScript18._getTemplate_closure1._closure8c222ddd9e1b4e8ed
groovy.tmp.templates.GStringTemplateScript18._getTemplate_closure1._closure97e5f453de1b544f3
groovy.tmp.templates.GStringTemplateScript18._getTemplate_closure1._closure9._closure167bff6d324ff9de1b
groovy.tmp.templates.GStringTemplateScript18._getTemplate_closure1._closure9._closure174ad704a498f1fabf
groovy.tmp.templates.GStringTemplateScript19ed77219830b3fe3d
groovy.tmp.templates.GStringTemplateScript19._getTemplate_closure1410bf44535d235af
groovy.tmp.templates.GStringTemplateScript19._getTemplate_closure1._closure1000be4e60fd3e1828
groovy.tmp.templates.GStringTemplateScript19._getTemplate_closure1._closure11d63a058d1b84c51b
groovy.tmp.templates.GStringTemplateScript19._getTemplate_closure1._closure12a5ffb2a782dc2cb9
groovy.tmp.templates.GStringTemplateScript19._getTemplate_closure1._closure13d7c86c0c7b1b636e
groovy.tmp.templates.GStringTemplateScript19._getTemplate_closure1._closure13._closure26548f7f53530173a1
groovy.tmp.templates.GStringTemplateScript19._getTemplate_closure1._closure13._closure26._closure329027b73db03e3a06
groovy.tmp.templates.GStringTemplateScript19._getTemplate_closure1._closure13._closure27c8d8b68f24e6143f
groovy.tmp.templates.GStringTemplateScript19._getTemplate_closure1._closure13._closure28c1c317681454ed7c
groovy.tmp.templates.GStringTemplateScript19._getTemplate_closure1._closure13._closure2997c9992001921bac
groovy.tmp.templates.GStringTemplateScript19._getTemplate_closure1._closure13._closure29._closure33a31888599b148325
groovy.tmp.templates.GStringTemplateScript19._getTemplate_closure1._closure13._closure309be5532021c1d7f4
groovy.tmp.templates.GStringTemplateScript19._getTemplate_closure1._closure13._closure313f41df63513c8da0
groovy.tmp.templates.GStringTemplateScript19._getTemplate_closure1._closure2285b8a455ca53b1f
groovy.tmp.templates.GStringTemplateScript19._getTemplate_closure1._closure317535df8be5e809c
groovy.tmp.templates.GStringTemplateScript19._getTemplate_closure1._closure4bf49dfc0b2316c2e
groovy.tmp.templates.GStringTemplateScript19._getTemplate_closure1._closure5d0637301d4f54e2b
groovy.tmp.templates.GStringTemplateScript19._getTemplate_closure1._closure5._closure144ea7bb53b2b34c2b
groovy.tmp.templates.GStringTemplateScript19._getTemplate_closure1._closure666d90b9f9cd2ab88
groovy.tmp.templates.GStringTemplateScript19._getTemplate_closure1._closure7167193fae36fe6d2
groovy.tmp.templates.GStringTemplateScript19._getTemplate_closure1._closure7._closure15426f71b692e5ff72
groovy.tmp.templates.GStringTemplateScript19._getTemplate_closure1._closure87860e300272d2633
groovy.tmp.templates.GStringTemplateScript19._getTemplate_closure1._closure991b58354053a2861
groovy.tmp.templates.GStringTemplateScript19._getTemplate_closure1._closure9._closure16b7d87ef3502f3100
groovy.tmp.templates.GStringTemplateScript19._getTemplate_closure1._closure9._closure17d60da13a53809261
groovy.tmp.templates.GStringTemplateScript2bef357ad83429109
groovy.tmp.templates.GStringTemplateScript2._getTemplate_closure12e19a19620e00db6
groovy.tmp.templates.GStringTemplateScript2._getTemplate_closure1._closure1099e9113da554e960
groovy.tmp.templates.GStringTemplateScript2._getTemplate_closure1._closure116bb91bc8783cb230
groovy.tmp.templates.GStringTemplateScript2._getTemplate_closure1._closure12f3eb4efd4b045e88
groovy.tmp.templates.GStringTemplateScript2._getTemplate_closure1._closure137321eeaf3ba43136
groovy.tmp.templates.GStringTemplateScript2._getTemplate_closure1._closure13._closure2651b239e2ff7d7645
groovy.tmp.templates.GStringTemplateScript2._getTemplate_closure1._closure13._closure26._closure32eb97a3fd7b143e12
groovy.tmp.templates.GStringTemplateScript2._getTemplate_closure1._closure13._closure274b02e78e99847b35
groovy.tmp.templates.GStringTemplateScript2._getTemplate_closure1._closure13._closure285a45b9b9c1637af6
groovy.tmp.templates.GStringTemplateScript2._getTemplate_closure1._closure13._closure29eff82a490baa486e
groovy.tmp.templates.GStringTemplateScript2._getTemplate_closure1._closure13._closure29._closure33a8f76a9d44df6e79
groovy.tmp.templates.GStringTemplateScript2._getTemplate_closure1._closure13._closure3011a2f8b11d415f6d
groovy.tmp.templates.GStringTemplateScript2._getTemplate_closure1._closure13._closure31c8c6780584683431
groovy.tmp.templates.GStringTemplateScript2._getTemplate_closure1._closure29a317653b0e9089d
groovy.tmp.templates.GStringTemplateScript2._getTemplate_closure1._closure3ba0016773072f43a
groovy.tmp.templates.GStringTemplateScript2._getTemplate_closure1._closure4f22ba0df3b064aab
groovy.tmp.templates.GStringTemplateScript2._getTemplate_closure1._closure536d2a9e315ed97a3
groovy.tmp.templates.GStringTemplateScript2._getTemplate_closure1._closure5._closure14f9fe4d3f159b6a25
groovy.tmp.templates.GStringTemplateScript2._getTemplate_closure1._closure678c8daee0327738f
groovy.tmp.templates.GStringTemplateScript2._getTemplate_closure1._closure7aee8851b6d99b900
groovy.tmp.templates.GStringTemplateScript2._getTemplate_closure1._closure7._closure152de897fb9608ddb6
groovy.tmp.templates.GStringTemplateScript2._getTemplate_closure1._closure8500c7d68a384f336
groovy.tmp.templates.GStringTemplateScript2._getTemplate_closure1._closure9d4bbffcc067041c3
groovy.tmp.templates.GStringTemplateScript2._getTemplate_closure1._closure9._closure16323e377070d4e0bf
[coverage report residue: JaCoCo class-id listing for generated groovy.tmp.templates.GStringTemplateScript classes (scripts 2 through 42) and their nested _getTemplate closure classes, each fully-qualified name suffixed with its 16-hex-digit JaCoCo class id; individual entries omitted]
groovy.tmp.templates.GStringTemplateScript42._getTemplate_closure1._closure89d441c61e695fd53
groovy.tmp.templates.GStringTemplateScript42._getTemplate_closure1._closure9bc6cbb7ec2a64b9d
groovy.tmp.templates.GStringTemplateScript42._getTemplate_closure1._closure9._closure16b4656fe44b83f5a3
groovy.tmp.templates.GStringTemplateScript42._getTemplate_closure1._closure9._closure17dcf8d35cb07ed93e
groovy.tmp.templates.GStringTemplateScript430b84a6072b8ee7be
groovy.tmp.templates.GStringTemplateScript43._getTemplate_closure16f4ccd48e127c6c7
groovy.tmp.templates.GStringTemplateScript43._getTemplate_closure1._closure10fbe94e4fd81d1adf
groovy.tmp.templates.GStringTemplateScript43._getTemplate_closure1._closure114bd1528d34a1e619
groovy.tmp.templates.GStringTemplateScript43._getTemplate_closure1._closure124e9f93d03d10b1f4
groovy.tmp.templates.GStringTemplateScript43._getTemplate_closure1._closure1386deb7a6427a5bb4
groovy.tmp.templates.GStringTemplateScript43._getTemplate_closure1._closure13._closure261b549f30a53f6489
groovy.tmp.templates.GStringTemplateScript43._getTemplate_closure1._closure13._closure26._closure32c3c49dc2c9836af0
groovy.tmp.templates.GStringTemplateScript43._getTemplate_closure1._closure13._closure275aaba5e995d2761f
groovy.tmp.templates.GStringTemplateScript43._getTemplate_closure1._closure13._closure288790d1a223d7a5f6
groovy.tmp.templates.GStringTemplateScript43._getTemplate_closure1._closure13._closure296c50550e87b66ca2
groovy.tmp.templates.GStringTemplateScript43._getTemplate_closure1._closure13._closure29._closure333cf5722c3caf27ac
groovy.tmp.templates.GStringTemplateScript43._getTemplate_closure1._closure13._closure3097e392b0abcbf3ba
groovy.tmp.templates.GStringTemplateScript43._getTemplate_closure1._closure13._closure31801dc0ea75066e19
groovy.tmp.templates.GStringTemplateScript43._getTemplate_closure1._closure20de2224d5fac48cf
groovy.tmp.templates.GStringTemplateScript43._getTemplate_closure1._closure3f8de4661a3195d2f
groovy.tmp.templates.GStringTemplateScript43._getTemplate_closure1._closure403b0ba0dd305c122
groovy.tmp.templates.GStringTemplateScript43._getTemplate_closure1._closure55d4bc2d41cff92a5
groovy.tmp.templates.GStringTemplateScript43._getTemplate_closure1._closure5._closure1437568f37b7bbdba7
groovy.tmp.templates.GStringTemplateScript43._getTemplate_closure1._closure6f87ec65ed72db040
groovy.tmp.templates.GStringTemplateScript43._getTemplate_closure1._closure795b00a39b0fadb94
groovy.tmp.templates.GStringTemplateScript43._getTemplate_closure1._closure7._closure1549518de31d7d09a3
groovy.tmp.templates.GStringTemplateScript43._getTemplate_closure1._closure8270622b8200c338d
groovy.tmp.templates.GStringTemplateScript43._getTemplate_closure1._closure953867d172629270f
groovy.tmp.templates.GStringTemplateScript43._getTemplate_closure1._closure9._closure1678427c2554551ab8
groovy.tmp.templates.GStringTemplateScript43._getTemplate_closure1._closure9._closure17402276c27b0fb1e0
groovy.tmp.templates.GStringTemplateScript4407497d5ca6706c45
groovy.tmp.templates.GStringTemplateScript44._getTemplate_closure187d816a06e7ef65c
groovy.tmp.templates.GStringTemplateScript44._getTemplate_closure1._closure10d4c1a5dbde7a0219
groovy.tmp.templates.GStringTemplateScript44._getTemplate_closure1._closure11fa5e7a66a0a78101
groovy.tmp.templates.GStringTemplateScript44._getTemplate_closure1._closure12bf7081f9d0693a81
groovy.tmp.templates.GStringTemplateScript44._getTemplate_closure1._closure139604703299b88173
groovy.tmp.templates.GStringTemplateScript44._getTemplate_closure1._closure13._closure263c37410e8f39c213
groovy.tmp.templates.GStringTemplateScript44._getTemplate_closure1._closure13._closure27b57ac15c182aa439
groovy.tmp.templates.GStringTemplateScript44._getTemplate_closure1._closure13._closure28c603900f4a776caa
groovy.tmp.templates.GStringTemplateScript44._getTemplate_closure1._closure13._closure29700af71448378cd6
groovy.tmp.templates.GStringTemplateScript44._getTemplate_closure1._closure13._closure30c17d82f5b1be2f65
groovy.tmp.templates.GStringTemplateScript44._getTemplate_closure1._closure13._closure319716e66a067a3eca
groovy.tmp.templates.GStringTemplateScript44._getTemplate_closure1._closure2f8896140d8163677
groovy.tmp.templates.GStringTemplateScript44._getTemplate_closure1._closure31e7fc912c7b6886c
groovy.tmp.templates.GStringTemplateScript44._getTemplate_closure1._closure4c69ed44aa07df7e9
groovy.tmp.templates.GStringTemplateScript44._getTemplate_closure1._closure50ea649c0e57bf854
groovy.tmp.templates.GStringTemplateScript44._getTemplate_closure1._closure5._closure14d5fa4dd0c7850bdf
groovy.tmp.templates.GStringTemplateScript44._getTemplate_closure1._closure6d9eed4dddb989ff1
groovy.tmp.templates.GStringTemplateScript44._getTemplate_closure1._closure70db6171661f630fc
groovy.tmp.templates.GStringTemplateScript44._getTemplate_closure1._closure7._closure159c45b5c4e0c86646
groovy.tmp.templates.GStringTemplateScript44._getTemplate_closure1._closure860c89ab571c15b95
groovy.tmp.templates.GStringTemplateScript44._getTemplate_closure1._closure90c122e0a998520f2
groovy.tmp.templates.GStringTemplateScript44._getTemplate_closure1._closure9._closure16ccb707620b7597fa
groovy.tmp.templates.GStringTemplateScript44._getTemplate_closure1._closure9._closure17f6270e1a0b59abf8
groovy.tmp.templates.GStringTemplateScript4555bd3842ff8fefbb
groovy.tmp.templates.GStringTemplateScript45._getTemplate_closure1f4843e35ec0dfd02
groovy.tmp.templates.GStringTemplateScript45._getTemplate_closure1._closure10db1430ea69db0783
groovy.tmp.templates.GStringTemplateScript45._getTemplate_closure1._closure10._closure2295c7d77b26256349
groovy.tmp.templates.GStringTemplateScript45._getTemplate_closure1._closure10._closure2334b465c40642588a
groovy.tmp.templates.GStringTemplateScript45._getTemplate_closure1._closure1137b1aff391102004
groovy.tmp.templates.GStringTemplateScript45._getTemplate_closure1._closure1299e4312c29a16226
groovy.tmp.templates.GStringTemplateScript45._getTemplate_closure1._closure1360f7ea6eb1231f5f
groovy.tmp.templates.GStringTemplateScript45._getTemplate_closure1._closure13._closure26c097deb137e3f820
groovy.tmp.templates.GStringTemplateScript45._getTemplate_closure1._closure13._closure26._closure322a7ea68a4551cc0e
groovy.tmp.templates.GStringTemplateScript45._getTemplate_closure1._closure13._closure27373c60d69ab9e235
groovy.tmp.templates.GStringTemplateScript45._getTemplate_closure1._closure13._closure288d333dede94c2fb5
groovy.tmp.templates.GStringTemplateScript45._getTemplate_closure1._closure13._closure29d414cca7d1011a1c
groovy.tmp.templates.GStringTemplateScript45._getTemplate_closure1._closure13._closure29._closure3365eaabec41f8efcf
groovy.tmp.templates.GStringTemplateScript45._getTemplate_closure1._closure13._closure30dd94e982b4c26b4c
groovy.tmp.templates.GStringTemplateScript45._getTemplate_closure1._closure13._closure31c0ce5c5ccdb3958c
groovy.tmp.templates.GStringTemplateScript45._getTemplate_closure1._closure28f9c7a99349eff24
groovy.tmp.templates.GStringTemplateScript45._getTemplate_closure1._closure36f294b6fd0e2141a
groovy.tmp.templates.GStringTemplateScript45._getTemplate_closure1._closure4eafcc08bb0d84b1c
groovy.tmp.templates.GStringTemplateScript45._getTemplate_closure1._closure51384ca1cc097ed1f
groovy.tmp.templates.GStringTemplateScript45._getTemplate_closure1._closure5._closure14f518bbf1d78c4d17
groovy.tmp.templates.GStringTemplateScript45._getTemplate_closure1._closure68405bf5d6fa4fda1
groovy.tmp.templates.GStringTemplateScript45._getTemplate_closure1._closure73edac81991997e84
groovy.tmp.templates.GStringTemplateScript45._getTemplate_closure1._closure7._closure1516f40b131ff41f67
groovy.tmp.templates.GStringTemplateScript45._getTemplate_closure1._closure8da8aa46cb758954b
groovy.tmp.templates.GStringTemplateScript45._getTemplate_closure1._closure9e3f8e8637d0a4c60
groovy.tmp.templates.GStringTemplateScript45._getTemplate_closure1._closure9._closure16009014a314a378e1
groovy.tmp.templates.GStringTemplateScript45._getTemplate_closure1._closure9._closure176afdab84c028c326
groovy.tmp.templates.GStringTemplateScript46a2a1f760158f6bb9
groovy.tmp.templates.GStringTemplateScript46._getTemplate_closure16160478b6a98e0e0
groovy.tmp.templates.GStringTemplateScript46._getTemplate_closure1._closure10cb6a8fb8b138092d
groovy.tmp.templates.GStringTemplateScript46._getTemplate_closure1._closure11d181d14cc3c8c30a
groovy.tmp.templates.GStringTemplateScript46._getTemplate_closure1._closure12f259e05223f98bcf
groovy.tmp.templates.GStringTemplateScript46._getTemplate_closure1._closure13cbe3448ac88fbd2a
groovy.tmp.templates.GStringTemplateScript46._getTemplate_closure1._closure13._closure2675767e71fe8db674
groovy.tmp.templates.GStringTemplateScript46._getTemplate_closure1._closure13._closure26._closure325ea3bb2e03389f71
groovy.tmp.templates.GStringTemplateScript46._getTemplate_closure1._closure13._closure2701f782491d0c2820
groovy.tmp.templates.GStringTemplateScript46._getTemplate_closure1._closure13._closure285062cbca0c01ea94
groovy.tmp.templates.GStringTemplateScript46._getTemplate_closure1._closure13._closure29883680737a5aa143
groovy.tmp.templates.GStringTemplateScript46._getTemplate_closure1._closure13._closure29._closure339165470c7f530bfe
groovy.tmp.templates.GStringTemplateScript46._getTemplate_closure1._closure13._closure30f8af541bbb46a737
groovy.tmp.templates.GStringTemplateScript46._getTemplate_closure1._closure13._closure3138a7920791e96846
groovy.tmp.templates.GStringTemplateScript46._getTemplate_closure1._closure216a356f30107a4d1
groovy.tmp.templates.GStringTemplateScript46._getTemplate_closure1._closure3fcd2cde8e91fb080
groovy.tmp.templates.GStringTemplateScript46._getTemplate_closure1._closure49e5afdc881368e03
groovy.tmp.templates.GStringTemplateScript46._getTemplate_closure1._closure534e34e78aea3d2c2
groovy.tmp.templates.GStringTemplateScript46._getTemplate_closure1._closure5._closure14943fa192e797864f
groovy.tmp.templates.GStringTemplateScript46._getTemplate_closure1._closure6623803dcb3e05b51
groovy.tmp.templates.GStringTemplateScript46._getTemplate_closure1._closure76b6fa9098128ac0c
groovy.tmp.templates.GStringTemplateScript46._getTemplate_closure1._closure7._closure153926c86b1eb09405
groovy.tmp.templates.GStringTemplateScript46._getTemplate_closure1._closure8a44ce706fcf2c628
groovy.tmp.templates.GStringTemplateScript46._getTemplate_closure1._closure963c7a2d9509bf9d7
groovy.tmp.templates.GStringTemplateScript46._getTemplate_closure1._closure9._closure16e4f920e034d849cd
groovy.tmp.templates.GStringTemplateScript46._getTemplate_closure1._closure9._closure177f9245279dbb7a45
groovy.tmp.templates.GStringTemplateScript47f055b27e4c70e847
groovy.tmp.templates.GStringTemplateScript47._getTemplate_closure1123c6f1ee8ebebbe
groovy.tmp.templates.GStringTemplateScript47._getTemplate_closure1._closure10c4bf1a8906990cb7
groovy.tmp.templates.GStringTemplateScript47._getTemplate_closure1._closure111c6e04d9f27f620f
groovy.tmp.templates.GStringTemplateScript47._getTemplate_closure1._closure12d4cd5087da31d368
groovy.tmp.templates.GStringTemplateScript47._getTemplate_closure1._closure133d10ded6e0142306
groovy.tmp.templates.GStringTemplateScript47._getTemplate_closure1._closure13._closure2689d6e1ce46578c47
groovy.tmp.templates.GStringTemplateScript47._getTemplate_closure1._closure13._closure26._closure32e2e8b04dc11fae5b
groovy.tmp.templates.GStringTemplateScript47._getTemplate_closure1._closure13._closure2783b123c39f9f6e2c
groovy.tmp.templates.GStringTemplateScript47._getTemplate_closure1._closure13._closure281b526628af3aa98b
groovy.tmp.templates.GStringTemplateScript47._getTemplate_closure1._closure13._closure292c28bbc0e36c3789
groovy.tmp.templates.GStringTemplateScript47._getTemplate_closure1._closure13._closure29._closure3352e01cac6aca57ee
groovy.tmp.templates.GStringTemplateScript47._getTemplate_closure1._closure13._closure30e4463f6cbe3ae31e
groovy.tmp.templates.GStringTemplateScript47._getTemplate_closure1._closure13._closure316f7f28315a20c300
groovy.tmp.templates.GStringTemplateScript47._getTemplate_closure1._closure261b64d2aed8f6d82
groovy.tmp.templates.GStringTemplateScript47._getTemplate_closure1._closure38d844f95fe4b2cf6
groovy.tmp.templates.GStringTemplateScript47._getTemplate_closure1._closure4b238e909919332f6
groovy.tmp.templates.GStringTemplateScript47._getTemplate_closure1._closure529c1cda48b4fc789
groovy.tmp.templates.GStringTemplateScript47._getTemplate_closure1._closure5._closure14b4dd57b3f79ec087
groovy.tmp.templates.GStringTemplateScript47._getTemplate_closure1._closure63fd3685c07dc3901
groovy.tmp.templates.GStringTemplateScript47._getTemplate_closure1._closure7580376067147e274
groovy.tmp.templates.GStringTemplateScript47._getTemplate_closure1._closure7._closure15b39776bce18ced24
groovy.tmp.templates.GStringTemplateScript47._getTemplate_closure1._closure81e0ed9df3a6b08f6
groovy.tmp.templates.GStringTemplateScript47._getTemplate_closure1._closure98c2d64b0b4149545
groovy.tmp.templates.GStringTemplateScript47._getTemplate_closure1._closure9._closure1628de33212b0ea6d6
groovy.tmp.templates.GStringTemplateScript47._getTemplate_closure1._closure9._closure17e348e0b956ca129b
groovy.tmp.templates.GStringTemplateScript48bb3a41d70e727c4f
groovy.tmp.templates.GStringTemplateScript48._getTemplate_closure10049f05a742a81d7
groovy.tmp.templates.GStringTemplateScript48._getTemplate_closure1._closure10953b5890bdf638a1
groovy.tmp.templates.GStringTemplateScript48._getTemplate_closure1._closure10._closure22919bde4bd28ab5d9
groovy.tmp.templates.GStringTemplateScript48._getTemplate_closure1._closure10._closure23b41e68790bba77c3
groovy.tmp.templates.GStringTemplateScript48._getTemplate_closure1._closure11029f809bebc40d3b
groovy.tmp.templates.GStringTemplateScript48._getTemplate_closure1._closure11._closure24f715e39f38be4f30
groovy.tmp.templates.GStringTemplateScript48._getTemplate_closure1._closure11._closure25484fd05bd52ce4f3
groovy.tmp.templates.GStringTemplateScript48._getTemplate_closure1._closure12a187c401f90a9d24
groovy.tmp.templates.GStringTemplateScript48._getTemplate_closure1._closure13ea56cba37f0a08a4
groovy.tmp.templates.GStringTemplateScript48._getTemplate_closure1._closure13._closure263bb1c20daa80fb40
groovy.tmp.templates.GStringTemplateScript48._getTemplate_closure1._closure13._closure26._closure32f541db789ed3b0d9
groovy.tmp.templates.GStringTemplateScript48._getTemplate_closure1._closure13._closure276e554b2206fd8c6d
groovy.tmp.templates.GStringTemplateScript48._getTemplate_closure1._closure13._closure28d3444890df40782c
groovy.tmp.templates.GStringTemplateScript48._getTemplate_closure1._closure13._closure29b083c446e55961ab
groovy.tmp.templates.GStringTemplateScript48._getTemplate_closure1._closure13._closure29._closure33145043ccaece2319
groovy.tmp.templates.GStringTemplateScript48._getTemplate_closure1._closure13._closure30559374918fad1e89
groovy.tmp.templates.GStringTemplateScript48._getTemplate_closure1._closure13._closure3116b1df077711c9e0
groovy.tmp.templates.GStringTemplateScript48._getTemplate_closure1._closure24c75d0e80e7359a0
groovy.tmp.templates.GStringTemplateScript48._getTemplate_closure1._closure38191d30e20401a07
groovy.tmp.templates.GStringTemplateScript48._getTemplate_closure1._closure4a406214667c6e394
groovy.tmp.templates.GStringTemplateScript48._getTemplate_closure1._closure5933858515dab0720
groovy.tmp.templates.GStringTemplateScript48._getTemplate_closure1._closure5._closure14e166245c07ea26be
groovy.tmp.templates.GStringTemplateScript48._getTemplate_closure1._closure6211826daaa8a0433
groovy.tmp.templates.GStringTemplateScript48._getTemplate_closure1._closure7eb63935623317add
groovy.tmp.templates.GStringTemplateScript48._getTemplate_closure1._closure7._closure15230eb824e5da4bce
groovy.tmp.templates.GStringTemplateScript48._getTemplate_closure1._closure82bd1971c5f681618
groovy.tmp.templates.GStringTemplateScript48._getTemplate_closure1._closure9dcef04e22fc3f62d
groovy.tmp.templates.GStringTemplateScript48._getTemplate_closure1._closure9._closure163d13d66e8a995348
groovy.tmp.templates.GStringTemplateScript48._getTemplate_closure1._closure9._closure17a398b4977d174e74
groovy.tmp.templates.GStringTemplateScript49e9ce04c9578dffb1
groovy.tmp.templates.GStringTemplateScript49._getTemplate_closure17315d8cff6598a89
groovy.tmp.templates.GStringTemplateScript49._getTemplate_closure1._closure109aeecda10a573d3b
groovy.tmp.templates.GStringTemplateScript49._getTemplate_closure1._closure11cf70550eda73ac3e
groovy.tmp.templates.GStringTemplateScript49._getTemplate_closure1._closure12871374d400c2c583
groovy.tmp.templates.GStringTemplateScript49._getTemplate_closure1._closure131ca551ff57919688
groovy.tmp.templates.GStringTemplateScript49._getTemplate_closure1._closure13._closure26c7115db2125ac173
groovy.tmp.templates.GStringTemplateScript49._getTemplate_closure1._closure13._closure27ec13eaa8846eca61
groovy.tmp.templates.GStringTemplateScript49._getTemplate_closure1._closure13._closure289874e5727c7b3b33
groovy.tmp.templates.GStringTemplateScript49._getTemplate_closure1._closure13._closure29149dfff57c6ff761
groovy.tmp.templates.GStringTemplateScript49._getTemplate_closure1._closure13._closure30497a1fe68ad15aa0
groovy.tmp.templates.GStringTemplateScript49._getTemplate_closure1._closure13._closure3141696531bcd862a6
groovy.tmp.templates.GStringTemplateScript49._getTemplate_closure1._closure23b60cb31e2fb90f3
groovy.tmp.templates.GStringTemplateScript49._getTemplate_closure1._closure3f0c7517337148671
groovy.tmp.templates.GStringTemplateScript49._getTemplate_closure1._closure48864358777635f61
groovy.tmp.templates.GStringTemplateScript49._getTemplate_closure1._closure58e1adb8d7847126b
groovy.tmp.templates.GStringTemplateScript49._getTemplate_closure1._closure5._closure14c184d27d17e36076
groovy.tmp.templates.GStringTemplateScript49._getTemplate_closure1._closure67cf34d5a1eb66663
groovy.tmp.templates.GStringTemplateScript49._getTemplate_closure1._closure7d80f4c59d35e34a5
groovy.tmp.templates.GStringTemplateScript49._getTemplate_closure1._closure7._closure15a9bf06f31ae632ef
groovy.tmp.templates.GStringTemplateScript49._getTemplate_closure1._closure89193a9c599f1d8c6
groovy.tmp.templates.GStringTemplateScript49._getTemplate_closure1._closure93305c28bcb4c9abf
groovy.tmp.templates.GStringTemplateScript49._getTemplate_closure1._closure9._closure16f134c5af954fbc53
groovy.tmp.templates.GStringTemplateScript49._getTemplate_closure1._closure9._closure173f421109b66626aa
groovy.tmp.templates.GStringTemplateScript54425562fbe5e48d6
groovy.tmp.templates.GStringTemplateScript5._getTemplate_closure1f5f23c672f90c22e
groovy.tmp.templates.GStringTemplateScript5._getTemplate_closure1._closure109ab982cdbe4c2a00
groovy.tmp.templates.GStringTemplateScript5._getTemplate_closure1._closure1131ba4b5b8827aaf3
groovy.tmp.templates.GStringTemplateScript5._getTemplate_closure1._closure125bfe1721c678d974
groovy.tmp.templates.GStringTemplateScript5._getTemplate_closure1._closure1391bf1fc4dc38a9f9
groovy.tmp.templates.GStringTemplateScript5._getTemplate_closure1._closure13._closure260b28b469dca9a1c4
groovy.tmp.templates.GStringTemplateScript5._getTemplate_closure1._closure13._closure279293d93930d69e47
groovy.tmp.templates.GStringTemplateScript5._getTemplate_closure1._closure13._closure2844ecca2b5204949e
groovy.tmp.templates.GStringTemplateScript5._getTemplate_closure1._closure13._closure29cc5e65074ec9eee9
groovy.tmp.templates.GStringTemplateScript5._getTemplate_closure1._closure13._closure30960cd079f67f3ba9
groovy.tmp.templates.GStringTemplateScript5._getTemplate_closure1._closure13._closure312cd62dc8a1787606
groovy.tmp.templates.GStringTemplateScript5._getTemplate_closure1._closure22612df8c8b51cbed
groovy.tmp.templates.GStringTemplateScript5._getTemplate_closure1._closure3ad2970dbc1434a04
groovy.tmp.templates.GStringTemplateScript5._getTemplate_closure1._closure48980955352fb476b
groovy.tmp.templates.GStringTemplateScript5._getTemplate_closure1._closure55d060a65e1144936
groovy.tmp.templates.GStringTemplateScript5._getTemplate_closure1._closure5._closure1404619c21f4d6dce2
groovy.tmp.templates.GStringTemplateScript5._getTemplate_closure1._closure60a7a2a97bb42bb48
groovy.tmp.templates.GStringTemplateScript5._getTemplate_closure1._closure701ac08d5f5a4a7f5
groovy.tmp.templates.GStringTemplateScript5._getTemplate_closure1._closure7._closure15c7c979d7e400ec9e
groovy.tmp.templates.GStringTemplateScript5._getTemplate_closure1._closure897e57157fa3d7b1e
groovy.tmp.templates.GStringTemplateScript5._getTemplate_closure1._closure9a5970047b39340b9
groovy.tmp.templates.GStringTemplateScript5._getTemplate_closure1._closure9._closure165e61cb1061b937a1
groovy.tmp.templates.GStringTemplateScript5._getTemplate_closure1._closure9._closure173bb87d459cb2ab18
groovy.tmp.templates.GStringTemplateScript506dea9d60dfd79c3f
groovy.tmp.templates.GStringTemplateScript50._getTemplate_closure185fbe8def230a82e
groovy.tmp.templates.GStringTemplateScript50._getTemplate_closure1._closure10267824883149b574
groovy.tmp.templates.GStringTemplateScript50._getTemplate_closure1._closure11aa5cc3e7f348b2b6
groovy.tmp.templates.GStringTemplateScript50._getTemplate_closure1._closure11._closure2409a6ae6a81a2d03d
groovy.tmp.templates.GStringTemplateScript50._getTemplate_closure1._closure11._closure25efc610e8b1a35efa
groovy.tmp.templates.GStringTemplateScript50._getTemplate_closure1._closure12cf14d61ee2b19045
groovy.tmp.templates.GStringTemplateScript50._getTemplate_closure1._closure13107cead867fe625f
groovy.tmp.templates.GStringTemplateScript50._getTemplate_closure1._closure13._closure2681999f6fd3e9f0e7
groovy.tmp.templates.GStringTemplateScript50._getTemplate_closure1._closure13._closure2767a201d798e52f4c
groovy.tmp.templates.GStringTemplateScript50._getTemplate_closure1._closure13._closure28481a172824395b94
groovy.tmp.templates.GStringTemplateScript50._getTemplate_closure1._closure13._closure298a3607e19f74e16b
groovy.tmp.templates.GStringTemplateScript50._getTemplate_closure1._closure13._closure3088f4c642d34a4385
groovy.tmp.templates.GStringTemplateScript50._getTemplate_closure1._closure13._closure311183d60b1f975a78
groovy.tmp.templates.GStringTemplateScript50._getTemplate_closure1._closure2e17a1b3cb3d99bf3
groovy.tmp.templates.GStringTemplateScript50._getTemplate_closure1._closure32ff49664e7f3ad29
groovy.tmp.templates.GStringTemplateScript50._getTemplate_closure1._closure4e94ae55a23fba181
groovy.tmp.templates.GStringTemplateScript50._getTemplate_closure1._closure519616433aeee416d
groovy.tmp.templates.GStringTemplateScript50._getTemplate_closure1._closure5._closure14efd177a2a6b019b9
groovy.tmp.templates.GStringTemplateScript50._getTemplate_closure1._closure6691e91b48bdd2ad2
groovy.tmp.templates.GStringTemplateScript50._getTemplate_closure1._closure78cb607f6afbb6652
groovy.tmp.templates.GStringTemplateScript50._getTemplate_closure1._closure7._closure1551b9ff25cbc6beb8
groovy.tmp.templates.GStringTemplateScript50._getTemplate_closure1._closure8fe5a23ecb260f920
groovy.tmp.templates.GStringTemplateScript50._getTemplate_closure1._closure91336dd6b625c1dd4
groovy.tmp.templates.GStringTemplateScript50._getTemplate_closure1._closure9._closure1688b76f75b531fd7b
groovy.tmp.templates.GStringTemplateScript50._getTemplate_closure1._closure9._closure17f2f142c4b85779eb
groovy.tmp.templates.GStringTemplateScript513f1ed87e86281fc1
groovy.tmp.templates.GStringTemplateScript51._getTemplate_closure1f6a7c04b7043a370
groovy.tmp.templates.GStringTemplateScript51._getTemplate_closure1._closure1029adb1b986e8b0ee
groovy.tmp.templates.GStringTemplateScript51._getTemplate_closure1._closure1167b31672c2ff13b3
groovy.tmp.templates.GStringTemplateScript51._getTemplate_closure1._closure12e98066cb1b79c8e2
groovy.tmp.templates.GStringTemplateScript51._getTemplate_closure1._closure13e68f70844f65fc73
groovy.tmp.templates.GStringTemplateScript51._getTemplate_closure1._closure13._closure267d3900d06b33cad4
groovy.tmp.templates.GStringTemplateScript51._getTemplate_closure1._closure13._closure27e5e4a05d1a766940
groovy.tmp.templates.GStringTemplateScript51._getTemplate_closure1._closure13._closure28032abaca8702188b
groovy.tmp.templates.GStringTemplateScript51._getTemplate_closure1._closure13._closure292e283c52064277a1
groovy.tmp.templates.GStringTemplateScript51._getTemplate_closure1._closure13._closure30941dad35d63607ac
groovy.tmp.templates.GStringTemplateScript51._getTemplate_closure1._closure13._closure31465b6c3dd45ef13e
groovy.tmp.templates.GStringTemplateScript51._getTemplate_closure1._closure2966f00e55f5152a0
groovy.tmp.templates.GStringTemplateScript51._getTemplate_closure1._closure35ea21419f0a7315f
groovy.tmp.templates.GStringTemplateScript51._getTemplate_closure1._closure4c528f19b335e1d74
groovy.tmp.templates.GStringTemplateScript51._getTemplate_closure1._closure50443e7ef8b025426
groovy.tmp.templates.GStringTemplateScript51._getTemplate_closure1._closure5._closure14cf338183b6b95f71
groovy.tmp.templates.GStringTemplateScript51._getTemplate_closure1._closure634f5fa343fe14882
groovy.tmp.templates.GStringTemplateScript51._getTemplate_closure1._closure7bfdad8f95fd4282a
groovy.tmp.templates.GStringTemplateScript51._getTemplate_closure1._closure7._closure15db0841f234fac799
groovy.tmp.templates.GStringTemplateScript51._getTemplate_closure1._closure844181d3574f937fe
groovy.tmp.templates.GStringTemplateScript51._getTemplate_closure1._closure9fcdc1b0286d37146
groovy.tmp.templates.GStringTemplateScript51._getTemplate_closure1._closure9._closure1644907cb4aae71260
groovy.tmp.templates.GStringTemplateScript51._getTemplate_closure1._closure9._closure176e2be75a73261135
groovy.tmp.templates.GStringTemplateScript52c802175c6c289bc3
groovy.tmp.templates.GStringTemplateScript52._getTemplate_closure16343b9f5f6d6be92
groovy.tmp.templates.GStringTemplateScript52._getTemplate_closure1._closure1039d30eeb5e0bbe40
groovy.tmp.templates.GStringTemplateScript52._getTemplate_closure1._closure11818368cd9027f0bd
groovy.tmp.templates.GStringTemplateScript52._getTemplate_closure1._closure12823db7b51121210b
groovy.tmp.templates.GStringTemplateScript52._getTemplate_closure1._closure134d9bde6036c95e06
groovy.tmp.templates.GStringTemplateScript52._getTemplate_closure1._closure13._closure26c8d8a010a25d8480
groovy.tmp.templates.GStringTemplateScript52._getTemplate_closure1._closure13._closure27d32f42c29dc3a355
groovy.tmp.templates.GStringTemplateScript52._getTemplate_closure1._closure13._closure28de7b4ced624fddaa
groovy.tmp.templates.GStringTemplateScript52._getTemplate_closure1._closure13._closure29720a7086ad19ccfe
groovy.tmp.templates.GStringTemplateScript52._getTemplate_closure1._closure13._closure30b12610acd9b2cbd7
groovy.tmp.templates.GStringTemplateScript52._getTemplate_closure1._closure13._closure31be32a26688040cf4
groovy.tmp.templates.GStringTemplateScript52._getTemplate_closure1._closure20f502c8f6ac80955
groovy.tmp.templates.GStringTemplateScript52._getTemplate_closure1._closure3cd59929ec95a95c5
groovy.tmp.templates.GStringTemplateScript52._getTemplate_closure1._closure4b18eccd802b0d86b
groovy.tmp.templates.GStringTemplateScript52._getTemplate_closure1._closure52324638be5366bfb
groovy.tmp.templates.GStringTemplateScript52._getTemplate_closure1._closure5._closure14ae149be086a29429
groovy.tmp.templates.GStringTemplateScript52._getTemplate_closure1._closure6d2c846b5e3a5ee72
groovy.tmp.templates.GStringTemplateScript52._getTemplate_closure1._closure7ea6fb9e94f65faa2
groovy.tmp.templates.GStringTemplateScript52._getTemplate_closure1._closure7._closure15f4da828a35be4cfb
groovy.tmp.templates.GStringTemplateScript52._getTemplate_closure1._closure83ade5e5f3f53649d
groovy.tmp.templates.GStringTemplateScript52._getTemplate_closure1._closure97ce351b8ab42c4f1
groovy.tmp.templates.GStringTemplateScript52._getTemplate_closure1._closure9._closure16a0f948f78a9c234c
groovy.tmp.templates.GStringTemplateScript52._getTemplate_closure1._closure9._closure177b4409f92eb5a856
groovy.tmp.templates.GStringTemplateScript539af6524235d7183d
groovy.tmp.templates.GStringTemplateScript53._getTemplate_closure1101f916074a5b5cc
groovy.tmp.templates.GStringTemplateScript53._getTemplate_closure1._closure1036069bdae9aabbda
groovy.tmp.templates.GStringTemplateScript53._getTemplate_closure1._closure114c6cbd58a19051b8
groovy.tmp.templates.GStringTemplateScript53._getTemplate_closure1._closure11._closure248615e2c0cd8fe1b9
groovy.tmp.templates.GStringTemplateScript53._getTemplate_closure1._closure11._closure25ac4d29b52fa28702
groovy.tmp.templates.GStringTemplateScript53._getTemplate_closure1._closure12a4a90760e8e979ac
groovy.tmp.templates.GStringTemplateScript53._getTemplate_closure1._closure13bb68443c1e52c02a
groovy.tmp.templates.GStringTemplateScript53._getTemplate_closure1._closure13._closure2634783faf1a87beb3
groovy.tmp.templates.GStringTemplateScript53._getTemplate_closure1._closure13._closure26._closure32fb98d6c9aa414dc1
groovy.tmp.templates.GStringTemplateScript53._getTemplate_closure1._closure13._closure275169e3481f50e559
groovy.tmp.templates.GStringTemplateScript53._getTemplate_closure1._closure13._closure28954be10fc1749eb5
groovy.tmp.templates.GStringTemplateScript53._getTemplate_closure1._closure13._closure29d6144b35342f5a34
groovy.tmp.templates.GStringTemplateScript53._getTemplate_closure1._closure13._closure29._closure332736f7779cbabef0
groovy.tmp.templates.GStringTemplateScript53._getTemplate_closure1._closure13._closure30adcf7bdbdcce8ffe
groovy.tmp.templates.GStringTemplateScript53._getTemplate_closure1._closure13._closure31e9ea185043cda7b2
groovy.tmp.templates.GStringTemplateScript53._getTemplate_closure1._closure2784537568640c006
groovy.tmp.templates.GStringTemplateScript53._getTemplate_closure1._closure3bc0f10e3de0e09b3
groovy.tmp.templates.GStringTemplateScript53._getTemplate_closure1._closure49decd8191215649e
groovy.tmp.templates.GStringTemplateScript53._getTemplate_closure1._closure53e06e057c0da7eb0
groovy.tmp.templates.GStringTemplateScript53._getTemplate_closure1._closure5._closure148ef66dc196abd2e1
groovy.tmp.templates.GStringTemplateScript53._getTemplate_closure1._closure68f232d3557998c22
groovy.tmp.templates.GStringTemplateScript53._getTemplate_closure1._closure7d90366e6bf0ab4da
groovy.tmp.templates.GStringTemplateScript53._getTemplate_closure1._closure7._closure157e6b3c5dca8235da
groovy.tmp.templates.GStringTemplateScript53._getTemplate_closure1._closure8809c6086f9caaa43
groovy.tmp.templates.GStringTemplateScript53._getTemplate_closure1._closure9930997d14fcda863
groovy.tmp.templates.GStringTemplateScript53._getTemplate_closure1._closure9._closure166cde5b36954acc57
groovy.tmp.templates.GStringTemplateScript53._getTemplate_closure1._closure9._closure17e79eac67e5c4c088
groovy.tmp.templates.GStringTemplateScript54963b8919b82993c6
groovy.tmp.templates.GStringTemplateScript54._getTemplate_closure1f88b4a88fbfc8557
groovy.tmp.templates.GStringTemplateScript54._getTemplate_closure1._closure10192e704eefcda31c
groovy.tmp.templates.GStringTemplateScript54._getTemplate_closure1._closure11fde395b3359636a0
groovy.tmp.templates.GStringTemplateScript54._getTemplate_closure1._closure11._closure24c3e2400d91ce9233
groovy.tmp.templates.GStringTemplateScript54._getTemplate_closure1._closure11._closure2515ddb3c5665e7c5a
groovy.tmp.templates.GStringTemplateScript54._getTemplate_closure1._closure12554615490590f2d9
groovy.tmp.templates.GStringTemplateScript54._getTemplate_closure1._closure13abb283a8c5901aed
groovy.tmp.templates.GStringTemplateScript54._getTemplate_closure1._closure13._closure26131be19130811829
groovy.tmp.templates.GStringTemplateScript54._getTemplate_closure1._closure13._closure27beb887fd92a8377f
groovy.tmp.templates.GStringTemplateScript54._getTemplate_closure1._closure13._closure28d4d8a0a2a8d457e9
groovy.tmp.templates.GStringTemplateScript54._getTemplate_closure1._closure13._closure29ca4ee92ffbaeba40
groovy.tmp.templates.GStringTemplateScript54._getTemplate_closure1._closure13._closure30fb516b9ec6bb5321
groovy.tmp.templates.GStringTemplateScript54._getTemplate_closure1._closure13._closure31fee13ed030b1f761
groovy.tmp.templates.GStringTemplateScript54._getTemplate_closure1._closure28d2e745b01fabebe
groovy.tmp.templates.GStringTemplateScript54._getTemplate_closure1._closure35aae9f90baa1dcf0
groovy.tmp.templates.GStringTemplateScript54._getTemplate_closure1._closure458c2b65e616d5255
groovy.tmp.templates.GStringTemplateScript54._getTemplate_closure1._closure56deb6b43395e1441
groovy.tmp.templates.GStringTemplateScript54._getTemplate_closure1._closure5._closure146c5aaf26e6950299
groovy.tmp.templates.GStringTemplateScript54._getTemplate_closure1._closure6aeb33fb65b2ca393
groovy.tmp.templates.GStringTemplateScript54._getTemplate_closure1._closure741057bc96e065fb2
groovy.tmp.templates.GStringTemplateScript54._getTemplate_closure1._closure7._closure15ab7f047a37375a3f
groovy.tmp.templates.GStringTemplateScript54._getTemplate_closure1._closure8c752d88ba807c25b
groovy.tmp.templates.GStringTemplateScript54._getTemplate_closure1._closure9cc9dc4ccf061af9e
groovy.tmp.templates.GStringTemplateScript54._getTemplate_closure1._closure9._closure16d82b2071ca6a4115
groovy.tmp.templates.GStringTemplateScript54._getTemplate_closure1._closure9._closure17519bd4bf9592da90
groovy.tmp.templates.GStringTemplateScript55c4cfcc07e1d61038
groovy.tmp.templates.GStringTemplateScript55._getTemplate_closure18bd7621d798f8e09
groovy.tmp.templates.GStringTemplateScript55._getTemplate_closure1._closure1016fbe57f586ca686
groovy.tmp.templates.GStringTemplateScript55._getTemplate_closure1._closure11300c4026042197a5
groovy.tmp.templates.GStringTemplateScript55._getTemplate_closure1._closure11._closure2429737b9455d582b0
groovy.tmp.templates.GStringTemplateScript55._getTemplate_closure1._closure11._closure252b5b5b0e13a134f2
groovy.tmp.templates.GStringTemplateScript55._getTemplate_closure1._closure1273d2a59cfc58aa7e
groovy.tmp.templates.GStringTemplateScript55._getTemplate_closure1._closure135d4119f4ed0b84c1
groovy.tmp.templates.GStringTemplateScript55._getTemplate_closure1._closure13._closure26efbb7e2e885b221a
groovy.tmp.templates.GStringTemplateScript55._getTemplate_closure1._closure13._closure273cfe2677103b7173
groovy.tmp.templates.GStringTemplateScript55._getTemplate_closure1._closure13._closure289fe80d400bef14f6
groovy.tmp.templates.GStringTemplateScript55._getTemplate_closure1._closure13._closure296e50d29c62982c8a
groovy.tmp.templates.GStringTemplateScript55._getTemplate_closure1._closure13._closure30e7b800e9c3c71708
groovy.tmp.templates.GStringTemplateScript55._getTemplate_closure1._closure13._closure31a93984e6fb785c27
groovy.tmp.templates.GStringTemplateScript55._getTemplate_closure1._closure2fa3b6f82ed7277ed
groovy.tmp.templates.GStringTemplateScript55._getTemplate_closure1._closure32bf81dedadf54086
groovy.tmp.templates.GStringTemplateScript55._getTemplate_closure1._closure474a0a29f71c8eea0
groovy.tmp.templates.GStringTemplateScript55._getTemplate_closure1._closure570c9e89f1cb2010a
groovy.tmp.templates.GStringTemplateScript55._getTemplate_closure1._closure5._closure144cb85907f69c4451
groovy.tmp.templates.GStringTemplateScript55._getTemplate_closure1._closure6f3585436ef10c1c3
groovy.tmp.templates.GStringTemplateScript55._getTemplate_closure1._closure77269a4c69e6911ca
groovy.tmp.templates.GStringTemplateScript55._getTemplate_closure1._closure87d10e6526e9e0c85
groovy.tmp.templates.GStringTemplateScript55._getTemplate_closure1._closure9237702a514eec30c
groovy.tmp.templates.GStringTemplateScript55._getTemplate_closure1._closure9._closure16140c33b0d5bcae0e
groovy.tmp.templates.GStringTemplateScript55._getTemplate_closure1._closure9._closure17cd4171215ee3b24e
groovy.tmp.templates.GStringTemplateScript5633d303250bd6943a
groovy.tmp.templates.GStringTemplateScript56._getTemplate_closure11e331ba3ff1a93eb
groovy.tmp.templates.GStringTemplateScript56._getTemplate_closure1._closure1006855a2d808fa828
groovy.tmp.templates.GStringTemplateScript56._getTemplate_closure1._closure11d63c3e9956f974ab
groovy.tmp.templates.GStringTemplateScript56._getTemplate_closure1._closure12186f74e2f6004397
groovy.tmp.templates.GStringTemplateScript56._getTemplate_closure1._closure13f655b71094a726b4
groovy.tmp.templates.GStringTemplateScript56._getTemplate_closure1._closure13._closure265a5adeee41356c4e
groovy.tmp.templates.GStringTemplateScript56._getTemplate_closure1._closure13._closure270a35c4e8978ebb66
groovy.tmp.templates.GStringTemplateScript56._getTemplate_closure1._closure13._closure2842b9fb67eea2d1d7
groovy.tmp.templates.GStringTemplateScript56._getTemplate_closure1._closure13._closure2932729e48c9c397d5
groovy.tmp.templates.GStringTemplateScript56._getTemplate_closure1._closure13._closure30c283bd70cc43db73
groovy.tmp.templates.GStringTemplateScript56._getTemplate_closure1._closure13._closure3151504abda722a1ed
groovy.tmp.templates.GStringTemplateScript56._getTemplate_closure1._closure2630443e8d8eb2c18
groovy.tmp.templates.GStringTemplateScript56._getTemplate_closure1._closure3b8039b6a9408e41c
groovy.tmp.templates.GStringTemplateScript56._getTemplate_closure1._closure400069fdc40262bbf
groovy.tmp.templates.GStringTemplateScript56._getTemplate_closure1._closure557ae6cfb72863ed7
groovy.tmp.templates.GStringTemplateScript56._getTemplate_closure1._closure5._closure142d9f4364c6878f09
groovy.tmp.templates.GStringTemplateScript56._getTemplate_closure1._closure61565e8b733546733
groovy.tmp.templates.GStringTemplateScript56._getTemplate_closure1._closure727dcc5d68ed8c342
groovy.tmp.templates.GStringTemplateScript56._getTemplate_closure1._closure803d6a53825345fe6
groovy.tmp.templates.GStringTemplateScript56._getTemplate_closure1._closure9a348481f397f76bb
groovy.tmp.templates.GStringTemplateScript56._getTemplate_closure1._closure9._closure16f06507f3f5c79f22
groovy.tmp.templates.GStringTemplateScript56._getTemplate_closure1._closure9._closure17d82e9f8203700b2d
groovy.tmp.templates.GStringTemplateScript576127463b522917c4
groovy.tmp.templates.GStringTemplateScript57._getTemplate_closure16d6f33367d6998b5
groovy.tmp.templates.GStringTemplateScript57._getTemplate_closure1._closure100950cf1c372eadb2
groovy.tmp.templates.GStringTemplateScript57._getTemplate_closure1._closure111bd3eb0c674ed5ae
groovy.tmp.templates.GStringTemplateScript57._getTemplate_closure1._closure123efbc4370fc81b30
groovy.tmp.templates.GStringTemplateScript57._getTemplate_closure1._closure1300a62d4cbc3cb898
groovy.tmp.templates.GStringTemplateScript57._getTemplate_closure1._closure13._closure26a6fa4151f9ef567d
groovy.tmp.templates.GStringTemplateScript57._getTemplate_closure1._closure13._closure26._closure32dab4fb46a2dd896a
groovy.tmp.templates.GStringTemplateScript57._getTemplate_closure1._closure13._closure2788736562151dfd6a
groovy.tmp.templates.GStringTemplateScript57._getTemplate_closure1._closure13._closure28098956854d9992c8
groovy.tmp.templates.GStringTemplateScript57._getTemplate_closure1._closure13._closure29966ca5fb50f5011f
groovy.tmp.templates.GStringTemplateScript57._getTemplate_closure1._closure13._closure29._closure33492399f7cadfceb2
groovy.tmp.templates.GStringTemplateScript57._getTemplate_closure1._closure13._closure30de6ad607c93f9f5a
groovy.tmp.templates.GStringTemplateScript57._getTemplate_closure1._closure13._closure310688f08b6ceb0aab
groovy.tmp.templates.GStringTemplateScript57._getTemplate_closure1._closure2141158313463e54b
groovy.tmp.templates.GStringTemplateScript57._getTemplate_closure1._closure3c9551917835c786a
groovy.tmp.templates.GStringTemplateScript57._getTemplate_closure1._closure42c648b1d5083974a
groovy.tmp.templates.GStringTemplateScript57._getTemplate_closure1._closure54a8cef27576a2b9c
groovy.tmp.templates.GStringTemplateScript57._getTemplate_closure1._closure5._closure140d7db545d68ec9c1
groovy.tmp.templates.GStringTemplateScript57._getTemplate_closure1._closure6488e833787680563
groovy.tmp.templates.GStringTemplateScript57._getTemplate_closure1._closure714b01ad97eb78d3a
groovy.tmp.templates.GStringTemplateScript57._getTemplate_closure1._closure7._closure1584adc7023673d15d
groovy.tmp.templates.GStringTemplateScript57._getTemplate_closure1._closure8b9949be1e3ad9138
groovy.tmp.templates.GStringTemplateScript57._getTemplate_closure1._closure94ca28e76ddf01a29
groovy.tmp.templates.GStringTemplateScript57._getTemplate_closure1._closure9._closure163c421432ea117039
groovy.tmp.templates.GStringTemplateScript57._getTemplate_closure1._closure9._closure1744f43a1cc80163f3
groovy.tmp.templates.GStringTemplateScript582a48b592102b83cc
groovy.tmp.templates.GStringTemplateScript58._getTemplate_closure17f1aac72e1a8f2dc
groovy.tmp.templates.GStringTemplateScript58._getTemplate_closure1._closure1058d48d058c4199a4
groovy.tmp.templates.GStringTemplateScript58._getTemplate_closure1._closure1105226f4e7ef5ba9a
groovy.tmp.templates.GStringTemplateScript58._getTemplate_closure1._closure11._closure242d2f72a4a17a5420
groovy.tmp.templates.GStringTemplateScript58._getTemplate_closure1._closure11._closure25abf156b31e591bbb
groovy.tmp.templates.GStringTemplateScript58._getTemplate_closure1._closure124bb150b12cf3557c
groovy.tmp.templates.GStringTemplateScript58._getTemplate_closure1._closure13d7e038392322933a
groovy.tmp.templates.GStringTemplateScript58._getTemplate_closure1._closure13._closure26149d62921538217a
groovy.tmp.templates.GStringTemplateScript58._getTemplate_closure1._closure13._closure2765970d838c7f1f2b
groovy.tmp.templates.GStringTemplateScript58._getTemplate_closure1._closure13._closure28c19f783d3de3436f
groovy.tmp.templates.GStringTemplateScript58._getTemplate_closure1._closure13._closure290ac7da7d56c0573d
groovy.tmp.templates.GStringTemplateScript58._getTemplate_closure1._closure13._closure306fbf9dfaf8a862cd
groovy.tmp.templates.GStringTemplateScript58._getTemplate_closure1._closure13._closure317f4607bd41da004b
groovy.tmp.templates.GStringTemplateScript58._getTemplate_closure1._closure239d2c5f3d79fd169
groovy.tmp.templates.GStringTemplateScript58._getTemplate_closure1._closure3c540858c5d574e9b
groovy.tmp.templates.GStringTemplateScript58._getTemplate_closure1._closure43a5a4352a6d64628
groovy.tmp.templates.GStringTemplateScript58._getTemplate_closure1._closure5f0757ad2818eeb35
groovy.tmp.templates.GStringTemplateScript58._getTemplate_closure1._closure5._closure1458c6c6aa26fa2ff8
groovy.tmp.templates.GStringTemplateScript58._getTemplate_closure1._closure65645cdb12a3e3851
groovy.tmp.templates.GStringTemplateScript58._getTemplate_closure1._closure7a7d0ff892cc11593
groovy.tmp.templates.GStringTemplateScript58._getTemplate_closure1._closure88c4bd52286ae8fd6
groovy.tmp.templates.GStringTemplateScript58._getTemplate_closure1._closure91c60ee2446277941
groovy.tmp.templates.GStringTemplateScript58._getTemplate_closure1._closure9._closure16298ff17d4b8685a7
groovy.tmp.templates.GStringTemplateScript58._getTemplate_closure1._closure9._closure1704246e32e3dc3f1c
groovy.tmp.templates.GStringTemplateScript5978bcf08c49d40032
groovy.tmp.templates.GStringTemplateScript59._getTemplate_closure10c4684e763dbf982
groovy.tmp.templates.GStringTemplateScript59._getTemplate_closure1._closure10570118343be09c3e
groovy.tmp.templates.GStringTemplateScript59._getTemplate_closure1._closure11c8cdbadb4f421b9f
groovy.tmp.templates.GStringTemplateScript59._getTemplate_closure1._closure11._closure24c7be493d656144a3
groovy.tmp.templates.GStringTemplateScript59._getTemplate_closure1._closure11._closure259577be786ba65313
groovy.tmp.templates.GStringTemplateScript59._getTemplate_closure1._closure126d25e064d53b0ddb
groovy.tmp.templates.GStringTemplateScript59._getTemplate_closure1._closure132113a2650bb90d16
groovy.tmp.templates.GStringTemplateScript59._getTemplate_closure1._closure13._closure26e83dfd2dade21b49
groovy.tmp.templates.GStringTemplateScript59._getTemplate_closure1._closure13._closure27e7d1ac090eec5927
groovy.tmp.templates.GStringTemplateScript59._getTemplate_closure1._closure13._closure288aafd5df9ed80070
groovy.tmp.templates.GStringTemplateScript59._getTemplate_closure1._closure13._closure29aed9e1cecff6c1f7
groovy.tmp.templates.GStringTemplateScript59._getTemplate_closure1._closure13._closure307356f68dfdd426e4
groovy.tmp.templates.GStringTemplateScript59._getTemplate_closure1._closure13._closure31289ebd8b8a13ab0d
groovy.tmp.templates.GStringTemplateScript59._getTemplate_closure1._closure24ec7de2a3b17183a
groovy.tmp.templates.GStringTemplateScript59._getTemplate_closure1._closure3b41607f14a03d2ed
groovy.tmp.templates.GStringTemplateScript59._getTemplate_closure1._closure416385793b673fadd
groovy.tmp.templates.GStringTemplateScript59._getTemplate_closure1._closure5ed57f90ea462fe7e
groovy.tmp.templates.GStringTemplateScript59._getTemplate_closure1._closure5._closure147824308b36f36930
groovy.tmp.templates.GStringTemplateScript59._getTemplate_closure1._closure60baea6319e025a01
groovy.tmp.templates.GStringTemplateScript59._getTemplate_closure1._closure794bc2086dcae5beb
groovy.tmp.templates.GStringTemplateScript59._getTemplate_closure1._closure7._closure159e85b74dcd190e96
groovy.tmp.templates.GStringTemplateScript59._getTemplate_closure1._closure83609ebfb40374108
groovy.tmp.templates.GStringTemplateScript59._getTemplate_closure1._closure9f38a284da2a815d3
groovy.tmp.templates.GStringTemplateScript59._getTemplate_closure1._closure9._closure16e5a8e2bc54506abc
groovy.tmp.templates.GStringTemplateScript59._getTemplate_closure1._closure9._closure1798fecbac28ad57c2
groovy.tmp.templates.GStringTemplateScript62bed8daec9895dad
groovy.tmp.templates.GStringTemplateScript6._getTemplate_closure1dc4d6bd79fcd04cd
groovy.tmp.templates.GStringTemplateScript6._getTemplate_closure1._closure10cb4465a6dcf45d3b
groovy.tmp.templates.GStringTemplateScript6._getTemplate_closure1._closure1113bbb6bce3451284
groovy.tmp.templates.GStringTemplateScript6._getTemplate_closure1._closure1213f734802c225a18
groovy.tmp.templates.GStringTemplateScript6._getTemplate_closure1._closure13a084c2fbbf6f9189
groovy.tmp.templates.GStringTemplateScript6._getTemplate_closure1._closure13._closure26797c33eda66069f2
groovy.tmp.templates.GStringTemplateScript6._getTemplate_closure1._closure13._closure27ba75af7ca32a2776
groovy.tmp.templates.GStringTemplateScript6._getTemplate_closure1._closure13._closure28e9c9fba103e7f2a0
groovy.tmp.templates.GStringTemplateScript6._getTemplate_closure1._closure13._closure29db104e2cd7f3166a
groovy.tmp.templates.GStringTemplateScript6._getTemplate_closure1._closure13._closure303e2ac3674964b4b0
groovy.tmp.templates.GStringTemplateScript6._getTemplate_closure1._closure13._closure31ee06090f6ea85ac3
groovy.tmp.templates.GStringTemplateScript6._getTemplate_closure1._closure297b34c2cff7f67b0
groovy.tmp.templates.GStringTemplateScript6._getTemplate_closure1._closure3555725fcc733bc1f
groovy.tmp.templates.GStringTemplateScript6._getTemplate_closure1._closure4eda4e762da212d70
groovy.tmp.templates.GStringTemplateScript6._getTemplate_closure1._closure562792be63e17e9bf
groovy.tmp.templates.GStringTemplateScript6._getTemplate_closure1._closure5._closure14c8818c9a22914297
groovy.tmp.templates.GStringTemplateScript6._getTemplate_closure1._closure69b5041cf4ecd133d
groovy.tmp.templates.GStringTemplateScript6._getTemplate_closure1._closure7ba8339a57b65aaf1
groovy.tmp.templates.GStringTemplateScript6._getTemplate_closure1._closure7._closure1553041fc663b39c3b
groovy.tmp.templates.GStringTemplateScript6._getTemplate_closure1._closure8425b1b97b4134860
groovy.tmp.templates.GStringTemplateScript6._getTemplate_closure1._closure935300075419f2d8d
groovy.tmp.templates.GStringTemplateScript6._getTemplate_closure1._closure9._closure16c241ca9ddc85ffc1
groovy.tmp.templates.GStringTemplateScript6._getTemplate_closure1._closure9._closure17726816dc369d34e3
groovy.tmp.templates.GStringTemplateScript606e7d81affd3d9cbb
groovy.tmp.templates.GStringTemplateScript60._getTemplate_closure1040e0ca74cb63d33
groovy.tmp.templates.GStringTemplateScript60._getTemplate_closure1._closure10c0485a376391567a
groovy.tmp.templates.GStringTemplateScript60._getTemplate_closure1._closure10._closure22b16720f2c1da07e5
groovy.tmp.templates.GStringTemplateScript60._getTemplate_closure1._closure10._closure23875423f332abcc13
groovy.tmp.templates.GStringTemplateScript60._getTemplate_closure1._closure11a29af3994c1a6a55
groovy.tmp.templates.GStringTemplateScript60._getTemplate_closure1._closure12414f6bcf9cbbc8ac
groovy.tmp.templates.GStringTemplateScript60._getTemplate_closure1._closure1356a7fe768387cefd
groovy.tmp.templates.GStringTemplateScript60._getTemplate_closure1._closure13._closure26f0ec7ecf13209ea9
groovy.tmp.templates.GStringTemplateScript60._getTemplate_closure1._closure13._closure277be4ca3507629a86
groovy.tmp.templates.GStringTemplateScript60._getTemplate_closure1._closure13._closure287f7746de03dc1651
groovy.tmp.templates.GStringTemplateScript60._getTemplate_closure1._closure13._closure29f4fa25ad4bdfbad0
groovy.tmp.templates.GStringTemplateScript60._getTemplate_closure1._closure13._closure30c681fdff4a45c749
groovy.tmp.templates.GStringTemplateScript60._getTemplate_closure1._closure13._closure31ab9bbfc544cb0085
groovy.tmp.templates.GStringTemplateScript60._getTemplate_closure1._closure27f932410d9ec02a8
groovy.tmp.templates.GStringTemplateScript60._getTemplate_closure1._closure3e2876de260ca508d
groovy.tmp.templates.GStringTemplateScript60._getTemplate_closure1._closure4fbae436760ca4f44
groovy.tmp.templates.GStringTemplateScript60._getTemplate_closure1._closure5bcb603b7ca807552
groovy.tmp.templates.GStringTemplateScript60._getTemplate_closure1._closure5._closure14953050b8c5800272
groovy.tmp.templates.GStringTemplateScript60._getTemplate_closure1._closure6f0f8ac080a016e74
groovy.tmp.templates.GStringTemplateScript60._getTemplate_closure1._closure75963b297bfabd780
groovy.tmp.templates.GStringTemplateScript60._getTemplate_closure1._closure7._closure1508f62de6b3c7fa33
groovy.tmp.templates.GStringTemplateScript60._getTemplate_closure1._closure8a6f4e5afd82b5373
groovy.tmp.templates.GStringTemplateScript60._getTemplate_closure1._closure9e2a6e221d8718c61
groovy.tmp.templates.GStringTemplateScript* … (generated JaCoCo class-ID listing for the GStringTemplateScript classes and their nested _getTemplate_closure* entries in the committed coverage report; omitted here)
groovy.tmp.templates.GStringTemplateScript83._getTemplate_closure1._closure4482222f8dfc27a36
groovy.tmp.templates.GStringTemplateScript83._getTemplate_closure1._closure51a165cc58d47425a
groovy.tmp.templates.GStringTemplateScript83._getTemplate_closure1._closure5._closure146cd2135e3b7bb48a
groovy.tmp.templates.GStringTemplateScript83._getTemplate_closure1._closure64fe630acd05ca2db
groovy.tmp.templates.GStringTemplateScript83._getTemplate_closure1._closure7a2e6dfbdf0b81cde
groovy.tmp.templates.GStringTemplateScript83._getTemplate_closure1._closure7._closure159c6ec6eefd781b8e
groovy.tmp.templates.GStringTemplateScript83._getTemplate_closure1._closure8f5bd3bb589229ac0
groovy.tmp.templates.GStringTemplateScript83._getTemplate_closure1._closure9f5c6803dce9f61d9
groovy.tmp.templates.GStringTemplateScript83._getTemplate_closure1._closure9._closure168ed3d8f458d4f67c
groovy.tmp.templates.GStringTemplateScript83._getTemplate_closure1._closure9._closure179337c978f679ff05
groovy.tmp.templates.GStringTemplateScript8409150e602dd86e55
groovy.tmp.templates.GStringTemplateScript84._getTemplate_closure1500f87469464a22b
groovy.tmp.templates.GStringTemplateScript84._getTemplate_closure1._closure102c005f2695198e23
groovy.tmp.templates.GStringTemplateScript84._getTemplate_closure1._closure11d946bb9c5dece28d
groovy.tmp.templates.GStringTemplateScript84._getTemplate_closure1._closure11._closure2400e61c04a13a3de5
groovy.tmp.templates.GStringTemplateScript84._getTemplate_closure1._closure11._closure25756d19c8db538475
groovy.tmp.templates.GStringTemplateScript84._getTemplate_closure1._closure12561e76bc28405926
groovy.tmp.templates.GStringTemplateScript84._getTemplate_closure1._closure133d682289085e33fa
groovy.tmp.templates.GStringTemplateScript84._getTemplate_closure1._closure13._closure2649e0c78d8c1c7b2a
groovy.tmp.templates.GStringTemplateScript84._getTemplate_closure1._closure13._closure26._closure3205a6d99f166f5869
groovy.tmp.templates.GStringTemplateScript84._getTemplate_closure1._closure13._closure27c461eed666347311
groovy.tmp.templates.GStringTemplateScript84._getTemplate_closure1._closure13._closure281bb6d7d7d5e25bbe
groovy.tmp.templates.GStringTemplateScript84._getTemplate_closure1._closure13._closure293b3a7e271a9ae23b
groovy.tmp.templates.GStringTemplateScript84._getTemplate_closure1._closure13._closure29._closure33177dcfffd49b1c4f
groovy.tmp.templates.GStringTemplateScript84._getTemplate_closure1._closure13._closure3048a96c03d5803c54
groovy.tmp.templates.GStringTemplateScript84._getTemplate_closure1._closure13._closure31af7741536b0b553d
groovy.tmp.templates.GStringTemplateScript84._getTemplate_closure1._closure2532d9df170c05318
groovy.tmp.templates.GStringTemplateScript84._getTemplate_closure1._closure34bb02708db517efe
groovy.tmp.templates.GStringTemplateScript84._getTemplate_closure1._closure48d0c4cbfacba4cfd
groovy.tmp.templates.GStringTemplateScript84._getTemplate_closure1._closure549fbd7d174c328ab
groovy.tmp.templates.GStringTemplateScript84._getTemplate_closure1._closure5._closure148e7ed1b94b4564f2
groovy.tmp.templates.GStringTemplateScript84._getTemplate_closure1._closure66e76222fdce98d6a
groovy.tmp.templates.GStringTemplateScript84._getTemplate_closure1._closure73ae0c29221b4f7b6
groovy.tmp.templates.GStringTemplateScript84._getTemplate_closure1._closure7._closure15497afec900cd746b
groovy.tmp.templates.GStringTemplateScript84._getTemplate_closure1._closure8b27383b8d8eff2d8
groovy.tmp.templates.GStringTemplateScript84._getTemplate_closure1._closure9aa52d32071336624
groovy.tmp.templates.GStringTemplateScript84._getTemplate_closure1._closure9._closure163a26a3b307f47b3e
groovy.tmp.templates.GStringTemplateScript84._getTemplate_closure1._closure9._closure172532b1a0862fe51d
groovy.tmp.templates.GStringTemplateScript855be14b7e7427edab
groovy.tmp.templates.GStringTemplateScript85._getTemplate_closure12353afd31617a975
groovy.tmp.templates.GStringTemplateScript85._getTemplate_closure1._closure1023d5ca1722b88bb9
groovy.tmp.templates.GStringTemplateScript85._getTemplate_closure1._closure1114a96e096c5b4388
groovy.tmp.templates.GStringTemplateScript85._getTemplate_closure1._closure12708ac669d1880181
groovy.tmp.templates.GStringTemplateScript85._getTemplate_closure1._closure13cb9bb8d520c5add6
groovy.tmp.templates.GStringTemplateScript85._getTemplate_closure1._closure13._closure26b540583234c64119
groovy.tmp.templates.GStringTemplateScript85._getTemplate_closure1._closure13._closure26._closure32b9edd2fcd4486943
groovy.tmp.templates.GStringTemplateScript85._getTemplate_closure1._closure13._closure2746274f5ce4a7351d
groovy.tmp.templates.GStringTemplateScript85._getTemplate_closure1._closure13._closure2850867a3576d918a1
groovy.tmp.templates.GStringTemplateScript85._getTemplate_closure1._closure13._closure299f24459483ac74f1
groovy.tmp.templates.GStringTemplateScript85._getTemplate_closure1._closure13._closure29._closure33d4f8945fc102405f
groovy.tmp.templates.GStringTemplateScript85._getTemplate_closure1._closure13._closure3054400774d0fc787d
groovy.tmp.templates.GStringTemplateScript85._getTemplate_closure1._closure13._closure31f8affb65a0c2fe7b
groovy.tmp.templates.GStringTemplateScript85._getTemplate_closure1._closure2243886289c489a4b
groovy.tmp.templates.GStringTemplateScript85._getTemplate_closure1._closure33ae6a575cc05e288
groovy.tmp.templates.GStringTemplateScript85._getTemplate_closure1._closure4a16e587ebc1ff008
groovy.tmp.templates.GStringTemplateScript85._getTemplate_closure1._closure554d9540d512f3de0
groovy.tmp.templates.GStringTemplateScript85._getTemplate_closure1._closure5._closure14ae9c27985b4c223a
groovy.tmp.templates.GStringTemplateScript85._getTemplate_closure1._closure6339d49af68d5ef3a
groovy.tmp.templates.GStringTemplateScript85._getTemplate_closure1._closure7098c1d9dd1dbb9ce
groovy.tmp.templates.GStringTemplateScript85._getTemplate_closure1._closure7._closure15c3cb401efff10d4a
groovy.tmp.templates.GStringTemplateScript85._getTemplate_closure1._closure80831bd611e763c06
groovy.tmp.templates.GStringTemplateScript85._getTemplate_closure1._closure945b8154995bc0ab6
groovy.tmp.templates.GStringTemplateScript85._getTemplate_closure1._closure9._closure16f601b07218229425
groovy.tmp.templates.GStringTemplateScript85._getTemplate_closure1._closure9._closure17b9e8143e4d5e8dc3
groovy.tmp.templates.GStringTemplateScript86acfd845c9e2769a9
groovy.tmp.templates.GStringTemplateScript86._getTemplate_closure1b6b7d66d9082b497
groovy.tmp.templates.GStringTemplateScript86._getTemplate_closure1._closure1033ab7545fa5b8517
groovy.tmp.templates.GStringTemplateScript86._getTemplate_closure1._closure11f29910b63e83a086
groovy.tmp.templates.GStringTemplateScript86._getTemplate_closure1._closure121b371717dbd0e868
groovy.tmp.templates.GStringTemplateScript86._getTemplate_closure1._closure13608f163159690fa3
groovy.tmp.templates.GStringTemplateScript86._getTemplate_closure1._closure13._closure2600a1f8f2fda80f4d
groovy.tmp.templates.GStringTemplateScript86._getTemplate_closure1._closure13._closure26._closure32cd30cf5892213a3c
groovy.tmp.templates.GStringTemplateScript86._getTemplate_closure1._closure13._closure2770ecadc36312ff08
groovy.tmp.templates.GStringTemplateScript86._getTemplate_closure1._closure13._closure288dd78c129394dd80
groovy.tmp.templates.GStringTemplateScript86._getTemplate_closure1._closure13._closure29c306094028f7cfae
groovy.tmp.templates.GStringTemplateScript86._getTemplate_closure1._closure13._closure29._closure33207778bfffa9a46e
groovy.tmp.templates.GStringTemplateScript86._getTemplate_closure1._closure13._closure30717bbaeddf78b406
groovy.tmp.templates.GStringTemplateScript86._getTemplate_closure1._closure13._closure3100c6353efc9803b1
groovy.tmp.templates.GStringTemplateScript86._getTemplate_closure1._closure2bd07aa42a9d1c1be
groovy.tmp.templates.GStringTemplateScript86._getTemplate_closure1._closure3a91d23f2f5f84612
groovy.tmp.templates.GStringTemplateScript86._getTemplate_closure1._closure4d5c8653d8df13517
groovy.tmp.templates.GStringTemplateScript86._getTemplate_closure1._closure573bed0693f1b023d
groovy.tmp.templates.GStringTemplateScript86._getTemplate_closure1._closure5._closure14cfbb3dfb6b57e962
groovy.tmp.templates.GStringTemplateScript86._getTemplate_closure1._closure6d5a0f52eb49149ca
groovy.tmp.templates.GStringTemplateScript86._getTemplate_closure1._closure75c397c8dc16a6b46
groovy.tmp.templates.GStringTemplateScript86._getTemplate_closure1._closure7._closure15ec198366feb58628
groovy.tmp.templates.GStringTemplateScript86._getTemplate_closure1._closure876f7fe0b55dc6f65
groovy.tmp.templates.GStringTemplateScript86._getTemplate_closure1._closure9c5875ff3b82dbf01
groovy.tmp.templates.GStringTemplateScript86._getTemplate_closure1._closure9._closure16126884313859a509
groovy.tmp.templates.GStringTemplateScript86._getTemplate_closure1._closure9._closure17ac87fa9d10cd34a0
groovy.tmp.templates.GStringTemplateScript87fe09c142c7d8ea57
groovy.tmp.templates.GStringTemplateScript87._getTemplate_closure1c5ebfef812f1bfc9
groovy.tmp.templates.GStringTemplateScript87._getTemplate_closure1._closure103c7ee0744dfa808d
groovy.tmp.templates.GStringTemplateScript87._getTemplate_closure1._closure113f76c5230f340183
groovy.tmp.templates.GStringTemplateScript87._getTemplate_closure1._closure123da3a7c22218b0cf
groovy.tmp.templates.GStringTemplateScript87._getTemplate_closure1._closure13967c8c6d71f2918f
groovy.tmp.templates.GStringTemplateScript87._getTemplate_closure1._closure13._closure26fc01674d4572357e
groovy.tmp.templates.GStringTemplateScript87._getTemplate_closure1._closure13._closure26._closure32717bc43b50060b16
groovy.tmp.templates.GStringTemplateScript87._getTemplate_closure1._closure13._closure27f2aa0c49e181b904
groovy.tmp.templates.GStringTemplateScript87._getTemplate_closure1._closure13._closure28c6e721f030af9e9f
groovy.tmp.templates.GStringTemplateScript87._getTemplate_closure1._closure13._closure29671832f3b1c15964
groovy.tmp.templates.GStringTemplateScript87._getTemplate_closure1._closure13._closure29._closure33e3f2231fea30f87e
groovy.tmp.templates.GStringTemplateScript87._getTemplate_closure1._closure13._closure306d92d19ada04f02f
groovy.tmp.templates.GStringTemplateScript87._getTemplate_closure1._closure13._closure31571e8f083751a8f7
groovy.tmp.templates.GStringTemplateScript87._getTemplate_closure1._closure2ca12b19b455908ed
groovy.tmp.templates.GStringTemplateScript87._getTemplate_closure1._closure3d84ba18fe2acda64
groovy.tmp.templates.GStringTemplateScript87._getTemplate_closure1._closure4f9aa71fc9d5489e2
groovy.tmp.templates.GStringTemplateScript87._getTemplate_closure1._closure56e9c53b51af71776
groovy.tmp.templates.GStringTemplateScript87._getTemplate_closure1._closure5._closure14ef59cbda7b5eafaa
groovy.tmp.templates.GStringTemplateScript87._getTemplate_closure1._closure6884b9eae00ad2b9a
groovy.tmp.templates.GStringTemplateScript87._getTemplate_closure1._closure76f55a3823105253e
groovy.tmp.templates.GStringTemplateScript87._getTemplate_closure1._closure7._closure1566a83db10189ff09
groovy.tmp.templates.GStringTemplateScript87._getTemplate_closure1._closure8ccb5c0d29345a1bb
groovy.tmp.templates.GStringTemplateScript87._getTemplate_closure1._closure92a6d999a5ca2d393
groovy.tmp.templates.GStringTemplateScript87._getTemplate_closure1._closure9._closure16de4f97f0278f4a12
groovy.tmp.templates.GStringTemplateScript87._getTemplate_closure1._closure9._closure17305d5f03dbbc5c7e
groovy.tmp.templates.GStringTemplateScript88b56632eb85da7e5f
groovy.tmp.templates.GStringTemplateScript88._getTemplate_closure1d79e61bc8e30d5a0
groovy.tmp.templates.GStringTemplateScript88._getTemplate_closure1._closure106dfaa26df695b49b
groovy.tmp.templates.GStringTemplateScript88._getTemplate_closure1._closure10._closure2288a513797bba011f
groovy.tmp.templates.GStringTemplateScript88._getTemplate_closure1._closure10._closure233710449c7dc270a4
groovy.tmp.templates.GStringTemplateScript88._getTemplate_closure1._closure1121874161168f6eb7
groovy.tmp.templates.GStringTemplateScript88._getTemplate_closure1._closure1248e933440123fe83
groovy.tmp.templates.GStringTemplateScript88._getTemplate_closure1._closure13413a9918eeecba2d
groovy.tmp.templates.GStringTemplateScript88._getTemplate_closure1._closure13._closure264e66448ea9a54279
groovy.tmp.templates.GStringTemplateScript88._getTemplate_closure1._closure13._closure26._closure3266d2af0e0fca1594
groovy.tmp.templates.GStringTemplateScript88._getTemplate_closure1._closure13._closure271f4e64a878e35b45
groovy.tmp.templates.GStringTemplateScript88._getTemplate_closure1._closure13._closure280ef10f4840d54f38
groovy.tmp.templates.GStringTemplateScript88._getTemplate_closure1._closure13._closure29fbb34d75b7f40f46
groovy.tmp.templates.GStringTemplateScript88._getTemplate_closure1._closure13._closure29._closure33a5427c7f2e348c89
groovy.tmp.templates.GStringTemplateScript88._getTemplate_closure1._closure13._closure30dc479a67eb930db8
groovy.tmp.templates.GStringTemplateScript88._getTemplate_closure1._closure13._closure312ed0783e1a60a217
groovy.tmp.templates.GStringTemplateScript88._getTemplate_closure1._closure2e7d12c59a6a53ccf
groovy.tmp.templates.GStringTemplateScript88._getTemplate_closure1._closure3d45e3d143ca7ec95
groovy.tmp.templates.GStringTemplateScript88._getTemplate_closure1._closure4ef94b9b36b015880
groovy.tmp.templates.GStringTemplateScript88._getTemplate_closure1._closure5d465c640cc13d7df
groovy.tmp.templates.GStringTemplateScript88._getTemplate_closure1._closure5._closure14bae2b8358b2a4993
groovy.tmp.templates.GStringTemplateScript88._getTemplate_closure1._closure69680d028adfb16a8
groovy.tmp.templates.GStringTemplateScript88._getTemplate_closure1._closure7dc3546d26373bd97
groovy.tmp.templates.GStringTemplateScript88._getTemplate_closure1._closure7._closure15f631f32905df59e3
groovy.tmp.templates.GStringTemplateScript88._getTemplate_closure1._closure8f96a8e11f646bf55
groovy.tmp.templates.GStringTemplateScript88._getTemplate_closure1._closure97aaff9c8c775b0fb
groovy.tmp.templates.GStringTemplateScript88._getTemplate_closure1._closure9._closure16cb8272bf8618bf8c
groovy.tmp.templates.GStringTemplateScript88._getTemplate_closure1._closure9._closure17708d0b2df0610091
groovy.tmp.templates.GStringTemplateScript89e79277f5dc25fda1
groovy.tmp.templates.GStringTemplateScript89._getTemplate_closure1a4c249290c43defe
groovy.tmp.templates.GStringTemplateScript89._getTemplate_closure1._closure10622f375c4134b101
groovy.tmp.templates.GStringTemplateScript89._getTemplate_closure1._closure10._closure22623428e0bfa1119c
groovy.tmp.templates.GStringTemplateScript89._getTemplate_closure1._closure10._closure230996ac57083d380c
groovy.tmp.templates.GStringTemplateScript89._getTemplate_closure1._closure11ec6894f42738cfb2
groovy.tmp.templates.GStringTemplateScript89._getTemplate_closure1._closure126e7d8391f8eba624
groovy.tmp.templates.GStringTemplateScript89._getTemplate_closure1._closure13b7c90344c6772401
groovy.tmp.templates.GStringTemplateScript89._getTemplate_closure1._closure13._closure26b2c6db31117f784a
groovy.tmp.templates.GStringTemplateScript89._getTemplate_closure1._closure13._closure26._closure32da99a46dcded24be
groovy.tmp.templates.GStringTemplateScript89._getTemplate_closure1._closure13._closure279d08c522fa701d49
groovy.tmp.templates.GStringTemplateScript89._getTemplate_closure1._closure13._closure2845c1a2aae3ee0c27
groovy.tmp.templates.GStringTemplateScript89._getTemplate_closure1._closure13._closure295fad76c62ec2998c
groovy.tmp.templates.GStringTemplateScript89._getTemplate_closure1._closure13._closure29._closure3366c727df3badd099
groovy.tmp.templates.GStringTemplateScript89._getTemplate_closure1._closure13._closure30c0aef110eeef4991
groovy.tmp.templates.GStringTemplateScript89._getTemplate_closure1._closure13._closure317908c208d1a90951
groovy.tmp.templates.GStringTemplateScript89._getTemplate_closure1._closure290c437804a2df59c
groovy.tmp.templates.GStringTemplateScript89._getTemplate_closure1._closure3a508bf692bf370e3
groovy.tmp.templates.GStringTemplateScript89._getTemplate_closure1._closure4c3f6ad727ba4e475
groovy.tmp.templates.GStringTemplateScript89._getTemplate_closure1._closure5c947459ce9ffc294
groovy.tmp.templates.GStringTemplateScript89._getTemplate_closure1._closure5._closure149a004e149b230f5b
groovy.tmp.templates.GStringTemplateScript89._getTemplate_closure1._closure6cb6bbba819c774f8
groovy.tmp.templates.GStringTemplateScript89._getTemplate_closure1._closure7ef5999dd931cf3ef
groovy.tmp.templates.GStringTemplateScript89._getTemplate_closure1._closure7._closure157c804dfefae320c2
groovy.tmp.templates.GStringTemplateScript89._getTemplate_closure1._closure84328b0c830df718b
groovy.tmp.templates.GStringTemplateScript89._getTemplate_closure1._closure995453fa123fadc69
groovy.tmp.templates.GStringTemplateScript89._getTemplate_closure1._closure9._closure1607a5617e99ce5097
groovy.tmp.templates.GStringTemplateScript89._getTemplate_closure1._closure9._closure17ec57aeb33b10684f
groovy.tmp.templates.GStringTemplateScript94b06382a61021d3b
groovy.tmp.templates.GStringTemplateScript9._getTemplate_closure1530f62a5eee7d9a2
groovy.tmp.templates.GStringTemplateScript9._getTemplate_closure1._closure106d4e1f6034adf6ed
groovy.tmp.templates.GStringTemplateScript9._getTemplate_closure1._closure11b9bdbcc625ad4b2f
groovy.tmp.templates.GStringTemplateScript9._getTemplate_closure1._closure11._closure2429b3cd110fa2e658
groovy.tmp.templates.GStringTemplateScript9._getTemplate_closure1._closure11._closure252c9cfa3ceae548a8
groovy.tmp.templates.GStringTemplateScript9._getTemplate_closure1._closure12cbda99a66f12d4c5
groovy.tmp.templates.GStringTemplateScript9._getTemplate_closure1._closure1355506b3951644839
groovy.tmp.templates.GStringTemplateScript9._getTemplate_closure1._closure13._closure26727aaa78378e811d
groovy.tmp.templates.GStringTemplateScript9._getTemplate_closure1._closure13._closure26._closure32901a4f52169de9fc
groovy.tmp.templates.GStringTemplateScript9._getTemplate_closure1._closure13._closure27310a002f7f247a83
groovy.tmp.templates.GStringTemplateScript9._getTemplate_closure1._closure13._closure2820780c0215890c65
groovy.tmp.templates.GStringTemplateScript9._getTemplate_closure1._closure13._closure299166c9a92a220ce5
groovy.tmp.templates.GStringTemplateScript9._getTemplate_closure1._closure13._closure29._closure335d0d8823a315bdab
groovy.tmp.templates.GStringTemplateScript9._getTemplate_closure1._closure13._closure30e6949c030a1107ce
groovy.tmp.templates.GStringTemplateScript9._getTemplate_closure1._closure13._closure314796bed79e38c510
groovy.tmp.templates.GStringTemplateScript9._getTemplate_closure1._closure23094910d5beb7a9a
groovy.tmp.templates.GStringTemplateScript9._getTemplate_closure1._closure32cd02447d880926a
groovy.tmp.templates.GStringTemplateScript9._getTemplate_closure1._closure4a9115d957192ef06
groovy.tmp.templates.GStringTemplateScript9._getTemplate_closure1._closure5a0fa8c6a9d1acb12
groovy.tmp.templates.GStringTemplateScript9._getTemplate_closure1._closure5._closure1457e1deceadc8a534
groovy.tmp.templates.GStringTemplateScript9._getTemplate_closure1._closure69ed387f46d7c1a9f
groovy.tmp.templates.GStringTemplateScript9._getTemplate_closure1._closure73d10cd17cea093e6
groovy.tmp.templates.GStringTemplateScript9._getTemplate_closure1._closure7._closure1544fce191facd2e09
groovy.tmp.templates.GStringTemplateScript9._getTemplate_closure1._closure8a11cda56c285b6e4
groovy.tmp.templates.GStringTemplateScript9._getTemplate_closure1._closure9370b008c7ba2f46a
groovy.tmp.templates.GStringTemplateScript9._getTemplate_closure1._closure9._closure16fee1cd26954a1622
groovy.tmp.templates.GStringTemplateScript9._getTemplate_closure1._closure9._closure17acf9d323340cd4f5
groovy.tmp.templates.GStringTemplateScript9063b6ee5c547f9e2f
groovy.tmp.templates.GStringTemplateScript90._getTemplate_closure1522c7938082afc59
groovy.tmp.templates.GStringTemplateScript90._getTemplate_closure1._closure10deb9de757a2a394e
groovy.tmp.templates.GStringTemplateScript90._getTemplate_closure1._closure118944021d0e03d13a
groovy.tmp.templates.GStringTemplateScript90._getTemplate_closure1._closure11._closure2410986358289264fb
groovy.tmp.templates.GStringTemplateScript90._getTemplate_closure1._closure11._closure256cc83c0dc7db599d
groovy.tmp.templates.GStringTemplateScript90._getTemplate_closure1._closure12267a215b1a98f3e2
groovy.tmp.templates.GStringTemplateScript90._getTemplate_closure1._closure13bb10b863f618d0d6
groovy.tmp.templates.GStringTemplateScript90._getTemplate_closure1._closure13._closure26f44e19ecd0cc49de
groovy.tmp.templates.GStringTemplateScript90._getTemplate_closure1._closure13._closure26._closure321cd6bf1b7d31bbf3
groovy.tmp.templates.GStringTemplateScript90._getTemplate_closure1._closure13._closure2716b92e5de6fbf864
groovy.tmp.templates.GStringTemplateScript90._getTemplate_closure1._closure13._closure2895af50f0bbac6c80
groovy.tmp.templates.GStringTemplateScript90._getTemplate_closure1._closure13._closure29c1068ed2cdd98f86
groovy.tmp.templates.GStringTemplateScript90._getTemplate_closure1._closure13._closure29._closure3362ab242422ebf551
groovy.tmp.templates.GStringTemplateScript90._getTemplate_closure1._closure13._closure30012028b4b77450b4
groovy.tmp.templates.GStringTemplateScript90._getTemplate_closure1._closure13._closure3129e2713272e6318f
groovy.tmp.templates.GStringTemplateScript90._getTemplate_closure1._closure24adee78d1b0ffe9c
groovy.tmp.templates.GStringTemplateScript90._getTemplate_closure1._closure37a3b787efb145bbb
groovy.tmp.templates.GStringTemplateScript90._getTemplate_closure1._closure4a2d87daf2f3c1a95
groovy.tmp.templates.GStringTemplateScript90._getTemplate_closure1._closure55e3cfa223f569192
groovy.tmp.templates.GStringTemplateScript90._getTemplate_closure1._closure5._closure14b455ebcb2a707694
groovy.tmp.templates.GStringTemplateScript90._getTemplate_closure1._closure6de8667468cac3849
groovy.tmp.templates.GStringTemplateScript90._getTemplate_closure1._closure7bbe0d272eff9a118
groovy.tmp.templates.GStringTemplateScript90._getTemplate_closure1._closure7._closure158486b4282bc3ac95
groovy.tmp.templates.GStringTemplateScript90._getTemplate_closure1._closure82ce13ae11b4e506d
groovy.tmp.templates.GStringTemplateScript90._getTemplate_closure1._closure9b57620418aea5b02
groovy.tmp.templates.GStringTemplateScript90._getTemplate_closure1._closure9._closure167e26cba4b9b011bf
groovy.tmp.templates.GStringTemplateScript90._getTemplate_closure1._closure9._closure1721e4fd7e3521370e
groovy.tmp.templates.GStringTemplateScript913142ab420d801dd1
groovy.tmp.templates.GStringTemplateScript91._getTemplate_closure1217051ad8a59f707
groovy.tmp.templates.GStringTemplateScript91._getTemplate_closure1._closure10d16c4b44cd8b3cd4
groovy.tmp.templates.GStringTemplateScript91._getTemplate_closure1._closure10._closure229c87651506bd8e91
groovy.tmp.templates.GStringTemplateScript91._getTemplate_closure1._closure10._closure23ae1f6ce46cb28205
groovy.tmp.templates.GStringTemplateScript91._getTemplate_closure1._closure1144abd7883fb4703f
groovy.tmp.templates.GStringTemplateScript91._getTemplate_closure1._closure1200ee918ee350ab45
groovy.tmp.templates.GStringTemplateScript91._getTemplate_closure1._closure134de3223fde834efa
groovy.tmp.templates.GStringTemplateScript91._getTemplate_closure1._closure13._closure2608ee8653681673ed
groovy.tmp.templates.GStringTemplateScript91._getTemplate_closure1._closure13._closure26._closure32a09db478bf168ad9
groovy.tmp.templates.GStringTemplateScript91._getTemplate_closure1._closure13._closure2794ff8fd76468be68
groovy.tmp.templates.GStringTemplateScript91._getTemplate_closure1._closure13._closure28de9ffd1218972f9f
groovy.tmp.templates.GStringTemplateScript91._getTemplate_closure1._closure13._closure296518b56154ef194c
groovy.tmp.templates.GStringTemplateScript91._getTemplate_closure1._closure13._closure29._closure33a12e7f843772a941
groovy.tmp.templates.GStringTemplateScript91._getTemplate_closure1._closure13._closure301dc943c3b208149d
groovy.tmp.templates.GStringTemplateScript91._getTemplate_closure1._closure13._closure317e3acb04b92f9ac9
groovy.tmp.templates.GStringTemplateScript91._getTemplate_closure1._closure23dcbfc54f78737cf
groovy.tmp.templates.GStringTemplateScript91._getTemplate_closure1._closure30b6dfa03ec40c7cd
groovy.tmp.templates.GStringTemplateScript91._getTemplate_closure1._closure48eba696e3f99a660
groovy.tmp.templates.GStringTemplateScript91._getTemplate_closure1._closure5431e79fe1aba84d9
groovy.tmp.templates.GStringTemplateScript91._getTemplate_closure1._closure5._closure1494b71dea3a79305c
groovy.tmp.templates.GStringTemplateScript91._getTemplate_closure1._closure6836d0cc638905a19
groovy.tmp.templates.GStringTemplateScript91._getTemplate_closure1._closure7888c0d7d1f96ef60
groovy.tmp.templates.GStringTemplateScript91._getTemplate_closure1._closure7._closure150e370affd4ffd5b4
groovy.tmp.templates.GStringTemplateScript91._getTemplate_closure1._closure896a30438ddd79eb3
groovy.tmp.templates.GStringTemplateScript91._getTemplate_closure1._closure95a9ce6286e653790
groovy.tmp.templates.GStringTemplateScript91._getTemplate_closure1._closure9._closure16b201d865a666fea4
groovy.tmp.templates.GStringTemplateScript91._getTemplate_closure1._closure9._closure17bd3e58e0fe505fd0
groovy.tmp.templates.GStringTemplateScript92c65e6460e78099d3
groovy.tmp.templates.GStringTemplateScript92._getTemplate_closure1b49428130ccceae5
groovy.tmp.templates.GStringTemplateScript92._getTemplate_closure1._closure10c112f4161568327a
groovy.tmp.templates.GStringTemplateScript92._getTemplate_closure1._closure11a29ba9376d6c9331
groovy.tmp.templates.GStringTemplateScript92._getTemplate_closure1._closure126b5340f0e90842ac
groovy.tmp.templates.GStringTemplateScript92._getTemplate_closure1._closure13e6f78cdba72fec8f
groovy.tmp.templates.GStringTemplateScript92._getTemplate_closure1._closure13._closure26bd0f2693a1783db9
groovy.tmp.templates.GStringTemplateScript92._getTemplate_closure1._closure13._closure27a2346d48e3dd747d
groovy.tmp.templates.GStringTemplateScript92._getTemplate_closure1._closure13._closure2803ce0b35fddaeabe
groovy.tmp.templates.GStringTemplateScript92._getTemplate_closure1._closure13._closure29393af9b5ffb4a213
groovy.tmp.templates.GStringTemplateScript92._getTemplate_closure1._closure13._closure3038f2fe5abd8cd8e6
groovy.tmp.templates.GStringTemplateScript92._getTemplate_closure1._closure13._closure318653055fe5756703
groovy.tmp.templates.GStringTemplateScript92._getTemplate_closure1._closure2a4f4d03ec21e6c3a
groovy.tmp.templates.GStringTemplateScript92._getTemplate_closure1._closure398967c84d5bd6357
groovy.tmp.templates.GStringTemplateScript92._getTemplate_closure1._closure4fa1c542d0e77637f
groovy.tmp.templates.GStringTemplateScript92._getTemplate_closure1._closure56479fd9a748ebb04
groovy.tmp.templates.GStringTemplateScript92._getTemplate_closure1._closure5._closure14f59007890a62fb04
groovy.tmp.templates.GStringTemplateScript92._getTemplate_closure1._closure66550b047e4d4fce9
groovy.tmp.templates.GStringTemplateScript92._getTemplate_closure1._closure7dd396c6d0f273de8
groovy.tmp.templates.GStringTemplateScript92._getTemplate_closure1._closure7._closure1521e5c987d5bb5ed6
groovy.tmp.templates.GStringTemplateScript92._getTemplate_closure1._closure8e8654752967dcdd0
groovy.tmp.templates.GStringTemplateScript92._getTemplate_closure1._closure9daa3ac9243f48227
groovy.tmp.templates.GStringTemplateScript92._getTemplate_closure1._closure9._closure165668ec26861dcf88
groovy.tmp.templates.GStringTemplateScript92._getTemplate_closure1._closure9._closure17a851b643a3c3e6b3
groovy.tmp.templates.GStringTemplateScript9394aa217ebe7f1a2d
groovy.tmp.templates.GStringTemplateScript93._getTemplate_closure1c7c800868ebfe1bb
groovy.tmp.templates.GStringTemplateScript93._getTemplate_closure1._closure10cec76127a2c937e0
groovy.tmp.templates.GStringTemplateScript93._getTemplate_closure1._closure10._closure22f9a512268e8baf96
groovy.tmp.templates.GStringTemplateScript93._getTemplate_closure1._closure10._closure23d312bd72874c1355
groovy.tmp.templates.GStringTemplateScript93._getTemplate_closure1._closure116f747ca25cdb3234
groovy.tmp.templates.GStringTemplateScript93._getTemplate_closure1._closure124dc7f02510c01a0b
groovy.tmp.templates.GStringTemplateScript93._getTemplate_closure1._closure13100416878fb472a3
groovy.tmp.templates.GStringTemplateScript93._getTemplate_closure1._closure13._closure2641afb92c19a2078a
groovy.tmp.templates.GStringTemplateScript93._getTemplate_closure1._closure13._closure26._closure32680ba2bf3b58e88c
groovy.tmp.templates.GStringTemplateScript93._getTemplate_closure1._closure13._closure272072ccc2614e3271
groovy.tmp.templates.GStringTemplateScript93._getTemplate_closure1._closure13._closure2848fea6d75ee1a9a1
groovy.tmp.templates.GStringTemplateScript93._getTemplate_closure1._closure13._closure299d24c206668234d9
groovy.tmp.templates.GStringTemplateScript93._getTemplate_closure1._closure13._closure29._closure339624c8c41c401160
groovy.tmp.templates.GStringTemplateScript93._getTemplate_closure1._closure13._closure30241b952db8f09ccf
groovy.tmp.templates.GStringTemplateScript93._getTemplate_closure1._closure13._closure31d18bbf692ebccc45
groovy.tmp.templates.GStringTemplateScript93._getTemplate_closure1._closure2d3e1cbe72e96a569
groovy.tmp.templates.GStringTemplateScript93._getTemplate_closure1._closure3e9c0fef9c2e9ff21
groovy.tmp.templates.GStringTemplateScript93._getTemplate_closure1._closure4d67e40ec1ed2df8a
groovy.tmp.templates.GStringTemplateScript93._getTemplate_closure1._closure5795b7e465162ae4f
groovy.tmp.templates.GStringTemplateScript93._getTemplate_closure1._closure5._closure14d572f1a81a6bbdcc
groovy.tmp.templates.GStringTemplateScript93._getTemplate_closure1._closure638bbdbc750e89eb9
groovy.tmp.templates.GStringTemplateScript93._getTemplate_closure1._closure7ee55b362ff487390
groovy.tmp.templates.GStringTemplateScript93._getTemplate_closure1._closure7._closure15ab5477502a8727f7
groovy.tmp.templates.GStringTemplateScript93._getTemplate_closure1._closure85227798b50e4030e
groovy.tmp.templates.GStringTemplateScript93._getTemplate_closure1._closure935496afba77beeb5
groovy.tmp.templates.GStringTemplateScript93._getTemplate_closure1._closure9._closure169a4fffe799cb2093
groovy.tmp.templates.GStringTemplateScript93._getTemplate_closure1._closure9._closure17348b13dd68b28e6d
groovy.tmp.templates.GStringTemplateScript949867fa25338191d6
groovy.tmp.templates.GStringTemplateScript94._getTemplate_closure12f5cdb6e01e6d120
groovy.tmp.templates.GStringTemplateScript94._getTemplate_closure1._closure10e1ef8ab3a4ae2f26
groovy.tmp.templates.GStringTemplateScript94._getTemplate_closure1._closure11defb5449c8dd552c
groovy.tmp.templates.GStringTemplateScript94._getTemplate_closure1._closure12bc28e20cfdb9917e
groovy.tmp.templates.GStringTemplateScript94._getTemplate_closure1._closure1300ded1135476a864
groovy.tmp.templates.GStringTemplateScript94._getTemplate_closure1._closure13._closure2666cc671233a4a110
groovy.tmp.templates.GStringTemplateScript94._getTemplate_closure1._closure13._closure27cfa3a877ecb6e057
groovy.tmp.templates.GStringTemplateScript94._getTemplate_closure1._closure13._closure28096de77a374160fd
groovy.tmp.templates.GStringTemplateScript94._getTemplate_closure1._closure13._closure29817e601ca903d4ad
groovy.tmp.templates.GStringTemplateScript94._getTemplate_closure1._closure13._closure3072858568a2854010
groovy.tmp.templates.GStringTemplateScript94._getTemplate_closure1._closure13._closure31c68099e95dc09c96
groovy.tmp.templates.GStringTemplateScript94._getTemplate_closure1._closure2268a88eaa92cdbd1
groovy.tmp.templates.GStringTemplateScript94._getTemplate_closure1._closure30f61718aa6462a62
groovy.tmp.templates.GStringTemplateScript94._getTemplate_closure1._closure413502eab6daae941
groovy.tmp.templates.GStringTemplateScript94._getTemplate_closure1._closure52ab6f552a8e6c4be
groovy.tmp.templates.GStringTemplateScript94._getTemplate_closure1._closure5._closure1437de334f6a556db4
groovy.tmp.templates.GStringTemplateScript94._getTemplate_closure1._closure6192bc9445c5db108
groovy.tmp.templates.GStringTemplateScript94._getTemplate_closure1._closure77653ae4d2e4498f8
groovy.tmp.templates.GStringTemplateScript94._getTemplate_closure1._closure7._closure157e404f77d7324812
groovy.tmp.templates.GStringTemplateScript94._getTemplate_closure1._closure815e9c18601296b16
groovy.tmp.templates.GStringTemplateScript94._getTemplate_closure1._closure96add39e618d7e948
groovy.tmp.templates.GStringTemplateScript94._getTemplate_closure1._closure9._closure162eba84a0c6ebadd1
groovy.tmp.templates.GStringTemplateScript94._getTemplate_closure1._closure9._closure17828e6b0518e49475
groovy.tmp.templates.GStringTemplateScript95ca93bf3b6a7e1228
groovy.tmp.templates.GStringTemplateScript95._getTemplate_closure15c00f3fb8395da7e
groovy.tmp.templates.GStringTemplateScript95._getTemplate_closure1._closure10ee3a1f82130f2abc
groovy.tmp.templates.GStringTemplateScript95._getTemplate_closure1._closure11131481dcf96af429
groovy.tmp.templates.GStringTemplateScript95._getTemplate_closure1._closure129abc52d90471c9d9
groovy.tmp.templates.GStringTemplateScript95._getTemplate_closure1._closure13f62d4b4f7ced3648
groovy.tmp.templates.GStringTemplateScript95._getTemplate_closure1._closure13._closure269a6cf8ad8b7e9b23
groovy.tmp.templates.GStringTemplateScript95._getTemplate_closure1._closure13._closure274de509fd6e25a65b
groovy.tmp.templates.GStringTemplateScript95._getTemplate_closure1._closure13._closure28425d4a98947a23e2
groovy.tmp.templates.GStringTemplateScript95._getTemplate_closure1._closure13._closure2925605baf30354267
groovy.tmp.templates.GStringTemplateScript95._getTemplate_closure1._closure13._closure306e6cee1fa7f90439
groovy.tmp.templates.GStringTemplateScript95._getTemplate_closure1._closure13._closure31915823df960937d0
groovy.tmp.templates.GStringTemplateScript95._getTemplate_closure1._closure2519f933345a41282
groovy.tmp.templates.GStringTemplateScript95._getTemplate_closure1._closure37e37f3f7b112b614
groovy.tmp.templates.GStringTemplateScript95._getTemplate_closure1._closure43f323a6a7d0f55b4
groovy.tmp.templates.GStringTemplateScript95._getTemplate_closure1._closure53794768e8d0ad1f5
groovy.tmp.templates.GStringTemplateScript95._getTemplate_closure1._closure5._closure14173cc56e7a5c2b7c
groovy.tmp.templates.GStringTemplateScript95._getTemplate_closure1._closure644c0a2c4e861d358
groovy.tmp.templates.GStringTemplateScript95._getTemplate_closure1._closure7453f7142de2bd680
groovy.tmp.templates.GStringTemplateScript95._getTemplate_closure1._closure7._closure15f4f1f1a0280e3133
groovy.tmp.templates.GStringTemplateScript95._getTemplate_closure1._closure8afabff5fc7b0a5c8
groovy.tmp.templates.GStringTemplateScript95._getTemplate_closure1._closure98537ff8ffc5885da
groovy.tmp.templates.GStringTemplateScript95._getTemplate_closure1._closure9._closure16e29d9761d93d42ca
groovy.tmp.templates.GStringTemplateScript95._getTemplate_closure1._closure9._closure171e54ce9bd395fcab
groovy.tmp.templates.GStringTemplateScript963d8f7019807e962a
groovy.tmp.templates.GStringTemplateScript96._getTemplate_closure1c9e48a450500c79c
groovy.tmp.templates.GStringTemplateScript96._getTemplate_closure1._closure10fe44a0d0cbec2412
groovy.tmp.templates.GStringTemplateScript96._getTemplate_closure1._closure10._closure22d970c7d85afcfd1b
groovy.tmp.templates.GStringTemplateScript96._getTemplate_closure1._closure10._closure23178ff694254e795d
groovy.tmp.templates.GStringTemplateScript96._getTemplate_closure1._closure11f524ff63abb21727
groovy.tmp.templates.GStringTemplateScript96._getTemplate_closure1._closure12f10183a70e292030
groovy.tmp.templates.GStringTemplateScript96._getTemplate_closure1._closure135d39e5ab0541943d
groovy.tmp.templates.GStringTemplateScript96._getTemplate_closure1._closure13._closure262f8d586d4210d577
groovy.tmp.templates.GStringTemplateScript96._getTemplate_closure1._closure13._closure26._closure32f56c8453f1e31d0d
groovy.tmp.templates.GStringTemplateScript96._getTemplate_closure1._closure13._closure277b2eeb62e9906c4e
groovy.tmp.templates.GStringTemplateScript96._getTemplate_closure1._closure13._closure289f0cbcbf7137e6c3
groovy.tmp.templates.GStringTemplateScript96._getTemplate_closure1._closure13._closure297942177b9b6ef938
groovy.tmp.templates.GStringTemplateScript96._getTemplate_closure1._closure13._closure29._closure333bb4fde45fbc3d32
groovy.tmp.templates.GStringTemplateScript96._getTemplate_closure1._closure13._closure304b575386a87dc842
groovy.tmp.templates.GStringTemplateScript96._getTemplate_closure1._closure13._closure316931ed84ca53ca1a
groovy.tmp.templates.GStringTemplateScript96._getTemplate_closure1._closure2c8a0bf59703d4977
groovy.tmp.templates.GStringTemplateScript96._getTemplate_closure1._closure3edcc757088ef128e
groovy.tmp.templates.GStringTemplateScript96._getTemplate_closure1._closure44b9407294ce190ab
groovy.tmp.templates.GStringTemplateScript96._getTemplate_closure1._closure510f3f2eae33eee28
groovy.tmp.templates.GStringTemplateScript96._getTemplate_closure1._closure5._closure14761bdf0d4a47e024
groovy.tmp.templates.GStringTemplateScript96._getTemplate_closure1._closure6a2fd1e45342575a8
groovy.tmp.templates.GStringTemplateScript96._getTemplate_closure1._closure7108a1052ce9a0408
groovy.tmp.templates.GStringTemplateScript96._getTemplate_closure1._closure7._closure15db2332d8294aba51
groovy.tmp.templates.GStringTemplateScript96._getTemplate_closure1._closure8d16dbc358c1af6ab
groovy.tmp.templates.GStringTemplateScript96._getTemplate_closure1._closure90508b535d1c9306d
groovy.tmp.templates.GStringTemplateScript96._getTemplate_closure1._closure9._closure1606f4a322f94673e6
groovy.tmp.templates.GStringTemplateScript96._getTemplate_closure1._closure9._closure170b3b20388e0645c8
groovy.tmp.templates.GStringTemplateScript976f7b3507d98115d4
groovy.tmp.templates.GStringTemplateScript97._getTemplate_closure1bab8a2d08773ccc2
groovy.tmp.templates.GStringTemplateScript97._getTemplate_closure1._closure10f19135e17c4d2188
groovy.tmp.templates.GStringTemplateScript97._getTemplate_closure1._closure1138cb2af69a05b622
groovy.tmp.templates.GStringTemplateScript97._getTemplate_closure1._closure12d7953372f7e17897
groovy.tmp.templates.GStringTemplateScript97._getTemplate_closure1._closure13abca7ff72dda0a11
groovy.tmp.templates.GStringTemplateScript97._getTemplate_closure1._closure13._closure26d32dc7d2facaef44
groovy.tmp.templates.GStringTemplateScript97._getTemplate_closure1._closure13._closure27f9684ae86b032a42
groovy.tmp.templates.GStringTemplateScript97._getTemplate_closure1._closure13._closure28d43c115dd20ca5dc
groovy.tmp.templates.GStringTemplateScript97._getTemplate_closure1._closure13._closure29dd5c2cc802586ff2
groovy.tmp.templates.GStringTemplateScript97._getTemplate_closure1._closure13._closure3057be38f1ad018c6b
groovy.tmp.templates.GStringTemplateScript97._getTemplate_closure1._closure13._closure313ee957b2019a615c
groovy.tmp.templates.GStringTemplateScript97._getTemplate_closure1._closure2bfb5a4809cb58024
groovy.tmp.templates.GStringTemplateScript97._getTemplate_closure1._closure39c9af70d9fbb8ef8
groovy.tmp.templates.GStringTemplateScript97._getTemplate_closure1._closure467f613e85c442c5e
groovy.tmp.templates.GStringTemplateScript97._getTemplate_closure1._closure50dd17136c6d2fb63
groovy.tmp.templates.GStringTemplateScript97._getTemplate_closure1._closure5._closure1456f9292c5a4ea6ec
groovy.tmp.templates.GStringTemplateScript97._getTemplate_closure1._closure6ff1675c5801917f8
groovy.tmp.templates.GStringTemplateScript97._getTemplate_closure1._closure723e6cf5d3ef54a70
groovy.tmp.templates.GStringTemplateScript97._getTemplate_closure1._closure7._closure1551928c0fd676c370
groovy.tmp.templates.GStringTemplateScript97._getTemplate_closure1._closure86b2f82ec4a833875
groovy.tmp.templates.GStringTemplateScript97._getTemplate_closure1._closure9eae2735c35465cff
groovy.tmp.templates.GStringTemplateScript97._getTemplate_closure1._closure9._closure16cad3b0e3e6909cfd
groovy.tmp.templates.GStringTemplateScript97._getTemplate_closure1._closure9._closure1797e185a645772d16
groovy.tmp.templates.GStringTemplateScript982414c6ae9b8381dc
groovy.tmp.templates.GStringTemplateScript98._getTemplate_closure1a8cd3d941bb2a6ab
groovy.tmp.templates.GStringTemplateScript98._getTemplate_closure1._closure10a01577f8c722159e
groovy.tmp.templates.GStringTemplateScript98._getTemplate_closure1._closure11263aaeb483bed916
groovy.tmp.templates.GStringTemplateScript98._getTemplate_closure1._closure12a2dfa7f4d4da36db
groovy.tmp.templates.GStringTemplateScript98._getTemplate_closure1._closure137c8c6a82b2c421b3
groovy.tmp.templates.GStringTemplateScript98._getTemplate_closure1._closure13._closure26614ae411161d9843
groovy.tmp.templates.GStringTemplateScript98._getTemplate_closure1._closure13._closure26._closure325e8ee4056c0832a5
groovy.tmp.templates.GStringTemplateScript98._getTemplate_closure1._closure13._closure27148c2209f261c803
groovy.tmp.templates.GStringTemplateScript98._getTemplate_closure1._closure13._closure281c2a3fe5a276747b
groovy.tmp.templates.GStringTemplateScript98._getTemplate_closure1._closure13._closure2941f7534e046d39d0
groovy.tmp.templates.GStringTemplateScript98._getTemplate_closure1._closure13._closure29._closure33be81f9248e2115d5
groovy.tmp.templates.GStringTemplateScript98._getTemplate_closure1._closure13._closure30e66b730c9c9671fc
groovy.tmp.templates.GStringTemplateScript98._getTemplate_closure1._closure13._closure314727a0842cab6bbc
groovy.tmp.templates.GStringTemplateScript98._getTemplate_closure1._closure2927639427f49b406
groovy.tmp.templates.GStringTemplateScript98._getTemplate_closure1._closure3908f6b9641b0b809
groovy.tmp.templates.GStringTemplateScript98._getTemplate_closure1._closure471c8dba7aa11fd3c
groovy.tmp.templates.GStringTemplateScript98._getTemplate_closure1._closure5b728e4c310363bca
groovy.tmp.templates.GStringTemplateScript98._getTemplate_closure1._closure5._closure1403425ac3aa3a40d5
groovy.tmp.templates.GStringTemplateScript98._getTemplate_closure1._closure6e1dd3b432d4f2aca
groovy.tmp.templates.GStringTemplateScript98._getTemplate_closure1._closure790862a0d6c83d2d9
groovy.tmp.templates.GStringTemplateScript98._getTemplate_closure1._closure7._closure15c10b4297d220659a
groovy.tmp.templates.GStringTemplateScript98._getTemplate_closure1._closure85ef0cc2f2f80269b
groovy.tmp.templates.GStringTemplateScript98._getTemplate_closure1._closure9ba20130eae913f97
groovy.tmp.templates.GStringTemplateScript98._getTemplate_closure1._closure9._closure16df1e55ac47076963
groovy.tmp.templates.GStringTemplateScript98._getTemplate_closure1._closure9._closure17d731d1886eaa71f9
groovy.tmp.templates.GStringTemplateScript9976e083b0c27c0222
groovy.tmp.templates.GStringTemplateScript99._getTemplate_closure1db91150199c1adf5
groovy.tmp.templates.GStringTemplateScript99._getTemplate_closure1._closure10afc0e2c970831004
groovy.tmp.templates.GStringTemplateScript99._getTemplate_closure1._closure11ebd57b21b2097813
groovy.tmp.templates.GStringTemplateScript99._getTemplate_closure1._closure12844b17212d126e7c
groovy.tmp.templates.GStringTemplateScript99._getTemplate_closure1._closure138a7ff0de9a5fbf9f
groovy.tmp.templates.GStringTemplateScript99._getTemplate_closure1._closure13._closure269dea7baeaec7a270
groovy.tmp.templates.GStringTemplateScript99._getTemplate_closure1._closure13._closure26._closure32e2c5ef66ae2f038f
groovy.tmp.templates.GStringTemplateScript99._getTemplate_closure1._closure13._closure2796ca838370f28e0f
groovy.tmp.templates.GStringTemplateScript99._getTemplate_closure1._closure13._closure28571a9207014d3764
groovy.tmp.templates.GStringTemplateScript99._getTemplate_closure1._closure13._closure29e5e968fd9d5baf1a
groovy.tmp.templates.GStringTemplateScript99._getTemplate_closure1._closure13._closure29._closure337d04a2849bb849c5
groovy.tmp.templates.GStringTemplateScript99._getTemplate_closure1._closure13._closure30fa82187b99ea35d5
groovy.tmp.templates.GStringTemplateScript99._getTemplate_closure1._closure13._closure3110ff1ab2e762c0fa
groovy.tmp.templates.GStringTemplateScript99._getTemplate_closure1._closure2e563229b93c17d55
groovy.tmp.templates.GStringTemplateScript99._getTemplate_closure1._closure3e1d9e9eb56e4247f
groovy.tmp.templates.GStringTemplateScript99._getTemplate_closure1._closure45daacf66bab441c9
groovy.tmp.templates.GStringTemplateScript99._getTemplate_closure1._closure5aa0a671f35da2e81
groovy.tmp.templates.GStringTemplateScript99._getTemplate_closure1._closure5._closure1423a0ace2ba33061d
groovy.tmp.templates.GStringTemplateScript99._getTemplate_closure1._closure6bc3650c39973489a
groovy.tmp.templates.GStringTemplateScript99._getTemplate_closure1._closure7a3eaf5029cec9ca1
groovy.tmp.templates.GStringTemplateScript99._getTemplate_closure1._closure7._closure154bbafc402d1c1cbb
groovy.tmp.templates.GStringTemplateScript99._getTemplate_closure1._closure8e4b2f2f6e919e845
groovy.tmp.templates.GStringTemplateScript99._getTemplate_closure1._closure955cad5674a1e5305
groovy.tmp.templates.GStringTemplateScript99._getTemplate_closure1._closure9._closure161339466d58d18678
groovy.tmp.templates.GStringTemplateScript99._getTemplate_closure1._closure9._closure174beb7416a5db1927
groovy.transform.SealedMode8e35385e91f7bd15
groovy.util.CharsetToolkite32ee0ccebd9426d
groovyjarjarantlr4.v4.runtime.BailErrorStrategya52ab34a277a6e6e
groovyjarjarantlr4.v4.runtime.BufferedTokenStreame3fcfda35266063f
groovyjarjarantlr4.v4.runtime.CharStreamsca109f7c2d67f13b
groovyjarjarantlr4.v4.runtime.CodePointBufferb24aa6446932c3af
groovyjarjarantlr4.v4.runtime.CodePointBuffer.106aa7b4e1ce36f07
groovyjarjarantlr4.v4.runtime.CodePointBuffer.Builder516fa724190c2a38
groovyjarjarantlr4.v4.runtime.CodePointBuffer.Type8ec5d50a006df7c0
groovyjarjarantlr4.v4.runtime.CodePointCharStream2c8d895ad8131701
groovyjarjarantlr4.v4.runtime.CodePointCharStream.116c488dfc67ebad3
groovyjarjarantlr4.v4.runtime.CodePointCharStream.CodePoint16BitCharStreame4ee70bbfd238e15
groovyjarjarantlr4.v4.runtime.CodePointCharStream.CodePoint8BitCharStream74fd836f3bdd89de
groovyjarjarantlr4.v4.runtime.CommonToken76664a5e1805c19e
groovyjarjarantlr4.v4.runtime.CommonTokenFactory20e6f598d22ae440
groovyjarjarantlr4.v4.runtime.CommonTokenStream8c890a21402d482c
groovyjarjarantlr4.v4.runtime.ConsoleErrorListenerc78fdd4f376d9cd1
groovyjarjarantlr4.v4.runtime.DefaultErrorStrategy39f29193b6617160
groovyjarjarantlr4.v4.runtime.InputMismatchException88bc8f3c6636d379
groovyjarjarantlr4.v4.runtime.Lexer043f41e0eef8d1f6
groovyjarjarantlr4.v4.runtime.NoViableAltExceptionb645458ded09ef0f
groovyjarjarantlr4.v4.runtime.Parser95f4816644829053
groovyjarjarantlr4.v4.runtime.ParserRuleContext7cac32796bcb2cd1
groovyjarjarantlr4.v4.runtime.ProxyErrorListener913ef73b748b67f6
groovyjarjarantlr4.v4.runtime.ProxyParserErrorListener5e1abca886eeaaec
groovyjarjarantlr4.v4.runtime.RecognitionException16d6e1c65a0bedfe
groovyjarjarantlr4.v4.runtime.Recognizer448ca4c5d64e4cbd
groovyjarjarantlr4.v4.runtime.Recognizer.1cdb766da5463f4b1
groovyjarjarantlr4.v4.runtime.RuleContext3f339741da968e66
groovyjarjarantlr4.v4.runtime.VocabularyImpl1d7f4b732c5b9a10
groovyjarjarantlr4.v4.runtime.atn.ATN84164c3706eac3ab
groovyjarjarantlr4.v4.runtime.atn.ATNConfig4fc26dad5baf193d
groovyjarjarantlr4.v4.runtime.atn.ATNConfig.ActionATNConfig1d33560bdc109d45
groovyjarjarantlr4.v4.runtime.atn.ATNConfig.SemanticContextATNConfig0bd2c44a1402d3ae
groovyjarjarantlr4.v4.runtime.atn.ATNConfigSeta7b2675c7054a870
groovyjarjarantlr4.v4.runtime.atn.ATNConfigSet.ATNConfigSetIterator49e3c9c569489b4f
groovyjarjarantlr4.v4.runtime.atn.ATNDeserializationOptions691c42b08688ed26
groovyjarjarantlr4.v4.runtime.atn.ATNDeserializerd8df86b11aa80fd7
groovyjarjarantlr4.v4.runtime.atn.ATNDeserializer.1693321ec9dec2816
groovyjarjarantlr4.v4.runtime.atn.ATNDeserializer.2f93c6f95247a45b5
groovyjarjarantlr4.v4.runtime.atn.ATNDeserializer.3045ba02e5ad24c57
groovyjarjarantlr4.v4.runtime.atn.ATNDeserializer.UnicodeDeserializingModee57821b8b4ecafdd
groovyjarjarantlr4.v4.runtime.atn.ATNSimulator3238c2e30e535323
groovyjarjarantlr4.v4.runtime.atn.ATNStateb1247f709bbda629
groovyjarjarantlr4.v4.runtime.atn.ATNType78d1db4da16f56d6
groovyjarjarantlr4.v4.runtime.atn.AbstractPredicateTransition185239ff0296d54b
groovyjarjarantlr4.v4.runtime.atn.ActionTransitionef2151c069749561
groovyjarjarantlr4.v4.runtime.atn.ArrayPredictionContext4a7897de8b971373
groovyjarjarantlr4.v4.runtime.atn.AtomTransitionb74478f46b59ee89
groovyjarjarantlr4.v4.runtime.atn.BasicBlockStartStatef6fa498c8112a013
groovyjarjarantlr4.v4.runtime.atn.BasicState112144b3748f3011
groovyjarjarantlr4.v4.runtime.atn.BlockEndStateaf5ddc1871100f0f
groovyjarjarantlr4.v4.runtime.atn.BlockStartState47252c6593ceea4f
groovyjarjarantlr4.v4.runtime.atn.ConflictInfoda565252dbf6b276
groovyjarjarantlr4.v4.runtime.atn.DecisionState3bcf571d6f4817a8
groovyjarjarantlr4.v4.runtime.atn.EmptyPredictionContext0a5904080bd13518
groovyjarjarantlr4.v4.runtime.atn.EpsilonTransition35abf09440d98d56
groovyjarjarantlr4.v4.runtime.atn.LexerATNSimulator455ba09f3b17e021
groovyjarjarantlr4.v4.runtime.atn.LexerATNSimulator.SimState18c46c0f02415130
groovyjarjarantlr4.v4.runtime.atn.LexerActionExecutorb86d0e83a9b4d8ac
groovyjarjarantlr4.v4.runtime.atn.LexerActionTypef04e9096abcdb737
groovyjarjarantlr4.v4.runtime.atn.LexerCustomActiondbc939fea57f4263
groovyjarjarantlr4.v4.runtime.atn.LexerMoreAction750558b7aa4aad15
groovyjarjarantlr4.v4.runtime.atn.LexerPopModeActionccc8ef9d0a3ad803
groovyjarjarantlr4.v4.runtime.atn.LexerPushModeAction0e25e8593d91bb4c
groovyjarjarantlr4.v4.runtime.atn.LexerSkipActionce0991cd843a0ead
groovyjarjarantlr4.v4.runtime.atn.LexerTypeAction5d3eb047cac55b48
groovyjarjarantlr4.v4.runtime.atn.LoopEndStatef368749727a2c2e7
groovyjarjarantlr4.v4.runtime.atn.NotSetTransition932996df5aee2421
groovyjarjarantlr4.v4.runtime.atn.OrderedATNConfigSeta690c351c38b7581
groovyjarjarantlr4.v4.runtime.atn.ParserATNSimulatora78af0589eece4e1
groovyjarjarantlr4.v4.runtime.atn.ParserATNSimulator.19653d191a6fbdc6a
groovyjarjarantlr4.v4.runtime.atn.PlusBlockStartState97c195b221c838ba
groovyjarjarantlr4.v4.runtime.atn.PlusLoopbackState042dbc7a7e8ffcc2
groovyjarjarantlr4.v4.runtime.atn.PrecedencePredicateTransitiondfef810349242117
groovyjarjarantlr4.v4.runtime.atn.PredicateTransition3dea5ef3e8cd72fe
groovyjarjarantlr4.v4.runtime.atn.PredictionContextcc7add07af13e2ba
groovyjarjarantlr4.v4.runtime.atn.PredictionContext.IdentityEqualityComparator8cf1c459dda974aa
groovyjarjarantlr4.v4.runtime.atn.PredictionContext.IdentityHashMap6cb9f8368780bca6
groovyjarjarantlr4.v4.runtime.atn.PredictionContextCache7b495b2a6b82cdf2
groovyjarjarantlr4.v4.runtime.atn.PredictionContextCache.IdentityCommutativePredictionContextOperands5894c10b6863627a
groovyjarjarantlr4.v4.runtime.atn.PredictionContextCache.PredictionContextAndInta1462a0b4c280769
groovyjarjarantlr4.v4.runtime.atn.PredictionModed76dcd9b67841a48
groovyjarjarantlr4.v4.runtime.atn.RuleStartStated4d523784c46129b
groovyjarjarantlr4.v4.runtime.atn.RuleStopState2ea2510eae6bcf2f
groovyjarjarantlr4.v4.runtime.atn.RuleTransitiona7a3cd88b8993c6e
groovyjarjarantlr4.v4.runtime.atn.SemanticContextc35755a2cabaf8df
groovyjarjarantlr4.v4.runtime.atn.SemanticContext.ANDc827316e4a71d08b
groovyjarjarantlr4.v4.runtime.atn.SemanticContext.OR31efcd10957e4ac1
groovyjarjarantlr4.v4.runtime.atn.SemanticContext.Operator042cf7a8d9e75882
groovyjarjarantlr4.v4.runtime.atn.SemanticContext.PrecedencePredicateb479e740792bcb09
groovyjarjarantlr4.v4.runtime.atn.SemanticContext.Predicate52792685ad86fd35
groovyjarjarantlr4.v4.runtime.atn.SetTransition7f2d4c173a364be1
groovyjarjarantlr4.v4.runtime.atn.SimulatorState80e061b58829e92c
groovyjarjarantlr4.v4.runtime.atn.SingletonPredictionContextbd6dfec85a96910e
groovyjarjarantlr4.v4.runtime.atn.StarBlockStartState38c96d9cb1c27d3e
groovyjarjarantlr4.v4.runtime.atn.StarLoopEntryState7489ad97863790b2
groovyjarjarantlr4.v4.runtime.atn.StarLoopbackState6490db2296de9b23
groovyjarjarantlr4.v4.runtime.atn.TokensStartState7f4ca1ba7e31e59a
groovyjarjarantlr4.v4.runtime.atn.Transitionb9b5d9f4e9e6f91c
groovyjarjarantlr4.v4.runtime.atn.Transition.1049f4fce8e36f553
groovyjarjarantlr4.v4.runtime.atn.WildcardTransition334b3fd740c99e83
groovyjarjarantlr4.v4.runtime.dfa.AbstractEdgeMap76b6fa2eac30b2a5
groovyjarjarantlr4.v4.runtime.dfa.AcceptStateInfoc61cc7b31df37c84
groovyjarjarantlr4.v4.runtime.dfa.ArrayEdgeMap62ab55637575c99d
groovyjarjarantlr4.v4.runtime.dfa.DFA9dc078051b5ba717
groovyjarjarantlr4.v4.runtime.dfa.DFAState7d0e2365804c1c34
groovyjarjarantlr4.v4.runtime.dfa.DFAState.PredPredictioncbae850a9fdef85b
groovyjarjarantlr4.v4.runtime.dfa.EmptyEdgeMap5b1b544f6070a755
groovyjarjarantlr4.v4.runtime.dfa.HashEdgeMapca3332812dffb60b
groovyjarjarantlr4.v4.runtime.dfa.SingletonEdgeMapc6ad6525ce9578ae
groovyjarjarantlr4.v4.runtime.misc.AbstractEqualityComparator4a97468a24ed89ff
groovyjarjarantlr4.v4.runtime.misc.Args419a5acf4df166f4
groovyjarjarantlr4.v4.runtime.misc.FlexibleHashMap5aef46811a6f48a2
groovyjarjarantlr4.v4.runtime.misc.FlexibleHashMap.Entry2ecb03f37b82403f
groovyjarjarantlr4.v4.runtime.misc.IntegerList13343407b6b10b13
groovyjarjarantlr4.v4.runtime.misc.IntegerStacka7cc1c87effa6485
groovyjarjarantlr4.v4.runtime.misc.Intervalaa18895a3cabff43
groovyjarjarantlr4.v4.runtime.misc.IntervalSet41166def46f2b743
groovyjarjarantlr4.v4.runtime.misc.MurmurHash8bc27981e0f5aa5c
groovyjarjarantlr4.v4.runtime.misc.ObjectEqualityComparatorab9196f89408e802
groovyjarjarantlr4.v4.runtime.misc.ParseCancellationException0a108dc97f2457e4
groovyjarjarantlr4.v4.runtime.misc.Tuplef93ff8de3878fbb1
groovyjarjarantlr4.v4.runtime.misc.Tuple2701265f5a5da7f12
groovyjarjarantlr4.v4.runtime.misc.Tuple365c4a3c1cf984de4
groovyjarjarantlr4.v4.runtime.misc.Utilsb836354f2ee78d46
groovyjarjarantlr4.v4.runtime.tree.AbstractParseTreeVisitorab780e1e2dc99d22
groovyjarjarantlr4.v4.runtime.tree.TerminalNodeImplfd069122800fd65c
groovyjarjarasm.asm.AnnotationVisitora582dae304852a3a
groovyjarjarasm.asm.AnnotationWriter323e09d81bf9bc81
groovyjarjarasm.asm.Attribute3af882d73d0db3fa
groovyjarjarasm.asm.ByteVectorfd975b2b6c7d0bac
groovyjarjarasm.asm.ClassReader51e3b5652da25ae4
groovyjarjarasm.asm.ClassVisitor5accbe935d2c660e
groovyjarjarasm.asm.ClassWritere1d3d3c29886c51a
groovyjarjarasm.asm.Contextca2921a150a476cc
groovyjarjarasm.asm.Edge1910e7921a3f18d1
groovyjarjarasm.asm.FieldVisitor7ce8cef784d91f7b
groovyjarjarasm.asm.FieldWriter56805825350ada0e
groovyjarjarasm.asm.Frame5c987a10c09c30bb
groovyjarjarasm.asm.Handle3ae685c077fd3ec5
groovyjarjarasm.asm.Handler3d092ae20d0ebd09
groovyjarjarasm.asm.Label5d9349d89d0eb389
groovyjarjarasm.asm.MethodVisitoreb133664c71714dd
groovyjarjarasm.asm.MethodWriter23eb407d79137531
groovyjarjarasm.asm.Symboled5e1361384c7418
groovyjarjarasm.asm.SymbolTableed3b5c7a288e9d12
groovyjarjarasm.asm.SymbolTable.Entryc7bf10b4f194a15f
groovyjarjarasm.asm.Type34b8380ea8561271
groovyjarjarasm.asm.TypeReference824b075d190f107d
groovyjarjarasm.asm.signature.SignatureReader416186917e6bf134
groovyjarjarasm.asm.signature.SignatureVisitor199a8179853bfb46
it.Calculus_Stress_Test75944f2cfad03bad
it.Calculus_Stress_Test.__spock_feature_0_0_closure2ff5c48300e1f81e0
it.Calculus_Stress_Test.__spock_feature_0_0_closure3f82a3f5722f36b1e
it.Calculus_Stress_Test.__spock_feature_0_0_closure48788f10a3da24e04
it.Calculus_Stress_Test.__spock_feature_0_0_closure5de210e63e709f11c
it.Calculus_Stress_Test.__spock_feature_0_0_closure6c509d7bbc57997b1
it.Calculus_Stress_Test.__spock_feature_0_2_closure7126f42674e4cb540
it.Calculus_Stress_Test.__spock_feature_0_2_closure82bab38fb6b1cb1c8
it.Calculus_Stress_Test.__spock_feature_0_2_closure9a06e7bd84c4ad281
it.Calculus_Stress_Test.__spock_feature_0_3_closure10fc730584a08a18e9
it.Calculus_Stress_Test.__spock_feature_0_3_closure118c718403dae74371
it.Calculus_Stress_Test._setup_closure128ee6447ca99be00
it.Cross_Device_Sliced_Tensor_System_Test0a285da5bb234dc2
it.Cross_Device_Sliced_Tensor_System_Test.__spock_feature_0_0_closure2cc1cbe9f94f9cad1
it.Cross_Device_Sliced_Tensor_System_Test.__spock_feature_0_0_closure315a8be2dfafaa87e
it.Cross_Device_Sliced_Tensor_System_Test.__spock_feature_0_1_closure4d0179ff10dceca30
it.Cross_Device_Sliced_Tensor_System_Test.__spock_feature_0_1_closure5216f6a437d41ed9f
it.Cross_Device_Sliced_Tensor_System_Test._setup_closure1bc4bf65304577e3b
it.Cross_Device_Spec70d5f697817cad13
it.Cross_Device_Spec.__spock_feature_0_0_closure4388c1e053eb35b8f
it.Cross_Device_Spec.__spock_feature_0_0_closure57c7a7446debc1a5a
it.Cross_Device_Spec.__spock_feature_0_1_closure63fff9777f2d41131
it.Cross_Device_Spec.__spock_feature_0_1_closure7f731019ed4ad0970
it.Cross_Device_Spec.__spock_feature_0_1_closure8da54cdd0d51c1066
it.Cross_Device_Spec.__spock_feature_0_2_closure106573b71b2dea7120
it.Cross_Device_Spec.__spock_feature_0_2_closure1156795c21227d4eb2
it.Cross_Device_Spec.__spock_feature_0_2_closure9b3fdb8f02658bc5e
it.Cross_Device_Spec.__spock_feature_0_3_closure12d88494cf2d1b4af3
it.Cross_Device_Spec.__spock_feature_0_4_closure13773c5b06a7bf00b8
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure1498336b64be9c686d
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure1597b4bdf9bc33e39f
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure16873cc65ebbc37f89
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure1788bb10c3b96cf47b
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure189cc9e84e75305ed1
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure19934e3ed3779fd523
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure206d620fb073b0ee5c
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure2162e5d92d711f65ae
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure22ed0014511a1976d0
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure23e287c2cc18b6fd22
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure244e6e6278ed51849f
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure2537777292637ef115
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure26e936092b511026db
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure27494989a37e6951c9
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure281b35ac3d6f2f00a5
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure2968e6cc9172e6d557
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure3010348195ee5a5f80
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure310830e849ed270c2c
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure322648880bd35f2ba2
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure33e0daa920a9962853
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure3421d4111e72f80014
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure35a7207db71a30efed
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure36add069f4717c2fcc
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure372ff5374863d61e0d
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure3875f626311d9a89c5
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure39e1bf93e850ef4f32
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure402099547419cfe7ac
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure41ab70c5b3969854c6
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure4297ab63f112faefa3
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure4320848f4cee2fa526
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure44b2f144e9d59bfb5b
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure45729f4b22f13fd1a1
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure4623c2bdcc4fcfc573
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure47876c1ab21f4046e7
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure48a916ae5384003a76
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure49038e1b4e4bb8d645
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure502ce230c96a89b322
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure5183739ea11d6fed3b
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure52d3c4684fa315f9e9
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure53eb787569addafede
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure5461ad2d3ca7eca187
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure55c0ccaa68262aecff
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure568c315520d07c2d00
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure57878077194947cfe4
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure58d5fc528758019e88
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure599f5d146b37b2c8bd
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure601c44698984a08c48
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure617df42ef9574e2c37
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure625a3ea4dcf2f812f3
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure634c968b3df6cf3c60
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure648c7233032d2b1427
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure65d2e4bb07a059cff6
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure66a0bf81e81b49fc04
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure675ac4f1f8d9fa3e16
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure6803b30ae63b9eb094
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure694d06b1f50ff35b01
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure702ab11380fe846dbd
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure71e9df21dc2d3c32cf
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure72fdc462051913271b
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure7360e06b235450c32f
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure74f187f95b1a629594
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure7579a80cd616bb5bb0
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure76291ffa38a8c14f62
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure77d150feddabe720ee
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure78844c5aa7bc6cb7ce
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure7906e10a21c3dcf54c
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure80506e2a6ce2ed055b
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure8151c93b21d1799471
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure82017ecdcf6f0380a3
it.Cross_Device_Spec.__spock_feature_0_4prov3_closure8303a13ff2d9d66c02
it.Cross_Device_Spec._cleanup_closure3fff19871b7107f3e
it.Cross_Device_Spec._setup_closure1cc646cb69d21f74e
it.Cross_Device_Spec._setup_closure20a3f970f4b3cad9f
it.Eleven_Lines_NN_System_Specea1b5b1046d08426
it.Eleven_Lines_NN_System_Spec.__spock_feature_0_0_closure10fdab2b303cc5c3f
it.Eleven_Lines_NN_System_Spec.__spock_feature_0_0_closure24cf3c30f32fa8f40
it.Eleven_Lines_NN_System_Spec.__spock_feature_0_0_closure3fcafd3bf5b9dccc2
it.Eleven_Lines_NN_System_Spec.__spock_feature_0_0_closure48c24a0ae4566064e
it.Eleven_Lines_NN_System_Spec.__spock_feature_0_0_closure53c78b01e2c0145cc
it.Eleven_Lines_NN_System_Spec.__spock_feature_0_1_closure101f527b447b245366
it.Eleven_Lines_NN_System_Spec.__spock_feature_0_1_closure6ce822871b3867b47
it.Eleven_Lines_NN_System_Spec.__spock_feature_0_1_closure70b2da931f9577705
it.Eleven_Lines_NN_System_Spec.__spock_feature_0_1_closure858595fa3ac59a19f
it.Eleven_Lines_NN_System_Spec.__spock_feature_0_1_closure9e2364f13c29de21d
it.Eleven_Lines_NN_System_Spec.__spock_feature_0_3_closure115362763704c4d7fb
it.Eleven_Lines_NN_System_Spec.__spock_feature_0_3_closure129ba40a5930b0d58a
it.Eleven_Lines_NN_System_Spec.__spock_feature_0_3_closure134d06de7cdc53d45a
it.Eleven_Lines_NN_System_Spec.__spock_feature_0_3_closure14bb37f2855897d169
kotlin.Pair5b4f95023bc13c05
kotlin._Assertionscff89fc2993d77e6
kotlin.annotation.AnnotationRetention2f2ef082b9689ffd
kotlin.annotation.AnnotationTarget23534f0c0bd8989d
kotlin.collections.ArraysKt___ArraysJvmKtfc390683cbc682c1
kotlin.collections.ArraysKt___ArraysKt0fe701f45c2fda11
kotlin.collections.ArraysUtilJVM9595b65dba34e5b6
kotlin.collections.CollectionsKt__CollectionsJVMKt92b364bdb416ee87
kotlin.collections.CollectionsKt__CollectionsKt4128b8589ce72906
kotlin.collections.CollectionsKt___CollectionsKt222e0dde508d1b78
kotlin.collections.IntIterator81c24fa1364a7e7f
kotlin.internal.ProgressionUtilKt96769c530ca380cc
kotlin.jvm.internal.Intrinsics842f06f411842f7e
kotlin.jvm.internal.Lambdaf48900b9d2030321
kotlin.ranges.IntProgressioncb3448552a98853d
kotlin.ranges.IntProgression.Companion5af88eef648b571d
kotlin.ranges.IntProgressionIteratoref6fed00ed161ee9
kotlin.ranges.IntRange40ee14db18102825
kotlin.ranges.IntRange.Companionb24684c047c46d7a
kotlin.text.CharsKt__CharJVMKt455724c549630747
kotlin.text.StringsKt__StringsKtfee36f2bad010223
net.bytebuddy.ByteBuddy33fbc0829b8e2652
net.bytebuddy.ClassFileVersion041e75a4a43bf8ae
net.bytebuddy.ClassFileVersion.VersionLocator.Resolved5a5903eaf399d371
net.bytebuddy.ClassFileVersion.VersionLocator.Resolverffb81456e25e396b
net.bytebuddy.NamingStrategy.AbstractBase77e9d686c976f6e6
net.bytebuddy.NamingStrategy.Suffixing65bfa03c85847dc9
net.bytebuddy.NamingStrategy.Suffixing.BaseNameResolver.ForUnnamedType1fb9c5c929a4a173
net.bytebuddy.NamingStrategy.SuffixingRandomcdbdedcf0cea0a02
net.bytebuddy.TypeCached02df3631a17fa08
net.bytebuddy.TypeCache.LookupKeyb75da15a4577d948
net.bytebuddy.TypeCache.SimpleKey99731a44c3f39c30
net.bytebuddy.TypeCache.Sort3f135d4f310abf3c
net.bytebuddy.TypeCache.Sort.13be4336e35a8cbfd
net.bytebuddy.TypeCache.Sort.25a2bb9e71930a24a
net.bytebuddy.TypeCache.Sort.35792db85826ac4ba
net.bytebuddy.TypeCache.StorageKeyda984e48de27d4a8
net.bytebuddy.TypeCache.WithInlineExpunction5c74d69cd94d649e
net.bytebuddy.asm.AsmVisitorWrapper.NoOpa613c160b15bbc65
net.bytebuddy.description.ByteCodeElement.Token.TokenList1070489264457774
net.bytebuddy.description.ModifierReviewable.AbstractBase0b625f401d945e23
net.bytebuddy.description.NamedElement.WithDescriptor69f25e85d31086f5
net.bytebuddy.description.TypeVariableSource.AbstractBase86aee374842b91be
net.bytebuddy.description.annotation.AnnotationDescription7e080fcc4ab41eb1
net.bytebuddy.description.annotation.AnnotationDescription.AbstractBase55a8b2f7b58a15aa
net.bytebuddy.description.annotation.AnnotationDescription.ForLoadedAnnotationa2b247526c4d26ca
net.bytebuddy.description.annotation.AnnotationList.AbstractBasec3dca45e359b717d
net.bytebuddy.description.annotation.AnnotationList.Empty10e1e01ec4afb6b0
net.bytebuddy.description.annotation.AnnotationList.Explicitb96636e855735fc3
net.bytebuddy.description.annotation.AnnotationList.ForLoadedAnnotationsa6be8b00fa72ab7a
net.bytebuddy.description.annotation.AnnotationSource.Empty034fcbd435657d97
net.bytebuddy.description.annotation.AnnotationValuee46e60f3e4357d8a
net.bytebuddy.description.annotation.AnnotationValue.AbstractBase6b46c288929d794a
net.bytebuddy.description.annotation.AnnotationValue.ForConstant650f7b88da7502df
net.bytebuddy.description.annotation.AnnotationValue.ForConstant.PropertyDelegate.ForNonArrayType8683233734d98d81
net.bytebuddy.description.annotation.AnnotationValue.ForConstant.PropertyDelegate.ForNonArrayType.1ecf694f5c718a013
net.bytebuddy.description.annotation.AnnotationValue.ForConstant.PropertyDelegate.ForNonArrayType.2113fe247f14fdcdd
net.bytebuddy.description.annotation.AnnotationValue.ForConstant.PropertyDelegate.ForNonArrayType.3ad40ce4c8d647d57
net.bytebuddy.description.annotation.AnnotationValue.ForConstant.PropertyDelegate.ForNonArrayType.4649136274570c878
net.bytebuddy.description.annotation.AnnotationValue.ForConstant.PropertyDelegate.ForNonArrayType.525519a3723562b18
net.bytebuddy.description.annotation.AnnotationValue.ForConstant.PropertyDelegate.ForNonArrayType.6d0a4ee1eb78e8925
net.bytebuddy.description.annotation.AnnotationValue.ForConstant.PropertyDelegate.ForNonArrayType.75cc6d38c7688ce9e
net.bytebuddy.description.annotation.AnnotationValue.ForConstant.PropertyDelegate.ForNonArrayType.8542fa217a5fe4c51
net.bytebuddy.description.annotation.AnnotationValue.ForConstant.PropertyDelegate.ForNonArrayType.99adc51229ebb26c9
net.bytebuddy.description.annotation.AnnotationValue.ForEnumerationDescription451401174e8ca82f
net.bytebuddy.description.annotation.AnnotationValue.ForTypeDescription256f9475d7baab5e
net.bytebuddy.description.enumeration.EnumerationDescription.AbstractBase36efae2fe3237ba9
net.bytebuddy.description.enumeration.EnumerationDescription.ForLoadedEnumeration5b47cbeca30adac0
net.bytebuddy.description.field.FieldDescription68bfcf27b64f643e
net.bytebuddy.description.field.FieldDescription.AbstractBase8e18b7d4e1ceddcb
net.bytebuddy.description.field.FieldDescription.InDefinedShape.AbstractBasee1174a0c69da5a57
net.bytebuddy.description.field.FieldDescription.Latentf267c31e54d89fa1
net.bytebuddy.description.field.FieldDescription.SignatureToken3fabeebea84ce146
net.bytebuddy.description.field.FieldDescription.Token3f20efc75bd15e42
net.bytebuddy.description.field.FieldList.AbstractBase78739d279005d8a4
net.bytebuddy.description.field.FieldList.ForTokensea98dba6ef4eb758
net.bytebuddy.description.method.MethodDescriptioncb9472a3dd295bbd
net.bytebuddy.description.method.MethodDescription.AbstractBase909086af904cf59b
net.bytebuddy.description.method.MethodDescription.ForLoadedConstructore3c79dd807083c08
net.bytebuddy.description.method.MethodDescription.ForLoadedMethodd9fe344c56539dc6
net.bytebuddy.description.method.MethodDescription.InDefinedShape.AbstractBase673ca3d2d56a4b0a
net.bytebuddy.description.method.MethodDescription.InDefinedShape.AbstractBase.ForLoadedExecutabledb01999a48adc399
net.bytebuddy.description.method.MethodDescription.Latent20e100c8a3802774
net.bytebuddy.description.method.MethodDescription.Latent.TypeInitializer87bee94b36e1d209
net.bytebuddy.description.method.MethodDescription.SignatureToken5888f2557f6a88e0
net.bytebuddy.description.method.MethodDescription.Tokenb268931f291edf88
net.bytebuddy.description.method.MethodDescription.TypeSubstituting8dc21d2e259d2c0f
net.bytebuddy.description.method.MethodDescription.TypeTokenf7f14b8ac76ebd98
net.bytebuddy.description.method.MethodList.AbstractBaseb054427f9b6a48f1
net.bytebuddy.description.method.MethodList.Explicitb03ab4c21a93dfd0
net.bytebuddy.description.method.MethodList.ForLoadedMethods38bd1bf17eb05676
net.bytebuddy.description.method.MethodList.ForTokens40aa960dc7616ac5
net.bytebuddy.description.method.MethodList.TypeSubstitutingf1f510557a04392e
net.bytebuddy.description.method.ParameterDescription.AbstractBase173e1a83772e6071
net.bytebuddy.description.method.ParameterDescription.ForLoadedParameter8dd9bfdcb695c00c
net.bytebuddy.description.method.ParameterDescription.ForLoadedParameter.OfConstructora18e1a81fc7465d0
net.bytebuddy.description.method.ParameterDescription.ForLoadedParameter.OfMethod811597af8855d53c
net.bytebuddy.description.method.ParameterDescription.InDefinedShape.AbstractBase717f5d8d90c005f1
net.bytebuddy.description.method.ParameterDescription.Latent1aa2e08f2ad0d5c2
net.bytebuddy.description.method.ParameterDescription.Token36549650fa40d54b
net.bytebuddy.description.method.ParameterDescription.Token.TypeList1890975119bdb094
net.bytebuddy.description.method.ParameterDescription.TypeSubstituting6cc95e3ea064743d
net.bytebuddy.description.method.ParameterList.AbstractBase6fe6f7a3a2c191ea
net.bytebuddy.description.method.ParameterList.Empty8f4a45d2f54ed28b
net.bytebuddy.description.method.ParameterList.Explicit.ForTypes75d84e0b4fcd99a9
net.bytebuddy.description.method.ParameterList.ForLoadedExecutable1456c072c3be7105
net.bytebuddy.description.method.ParameterList.ForLoadedExecutable.OfConstructor6d7eaa8911075319
net.bytebuddy.description.method.ParameterList.ForLoadedExecutable.OfMethodf0835708e2d15fb4
net.bytebuddy.description.method.ParameterList.ForTokensb77d0ee711552f0c
net.bytebuddy.description.method.ParameterList.TypeSubstituting293f1f350b97c439
net.bytebuddy.description.modifier.ModifierContributor.Resolver4c37457cc5fe415c
net.bytebuddy.description.modifier.SynchronizationState1ee1e76d573ad75b
net.bytebuddy.description.modifier.SyntheticState0ea0b3d14a159257
net.bytebuddy.description.modifier.TypeManifestation823497b74af56cf0
net.bytebuddy.description.modifier.Visibilityeddec8671a9488f2
net.bytebuddy.description.modifier.Visibility.1d7e383ada6123e01
net.bytebuddy.description.type.PackageDescription.AbstractBase21c62ace537a731c
net.bytebuddy.description.type.PackageDescription.ForLoadedPackagee09e684292c5d837
net.bytebuddy.description.type.PackageDescription.Simple69eda86ed42d2bb9
net.bytebuddy.description.type.RecordComponentList.AbstractBasefa2d664156de0c87
net.bytebuddy.description.type.RecordComponentList.ForTokensb72447d1fcbe18bd
net.bytebuddy.description.type.TypeDefinition.Sorte252ac8a021f4082
net.bytebuddy.description.type.TypeDefinition.SuperClassIteratordcc41092c6176f54
net.bytebuddy.description.type.TypeDescription556ed0842dcd3465
net.bytebuddy.description.type.TypeDescription.AbstractBase4c6c19763839e81a
net.bytebuddy.description.type.TypeDescription.AbstractBase.OfSimpleType69db0326a7bad734
net.bytebuddy.description.type.TypeDescription.ArrayProjection6b7a5c44df03385e
net.bytebuddy.description.type.TypeDescription.ForLoadedType1d01f9c5e8968a62
net.bytebuddy.description.type.TypeDescription.Generic56c70a1f7051116d
net.bytebuddy.description.type.TypeDescription.Generic.AbstractBasefd0c51f51c279532
net.bytebuddy.description.type.TypeDescription.Generic.AnnotationReader.Delegatorde36d9ad20e2b0f0
net.bytebuddy.description.type.TypeDescription.Generic.AnnotationReader.Delegator.Chained7558098cf910a484
net.bytebuddy.description.type.TypeDescription.Generic.AnnotationReader.Delegator.ForLoadedExecutableExceptionTyped5fff8bf03378188
net.bytebuddy.description.type.TypeDescription.Generic.AnnotationReader.Delegator.ForLoadedExecutableParameterType4996d253c41df4e9
net.bytebuddy.description.type.TypeDescription.Generic.AnnotationReader.Delegator.ForLoadedInterface4bff1f5d8d784c94
net.bytebuddy.description.type.TypeDescription.Generic.AnnotationReader.Delegator.ForLoadedMethodReturnType01b498011a8b99b9
net.bytebuddy.description.type.TypeDescription.Generic.AnnotationReader.Delegator.ForLoadedSuperClass437ce60855df41c8
net.bytebuddy.description.type.TypeDescription.Generic.AnnotationReader.Delegator.ForLoadedTypeVariable9eb4905762cd4d43
net.bytebuddy.description.type.TypeDescription.Generic.AnnotationReader.Delegator.Simple64d7d8c1c95ccbbb
net.bytebuddy.description.type.TypeDescription.Generic.AnnotationReader.ForComponentType8fcd1b7048059fc4
net.bytebuddy.description.type.TypeDescription.Generic.AnnotationReader.ForOwnerTypee1af0c43565cac6a
net.bytebuddy.description.type.TypeDescription.Generic.AnnotationReader.ForTypeArgument6058e72d25275a6e
net.bytebuddy.description.type.TypeDescription.Generic.AnnotationReader.ForTypeVariableBoundType7abf9451d0a396a7
net.bytebuddy.description.type.TypeDescription.Generic.AnnotationReader.ForTypeVariableBoundType.OfFormalTypeVariablea3a21029de090e64
net.bytebuddy.description.type.TypeDescription.Generic.AnnotationReader.ForWildcardUpperBoundType54ede85d3cdd8d27
net.bytebuddy.description.type.TypeDescription.Generic.AnnotationReader.NoOp1e053fccf208e232
net.bytebuddy.description.type.TypeDescription.Generic.LazyProjection9e800f4de9f8e67a
net.bytebuddy.description.type.TypeDescription.Generic.LazyProjection.ForLoadedReturnTyped1cbaafc701825f7
net.bytebuddy.description.type.TypeDescription.Generic.LazyProjection.ForLoadedSuperClasse8bb65841de31236
net.bytebuddy.description.type.TypeDescription.Generic.LazyProjection.OfConstructorParameter5458ef91ab867211
net.bytebuddy.description.type.TypeDescription.Generic.LazyProjection.OfMethodParameter166cd6a7c9bbe561
net.bytebuddy.description.type.TypeDescription.Generic.LazyProjection.WithEagerNavigationf8c6ed45e722570c
net.bytebuddy.description.type.TypeDescription.Generic.LazyProjection.WithEagerNavigation.OfAnnotatedElementf66f265575aff87f
net.bytebuddy.description.type.TypeDescription.Generic.LazyProjection.WithLazyNavigationce47793fa872e3e3
net.bytebuddy.description.type.TypeDescription.Generic.LazyProjection.WithLazyNavigation.OfAnnotatedElement3539fe966936ddc7
net.bytebuddy.description.type.TypeDescription.Generic.LazyProjection.WithResolvedErasure8c44b0d0f77a173e
net.bytebuddy.description.type.TypeDescription.Generic.OfGenericArray8c17323c9acd37d3
net.bytebuddy.description.type.TypeDescription.Generic.OfGenericArray.ForLoadedType062f3d6b0874a5c9
net.bytebuddy.description.type.TypeDescription.Generic.OfGenericArray.Latentfbae4dabdc6cfa54
net.bytebuddy.description.type.TypeDescription.Generic.OfNonGenericTypea4504dc6edd68cd6
net.bytebuddy.description.type.TypeDescription.Generic.OfNonGenericType.ForErasure3db1821ca68ddf5b
net.bytebuddy.description.type.TypeDescription.Generic.OfNonGenericType.ForLoadedType986e223380b52659
net.bytebuddy.description.type.TypeDescription.Generic.OfNonGenericType.ForReifiedErasure697dfa2b063e4765
net.bytebuddy.description.type.TypeDescription.Generic.OfNonGenericType.Latent37ef0e5fa19941b1
net.bytebuddy.description.type.TypeDescription.Generic.OfParameterizedTypef7f22a9eb23d0358
net.bytebuddy.description.type.TypeDescription.Generic.OfParameterizedType.ForGenerifiedErasuree0b46fdf5031ad5f
net.bytebuddy.description.type.TypeDescription.Generic.OfParameterizedType.ForLoadedType23a5bcdf87d9f76a
net.bytebuddy.description.type.TypeDescription.Generic.OfParameterizedType.ForLoadedType.ParameterArgumentTypeLista0644f6a2bfa2302
net.bytebuddy.description.type.TypeDescription.Generic.OfParameterizedType.ForReifiedTypedb27116a99bedbbc
net.bytebuddy.description.type.TypeDescription.Generic.OfParameterizedType.Latent16bfeb4720d944cd
net.bytebuddy.description.type.TypeDescription.Generic.OfTypeVariablea5c2d5f7fa973e0e
net.bytebuddy.description.type.TypeDescription.Generic.OfTypeVariable.ForLoadedTypecc1d9926ab18067d
net.bytebuddy.description.type.TypeDescription.Generic.OfTypeVariable.ForLoadedType.TypeVariableBoundListc5b1a0f68bc48e88
net.bytebuddy.description.type.TypeDescription.Generic.OfTypeVariable.Symbolic0bd5958a8dbdf6f1
net.bytebuddy.description.type.TypeDescription.Generic.OfTypeVariable.WithAnnotationOverlay4f1a3b7626a1e4a3
net.bytebuddy.description.type.TypeDescription.Generic.OfWildcardTypea01bd5a6a70766e4
net.bytebuddy.description.type.TypeDescription.Generic.OfWildcardType.ForLoadedType1112963cd5495a63
net.bytebuddy.description.type.TypeDescription.Generic.OfWildcardType.ForLoadedType.WildcardLowerBoundTypeListbf9a39c834687055
net.bytebuddy.description.type.TypeDescription.Generic.OfWildcardType.ForLoadedType.WildcardUpperBoundTypeListb8d9619983ce411c
net.bytebuddy.description.type.TypeDescription.Generic.OfWildcardType.Latentac2dd5836f375954
net.bytebuddy.description.type.TypeDescription.Generic.Visitor.ForRawType81cdbc3e9e323aa7
net.bytebuddy.description.type.TypeDescription.Generic.Visitor.ForSignatureVisitor87943f76596da78f
net.bytebuddy.description.type.TypeDescription.Generic.Visitor.ForSignatureVisitor.OfTypeArgumentcf8c08548af1937e
net.bytebuddy.description.type.TypeDescription.Generic.Visitor.Reducing6ba7675a4c6f9e76
net.bytebuddy.description.type.TypeDescription.Generic.Visitor.Reifying42a0cb6efaa79250
net.bytebuddy.description.type.TypeDescription.Generic.Visitor.Reifying.14d33792e587387fc
net.bytebuddy.description.type.TypeDescription.Generic.Visitor.Reifying.2b6f2d04f33382efe
net.bytebuddy.description.type.TypeDescription.Generic.Visitor.Substitutorc89ede7a99bb5588
net.bytebuddy.description.type.TypeDescription.Generic.Visitor.Substitutor.ForAttachmentb59a8a86fd7eb87c
net.bytebuddy.description.type.TypeDescription.Generic.Visitor.Substitutor.ForDetachmenteed7781949d0b181
net.bytebuddy.description.type.TypeDescription.Generic.Visitor.Substitutor.ForTypeVariableBinding847d85e296b6129b
net.bytebuddy.description.type.TypeDescription.Generic.Visitor.Substitutor.ForTypeVariableBinding.RetainedMethodTypeVariable79fdd4bceff9a4bb
net.bytebuddy.description.type.TypeDescription.Generic.Visitor.Substitutor.ForTypeVariableBinding.TypeVariableSubstitutorf0ccee83b552d2da
net.bytebuddy.description.type.TypeDescription.Generic.Visitor.Substitutor.WithoutTypeSubstitution7cf629e1c38332c8
net.bytebuddy.description.type.TypeDescription.Generic.Visitor.TypeErasingf24b138643bc0d75
net.bytebuddy.description.type.TypeDescription.Latentf096a156f0f28049
net.bytebuddy.description.type.TypeListda60a7cfb717d0a8
net.bytebuddy.description.type.TypeList.AbstractBase4700315364477234
net.bytebuddy.description.type.TypeList.Empty59d00ad7b53c811a
net.bytebuddy.description.type.TypeList.Explicit81495dfc3a359dfe
net.bytebuddy.description.type.TypeList.ForLoadedTypes4356a7471aec6f20
net.bytebuddy.description.type.TypeList.Generic.AbstractBase5376e1d2298a6512
net.bytebuddy.description.type.TypeList.Generic.Emptydf9431d33e66dbb4
net.bytebuddy.description.type.TypeList.Generic.Explicit1ab8c93e54ee2ac6
net.bytebuddy.description.type.TypeList.Generic.ForDetachedTypes1b6544725fdb45a6
net.bytebuddy.description.type.TypeList.Generic.ForDetachedTypes.OfTypeVariables05b85732c40f12b7
net.bytebuddy.description.type.TypeList.Generic.ForDetachedTypes.OfTypeVariables.AttachedTypeVariable8133514c5d90955c
net.bytebuddy.description.type.TypeList.Generic.ForDetachedTypes.WithResolvedErasure3ae7efc80de7c3db
net.bytebuddy.description.type.TypeList.Generic.ForLoadedTypesc603bfa8790b860c
net.bytebuddy.description.type.TypeList.Generic.ForLoadedTypes.OfTypeVariablesd713fc161a8b3c83
net.bytebuddy.description.type.TypeList.Generic.OfConstructorExceptionTypes41a985dd07ed867c
net.bytebuddy.description.type.TypeList.Generic.OfConstructorExceptionTypes.TypeProjectiona9a42d16f46764ff
net.bytebuddy.description.type.TypeList.Generic.OfLoadedInterfaceTypes99d4f3faf0ed1337
net.bytebuddy.description.type.TypeList.Generic.OfLoadedInterfaceTypes.TypeProjection7f6f3c7654719119
net.bytebuddy.description.type.TypeList.Generic.OfMethodExceptionTypes74966b175ac75ab9
net.bytebuddy.description.type.TypeList.Generic.OfMethodExceptionTypes.TypeProjection2d651d381fd3d0a8
net.bytebuddy.description.type.TypeVariableToken0b904605bce2d673
net.bytebuddy.dynamic.DynamicType.Builder.AbstractBase75270e145cd63906
net.bytebuddy.dynamic.DynamicType.Builder.AbstractBase.Adapter07bd203beeec5267
net.bytebuddy.dynamic.DynamicType.Builder.AbstractBase.Adapter.FieldDefinitionAdapter9f5304401d876ae6
net.bytebuddy.dynamic.DynamicType.Builder.AbstractBase.Adapter.MethodDefinitionAdaptere3a71944fa583d5d
net.bytebuddy.dynamic.DynamicType.Builder.AbstractBase.Adapter.MethodDefinitionAdapter.AnnotationAdaptere7b8fe4351f0538b
net.bytebuddy.dynamic.DynamicType.Builder.AbstractBase.Adapter.MethodDefinitionAdapter.SimpleParameterAnnotationAdapterceb79ec5f6c81c06
net.bytebuddy.dynamic.DynamicType.Builder.AbstractBase.Adapter.MethodMatchAdapterb8cc50e40a151a71
net.bytebuddy.dynamic.DynamicType.Builder.AbstractBase.Adapter.MethodMatchAdapter.AnnotationAdapter94485cf54fc4bcd7
net.bytebuddy.dynamic.DynamicType.Builder.AbstractBase.Adapter.OptionalMethodMatchAdaptere422d5f0c3871df1
net.bytebuddy.dynamic.DynamicType.Builder.AbstractBase.Delegatora05019b4f9f0052c
net.bytebuddy.dynamic.DynamicType.Builder.FieldDefinition.Optional.AbstractBasea32270443a37f43d
net.bytebuddy.dynamic.DynamicType.Builder.FieldDefinition.Optional.Valuable.AbstractBase8d99c467e26879f0
net.bytebuddy.dynamic.DynamicType.Builder.FieldDefinition.Optional.Valuable.AbstractBase.Adapterd9f7441f5ac2676d
net.bytebuddy.dynamic.DynamicType.Builder.MethodDefinition.AbstractBasecccc417ee29a5050
net.bytebuddy.dynamic.DynamicType.Builder.MethodDefinition.AbstractBase.Adaptere54b40849750fde9
net.bytebuddy.dynamic.DynamicType.Builder.MethodDefinition.ExceptionDefinition.AbstractBased5887f98f1e6e4b8
net.bytebuddy.dynamic.DynamicType.Builder.MethodDefinition.ImplementationDefinition.AbstractBase8dc07cfd239f0cc9
net.bytebuddy.dynamic.DynamicType.Builder.MethodDefinition.ParameterDefinition.AbstractBaseb54aa1953aca6e53
net.bytebuddy.dynamic.DynamicType.Builder.MethodDefinition.ParameterDefinition.Initial.AbstractBaseadcc1447655c94c5
net.bytebuddy.dynamic.DynamicType.Builder.MethodDefinition.ParameterDefinition.Simple.AbstractBasece697a0a4ca85f68
net.bytebuddy.dynamic.DynamicType.Builder.MethodDefinition.ParameterDefinition.Simple.Annotatable.AbstractBase5cb69ac9bda4bbd9
net.bytebuddy.dynamic.DynamicType.Builder.MethodDefinition.ParameterDefinition.Simple.Annotatable.AbstractBase.Adapterc234394f562d67d3
net.bytebuddy.dynamic.DynamicType.Builder.MethodDefinition.ReceiverTypeDefinition.AbstractBasef3a488041b9994c6
net.bytebuddy.dynamic.DynamicType.Builder.MethodDefinition.TypeVariableDefinition.AbstractBase1e9f7d7e33a9b032
net.bytebuddy.dynamic.DynamicType.Default1735fd7e9316f797
net.bytebuddy.dynamic.DynamicType.Default.Loaded3ce15da51999a8ca
net.bytebuddy.dynamic.DynamicType.Default.Unloaded7dc216f75a73e9b5
net.bytebuddy.dynamic.TargetType26c139b5f2f58862
net.bytebuddy.dynamic.Transformer.Compounda5a52522b43091ef
net.bytebuddy.dynamic.Transformer.ForMethod22ab387d59f6c970
net.bytebuddy.dynamic.Transformer.ForMethod.MethodModifierTransformer829c18ff395159ba
net.bytebuddy.dynamic.Transformer.ForMethod.TransformedMethod083bfd5734c4504d
net.bytebuddy.dynamic.Transformer.ForMethod.TransformedMethod.AttachmentVisitor43014c50e1310fbf
net.bytebuddy.dynamic.Transformer.ForMethod.TransformedMethod.TransformedParameter84642c4a6f0d1bdc
net.bytebuddy.dynamic.Transformer.ForMethod.TransformedMethod.TransformedParameterList54d561afbee57f99
net.bytebuddy.dynamic.Transformer.NoOp49cd89a2b3b975a3
net.bytebuddy.dynamic.TypeResolutionStrategy.Passived5784ee7fb36ce53
net.bytebuddy.dynamic.VisibilityBridgeStrategy.Defaultae8d9f7fd85c6aad
net.bytebuddy.dynamic.VisibilityBridgeStrategy.Default.163c0d42260c7599e
net.bytebuddy.dynamic.VisibilityBridgeStrategy.Default.2a8389e9d32c4ecd7
net.bytebuddy.dynamic.VisibilityBridgeStrategy.Default.330f7afc5a8be245c
net.bytebuddy.dynamic.loading.ByteArrayClassLoader.PersistenceHandler811732d1db761cc5
net.bytebuddy.dynamic.loading.ByteArrayClassLoader.PersistenceHandler.1c9ee72578a4d55a4
net.bytebuddy.dynamic.loading.ByteArrayClassLoader.PersistenceHandler.2f7eb2a49ccc0c5d4
net.bytebuddy.dynamic.loading.ClassInjector.AbstractBase331215a38873f162
net.bytebuddy.dynamic.loading.ClassInjector.UsingLookup68987d870211e579
net.bytebuddy.dynamic.loading.ClassInjector.UsingReflection9b4c6d016e86d89d
net.bytebuddy.dynamic.loading.ClassInjector.UsingReflection.Dispatcher.CreationActione95efd9bc7c2fbec
net.bytebuddy.dynamic.loading.ClassInjector.UsingReflection.Dispatcher.Direct2a61312aae25f447
net.bytebuddy.dynamic.loading.ClassInjector.UsingReflection.Dispatcher.Direct.ForJava7CapableVm5b1e1d52a58d44e8
net.bytebuddy.dynamic.loading.ClassLoadingStrategy17fb081ccc92f99c
net.bytebuddy.dynamic.loading.ClassLoadingStrategy.Default7390ec8634515594
net.bytebuddy.dynamic.loading.ClassLoadingStrategy.Default.InjectionDispatcher759cb7a298fc98b7
net.bytebuddy.dynamic.loading.ClassLoadingStrategy.Default.WrappingDispatcher88c49bdd78533ba6
net.bytebuddy.dynamic.loading.PackageDefinitionStrategy.Definition.Undefined1b8dafe51f80088c
net.bytebuddy.dynamic.loading.PackageDefinitionStrategy.NoOp31480ec85144aa31
net.bytebuddy.dynamic.loading.PackageDefinitionStrategy.Triviald0ed587787d4d89f
net.bytebuddy.dynamic.scaffold.ClassWriterStrategy.Defaultf0774d4bbe85a809
net.bytebuddy.dynamic.scaffold.ClassWriterStrategy.Default.109a3c2cfe88a5ae4
net.bytebuddy.dynamic.scaffold.ClassWriterStrategy.Default.276afb59bd5abdd5f
net.bytebuddy.dynamic.scaffold.ClassWriterStrategy.FrameComputingClassWriter6dcf362306ddc5d0
net.bytebuddy.dynamic.scaffold.FieldLocator.AbstractBasedb8c5004661a0bd8
net.bytebuddy.dynamic.scaffold.FieldLocator.ForClassHierarchy0e8431af1152b965
net.bytebuddy.dynamic.scaffold.FieldLocator.ForClassHierarchy.Factoryd97235dbbc3871e9
net.bytebuddy.dynamic.scaffold.FieldLocator.Resolution.Simple7e3dca01a01498d1
net.bytebuddy.dynamic.scaffold.FieldRegistry.Defaultcc5265630d0906f2
net.bytebuddy.dynamic.scaffold.FieldRegistry.Default.Compiled00933225bc77b175
net.bytebuddy.dynamic.scaffold.FieldRegistry.Default.Compiled.Entry0ec1361a69a955fd
net.bytebuddy.dynamic.scaffold.FieldRegistry.Default.Entrya7413622fd851aa9
net.bytebuddy.dynamic.scaffold.InstrumentedType.Default23fe149436dbe425
net.bytebuddy.dynamic.scaffold.InstrumentedType.Factory.Defaultcd900ae01efd903f
net.bytebuddy.dynamic.scaffold.InstrumentedType.Factory.Default.1a7ce85bb2f37ff77
net.bytebuddy.dynamic.scaffold.InstrumentedType.Factory.Default.2ad157a47dace4f55
net.bytebuddy.dynamic.scaffold.MethodGraph.Compilerfc88be698cc4a50f
net.bytebuddy.dynamic.scaffold.MethodGraph.Compiler.AbstractBasead55505e167100d9
net.bytebuddy.dynamic.scaffold.MethodGraph.Compiler.Defaulta37bac0e0eceb0c9
net.bytebuddy.dynamic.scaffold.MethodGraph.Compiler.Default.Harmonizer.ForJavaMethod4b92bfc82ab49b25
net.bytebuddy.dynamic.scaffold.MethodGraph.Compiler.Default.Harmonizer.ForJavaMethod.Tokene2da236960e0a189
net.bytebuddy.dynamic.scaffold.MethodGraph.Compiler.Default.Key421619c0f44567f3
net.bytebuddy.dynamic.scaffold.MethodGraph.Compiler.Default.Key.Detached82540bbf94c15922
net.bytebuddy.dynamic.scaffold.MethodGraph.Compiler.Default.Key.Harmonized5d9ad1d55d82a355
net.bytebuddy.dynamic.scaffold.MethodGraph.Compiler.Default.Key.Storef948e4de58324a0f
net.bytebuddy.dynamic.scaffold.MethodGraph.Compiler.Default.Key.Store.Entry.Ambiguous9e2928a385a525ac
net.bytebuddy.dynamic.scaffold.MethodGraph.Compiler.Default.Key.Store.Entry.Initial1fc852958287c36a
net.bytebuddy.dynamic.scaffold.MethodGraph.Compiler.Default.Key.Store.Entry.Resolved6672a261c5f5dd2e
net.bytebuddy.dynamic.scaffold.MethodGraph.Compiler.Default.Key.Store.Entry.Resolved.Node0f0b18948cce4159
net.bytebuddy.dynamic.scaffold.MethodGraph.Compiler.Default.Key.Store.Graphf50e2614e64a132c
net.bytebuddy.dynamic.scaffold.MethodGraph.Compiler.Default.Merger.Directional0ba0f74ab7d66be7
net.bytebuddy.dynamic.scaffold.MethodGraph.Linked.Delegation7341085250d5f338
net.bytebuddy.dynamic.scaffold.MethodGraph.Node.Sort8e20af4bf9dad8a0
net.bytebuddy.dynamic.scaffold.MethodGraph.Node.Unresolvedc42332646fb3e771
net.bytebuddy.dynamic.scaffold.MethodGraph.NodeList3f435ec381113f00
net.bytebuddy.dynamic.scaffold.MethodRegistry.Default35ae92274e85ac88
net.bytebuddy.dynamic.scaffold.MethodRegistry.Default.Compileddd840dc4ea29fc06
net.bytebuddy.dynamic.scaffold.MethodRegistry.Default.Compiled.Entry827864e42dc177c2
net.bytebuddy.dynamic.scaffold.MethodRegistry.Default.Entry66b9b2c39c4a08ee
net.bytebuddy.dynamic.scaffold.MethodRegistry.Default.Prepared3c270a20a21353d7
net.bytebuddy.dynamic.scaffold.MethodRegistry.Default.Prepared.Entrye96586202cb119f0
net.bytebuddy.dynamic.scaffold.MethodRegistry.Handler.ForImplementationea77701fcbc47e2c
net.bytebuddy.dynamic.scaffold.MethodRegistry.Handler.ForImplementation.Compiled7b000ab44a4af2cc
net.bytebuddy.dynamic.scaffold.RecordComponentRegistry.Defaulteec49897d441dcbe
net.bytebuddy.dynamic.scaffold.RecordComponentRegistry.Default.Compiled1d64a300c478cbd4
net.bytebuddy.dynamic.scaffold.TypeInitializer.Drain.Defaulta3bc2736d5ad95f5
net.bytebuddy.dynamic.scaffold.TypeInitializer.Noned062b02ed3f4d342
net.bytebuddy.dynamic.scaffold.TypeInitializer.Simple3429322f4d42e2d4
net.bytebuddy.dynamic.scaffold.TypeValidationb9ab70dc0d5e3c60
net.bytebuddy.dynamic.scaffold.TypeWriter.Defaultb1fd1390cd945339
net.bytebuddy.dynamic.scaffold.TypeWriter.Default.ClassDumpAction.Dispatcher.Disabledc24d1fbb13ce4662
net.bytebuddy.dynamic.scaffold.TypeWriter.Default.ForCreatione49a97cbadf48f53
net.bytebuddy.dynamic.scaffold.TypeWriter.Default.UnresolvedTypecaecc137e21b50cd
net.bytebuddy.dynamic.scaffold.TypeWriter.Default.ValidatingClassVisitor020d1828495e6c79
net.bytebuddy.dynamic.scaffold.TypeWriter.FieldPool.Record.ForExplicitField6db88b0864fac583
net.bytebuddy.dynamic.scaffold.TypeWriter.FieldPool.Record.ForImplicitField4d4ebb611a56f520
net.bytebuddy.dynamic.scaffold.TypeWriter.MethodPool.Record.AccessBridgeWrapperd430962a4ae98a53
net.bytebuddy.dynamic.scaffold.TypeWriter.MethodPool.Record.AccessBridgeWrapper.AccessorBridge0aa41f269546d97e
net.bytebuddy.dynamic.scaffold.TypeWriter.MethodPool.Record.AccessBridgeWrapper.BridgeTarget8583bd798234d430
net.bytebuddy.dynamic.scaffold.TypeWriter.MethodPool.Record.ForDefinedMethod676c34a7d131152e
net.bytebuddy.dynamic.scaffold.TypeWriter.MethodPool.Record.ForDefinedMethod.WithBody175949ac2c965a4e
net.bytebuddy.dynamic.scaffold.TypeWriter.MethodPool.Record.ForNonImplementedMethod1c0608714c9bf7eb
net.bytebuddy.dynamic.scaffold.TypeWriter.MethodPool.Record.Sortce35c6847ba06f22
net.bytebuddy.dynamic.scaffold.subclass.ConstructorStrategy.Default0d114e09a2faac83
net.bytebuddy.dynamic.scaffold.subclass.ConstructorStrategy.Default.116fc5c99e02d7f9f
net.bytebuddy.dynamic.scaffold.subclass.ConstructorStrategy.Default.2dd199479878d5739
net.bytebuddy.dynamic.scaffold.subclass.ConstructorStrategy.Default.3792ea5ce51475037
net.bytebuddy.dynamic.scaffold.subclass.ConstructorStrategy.Default.498fceb895a262b45
net.bytebuddy.dynamic.scaffold.subclass.ConstructorStrategy.Default.5f0898605f9020c16
net.bytebuddy.dynamic.scaffold.subclass.SubclassDynamicTypeBuilder0426764fb123b82d
net.bytebuddy.dynamic.scaffold.subclass.SubclassDynamicTypeBuilder.InstrumentableMatcher7bbde13577295432
net.bytebuddy.dynamic.scaffold.subclass.SubclassImplementationTarget17f509a8b52b39f3
net.bytebuddy.dynamic.scaffold.subclass.SubclassImplementationTarget.Factoryf6c0a700d93e9d10
net.bytebuddy.dynamic.scaffold.subclass.SubclassImplementationTarget.OriginTypeResolver282c73cc811d5b71
net.bytebuddy.dynamic.scaffold.subclass.SubclassImplementationTarget.OriginTypeResolver.12eb773d398b87160
net.bytebuddy.dynamic.scaffold.subclass.SubclassImplementationTarget.OriginTypeResolver.2903a99da03746eb8
net.bytebuddy.implementation.FieldAccessor0174e94238af9d2f
net.bytebuddy.implementation.FieldAccessor.FieldLocation.Relativee3f1a92ea73df3a5
net.bytebuddy.implementation.FieldAccessor.FieldLocation.Relative.Preparedc55029896988613b
net.bytebuddy.implementation.FieldAccessor.FieldNameExtractor.ForFixedValue37f6e575b29ba057
net.bytebuddy.implementation.FieldAccessor.ForImplicitProperty623c50de803e8dff
net.bytebuddy.implementation.FieldAccessor.ForImplicitProperty.Appenderdb2e4aeceee38d5f
net.bytebuddy.implementation.Implementation.Context.Default9436fa8001a9c80c
net.bytebuddy.implementation.Implementation.Context.Default.AbstractPropertyAccessorMethod64d4d27f3bf59427
net.bytebuddy.implementation.Implementation.Context.Default.AccessorMethod0644ef9990e5aa98
net.bytebuddy.implementation.Implementation.Context.Default.AccessorMethodDelegationea3233ec172d7765
net.bytebuddy.implementation.Implementation.Context.Default.CacheValueField0a79d6bd2bd7e867
net.bytebuddy.implementation.Implementation.Context.Default.DelegationRecord490687e4353099fe
net.bytebuddy.implementation.Implementation.Context.Default.Factory655386a029730f91
net.bytebuddy.implementation.Implementation.Context.Default.FieldCacheEntry9b63ee13339c36d9
net.bytebuddy.implementation.Implementation.Context.ExtractableView.AbstractBase072a87c3ee3b1c13
net.bytebuddy.implementation.Implementation.SpecialMethodInvocation.AbstractBase99ac1d4463895d3f
net.bytebuddy.implementation.Implementation.SpecialMethodInvocation.Illegalfe05bdf1b81d2463
net.bytebuddy.implementation.Implementation.SpecialMethodInvocation.Simple7916d516ba029853
net.bytebuddy.implementation.Implementation.Target.AbstractBase891cf9f2a321fafd
net.bytebuddy.implementation.Implementation.Target.AbstractBase.DefaultMethodInvocation29b19b204be139f3
net.bytebuddy.implementation.Implementation.Target.AbstractBase.DefaultMethodInvocation.13ba9a760aa49a971
net.bytebuddy.implementation.Implementation.Target.AbstractBase.DefaultMethodInvocation.28279f38afb254f72
net.bytebuddy.implementation.LoadedTypeInitializer.NoOp1af8ca0d9b7adbe8
net.bytebuddy.implementation.MethodAccessorFactory.AccessTypea8b1b417256441f1
net.bytebuddy.implementation.MethodDelegationa34026f28347c757
net.bytebuddy.implementation.MethodDelegation.Appenderc0bd24965973bfa5
net.bytebuddy.implementation.MethodDelegation.ImplementationDelegate.Compiled.ForStaticCallf75269107418d3a6
net.bytebuddy.implementation.MethodDelegation.ImplementationDelegate.ForStaticMethod44d456b1507ffbbe
net.bytebuddy.implementation.MethodDelegation.WithCustomPropertiesf108031992acdb03
net.bytebuddy.implementation.SuperMethodCall48a9709638c71f00
net.bytebuddy.implementation.SuperMethodCall.Appender1278488d60ed8e86
net.bytebuddy.implementation.SuperMethodCall.Appender.TerminationHandler35d2e0ef6d7f630d
net.bytebuddy.implementation.SuperMethodCall.Appender.TerminationHandler.105664af3a3b6738b
net.bytebuddy.implementation.SuperMethodCall.Appender.TerminationHandler.2be670f96c6d93831
net.bytebuddy.implementation.attribute.AnnotationAppender.Default7787cf7f483d6685
net.bytebuddy.implementation.attribute.AnnotationAppender.ForTypeAnnotations040d5aab72de4582
net.bytebuddy.implementation.attribute.AnnotationAppender.Target.OnField52ad3ce83f52621f
net.bytebuddy.implementation.attribute.AnnotationAppender.Target.OnMethodb2534f024a4880dd
net.bytebuddy.implementation.attribute.AnnotationAppender.Target.OnMethodParameterc9f39d80b694c092
net.bytebuddy.implementation.attribute.AnnotationAppender.Target.OnTypedb8f4f1dbbcf3c3e
net.bytebuddy.implementation.attribute.AnnotationRetention6dca59a58d56874f
net.bytebuddy.implementation.attribute.AnnotationValueFilter.Default190882f8828de18a
net.bytebuddy.implementation.attribute.AnnotationValueFilter.Default.1593737e47cc84848
net.bytebuddy.implementation.attribute.AnnotationValueFilter.Default.2a61861baa0bc96ee
net.bytebuddy.implementation.attribute.FieldAttributeAppender.ForInstrumentedFieldca19f51ae14fb7b4
net.bytebuddy.implementation.attribute.MethodAttributeAppender.ForInstrumentedMethod4e40a53e08d4cbbb
net.bytebuddy.implementation.attribute.MethodAttributeAppender.ForInstrumentedMethod.1a3b87b1a75d290fd
net.bytebuddy.implementation.attribute.MethodAttributeAppender.ForInstrumentedMethod.210e734a991eea3bf
net.bytebuddy.implementation.attribute.MethodAttributeAppender.NoOpaa6841038c96aed0
net.bytebuddy.implementation.attribute.TypeAttributeAppender.ForInstrumentedType537a1dac83c99ae9
net.bytebuddy.implementation.auxiliary.AuxiliaryType577555a7861b5701
net.bytebuddy.implementation.auxiliary.AuxiliaryType.NamingStrategy.SuffixingRandom9ff4d19573d987f3
net.bytebuddy.implementation.bind.ArgumentTypeResolver74973272be85ce17
net.bytebuddy.implementation.bind.ArgumentTypeResolver.ParameterIndexTokena8052b758f0a0361
net.bytebuddy.implementation.bind.DeclaringTypeResolverd1000b5d5bf7bd79
net.bytebuddy.implementation.bind.MethodDelegationBinder.154de841f73ee4eae
net.bytebuddy.implementation.bind.MethodDelegationBinder.AmbiguityResolver7d40b5a2d5d69397
net.bytebuddy.implementation.bind.MethodDelegationBinder.AmbiguityResolver.Compoundeab4a548d2693cd2
net.bytebuddy.implementation.bind.MethodDelegationBinder.AmbiguityResolver.Resolutione8ca39d95b4ade42
net.bytebuddy.implementation.bind.MethodDelegationBinder.BindingResolver.Defaulted3f9e212bdf4696
net.bytebuddy.implementation.bind.MethodDelegationBinder.MethodBinding.Builderffaacecf2e1956bd
net.bytebuddy.implementation.bind.MethodDelegationBinder.MethodBinding.Builder.Buildfbe15ed2c0b7c26f
net.bytebuddy.implementation.bind.MethodDelegationBinder.MethodBinding.Illegalca301be97fe35cde
net.bytebuddy.implementation.bind.MethodDelegationBinder.MethodInvoker.Simpledafea2ba3b2f164b
net.bytebuddy.implementation.bind.MethodDelegationBinder.ParameterBinding.Anonymous30b0f734840f8b2c
net.bytebuddy.implementation.bind.MethodDelegationBinder.ParameterBinding.Illegal470dc52d77c3898e
net.bytebuddy.implementation.bind.MethodDelegationBinder.Processor1dd9238ba412581f
net.bytebuddy.implementation.bind.MethodDelegationBinder.TerminationHandler.Default946265fda2ca27e8
net.bytebuddy.implementation.bind.MethodDelegationBinder.TerminationHandler.Default.1db109132d7373fda
net.bytebuddy.implementation.bind.MethodDelegationBinder.TerminationHandler.Default.2cb3895b610bd15d5
net.bytebuddy.implementation.bind.MethodNameEqualityResolver65a8d1431b34fdcd
net.bytebuddy.implementation.bind.ParameterLengthResolver58a025cd0f10dff1
net.bytebuddy.implementation.bind.annotation.AllArguments.Assignmentbfcd0244baa95f1b
net.bytebuddy.implementation.bind.annotation.AllArguments.Binder7ed5bf64ac194c84
net.bytebuddy.implementation.bind.annotation.Argument.Binder9d613cfc7a8f0cd6
net.bytebuddy.implementation.bind.annotation.BindingPriority.Resolver2fd170c18c979895
net.bytebuddy.implementation.bind.annotation.Default.Binderfdd8dd2baa86d3db
net.bytebuddy.implementation.bind.annotation.DefaultCall.Binderd7e4b58cec267a0e
net.bytebuddy.implementation.bind.annotation.DefaultMethod.Binder03d209c7b50b3b07
net.bytebuddy.implementation.bind.annotation.Empty.Binder6af2e8e3cdad25b3
net.bytebuddy.implementation.bind.annotation.FieldValue.Binderffe1f66fdf57240f
net.bytebuddy.implementation.bind.annotation.FieldValue.Binder.Delegateb16d4f0b5def41e9
net.bytebuddy.implementation.bind.annotation.IgnoreForBinding.Verifierf6eaa0a37f2ce769
net.bytebuddy.implementation.bind.annotation.Morph.Binder221cfe9babd6b9a8
net.bytebuddy.implementation.bind.annotation.Morph.Binder.RedirectionProxyf26adba7c4fb3aad
net.bytebuddy.implementation.bind.annotation.Morph.Binder.RedirectionProxy.InstanceFieldConstructor82810ce8d8d3c7f6
net.bytebuddy.implementation.bind.annotation.Morph.Binder.RedirectionProxy.InstanceFieldConstructor.Appender5a9bd62b26738c1e
net.bytebuddy.implementation.bind.annotation.Morph.Binder.RedirectionProxy.MethodCall90fc1233517a7f78
net.bytebuddy.implementation.bind.annotation.Morph.Binder.RedirectionProxy.MethodCall.Appender30ecfa36670ca8d7
net.bytebuddy.implementation.bind.annotation.Morph.Binder.RedirectionProxy.StaticFieldConstructor4944bebd8a49b603
net.bytebuddy.implementation.bind.annotation.Origin.Binder58bfe04015269f97
net.bytebuddy.implementation.bind.annotation.RuntimeType.Verifier79ef98193cf36f83
net.bytebuddy.implementation.bind.annotation.StubValue.Binderc5dcbbaafc956a20
net.bytebuddy.implementation.bind.annotation.Super.Binder159db3adf8f80917
net.bytebuddy.implementation.bind.annotation.SuperCall.Binderd504027b57aeebbe
net.bytebuddy.implementation.bind.annotation.SuperMethod.Binder787b81ea7c3cf9d1
net.bytebuddy.implementation.bind.annotation.TargetMethodAnnotationDrivenBindera9644f0a487b56f8
net.bytebuddy.implementation.bind.annotation.TargetMethodAnnotationDrivenBinder.DelegationProcessor08e777de45b651f6
net.bytebuddy.implementation.bind.annotation.TargetMethodAnnotationDrivenBinder.DelegationProcessor.Handler.Boundfe4b74c6469cb373
net.bytebuddy.implementation.bind.annotation.TargetMethodAnnotationDrivenBinder.DelegationProcessor.Handler.Unbound53b08d554175038c
net.bytebuddy.implementation.bind.annotation.TargetMethodAnnotationDrivenBinder.ParameterBinder6f273cd5a9428c36
net.bytebuddy.implementation.bind.annotation.TargetMethodAnnotationDrivenBinder.ParameterBinder.ForFieldBinding49c4acf91fc87123
net.bytebuddy.implementation.bind.annotation.TargetMethodAnnotationDrivenBinder.Recordf5597b43768b5a7b
net.bytebuddy.implementation.bind.annotation.This.Binderb3e837fb5b95fa04
net.bytebuddy.implementation.bytecode.ByteCodeAppender.Compound0f6ce72d7ea48338
net.bytebuddy.implementation.bytecode.ByteCodeAppender.Simple3d7cd79d87926f75
net.bytebuddy.implementation.bytecode.ByteCodeAppender.Size897030ac0b46252c
net.bytebuddy.implementation.bytecode.Duplication87726ed8bb6e39de
net.bytebuddy.implementation.bytecode.Duplication.16cbf4aae44bb9c6a
net.bytebuddy.implementation.bytecode.Duplication.2204abf23cbf37c68
net.bytebuddy.implementation.bytecode.Duplication.30631976e078609bd
net.bytebuddy.implementation.bytecode.Removal6d539a300caa5092
net.bytebuddy.implementation.bytecode.Removal.1ab763f3b743f79a5
net.bytebuddy.implementation.bytecode.Removal.2fd766afb93ac2a09
net.bytebuddy.implementation.bytecode.StackManipulation.AbstractBase31ac4a0904ac3e09
net.bytebuddy.implementation.bytecode.StackManipulation.Compound96939a22aac4c91b
net.bytebuddy.implementation.bytecode.StackManipulation.Illegald75e2eb0d394f6c3
net.bytebuddy.implementation.bytecode.StackManipulation.Sizee69b15cd3e8d4461
net.bytebuddy.implementation.bytecode.StackManipulation.Trivial56f2787cdbce4d40
net.bytebuddy.implementation.bytecode.StackSize80f94e8effa2f7bb
net.bytebuddy.implementation.bytecode.TypeCreation4865d2e454028bc1
net.bytebuddy.implementation.bytecode.assign.Assigner7e67d52e9390b000
net.bytebuddy.implementation.bytecode.assign.Assigner.Typingb09adf7fa17d04b8
net.bytebuddy.implementation.bytecode.assign.TypeCasting1a445bd188e2931d
net.bytebuddy.implementation.bytecode.assign.primitive.PrimitiveBoxingDelegatedac9a66a711d1bdb
net.bytebuddy.implementation.bytecode.assign.primitive.PrimitiveBoxingDelegate.BoxingStackManipulation96e0379915a5a251
net.bytebuddy.implementation.bytecode.assign.primitive.PrimitiveTypeAwareAssignerc888a19b998b7769
net.bytebuddy.implementation.bytecode.assign.primitive.PrimitiveUnboxingDelegate14e47d44e5cebb1d
net.bytebuddy.implementation.bytecode.assign.primitive.PrimitiveUnboxingDelegate.ImplicitlyTypedUnboxingResponsibleadf7d49661fe0566
net.bytebuddy.implementation.bytecode.assign.primitive.VoidAwareAssigner3df36760b29d387a
net.bytebuddy.implementation.bytecode.assign.reference.GenericTypeAwareAssigner3623cb487284bb53
net.bytebuddy.implementation.bytecode.assign.reference.ReferenceTypeAwareAssigner59b5f6f8641c87f2
net.bytebuddy.implementation.bytecode.collection.ArrayAccessee5ac198c6726a54
net.bytebuddy.implementation.bytecode.collection.ArrayAccess.Loader4998ef35821cb2a3
net.bytebuddy.implementation.bytecode.collection.ArrayFactoryf2dcfb1430649b3e
net.bytebuddy.implementation.bytecode.collection.ArrayFactory.ArrayCreator7ff584cc516e3f40
net.bytebuddy.implementation.bytecode.collection.ArrayFactory.ArrayCreator.ForReferenceType2ffee25860dde2e1
net.bytebuddy.implementation.bytecode.collection.ArrayFactory.ArrayStackManipulation2420354f9fdfb502
net.bytebuddy.implementation.bytecode.constant.ClassConstant8c2c8e360f844ad5
net.bytebuddy.implementation.bytecode.constant.ClassConstant.ForReferenceTypea779a54b4d7fcd6c
net.bytebuddy.implementation.bytecode.constant.DefaultValue56544d5987e5a6d8
net.bytebuddy.implementation.bytecode.constant.DoubleConstant829c95b7b67e95cf
net.bytebuddy.implementation.bytecode.constant.FloatConstantbdee038754940fff
net.bytebuddy.implementation.bytecode.constant.IntegerConstant58a28f871a6a0499
net.bytebuddy.implementation.bytecode.constant.LongConstant113f925135fa3020
net.bytebuddy.implementation.bytecode.constant.MethodConstant55d1fac9a2312bd2
net.bytebuddy.implementation.bytecode.constant.MethodConstant.CachedMethod927dce16203d5f6c
net.bytebuddy.implementation.bytecode.constant.MethodConstant.ForMethod5c66dba4a8bfbcea
net.bytebuddy.implementation.bytecode.constant.NullConstant9cf4bfc5c52a2517
net.bytebuddy.implementation.bytecode.constant.TextConstant76b9599de59f2aeb
net.bytebuddy.implementation.bytecode.member.FieldAccesse098860a4703e90a
net.bytebuddy.implementation.bytecode.member.FieldAccess.AccessDispatcher20c90535a547e3cd
net.bytebuddy.implementation.bytecode.member.FieldAccess.AccessDispatcher.AbstractFieldInstruction75724b7b6b2e4a66
net.bytebuddy.implementation.bytecode.member.FieldAccess.AccessDispatcher.FieldGetInstructionadcac7724ac0272c
[Omitted: auto-generated test-coverage report class listing — fully-qualified class names, each paired with a 16-digit hexadecimal class identifier, spanning the net.bytebuddy (including jar.asm, matcher, pool, and utility), neureka, org.apache.commons.lang, org.apache.groovy, and org.codehaus.groovy packages.]
org.codehaus.groovy.classgen.asm.BinaryExpressionMultiTypeDispatcher.BinaryByteExpressionHelper71a13f668d616e28
org.codehaus.groovy.classgen.asm.BinaryExpressionMultiTypeDispatcher.BinaryCharExpressionHelperc058fc9112a4e925
org.codehaus.groovy.classgen.asm.BinaryExpressionMultiTypeDispatcher.BinaryShortExpressionHelper7982aa460eac34f8
org.codehaus.groovy.classgen.asm.BinaryExpressionWriter23c425b6d93f454b
org.codehaus.groovy.classgen.asm.BinaryFloatExpressionHelper651bebb1c9be5cb8
org.codehaus.groovy.classgen.asm.BinaryIntExpressionHelper1d9c724ff450686d
org.codehaus.groovy.classgen.asm.BinaryLongExpressionHelperfa2e93085030ba30
org.codehaus.groovy.classgen.asm.BinaryObjectExpressionHelpercd0bc24d39fc3799
org.codehaus.groovy.classgen.asm.BytecodeHelperc7f4089c4aadb53d
org.codehaus.groovy.classgen.asm.BytecodeHelper.LoadVarHandler42041b2621c49925
org.codehaus.groovy.classgen.asm.BytecodeHelper.PrimitiveTypeHandler3b3fdf36ed0cbb44
org.codehaus.groovy.classgen.asm.BytecodeHelper.ReturnVarHandlerb9d67038ad2ce28f
org.codehaus.groovy.classgen.asm.BytecodeHelper.StoreVarHandler955b2a1d988f96ee
org.codehaus.groovy.classgen.asm.BytecodeVariable548e277d2ad7d746
org.codehaus.groovy.classgen.asm.CallSiteWriter5bdf04820727fdfb
org.codehaus.groovy.classgen.asm.ClosureWriter3fa6b94a99c98c9e
org.codehaus.groovy.classgen.asm.ClosureWriter.CorrectAccessedVariableVisitor4431362cc81ee729
org.codehaus.groovy.classgen.asm.CompileStack9373ec2140bd7a6c
org.codehaus.groovy.classgen.asm.CompileStack.BlockRecorder7879f7296c19cf7e
org.codehaus.groovy.classgen.asm.CompileStack.LabelRange8071ba168d1acf39
org.codehaus.groovy.classgen.asm.CompileStack.StateStackElement584a40a90ad1eefe
org.codehaus.groovy.classgen.asm.ExpressionAsVariableSlot6b83220df23eff5e
org.codehaus.groovy.classgen.asm.InvocationWritera05b981e9c7d0210
org.codehaus.groovy.classgen.asm.LambdaWriterdbd0cee4cedcd829
org.codehaus.groovy.classgen.asm.MethodCallera1104e99bc13e4d1
org.codehaus.groovy.classgen.asm.MethodCallerMultiAdapter97a709291ed8983c
org.codehaus.groovy.classgen.asm.MethodPointerExpressionWriter4f315f5d28ddcc11
org.codehaus.groovy.classgen.asm.MethodReferenceExpressionWriter7115e621fc9a949b
org.codehaus.groovy.classgen.asm.MopWriter7469163932ccc45b
org.codehaus.groovy.classgen.asm.MopWriter.MopKey4fbdb921fc52594f
org.codehaus.groovy.classgen.asm.OperandStack17f8cefec0a9b98b
org.codehaus.groovy.classgen.asm.OptimizingStatementWriterd89c9e30e2f6131a
org.codehaus.groovy.classgen.asm.OptimizingStatementWriter.OptVisitor4ffa08d63e7dd446
org.codehaus.groovy.classgen.asm.OptimizingStatementWriter.OptimizeFlagsCollectorfed30052afc5eb89
org.codehaus.groovy.classgen.asm.OptimizingStatementWriter.OptimizeFlagsCollector.OptimizeFlagsEntrya3b0bf7453fd78e0
org.codehaus.groovy.classgen.asm.OptimizingStatementWriter.StatementMeta4b174ecca84ab025
org.codehaus.groovy.classgen.asm.StatementMetaTypeChooserdf2afbf9bab39cfd
org.codehaus.groovy.classgen.asm.StatementWriter418329b984019c2c
org.codehaus.groovy.classgen.asm.UnaryExpressionHelperadf2399d560555f8
org.codehaus.groovy.classgen.asm.VariableSlotLoader261bba1b4e9323cd
org.codehaus.groovy.classgen.asm.WriterController9752f0e4ecf5ef2d
org.codehaus.groovy.classgen.asm.indy.IndyBinHelper68cb23716767f99c
org.codehaus.groovy.classgen.asm.indy.IndyCallSiteWriter71db9b56da823579
org.codehaus.groovy.classgen.asm.indy.InvokeDynamicWriter8afba64057b2d7df
org.codehaus.groovy.classgen.asm.util.TypeUtilf6e43fa9eb761e37
org.codehaus.groovy.control.ASTTransformationsContextbe817020f6427fbc
org.codehaus.groovy.control.AnnotationConstantsVisitor4c4f35b723a369df
org.codehaus.groovy.control.ClassNodeResolver531e0a78d584b8f7
org.codehaus.groovy.control.ClassNodeResolver.1c2f0e835dfc8c503
org.codehaus.groovy.control.ClassNodeResolver.LookupResult44003b5e329f1074
org.codehaus.groovy.control.CompilationUnit996ca07d5f1d41a8
org.codehaus.groovy.control.CompilationUnit.17792fdd16fcae8a0
org.codehaus.groovy.control.CompilationUnit.3993ab6db110ac6bc
org.codehaus.groovy.control.CompilationUnit.3.1256a97fae8feb575
org.codehaus.groovy.control.CompilationUnit.43be2321d06d45a40
org.codehaus.groovy.control.CompilationUnit.IPrimaryClassNodeOperation402adeb7464ef2b6
org.codehaus.groovy.control.CompilationUnit.ISourceUnitOperationaff3620b23874bcc
org.codehaus.groovy.control.CompilationUnit.SourceUnitOperationbb471d9229f703a3
org.codehaus.groovy.control.CompilePhaseb22dbd477dfa9d38
org.codehaus.groovy.control.CompilerConfiguration740bcc654cfc4a19
org.codehaus.groovy.control.CompilerConfiguration.1f173a454644cfac7
org.codehaus.groovy.control.ErrorCollector107ebd86303489b6
org.codehaus.groovy.control.GenericsVisitor56e9376ca483d9f7
org.codehaus.groovy.control.InstanceOfVerifier4d66b8f40c3a4443
org.codehaus.groovy.control.Janitor4af229605f083562
org.codehaus.groovy.control.LabelVerifier0c3320ac9b991c36
org.codehaus.groovy.control.OptimizerVisitorf2acbf777afe03aa
org.codehaus.groovy.control.ParserPluginFactory03efb2d58fc5243a
org.codehaus.groovy.control.ProcessingUnitdb1a7ffb374854e3
org.codehaus.groovy.control.ResolveVisitor5daa70889c482df5
org.codehaus.groovy.control.ResolveVisitor.ConstructedClassWithPackage54d65686f2cd6108
org.codehaus.groovy.control.ResolveVisitor.ConstructedNestedClasse7bf25467a5c0f25
org.codehaus.groovy.control.ResolveVisitor.LowerCaseClassd3881043430feaa9
org.codehaus.groovy.control.SourceExtensionHandler093f828e954a0cff
org.codehaus.groovy.control.SourceUnit0681e9d4c008d5a3
org.codehaus.groovy.control.StaticImportVisitor884e31eaec7b0e0d
org.codehaus.groovy.control.StaticVerifier583f75923bb31941
org.codehaus.groovy.control.io.AbstractReaderSource8ecbdb1fffcfcfac
org.codehaus.groovy.control.io.FileReaderSource991fb4f098ec1093
org.codehaus.groovy.control.io.StringReaderSource2b62c10aba2138e7
org.codehaus.groovy.control.io.URLReaderSource25b194176f07d8f3
org.codehaus.groovy.reflection.AccessPermissionChecker47507cada23adb0d
org.codehaus.groovy.reflection.CachedClass920463690bb9f8a5
org.codehaus.groovy.reflection.CachedClass.18be33c58e6d5737b
org.codehaus.groovy.reflection.CachedClass.28180aa4db7c0814c
org.codehaus.groovy.reflection.CachedClass.335102f40be711460
org.codehaus.groovy.reflection.CachedClass.487d8ac5b8adff89b
org.codehaus.groovy.reflection.CachedClass.56a83d30e2f4ea62a
org.codehaus.groovy.reflection.CachedClass.6bca0fdbcc58203d5
org.codehaus.groovy.reflection.CachedClass.79949d1ff0a33e913
org.codehaus.groovy.reflection.CachedClass.83b8329d42160ed15
org.codehaus.groovy.reflection.CachedClass.CachedMethodComparatorByNameb956c58b1f55d6ce
org.codehaus.groovy.reflection.CachedClass.CachedMethodComparatorWithString54ce2d333b8bc79b
org.codehaus.groovy.reflection.CachedConstructor882034b64b36245d
org.codehaus.groovy.reflection.CachedFieldfddb4b76f09ccd61
org.codehaus.groovy.reflection.CachedMethod27c700f22aa8f050
org.codehaus.groovy.reflection.ClassInfo44e2040d86bf5353
org.codehaus.groovy.reflection.ClassInfo.1f78e55b823a81e7b
org.codehaus.groovy.reflection.ClassInfo.GlobalClassSet163c8ea87ac2713b
org.codehaus.groovy.reflection.ClassInfo.LazyCachedClassRef01b66149fa06292d
org.codehaus.groovy.reflection.ClassInfo.LazyClassLoaderRef01769837de8d15cb
org.codehaus.groovy.reflection.GeneratedMetaMethodfc37e7784531d481
org.codehaus.groovy.reflection.GeneratedMetaMethod.DgmMethodRecord0a8d143f5cb43a8c
org.codehaus.groovy.reflection.GeneratedMetaMethod.Proxy0fe0f239c4325a6f
org.codehaus.groovy.reflection.GroovyClassValueFactoryb0126e453bd47f2f
org.codehaus.groovy.reflection.ParameterTypesd05c78080e7d812e
org.codehaus.groovy.reflection.ReflectionCache4817ce6c52eff5ad
org.codehaus.groovy.reflection.ReflectionUtils57c928d8ed81da0d
org.codehaus.groovy.reflection.ReflectionUtils.ClassContextHelperf61e7f90a0f67573
org.codehaus.groovy.reflection.stdclasses.ArrayCachedClass91b9297e1e2666f6
org.codehaus.groovy.reflection.stdclasses.BigDecimalCachedClass9ff3d5ebbd4711fe
org.codehaus.groovy.reflection.stdclasses.BigIntegerCachedClass6ac9a56c142df996
org.codehaus.groovy.reflection.stdclasses.BooleanCachedClass7eade73f5ecffb5c
org.codehaus.groovy.reflection.stdclasses.ByteCachedClassb72debd1104779f0
org.codehaus.groovy.reflection.stdclasses.CachedClosureClass7e1d8a2e9e189769
org.codehaus.groovy.reflection.stdclasses.CachedSAMClassfeffafd072c06769
org.codehaus.groovy.reflection.stdclasses.CharacterCachedClass8a9dfa6dcd3deceb
org.codehaus.groovy.reflection.stdclasses.DoubleCachedClass21d4f0b5bbcc0701
org.codehaus.groovy.reflection.stdclasses.FloatCachedClass3330130add11e649
org.codehaus.groovy.reflection.stdclasses.IntegerCachedClassfc99faf4b2c884e5
org.codehaus.groovy.reflection.stdclasses.LongCachedClasseaa4756692b2f3b6
org.codehaus.groovy.reflection.stdclasses.NumberCachedClass12c0ac00524486cd
org.codehaus.groovy.reflection.stdclasses.ObjectCachedClassed71720204d510e2
org.codehaus.groovy.reflection.stdclasses.ShortCachedClassebfdfb3eaefb9875
org.codehaus.groovy.reflection.stdclasses.StringCachedClass876f38bb5ecd8039
org.codehaus.groovy.reflection.v7.GroovyClassValueJava703cc9bee1dab9073
org.codehaus.groovy.runtime.ConversionHandler27162bb09ddc5127
org.codehaus.groovy.runtime.ConvertedClosure1256728e0d46e3a1
org.codehaus.groovy.runtime.DefaultCachedMethodKey6fb4878355605413
org.codehaus.groovy.runtime.DefaultGroovyMethodsa2dd0bb1ffc84431
org.codehaus.groovy.runtime.DefaultGroovyMethodsSupport8814a22ddf12b2d1
org.codehaus.groovy.runtime.EncodingGroovyMethodsa1384fddf9652e6e
org.codehaus.groovy.runtime.EncodingGroovyMethods.2da7f29923774e7f2
org.codehaus.groovy.runtime.FormatHelpere57dfd064cdd4244
org.codehaus.groovy.runtime.GStringImpl4ef2e5e23c0dd795
org.codehaus.groovy.runtime.GStringUtilab0547b9e7ed5540
org.codehaus.groovy.runtime.GroovyCategorySupport0319668bf29b71eb
org.codehaus.groovy.runtime.GroovyCategorySupport.MyThreadLocalce5e5e28f3ac1bf9
org.codehaus.groovy.runtime.HandleMetaClass85b0089e6fb42fd6
org.codehaus.groovy.runtime.IOGroovyMethods61a8a0b11759e40a
org.codehaus.groovy.runtime.IOGroovyMethods.2cdda3086312faa49
org.codehaus.groovy.runtime.InvokerHelper331f8ba6347b7d4e
org.codehaus.groovy.runtime.MetaClassHelpera458b0693a1e8d96
org.codehaus.groovy.runtime.MethodClosure9dfd0ae5c1c2201e
org.codehaus.groovy.runtime.MethodKey262f8918359b04b3
org.codehaus.groovy.runtime.NullObject61d374adbc534f8f
org.codehaus.groovy.runtime.NumberAwareComparator36b7ae92cc4be96c
org.codehaus.groovy.runtime.RangeInfo09cf4f1f9d0de986
org.codehaus.groovy.runtime.ResourceGroovyMethodsb5bc453becc81f56
org.codehaus.groovy.runtime.ScriptBytecodeAdapter8ca4c1ac110fd89c
org.codehaus.groovy.runtime.StringGroovyMethods3453f1a3c5d01875
org.codehaus.groovy.runtime.StringGroovyMethods.LineIterablebf0dc18b7a1cf8aa
org.codehaus.groovy.runtime.callsite.AbstractCallSitea26885994536a798
org.codehaus.groovy.runtime.callsite.BooleanClosureWrapperaf827dfd42d54386
org.codehaus.groovy.runtime.callsite.BooleanReturningMethodInvokerc75a55dbb9a427bc
org.codehaus.groovy.runtime.callsite.CallSiteArray3641939f994a7cf8
org.codehaus.groovy.runtime.callsite.CallSiteAwareMetaMethod44a7e7fe87ffbde3
org.codehaus.groovy.runtime.callsite.MetaClassSite92b8cbe717b1f3af
org.codehaus.groovy.runtime.callsite.MetaMethodSiteef94cbbe9b298ce8
org.codehaus.groovy.runtime.callsite.PlainObjectMetaMethodSitefc75d9e561279087
org.codehaus.groovy.runtime.callsite.PogoMetaClassSite5ab06245264ccd9e
org.codehaus.groovy.runtime.callsite.PojoMetaMethodSite2faeff01e54f6ef1
org.codehaus.groovy.runtime.callsite.PojoMetaMethodSite.PojoMetaMethodSiteNoUnwrapNoCoercef87a1e8980c6a4eb
org.codehaus.groovy.runtime.dgm.100aae927fc485d49aa
org.codehaus.groovy.runtime.dgm.101012e057f62ccb3d7
org.codehaus.groovy.runtime.dgm.101389408f45987c5c35
org.codehaus.groovy.runtime.dgm.1023de62621e0a44f9b
org.codehaus.groovy.runtime.dgm.103e596e5c897242707
org.codehaus.groovy.runtime.dgm.1044555752ecc475cf4
org.codehaus.groovy.runtime.dgm.106ea85ac00bb002f0a
org.codehaus.groovy.runtime.dgm.10998857f706d80dd637
org.codehaus.groovy.runtime.dgm.1174df0d93cedb2f6695
org.codehaus.groovy.runtime.dgm.1175050625c494e2f7a2
org.codehaus.groovy.runtime.dgm.1176b6a00988e45828b6
org.codehaus.groovy.runtime.dgm.117761f83555d9f8742a
org.codehaus.groovy.runtime.dgm.12f178d541c8d18d73
org.codehaus.groovy.runtime.dgm.1215693a8918c92c21ba
org.codehaus.groovy.runtime.dgm.12195e77731cc5a2778b
org.codehaus.groovy.runtime.dgm.12648a74264490aa4e65
org.codehaus.groovy.runtime.dgm.12651db8b3259666e346
org.codehaus.groovy.runtime.dgm.126627b3a933e9804063
org.codehaus.groovy.runtime.dgm.12671ecb2a2424659a2d
org.codehaus.groovy.runtime.dgm.1268ab9916f4bf8bef00
org.codehaus.groovy.runtime.dgm.127854e5180e0fe99c91
org.codehaus.groovy.runtime.dgm.129280b07f02a54ca838
org.codehaus.groovy.runtime.dgm.1296e71cf40b74682089
org.codehaus.groovy.runtime.dgm.1297d627b9c425b4cd14
org.codehaus.groovy.runtime.dgm.132df7e1144ddb837f
org.codehaus.groovy.runtime.dgm.13031895684fce56d9c4
org.codehaus.groovy.runtime.dgm.13046e1c8aaffb07bcf5
org.codehaus.groovy.runtime.dgm.1305dc7a95f1c4042642
org.codehaus.groovy.runtime.dgm.1311039957e6965cb56f
org.codehaus.groovy.runtime.dgm.13123687f65726a7be7b
org.codehaus.groovy.runtime.dgm.13134b15dd4f6d0723a6
org.codehaus.groovy.runtime.dgm.1314a1b2fe1e3cc89516
org.codehaus.groovy.runtime.dgm.1315191727ad63a0da75
org.codehaus.groovy.runtime.dgm.1316814abaec4be695d3
org.codehaus.groovy.runtime.dgm.1325fe2d94d6b9e5adbf
org.codehaus.groovy.runtime.dgm.1330ef6426f45b7c6a20
org.codehaus.groovy.runtime.dgm.13463601ef5655184c0
org.codehaus.groovy.runtime.dgm.135e2a91b8c33240254
org.codehaus.groovy.runtime.dgm.14a5f8cd49ef6bddab
org.codehaus.groovy.runtime.dgm.14700ac0cd3e537dc13
org.codehaus.groovy.runtime.dgm.1590b5c9292ae18ccd5
org.codehaus.groovy.runtime.dgm.16030d1661bf2f87a76
org.codehaus.groovy.runtime.dgm.171e3898abea7fb1a2c
org.codehaus.groovy.runtime.dgm.2048e2112ee81f155b
org.codehaus.groovy.runtime.dgm.20288d119ba45575351
org.codehaus.groovy.runtime.dgm.20406df94089bca1797
org.codehaus.groovy.runtime.dgm.205af6f0e578848ff58
org.codehaus.groovy.runtime.dgm.211e5bc3a127ac0bf1
org.codehaus.groovy.runtime.dgm.21867df4436d6c47734
org.codehaus.groovy.runtime.dgm.22d486eda94dbfd875
org.codehaus.groovy.runtime.dgm.2236cc7a85b54a8e1a9
org.codehaus.groovy.runtime.dgm.224a6448c9712eba852
org.codehaus.groovy.runtime.dgm.2255946aeeffcdf1623
org.codehaus.groovy.runtime.dgm.22606f07b8869f644c1
org.codehaus.groovy.runtime.dgm.229cd4e2bb6f7677c5e
org.codehaus.groovy.runtime.dgm.230b5c644a4197aed93
org.codehaus.groovy.runtime.dgm.231e81840048182902a
org.codehaus.groovy.runtime.dgm.23560c7091c7e5714a4
org.codehaus.groovy.runtime.dgm.2360cbf71280167cbff
org.codehaus.groovy.runtime.dgm.23894a43467f139fa2b
org.codehaus.groovy.runtime.dgm.239271d8da16235a861
org.codehaus.groovy.runtime.dgm.24c47a0097894ab675
org.codehaus.groovy.runtime.dgm.24441a421fb0c66199d
org.codehaus.groovy.runtime.dgm.24577b9fa308929b8f6
org.codehaus.groovy.runtime.dgm.24837f0ec5dfd5da720
org.codehaus.groovy.runtime.dgm.2499cd12d83c50ab147
org.codehaus.groovy.runtime.dgm.276e175c58fff55aeb
org.codehaus.groovy.runtime.dgm.302ed85a36793ea2cb7
org.codehaus.groovy.runtime.dgm.315428f6e76dae0a61
org.codehaus.groovy.runtime.dgm.3166cb008c10b196aed
org.codehaus.groovy.runtime.dgm.3193994ec0e2b0647a7
org.codehaus.groovy.runtime.dgm.3224f95d30a7ae3a2a
org.codehaus.groovy.runtime.dgm.32265320495afd048bc
org.codehaus.groovy.runtime.dgm.324953550b92200767c
org.codehaus.groovy.runtime.dgm.325fdd2650b0b0055c6
org.codehaus.groovy.runtime.dgm.326743940993f2b3358
org.codehaus.groovy.runtime.dgm.32876d4a1f42c2e0c84
org.codehaus.groovy.runtime.dgm.3290a8252c145476b49
org.codehaus.groovy.runtime.dgm.33278403f2df65db88
org.codehaus.groovy.runtime.dgm.3301b81e474ee68e32c
org.codehaus.groovy.runtime.dgm.331105324c2f8f43eba
org.codehaus.groovy.runtime.dgm.3325728e5eedbba1d02
org.codehaus.groovy.runtime.dgm.3337185a56c78d95219
org.codehaus.groovy.runtime.dgm.3346a37dc3ed448be59
org.codehaus.groovy.runtime.dgm.335985614ac295f048f
org.codehaus.groovy.runtime.dgm.3364cb7ff81870d3b8d
org.codehaus.groovy.runtime.dgm.337110bfeb7ec72f2c2
org.codehaus.groovy.runtime.dgm.3388e31957024059a03
org.codehaus.groovy.runtime.dgm.339c2b577e1adb5a202
org.codehaus.groovy.runtime.dgm.340441c0a0adbbe5df5
org.codehaus.groovy.runtime.dgm.341ac09c6f1ac790b31
org.codehaus.groovy.runtime.dgm.3425f4d7819680aa25b
org.codehaus.groovy.runtime.dgm.343abdcc7d945758479
org.codehaus.groovy.runtime.dgm.344ba5b77986b26aadc
org.codehaus.groovy.runtime.dgm.3451c90b79239142b52
org.codehaus.groovy.runtime.dgm.34656f1422acf4a5a83
org.codehaus.groovy.runtime.dgm.347559bba4bf5ed7350
org.codehaus.groovy.runtime.dgm.3489254272b19dbe482
org.codehaus.groovy.runtime.dgm.3495910bbe8855feaa9
org.codehaus.groovy.runtime.dgm.3509f99e6f1ab2c1a1d
org.codehaus.groovy.runtime.dgm.351f4d3a21dcd01d6bc
org.codehaus.groovy.runtime.dgm.352cf565b17e1722c88
org.codehaus.groovy.runtime.dgm.3535c65c872bdf1ba90
org.codehaus.groovy.runtime.dgm.3547f22239103f0a82e
org.codehaus.groovy.runtime.dgm.35533ddd82583b29921
org.codehaus.groovy.runtime.dgm.356a95644d0e8f08283
org.codehaus.groovy.runtime.dgm.35769517882ed1af7ee
org.codehaus.groovy.runtime.dgm.3586dca39907d89df3c
org.codehaus.groovy.runtime.dgm.35968e4dda1ae77721a
org.codehaus.groovy.runtime.dgm.36647ee7b06ae220bf
org.codehaus.groovy.runtime.dgm.360a683f9517312f2b3
org.codehaus.groovy.runtime.dgm.361cc7f41c80a6460ad
org.codehaus.groovy.runtime.dgm.362b859a5f5fe9b661c
org.codehaus.groovy.runtime.dgm.3631839327dc0abf7f6
org.codehaus.groovy.runtime.dgm.364cb5eaec24e961920
org.codehaus.groovy.runtime.dgm.368946f0673be692fc9
org.codehaus.groovy.runtime.dgm.369b108a1e5e20abe39
org.codehaus.groovy.runtime.dgm.370363feac86327dc4d
org.codehaus.groovy.runtime.dgm.37146324c1a007a5ab8
org.codehaus.groovy.runtime.dgm.385d92aaeb20494e55b
org.codehaus.groovy.runtime.dgm.4038054e44e16fd83aa
org.codehaus.groovy.runtime.dgm.404576d85cfac3db3a6
org.codehaus.groovy.runtime.dgm.405716f0faae3b38d70
org.codehaus.groovy.runtime.dgm.43409039802f081ef78
org.codehaus.groovy.runtime.dgm.4371a89f901de7ff33e
org.codehaus.groovy.runtime.dgm.461408813e161d34f0d
org.codehaus.groovy.runtime.dgm.463c58cb8f9678d15c3
org.codehaus.groovy.runtime.dgm.4802c6140c6dc4c06a5
org.codehaus.groovy.runtime.dgm.49233ffc7301d7ba5b1
org.codehaus.groovy.runtime.dgm.5002561da9d45e82d81
org.codehaus.groovy.runtime.dgm.539fe1c2ae474fce729
org.codehaus.groovy.runtime.dgm.5450d88b1ee2e2bc82
org.codehaus.groovy.runtime.dgm.540a3126ce23465a4d8
org.codehaus.groovy.runtime.dgm.543ef410de42b40c382
org.codehaus.groovy.runtime.dgm.544fcb980fd622e5e9a
org.codehaus.groovy.runtime.dgm.551d930dfb02448fa8
org.codehaus.groovy.runtime.dgm.569d01d7ca9c74c4af
org.codehaus.groovy.runtime.dgm.561e4603c1b04378f41
org.codehaus.groovy.runtime.dgm.56418b6bd012fa5027e
org.codehaus.groovy.runtime.dgm.56586997bcf5789c317
org.codehaus.groovy.runtime.dgm.5668e3d4fc8d1cdc836
org.codehaus.groovy.runtime.dgm.567beade89c59b7683a
org.codehaus.groovy.runtime.dgm.57fbc1f811b6366428
org.codehaus.groovy.runtime.dgm.5720a9c5cfda469bdb8
org.codehaus.groovy.runtime.dgm.58b9fcc07864e76e1d
org.codehaus.groovy.runtime.dgm.58578622ac87de958c4
org.codehaus.groovy.runtime.dgm.588bbd5977060b0ff63
org.codehaus.groovy.runtime.dgm.5970044cdff51189e0
org.codehaus.groovy.runtime.dgm.593918ce58ca6e3efb1
org.codehaus.groovy.runtime.dgm.594aff03d1a069c3e5b
org.codehaus.groovy.runtime.dgm.5956392926ce6fdc16b
org.codehaus.groovy.runtime.dgm.596c5b750f90afbd9c4
org.codehaus.groovy.runtime.dgm.5975eeffc8ff01d4ee9
org.codehaus.groovy.runtime.dgm.598f595208e632df474
org.codehaus.groovy.runtime.dgm.6066d36a0adf407732
org.codehaus.groovy.runtime.dgm.60872ffc929222647e5
org.codehaus.groovy.runtime.dgm.610ae0b434d87b90d85
org.codehaus.groovy.runtime.dgm.611ed09a6b5fd65c55a
org.codehaus.groovy.runtime.dgm.615cc1d903934ced749
org.codehaus.groovy.runtime.dgm.625791edb79096e1cb4
org.codehaus.groovy.runtime.dgm.6261f325c9cddf34475
org.codehaus.groovy.runtime.dgm.62797368f717d9ea6b4
org.codehaus.groovy.runtime.dgm.6309e6d5d698560d6c0
org.codehaus.groovy.runtime.dgm.631f27f6a958966d2a2
org.codehaus.groovy.runtime.dgm.634f1a2250a043b4575
org.codehaus.groovy.runtime.dgm.6353cc1d6f6349aa3bf
org.codehaus.groovy.runtime.dgm.638ab79f12a0da3970b
org.codehaus.groovy.runtime.dgm.639a5907f643547e479
org.codehaus.groovy.runtime.dgm.640163b3acd0b0885a9
org.codehaus.groovy.runtime.dgm.6418ef019e77108d2d5
org.codehaus.groovy.runtime.dgm.642dd3a5236209d8ae2
org.codehaus.groovy.runtime.dgm.6430a67cca481cb51ac
org.codehaus.groovy.runtime.dgm.644813195867cf0ed85
org.codehaus.groovy.runtime.dgm.645d74d628da6f44ebf
org.codehaus.groovy.runtime.dgm.668abded26c8584c239
org.codehaus.groovy.runtime.dgm.669ca271242a6154730
org.codehaus.groovy.runtime.dgm.67007b5dba29a88bb92
org.codehaus.groovy.runtime.dgm.671f4871ca50b69416a
org.codehaus.groovy.runtime.dgm.672b91f2b5b6c70fb9d
org.codehaus.groovy.runtime.dgm.6731184ba2358e9ce4f
org.codehaus.groovy.runtime.dgm.717174e9f19d3bbb319
org.codehaus.groovy.runtime.dgm.734eee17131de8c0298
org.codehaus.groovy.runtime.dgm.7362cfa6478a7230537
org.codehaus.groovy.runtime.dgm.739cd99e1e1dfb99c30
org.codehaus.groovy.runtime.dgm.743ce2d7ff5548be6af
org.codehaus.groovy.runtime.dgm.7443bcf660471cc4406
org.codehaus.groovy.runtime.dgm.748274baebaf8890c30
org.codehaus.groovy.runtime.dgm.78731170a2f80b9e26d
org.codehaus.groovy.runtime.dgm.79427cd4e767aae63b1
org.codehaus.groovy.runtime.dgm.804bccdf9c157cd62ca
org.codehaus.groovy.runtime.dgm.8098bfafdea3f4436f2
org.codehaus.groovy.runtime.dgm.8228deb4761cc4871e6
org.codehaus.groovy.runtime.dgm.847975add8cf0aa040f
org.codehaus.groovy.runtime.dgm.852f30ca5bab37554ce
org.codehaus.groovy.runtime.dgm.91199db483edb44821b
org.codehaus.groovy.runtime.dgm.9127c35f0dc6d9d153c
org.codehaus.groovy.runtime.dgm.9306a82ec3d29e1a823
org.codehaus.groovy.runtime.dgm.9426d7fa6b649497b4e
org.codehaus.groovy.runtime.dgm.950166c2d77c557ba6
org.codehaus.groovy.runtime.dgm.96b38a8116d6cf26f9
org.codehaus.groovy.runtime.dgm.979f06e4fb3093f8b1
org.codehaus.groovy.runtime.dgm.971c219e8253bea5f19
org.codehaus.groovy.runtime.dgm.9871c59f5000c191d4
org.codehaus.groovy.runtime.dgmimpl.NumberNumberDiv038fac4aa1042328
org.codehaus.groovy.runtime.dgmimpl.NumberNumberMetaMethodab3f207bc2ec0cb1
org.codehaus.groovy.runtime.dgmimpl.NumberNumberMinus72836fd0a0c4cfc7
org.codehaus.groovy.runtime.dgmimpl.NumberNumberMultiply893d3a42f2299c33
org.codehaus.groovy.runtime.dgmimpl.NumberNumberPlus2af25672c44c9ae3
org.codehaus.groovy.runtime.dgmimpl.arrays.ArrayGetAtMetaMethod2f6b85eb6b700b20
org.codehaus.groovy.runtime.dgmimpl.arrays.ArrayMetaMethod4d61b56a59617d55
org.codehaus.groovy.runtime.dgmimpl.arrays.ArrayPutAtMetaMethod2d8f8c9fa44f75e2
org.codehaus.groovy.runtime.dgmimpl.arrays.BooleanArrayGetAtMetaMethodb2544cfba72481f8
org.codehaus.groovy.runtime.dgmimpl.arrays.BooleanArrayPutAtMetaMethod55873775136aaa96
org.codehaus.groovy.runtime.dgmimpl.arrays.ByteArrayGetAtMetaMethod74dae371263b0ffb
org.codehaus.groovy.runtime.dgmimpl.arrays.ByteArrayPutAtMetaMethod89dbee301fd39071
org.codehaus.groovy.runtime.dgmimpl.arrays.CharacterArrayGetAtMetaMethodb5ac99abb919243b
org.codehaus.groovy.runtime.dgmimpl.arrays.CharacterArrayPutAtMetaMethodbb81215c9a0297b6
org.codehaus.groovy.runtime.dgmimpl.arrays.DoubleArrayGetAtMetaMethodbd4b73d57decd579
org.codehaus.groovy.runtime.dgmimpl.arrays.DoubleArrayPutAtMetaMethod3dedf41c37e952e6
org.codehaus.groovy.runtime.dgmimpl.arrays.FloatArrayGetAtMetaMethoded95ef3d6200c7eb
org.codehaus.groovy.runtime.dgmimpl.arrays.FloatArrayPutAtMetaMethodd077e3ee7ae48c10
org.codehaus.groovy.runtime.dgmimpl.arrays.IntegerArrayGetAtMetaMethodae42939b5466e260
org.codehaus.groovy.runtime.dgmimpl.arrays.IntegerArrayPutAtMetaMethodc05e612a2708ed22
org.codehaus.groovy.runtime.dgmimpl.arrays.LongArrayGetAtMetaMethod3b2f9786cdfaaae1
org.codehaus.groovy.runtime.dgmimpl.arrays.LongArrayPutAtMetaMethod45a38256480ae043
org.codehaus.groovy.runtime.dgmimpl.arrays.ObjectArrayGetAtMetaMethod343ef6bf585623f7
org.codehaus.groovy.runtime.dgmimpl.arrays.ObjectArrayPutAtMetaMethod635d1b360fe45847
org.codehaus.groovy.runtime.dgmimpl.arrays.ShortArrayGetAtMetaMethoddedba887f9abc72c
org.codehaus.groovy.runtime.dgmimpl.arrays.ShortArrayPutAtMetaMethod7abe1ed19cd0d22f
org.codehaus.groovy.runtime.m12n.ExtensionModuleed477465df3104e0
org.codehaus.groovy.runtime.m12n.ExtensionModuleRegistry5a9ededb5b4c8203
org.codehaus.groovy.runtime.m12n.ExtensionModuleScanner03ea9e8eca7123a0
org.codehaus.groovy.runtime.m12n.MetaInfExtensionModulee2ce4c687402ee1b
org.codehaus.groovy.runtime.m12n.PropertiesModuleFactory2d2571e8795435d7
org.codehaus.groovy.runtime.m12n.SimpleExtensionModule9f4cfabb739c78c8
org.codehaus.groovy.runtime.m12n.StandardPropertiesModuleFactory761cd2e69020b87b
org.codehaus.groovy.runtime.memoize.CommonCache0de71714d0fbaa98
org.codehaus.groovy.runtime.memoize.CommonCache.1c27c65abfc03158e
org.codehaus.groovy.runtime.memoize.ConcurrentCommonCachec85ed488dabdd610
org.codehaus.groovy.runtime.memoize.ConcurrentSoftCache5dbbbfbd2aaca6fe
org.codehaus.groovy.runtime.memoize.EvictableCache.EvictionStrategy33e339f00dc0260e
org.codehaus.groovy.runtime.memoize.StampedCommonCachea9632e2ff2fa1276
org.codehaus.groovy.runtime.memoize.UnlimitedConcurrentCacheeb1206ccdb102900
org.codehaus.groovy.runtime.metaclass.ClosureMetaClassd714fa51bdb808b7
org.codehaus.groovy.runtime.metaclass.ClosureMetaClass.StandardClosureChooser4a50e804b3f2fb9c
org.codehaus.groovy.runtime.metaclass.ClosureMetaMethod3ded06257bbb50f6
org.codehaus.groovy.runtime.metaclass.DefaultMetaClassInfo69854cb7a75ae059
org.codehaus.groovy.runtime.metaclass.DefaultMetaClassInfo.ConstantMetaClassVersioning19c0e73328fca174
org.codehaus.groovy.runtime.metaclass.MetaClassRegistryImpld7c88422b6078d2e
org.codehaus.groovy.runtime.metaclass.MetaClassRegistryImpl.DefaultModuleListener71c648598280e02b
org.codehaus.groovy.runtime.metaclass.MetaMethodIndex0aa63dd7ad49fdd6
org.codehaus.groovy.runtime.metaclass.MetaMethodIndex.CacheEntry20c734b95edbdd2d
org.codehaus.groovy.runtime.metaclass.MetaMethodIndex.Entry03fde1af49a47261
org.codehaus.groovy.runtime.metaclass.MetaMethodIndex.Headerba7da134d7a1d743
org.codehaus.groovy.runtime.metaclass.MethodHelperdef2d014f23814ed
org.codehaus.groovy.runtime.metaclass.MethodMetaProperty6719bafb35e50059
org.codehaus.groovy.runtime.metaclass.MethodMetaProperty.GetBeanMethodMetaPropertya9731669b640a49c
org.codehaus.groovy.runtime.metaclass.MissingMethodExceptionNoStack343fc68c59de2dfb
org.codehaus.groovy.runtime.metaclass.MissingPropertyExceptionNoStack258ecd611b3af2fb
org.codehaus.groovy.runtime.metaclass.NewInstanceMetaMethod180ca5e049a0c70a
org.codehaus.groovy.runtime.metaclass.NewMetaMethodf3f964da2a9b40d4
org.codehaus.groovy.runtime.metaclass.NewStaticMetaMethod9b1964407056703c
org.codehaus.groovy.runtime.metaclass.ReflectionMetaMethoded055bc379c56f07
org.codehaus.groovy.runtime.metaclass.TransformMetaMethod9fb56641e1c6645f
org.codehaus.groovy.runtime.powerassert.Value19f2adc6181105d2
org.codehaus.groovy.runtime.powerassert.ValueRecorderaacc5f4cfb4e4355
org.codehaus.groovy.runtime.typehandling.BigDecimalMath4d9902048e564648
org.codehaus.groovy.runtime.typehandling.BigIntegerMathd803a9419e61fbf3
org.codehaus.groovy.runtime.typehandling.DefaultTypeTransformationb2c4387d4375532e
org.codehaus.groovy.runtime.typehandling.DefaultTypeTransformation.ArrayToUnmodifiableListAdapter4c6d210c030aa322
org.codehaus.groovy.runtime.typehandling.DefaultTypeTransformation.ArrayToUnmodifiableListAdapter.Itr9474f1fac9167246
org.codehaus.groovy.runtime.typehandling.FloatingPointMath8ab07fc7b236dd65
org.codehaus.groovy.runtime.typehandling.IntegerMath107d41aee36d7135
org.codehaus.groovy.runtime.typehandling.LongMath50042c5b069bf22f
org.codehaus.groovy.runtime.typehandling.NumberMathb85b68a1f30ed881
org.codehaus.groovy.runtime.typehandling.NumberMathModificationInfo16073598c4d4b809
org.codehaus.groovy.runtime.typehandling.ShortTypeHandlingc08b194108b9836a
org.codehaus.groovy.runtime.wrappers.PojoWrapper3741faa5c21fa787
org.codehaus.groovy.runtime.wrappers.Wrapper05c4e27a0f017ea6
org.codehaus.groovy.syntax.CSTNode9c95095c408ee026
org.codehaus.groovy.syntax.Numbersd931a1115a60761c
org.codehaus.groovy.syntax.Tokenbbe706a05ae1cfbc
org.codehaus.groovy.syntax.Typesf5e0a08b91e395a9
org.codehaus.groovy.tools.GroovyClasse520a4d734e2a8fc
org.codehaus.groovy.transform.ASTTransformationCollectorCodeVisitor72a87df1032ee3f2
org.codehaus.groovy.transform.ASTTransformationVisitor1f5b359ddf75cd2b
org.codehaus.groovy.transform.ASTTransformationVisitor.15ffb0c6700aefd7d
org.codehaus.groovy.transform.ASTTransformationVisitor.PriorityComparatore2fd871de956b9a5
org.codehaus.groovy.transform.AbstractASTTransformationca9d6d9bf6539b9d
org.codehaus.groovy.transform.AnnotationCollectorTransform.ClassChanger44fb321563c59340
org.codehaus.groovy.transform.BaseScriptASTTransformation815aa2be886dda6f
org.codehaus.groovy.transform.ImmutableASTTransformationdbc417cb9901a867
org.codehaus.groovy.transform.RecordTypeASTTransformation27cda53655e7e362
org.codehaus.groovy.transform.SealedASTTransformationda85b195ff2581cc
org.codehaus.groovy.transform.sc.StaticCompilationMetadataKeysa6a3062c7028b823
org.codehaus.groovy.transform.stc.AbstractExtensionMethodCache0d1e9f2bd2c4da66
org.codehaus.groovy.transform.stc.ExtensionMethodCache52e6e532f9ddc564
org.codehaus.groovy.transform.stc.StaticTypeCheckingSupportfcd4cb8422364b98
org.codehaus.groovy.transform.stc.StaticTypesMarkerf460ed64d77568a2
org.codehaus.groovy.transform.trait.SuperCallTraitTransformer990d8ddf3d469f6a
org.codehaus.groovy.transform.trait.TraitComposer2de0b96a67812ad2
org.codehaus.groovy.transform.trait.Traitsf5488878f5c44d04
org.codehaus.groovy.util.ArrayIteratorf582c4f807d5c550
org.codehaus.groovy.util.CharSequenceReadere9bd433fb40cce30
org.codehaus.groovy.util.ComplexKeyHashMapc801c1a5927f5528
org.codehaus.groovy.util.FastArrayb945b9f4917f9f43
org.codehaus.groovy.util.LazyReference0a3e7a1474054b14
org.codehaus.groovy.util.LazyReference.15b01fc8782a05a03
org.codehaus.groovy.util.LazyReference.233930cb46179f98d
org.codehaus.groovy.util.ListHashMapd7c1fa08cf1062f2
org.codehaus.groovy.util.LockableObject6ed59ce8262492d3
org.codehaus.groovy.util.ManagedConcurrentLinkedQueuebe31bdbbff860e62
org.codehaus.groovy.util.ManagedConcurrentLinkedQueue.Element741c99bf25e0d9bc
org.codehaus.groovy.util.ManagedConcurrentLinkedQueue.Itr4433e4cd0adb5841
org.codehaus.groovy.util.ManagedReference8da1298990b125a0
org.codehaus.groovy.util.ManagedReference.15470866116c99219
org.codehaus.groovy.util.ReferenceBundle637a9c10b34d98ad
org.codehaus.groovy.util.ReferenceManager1c01e820c5ba93db
org.codehaus.groovy.util.ReferenceManager.10df3dac8637686eb
org.codehaus.groovy.util.ReferenceManager.CallBackedManagerdb00888013e9be2c
org.codehaus.groovy.util.ReferenceTypef13e4ca82dc788f9
org.codehaus.groovy.util.ReferenceType.1d060dc74d9930918
org.codehaus.groovy.util.ReferenceType.2b86d31584085c531
org.codehaus.groovy.util.ReferenceType.39d616716af5c302e
org.codehaus.groovy.util.ReferenceType.48fd188adb44cedc0
org.codehaus.groovy.util.ReferenceType.HardRef4cc246f357c4090c
org.codehaus.groovy.util.ReferenceType.SoftRefaa887fa8ed19e3ec
org.codehaus.groovy.util.ReferenceType.WeakRefd0e05e81ab61dc95
org.codehaus.groovy.util.ReleaseInfofc0a62f7c2c749bd
org.codehaus.groovy.util.TripleKeyHashMap086876a0a70e4f98
org.codehaus.groovy.util.URLStreamsce7e066e58a43e4b
org.codehaus.groovy.vmplugin.VMPluginbf85144d47335304
org.codehaus.groovy.vmplugin.VMPluginFactory8d2a4ff9c1430923
org.codehaus.groovy.vmplugin.v8.CacheableCallSite5db7ff765db084fa
org.codehaus.groovy.vmplugin.v8.CacheableCallSite.1a5cfaa071644608d
org.codehaus.groovy.vmplugin.v8.IndyArrayAccess3eb693d876dc3dc5
org.codehaus.groovy.vmplugin.v8.IndyGuardsFiltersAndSignatures01e15bd1fa97f2a9
org.codehaus.groovy.vmplugin.v8.IndyInterface3b427312f600c386
org.codehaus.groovy.vmplugin.v8.IndyInterface.CallTypefc0a53e9d3642aa9
org.codehaus.groovy.vmplugin.v8.IndyInterface.FallbackSupplier16840c9bbf82dadf
org.codehaus.groovy.vmplugin.v8.IndyMath0552ca18a43331e3
org.codehaus.groovy.vmplugin.v8.Java8a452ba9d7583e3ef
org.codehaus.groovy.vmplugin.v8.Java8.13da115f4842b203e
org.codehaus.groovy.vmplugin.v8.MethodHandleWrapper01b9dcabff568fcd
org.codehaus.groovy.vmplugin.v8.MethodHandleWrapper.NullMethodHandleWrapper58f0576f3d10e39d
org.codehaus.groovy.vmplugin.v8.Selector00d236412b39f3da
org.codehaus.groovy.vmplugin.v8.Selector.1f6aaa48e97fa18d7
org.codehaus.groovy.vmplugin.v8.Selector.CastSelectoreddbb87a5ba51adc
org.codehaus.groovy.vmplugin.v8.Selector.InitSelectorac8819f9591a4abf
org.codehaus.groovy.vmplugin.v8.Selector.MethodSelector9c97a7cc3a1c82db
org.codehaus.groovy.vmplugin.v8.Selector.PropertySelectorae6c416122c2ba7f
org.codehaus.groovy.vmplugin.v8.TypeHelperae4280fee109c2cf
org.codehaus.groovy.vmplugin.v8.TypeTransformersa44582bee98351f4
org.gradle.api.internal.tasks.testing.AbstractTestDescriptorb7d6764e5c2ed1e2
org.gradle.api.internal.tasks.testing.DefaultParameterizedTestDescriptor723d1b1438bf2a1c
org.gradle.api.internal.tasks.testing.DefaultTestClassDescriptor29a580f844a707e9
org.gradle.api.internal.tasks.testing.DefaultTestClassRunInfo68a7e79b2914fd4d
org.gradle.api.internal.tasks.testing.DefaultTestDescriptor33f0f28b81218a57
org.gradle.api.internal.tasks.testing.DefaultTestOutputEvent8b3d72b91c24a69b
org.gradle.api.internal.tasks.testing.DefaultTestSuiteDescriptor7ca2225e2fb0b4b2
org.gradle.api.internal.tasks.testing.SuiteTestClassProcessorcce43c0b36c4880f
org.gradle.api.internal.tasks.testing.TestCompleteEvent94a6da85674017e0
org.gradle.api.internal.tasks.testing.TestStartEvent739a2bff9c36ddab
org.gradle.api.internal.tasks.testing.failure.DefaultThrowableToTestFailureMapper98b3c6d95620e628
org.gradle.api.internal.tasks.testing.failure.TestFailureMapperfad0361b08728e0d
org.gradle.api.internal.tasks.testing.failure.mappers.AssertErrorMapperf8f52c2b08659a75
org.gradle.api.internal.tasks.testing.failure.mappers.AssertjMultipleAssertionsErrorMapper6d9c88eceee97e47
org.gradle.api.internal.tasks.testing.failure.mappers.JUnitComparisonTestFailureMapper1870ccedd70c62d3
org.gradle.api.internal.tasks.testing.failure.mappers.OpenTestAssertionFailedMapper670c8d72a39d27d6
org.gradle.api.internal.tasks.testing.failure.mappers.OpenTestMultipleFailuresErrorMapperfd9e46b889182617
org.gradle.api.internal.tasks.testing.filter.TestFilterSpeca7526e6ebab295a4
org.gradle.api.internal.tasks.testing.junit.AbstractJUnitTestClassProcessore052047f692cb949
org.gradle.api.internal.tasks.testing.junitplatform.JUnitPlatformSpecf5579f12caeb524a
org.gradle.api.internal.tasks.testing.junitplatform.JUnitPlatformTestClassProcessor68676c8e20078af0
org.gradle.api.internal.tasks.testing.junitplatform.JUnitPlatformTestClassProcessor.BackwardsCompatibleLauncherSession6103e40f3dbe5881
org.gradle.api.internal.tasks.testing.junitplatform.JUnitPlatformTestClassProcessor.CollectAllTestClassesExecutoredf39f312e150ffd
org.gradle.api.internal.tasks.testing.junitplatform.JUnitPlatformTestClassProcessorFactoryaf09c0eae5fa5ced
org.gradle.api.internal.tasks.testing.junitplatform.JUnitPlatformTestExecutionListenerb76948e94474cdbc
org.gradle.api.internal.tasks.testing.processors.CaptureTestOutputTestResultProcessore0ec4a50ce7a0011
org.gradle.api.internal.tasks.testing.redirector.DefaultStandardOutputRedirector66011962fbed0a65
org.gradle.api.internal.tasks.testing.redirector.DefaultStandardOutputRedirector.DiscardActionc8ea3545fa32c9b3
org.gradle.api.internal.tasks.testing.redirector.DefaultStandardOutputRedirector.WriteAction77c2ee9a9fa842b1
org.gradle.api.internal.tasks.testing.redirector.JULRedirector14551eb76b8ecae2
org.gradle.api.internal.tasks.testing.redirector.TestOutputRedirector7ff936d162af4d8a
org.gradle.api.internal.tasks.testing.redirector.TestOutputRedirector.Forwarder93ca0532f92cb2b9
org.gradle.api.internal.tasks.testing.results.AttachParentTestResultProcessor6d02567fd2a7d62d
org.gradle.api.internal.tasks.testing.worker.TestEventSerializer86c8a2a7f444af17
org.gradle.api.internal.tasks.testing.worker.TestEventSerializer.DefaultNestedTestSuiteDescriptorSerializerd06ce1b3d75f97ec
org.gradle.api.internal.tasks.testing.worker.TestEventSerializer.DefaultParameterizedTestDescriptorSerializer110fe58b6a08d18a
org.gradle.api.internal.tasks.testing.worker.TestEventSerializer.DefaultTestClassDescriptorSerializer7a33def0e5f77169
org.gradle.api.internal.tasks.testing.worker.TestEventSerializer.DefaultTestClassRunInfoSerializer756d4261d461b736
org.gradle.api.internal.tasks.testing.worker.TestEventSerializer.DefaultTestDescriptorSerializerf22763fb9799791b
org.gradle.api.internal.tasks.testing.worker.TestEventSerializer.DefaultTestFailureSerializera3fa0d4e2033ff7b
org.gradle.api.internal.tasks.testing.worker.TestEventSerializer.DefaultTestMethodDescriptorSerializer8399fed70a5a5583
org.gradle.api.internal.tasks.testing.worker.TestEventSerializer.DefaultTestOutputEventSerializerab68b99e3ba8e6a7
org.gradle.api.internal.tasks.testing.worker.TestEventSerializer.DefaultTestSuiteDescriptorSerializer00e4063cb0a3717e
org.gradle.api.internal.tasks.testing.worker.TestEventSerializer.IdSerializercdde38a9abcdd3c1
org.gradle.api.internal.tasks.testing.worker.TestEventSerializer.NullableSerializer0b8b3e72f4fb326b
org.gradle.api.internal.tasks.testing.worker.TestEventSerializer.TestCompleteEventSerializer971566bf8e6bbbc9
org.gradle.api.internal.tasks.testing.worker.TestEventSerializer.TestStartEventSerializer46e7b262d38e1858
org.gradle.api.internal.tasks.testing.worker.TestEventSerializer.WorkerTestSuiteDescriptorSerializere06ab7fe365be81b
org.gradle.api.internal.tasks.testing.worker.TestWorkerb93df52c3074c532
org.gradle.api.internal.tasks.testing.worker.TestWorker.1b8314e777e355f7d
org.gradle.api.internal.tasks.testing.worker.TestWorker.2cc4e0ec7222faee6
org.gradle.api.internal.tasks.testing.worker.TestWorker.3030c348be1c106a4
org.gradle.api.internal.tasks.testing.worker.TestWorker.State903c35047a140ff9
org.gradle.api.internal.tasks.testing.worker.TestWorker.TestFrameworkServiceRegistry12a7a3db90dec4e3
org.gradle.api.internal.tasks.testing.worker.WorkerTestClassProcessor0f6525b02f2bb3d2
org.gradle.api.internal.tasks.testing.worker.WorkerTestClassProcessor.WorkerTestSuiteDescriptorb3807e9b92351840
org.gradle.api.logging.LogLevel236e938e30516638
org.gradle.api.tasks.testing.TestOutputEvent.Destinationa16caef1c278a81f
org.gradle.api.tasks.testing.TestResult.ResultType12ffe39597b5f810
org.gradle.internal.Cast6130c81e08d81640
org.gradle.internal.MutableBoolean349de2b8a37d4338
org.gradle.internal.SystemPropertiesf454284e0229cc37
org.gradle.internal.actor.internal.DefaultActorFactory43acc9e5b3a492ea
org.gradle.internal.actor.internal.DefaultActorFactory.BlockingActore2e64c7c8141667f
org.gradle.internal.concurrent.AbstractDelegatingExecutorService49d3dededcea40d0
org.gradle.internal.concurrent.AbstractManagedExecutor3e801af2d608b388
org.gradle.internal.concurrent.AbstractManagedExecutor.1e617b5280c0b131e
org.gradle.internal.concurrent.CompositeStoppable4a36b916ba0a65be
org.gradle.internal.concurrent.CompositeStoppable.17991c35593ad107a
org.gradle.internal.concurrent.CompositeStoppable.3e70f2f4c49f9854e
org.gradle.internal.concurrent.DefaultExecutorFactory63847aa635eddd82
org.gradle.internal.concurrent.DefaultExecutorFactory.TrackedManagedExecutor36f4bc1cd93c039c
org.gradle.internal.concurrent.ExecutorPolicy.CatchAndRecordFailures2aacf6d3d0dd2240
org.gradle.internal.concurrent.ManagedExecutorImplce6f255f6fc1de83
org.gradle.internal.concurrent.ThreadFactoryImpl1d388becbfb01ad8
org.gradle.internal.dispatch.ContextClassLoaderDispatch132d0c3fd93e8141
org.gradle.internal.dispatch.ContextClassLoaderProxyd72ee515504b89e6
org.gradle.internal.dispatch.MethodInvocationbbd5401404e52b1f
org.gradle.internal.dispatch.ProxyDispatchAdapter67194db65692ab5d
org.gradle.internal.dispatch.ProxyDispatchAdapter.DispatchingInvocationHandler82935bb9f2db6b85
org.gradle.internal.dispatch.ReflectionDispatch6976fdf67f3e8979
org.gradle.internal.event.AbstractBroadcastDispatch0f5ffe97fa60f855
org.gradle.internal.event.BroadcastDispatch971f57b98f410335
org.gradle.internal.event.BroadcastDispatch.CompositeDispatch5d8a079ebb58640f
org.gradle.internal.event.BroadcastDispatch.EmptyDispatch136c24cb564bd0f5
org.gradle.internal.event.BroadcastDispatch.SingletonDispatch66b75507cc7e3700
org.gradle.internal.event.ListenerBroadcast68e3a1fc76137d54
org.gradle.internal.id.CompositeIdGeneratorf1c607aa5fccdbaa
org.gradle.internal.id.CompositeIdGenerator.CompositeIde710c854f802c58b
org.gradle.internal.id.LongIdGenerator6f8168bf486a560d
org.gradle.internal.id.UUIDGenerator047a43ab94df6ffa
org.gradle.internal.io.BufferCaster88a8af829d9f2dca
org.gradle.internal.io.LineBufferingOutputStream6219fe05ee4c9468
org.gradle.internal.io.LinePerThreadBufferingOutputStream6663ead4c1825a46
org.gradle.internal.io.NullOutputStreameefcfe0665bbfe4c
org.gradle.internal.io.StreamByteBuffere2c8064318ed213e
org.gradle.internal.io.StreamByteBuffer.StreamByteBufferChunk601a3b84d114befa
org.gradle.internal.io.StreamByteBuffer.StreamByteBufferInputStream1ba4f1560d8a3753
org.gradle.internal.io.StreamByteBuffer.StreamByteBufferOutputStream9243acabfced691c
org.gradle.internal.logging.config.LoggingSystemAdapter2bb5150ee66232e9
org.gradle.internal.logging.config.LoggingSystemAdapter.SnapshotImpl221de860d84422df
org.gradle.internal.logging.console.DefaultUserInputReceiver44c8536611e071b4
org.gradle.internal.logging.events.EndOutputEvent0d8edd2a5ce274ee
org.gradle.internal.logging.events.LogLevelChangeEvent33b762c6d5852de7
org.gradle.internal.logging.events.OutputEvent85bce87f1bcda18d
org.gradle.internal.logging.events.OutputEventListenere7d50a9306531b49
org.gradle.internal.logging.events.OutputEventListener.15c6014dff2070607
org.gradle.internal.logging.events.StyledTextOutputEvent11f48fb1a17330c8
org.gradle.internal.logging.events.StyledTextOutputEvent.Spanc55f799d2631f13d
org.gradle.internal.logging.serializer.LogEventSerializerb6d88af223db296a
org.gradle.internal.logging.serializer.LogLevelChangeEventSerializerf77a59533dde75ec
org.gradle.internal.logging.serializer.SpanSerializer5f773b7d1ad07c9f
org.gradle.internal.logging.serializer.StyledTextOutputEventSerializerfaebed27ac3e65ba
org.gradle.internal.logging.services.DefaultLoggingManager61e216a064052ff1
org.gradle.internal.logging.services.DefaultLoggingManager.StartableLoggingRouter78396be937af48de
org.gradle.internal.logging.services.DefaultLoggingManager.StartableLoggingSystemb121a97021902643
org.gradle.internal.logging.services.DefaultLoggingManagerFactoryeb1ab97193f0d177
org.gradle.internal.logging.services.LoggingServiceRegistryb54e061596ce61fc
org.gradle.internal.logging.services.LoggingServiceRegistry.1b3c4addf0f9c5893
org.gradle.internal.logging.services.LoggingServiceRegistry.CommandLineLoggingab0f0f2f9e415a5d
org.gradle.internal.logging.services.TextStreamOutputEventListeneref4d0c3267356598
org.gradle.internal.logging.sink.OutputEventListenerManagerd6dee3d6fea49020
org.gradle.internal.logging.sink.OutputEventListenerManager.11e218a705ff0ee7e
org.gradle.internal.logging.sink.OutputEventRendererad1cc4fc552ab292
org.gradle.internal.logging.sink.OutputEventRenderer.1b94418b2e260aede
org.gradle.internal.logging.sink.OutputEventRenderer.2a956801cd98ca633
org.gradle.internal.logging.sink.OutputEventRenderer.LazyListener7ee882569b166e1e
org.gradle.internal.logging.sink.OutputEventRenderer.SnapshotImpl827634628a47c5f5
org.gradle.internal.logging.sink.OutputEventTransformer06c2270eef0e291e
org.gradle.internal.logging.slf4j.BuildOperationAwareLogger6a70f9123229323f
org.gradle.internal.logging.slf4j.OutputEventListenerBackedLogger30ddd0a8ff91b5f5
org.gradle.internal.logging.slf4j.OutputEventListenerBackedLoggerContext9d35d4b7a722eb08
org.gradle.internal.logging.slf4j.OutputEventListenerBackedLoggerContext.NoOpLogger3868cc8d50014a37
org.gradle.internal.logging.slf4j.Slf4jLoggingConfigurer75fba29c3739b15f
org.gradle.internal.logging.source.DefaultStdErrLoggingSystemfd3dd0caab2f1d95
org.gradle.internal.logging.source.DefaultStdOutLoggingSystem528bb39bfb67c3ae
org.gradle.internal.logging.source.JavaUtilLoggingSystem5e967b17aabfd442
org.gradle.internal.logging.source.JavaUtilLoggingSystem.SnapshotImpl15dfc30250723749
org.gradle.internal.logging.source.PrintStreamLoggingSystem1ae6e6b715c6b3f9
org.gradle.internal.logging.source.PrintStreamLoggingSystem.165643cb979acba64
org.gradle.internal.logging.source.PrintStreamLoggingSystem.OutputEventDestination8c1ddf1476568828
org.gradle.internal.logging.source.PrintStreamLoggingSystem.PrintStreamDestination9e7273f370028123
org.gradle.internal.logging.source.PrintStreamLoggingSystem.SnapshotImpl8f80a46f9780a57e
org.gradle.internal.logging.text.StyledTextOutput.Styled676557b62e3f601
org.gradle.internal.nativeintegration.filesystem.services.FileSystemServices4556096f1c5a9c8e
org.gradle.internal.nativeintegration.jansi.DefaultJansiRuntimeResolver913dbea9c5665791
org.gradle.internal.nativeintegration.jansi.JansiBootPathConfigurer3a766bce65ac1a48
org.gradle.internal.nativeintegration.jansi.JansiLibraryFactory0cbaac430d6656c4
org.gradle.internal.nativeintegration.jansi.JansiStorageLocatorc8bff1ccb071f9b6
org.gradle.internal.nativeintegration.services.NativeServicese9eace075b8c8a16
org.gradle.internal.nativeintegration.services.NativeServices.16882d4780ccaa810
org.gradle.internal.nativeintegration.services.NativeServices.NativeFeatures70decf07458f8bcc
org.gradle.internal.nativeintegration.services.NativeServices.NativeFeatures.1d779b50dcb640ca1
org.gradle.internal.nativeintegration.services.NativeServices.NativeFeatures.2c75506e3562f5bfc
org.gradle.internal.nativeintegration.services.NativeServices.NativeServicesMode32fb288ccd674693
org.gradle.internal.nativeintegration.services.NativeServices.NativeServicesMode.1b08a5592f2ef59e9
org.gradle.internal.nativeintegration.services.NativeServices.NativeServicesMode.22d7b0e59d80896d0
org.gradle.internal.nativeintegration.services.NativeServices.NativeServicesMode.3768233bcc6732f74
org.gradle.internal.reflect.JavaMethod5541c31d24227b86
org.gradle.internal.remote.internal.KryoBackedMessageSerializer0028157720ec1f27
org.gradle.internal.remote.internal.hub.ConnectionSet323708d9214e34e4
org.gradle.internal.remote.internal.hub.ConnectionState250fb1b274991d9a
org.gradle.internal.remote.internal.hub.DefaultMethodArgsSerializerb5f4b38125033ffd
org.gradle.internal.remote.internal.hub.DefaultMethodArgsSerializer.ArraySerializer16505d5ccbb1b78b
org.gradle.internal.remote.internal.hub.DefaultMethodArgsSerializer.EmptyArraySerializercdc53c79a631aa33
org.gradle.internal.remote.internal.hub.IncomingQueue0e8ecdb8f31efe51
org.gradle.internal.remote.internal.hub.InterHubMessageSerializer7d84d4aa85858c73
org.gradle.internal.remote.internal.hub.InterHubMessageSerializer.MessageReaderab1cd6753eb75a29
org.gradle.internal.remote.internal.hub.InterHubMessageSerializer.MessageWriter3e4611f758508afb
org.gradle.internal.remote.internal.hub.JavaSerializationBackedMethodArgsSerializer4c7a738ee4525ff6
org.gradle.internal.remote.internal.hub.MessageHub1326887a1f1da0ac
org.gradle.internal.remote.internal.hub.MessageHub.ChannelDispatch8a9dfd1b6306d8e6
org.gradle.internal.remote.internal.hub.MessageHub.ConnectionDispatchdf1d0a86180d66e4
org.gradle.internal.remote.internal.hub.MessageHub.ConnectionReceivee1dc78071e8e957d
org.gradle.internal.remote.internal.hub.MessageHub.Discard63a8d677cc1f9101
org.gradle.internal.remote.internal.hub.MessageHub.Handler3d232f51f2c02828
org.gradle.internal.remote.internal.hub.MessageHub.State1b76747d7bce6b89
org.gradle.internal.remote.internal.hub.MessageHubBackedClient77c2124c3c43d832
org.gradle.internal.remote.internal.hub.MessageHubBackedObjectConnectionc23964928f1aff22
org.gradle.internal.remote.internal.hub.MessageHubBackedObjectConnection.1ac806a6bc6b1b21d
org.gradle.internal.remote.internal.hub.MessageHubBackedObjectConnection.28ac38215966e3a20
org.gradle.internal.remote.internal.hub.MessageHubBackedObjectConnection.DispatchWrapper9aa5d8679dbc6601
org.gradle.internal.remote.internal.hub.MethodInvocationSerializer47063ab293644e83
org.gradle.internal.remote.internal.hub.MethodInvocationSerializer.MethodDetailsb6b7fb55e88cc4b9
org.gradle.internal.remote.internal.hub.MethodInvocationSerializer.MethodInvocationReadere6b939136f207ff5
org.gradle.internal.remote.internal.hub.MethodInvocationSerializer.MethodInvocationWriterc3b77db1b2556afe
org.gradle.internal.remote.internal.hub.OutgoingQueuefbcc05506ad40c68
org.gradle.internal.remote.internal.hub.protocol.ChannelIdentifier7697ff6a7c712869
org.gradle.internal.remote.internal.hub.protocol.ChannelMessage9bff479666e58802
org.gradle.internal.remote.internal.hub.protocol.EndOfStreamf29ffed85365f7db
org.gradle.internal.remote.internal.hub.protocol.InterHubMessage0c6e49b6ec077e16
org.gradle.internal.remote.internal.hub.protocol.InterHubMessage.Delivery0652d09c2a7fd1ac
org.gradle.internal.remote.internal.hub.queue.EndPointQueue8038a5636529123d
org.gradle.internal.remote.internal.hub.queue.MultiChannelQueuebcaac9c224068764
org.gradle.internal.remote.internal.hub.queue.MultiEndPointQueue27222a892157733f
org.gradle.internal.remote.internal.hub.queue.MultiEndPointQueue.144049b3edc682954
org.gradle.internal.remote.internal.hub.queue.QueueInitializerad18361c23e679b1
org.gradle.internal.remote.internal.inet.MultiChoiceAddress91381aa03cdd48e7
org.gradle.internal.remote.internal.inet.MultiChoiceAddressSerializer7ffc395650705aaa
org.gradle.internal.remote.internal.inet.SocketConnectCompletion0da46ac4ccd1c9ce
org.gradle.internal.remote.internal.inet.SocketConnectionb2e24cbbef327fb6
org.gradle.internal.remote.internal.inet.SocketConnection.11504f3fae897ccd8
(JaCoCo session data: class-id table pairing each analyzed class with its JaCoCo class id hash — entries span org.gradle.internal.*, org.jocl.*, org.junit.jupiter.*, org.junit.platform.*, org.objenesis.*, org.opentest4j.*, org.slf4j.*, org.spockframework.*, spock.*, and the neureka test classes under st.*, testutility.*, and ut.*.)
ut.neureka.Neureka_Spec.__spock_feature_0_1prov1_closure7e79b269f3c4e4b57
ut.neureka.Neureka_Spec.__spock_feature_0_1prov1_closure829d31446ea81f161
ut.neureka.Neureka_Spec.__spock_feature_0_1prov1_closure9418fdc43bda474c7
ut.neureka.Neureka_Spec.__spock_feature_0_1prov2_closure15edb68bcf644b882d
ut.neureka.Neureka_Spec.__spock_feature_0_1prov2_closure16919d97858c5d0361
ut.neureka.Neureka_Spec.__spock_feature_0_1prov2_closure179f9f8ed3c55da828
ut.neureka.Neureka_Spec.__spock_feature_0_1prov2_closure1853c0cb204091ae71
ut.neureka.Neureka_Spec.__spock_feature_0_1prov2_closure192f797c79ea153e43
ut.neureka.Neureka_Spec.__spock_feature_0_1prov2_closure203d1a219929d1e341
ut.neureka.Neureka_Spec.__spock_feature_0_1prov2_closure2109dbb243cb7c639c
ut.neureka.Neureka_Spec.__spock_feature_0_1prov2_closure22caf851e1cde83d60
ut.neureka.Neureka_Spec.__spock_feature_0_1prov2_closure23c8c18de18efdb9d6
ut.neureka.Neureka_Spec.__spock_feature_0_1prov2_closure246f8ca0b2d107d0fc
ut.neureka.Neureka_Spec.__spock_feature_0_2_closure259b382386f872be84
ut.neureka.Neureka_Spec.__spock_feature_0_2_closure26ae679dd2bf349df8
ut.neureka.Neureka_Spec.__spock_feature_0_3_closure27356f853f6505e297
ut.neureka.Neureka_Spec.__spock_feature_0_4_closure28dace0a29d1e64347
ut.neureka.Neureka_Spec.__spock_feature_0_5_closure295e5feabad44c9c6b
ut.neureka.Neureka_Spec._setup_closure178308babce9d2b7a
ut.neureka.Neureka_Spec._setup_closure2676ad93662a58172
ut.optimization.ADAM_Specb0e4163c78e0d5c2
ut.optimization.ADAM_Spec.__spock_feature_0_1_closure2e3f82a531216cc89
ut.optimization.ADAM_Spec.__spock_feature_0_1_closure36521bdf89ef5379b
ut.optimization.ADAM_Spec.__spock_feature_0_1_closure4928f740fb853536f
ut.optimization.ADAM_Spec._setup_closure1d9751af5d9562bb2
ut.optimization.AdaGrad_Spec9fb6346094396d52
ut.optimization.AdaGrad_Spec._setup_closure1ab51e383142eb818
ut.optimization.Momentum_Spec14d197e83cd29489
ut.optimization.Momentum_Spec._setup_closure1e9f685ce1796bbfc
ut.optimization.RMSProp_Spec894ae3d6e7335c09
ut.optimization.RMSProp_Spec._setup_closure16996d28efe7ea0b0
ut.tensors.Copy_Specab6a9d9d3c92026b
ut.tensors.Copy_Spec.__spock_feature_0_0_closure17a746e50155a5fcd
ut.tensors.Copy_Spec.__spock_feature_0_1_closure285a79a315a22a49d
ut.tensors.Copy_Spec.__spock_feature_0_2_closure32db2bf61db0ce9b7
ut.tensors.Copy_Spec.__spock_feature_0_3_closure4b701005c57336f3e
ut.tensors.Copy_Spec.__spock_feature_0_3prov0_closure59362b11470052bf1
ut.tensors.Copy_Spec.__spock_feature_0_3prov0_closure643f96aa74f980eb1
ut.tensors.DimTrim_Specabc0da1801e5c358
ut.tensors.Expression_Based_Tensor_Instantiation_Spec786788b241ca95a7
ut.tensors.Expression_Based_Tensor_Instantiation_Spec._setup_closure1a3fdcec52cdc1660
ut.tensors.Fluent_Tensor_Creation_Spec26447ce55deb254a
ut.tensors.Fluent_Tensor_Creation_Spec.__spock_feature_0_4prov1_closure19d5e35e3350011d3
ut.tensors.Fluent_Tensor_Creation_Spec.__spock_feature_0_4prov1_closure29cbab550052c961e
ut.tensors.Fluent_Tensor_Creation_Spec.__spock_feature_0_4prov1_closure3ab1749ab8ecdfd5c
ut.tensors.Fluent_Tensor_Creation_Spec.__spock_feature_0_4prov1_closure46ec7919db291c069
ut.tensors.Fluent_Tensor_Creation_Spec.__spock_feature_0_4prov1_closure5f0d4edb4a73ee905
ut.tensors.Fluent_Tensor_Creation_Spec.__spock_feature_0_4prov1_closure69deaaafb25ecdcff
ut.tensors.Fluent_Tensor_Creation_Spec.__spock_feature_0_4prov1_closure7c1544f2f2793a21d
ut.tensors.Fluent_Tensor_Creation_Spec.__spock_feature_0_4prov1_closure8ba9d7da4857c4991
ut.tensors.Functional_Nda_Spec5f36dd76d4c0b792
ut.tensors.Functional_Nda_Spec.__spock_feature_0_0_closure362aff5c2cf477d13
ut.tensors.Functional_Nda_Spec.__spock_feature_0_1_closure47cd7b3c5902c5596
ut.tensors.Functional_Nda_Spec.__spock_feature_0_1_closure5625da25609da5168
ut.tensors.Functional_Nda_Spec.__spock_feature_0_1_closure65c972c78da854a15
ut.tensors.Functional_Nda_Spec.__spock_feature_0_2_closure10f917e869d8732623
ut.tensors.Functional_Nda_Spec.__spock_feature_0_2_closure11c994a27118800b99
ut.tensors.Functional_Nda_Spec.__spock_feature_0_2_closure1205411f098c627ca2
ut.tensors.Functional_Nda_Spec.__spock_feature_0_2_closure130b2919e2f46e2b4a
ut.tensors.Functional_Nda_Spec.__spock_feature_0_2_closure146fea67816416f87e
ut.tensors.Functional_Nda_Spec.__spock_feature_0_2_closure155934b9dcfa61e932
ut.tensors.Functional_Nda_Spec.__spock_feature_0_2_closure169b57df57a8f8d9d6
ut.tensors.Functional_Nda_Spec.__spock_feature_0_2_closure17588903ed868fc95c
ut.tensors.Functional_Nda_Spec.__spock_feature_0_2_closure18aac16f5f4a59c92b
ut.tensors.Functional_Nda_Spec.__spock_feature_0_2_closure195e8062332664ecdd
ut.tensors.Functional_Nda_Spec.__spock_feature_0_2_closure732b5746fe1781382
ut.tensors.Functional_Nda_Spec.__spock_feature_0_2_closure89b0cee5bf774f58c
ut.tensors.Functional_Nda_Spec.__spock_feature_0_2_closure95ace492927e0449b
ut.tensors.Functional_Nda_Spec.__spock_feature_0_3_closure2014026edc45fbbc7a
ut.tensors.Functional_Nda_Spec.__spock_feature_0_4_closure21ab3ada5b7e4e44d7
ut.tensors.Functional_Nda_Spec.__spock_feature_0_5_closure22c36043c5fce3cbd9
ut.tensors.Functional_Nda_Spec.__spock_feature_0_5_closure23761057584cecc2ce
ut.tensors.Functional_Nda_Spec.__spock_feature_0_6_closure24206986295581da5e
ut.tensors.Functional_Nda_Spec.__spock_feature_0_7_closure2505e12c4f3153f69f
ut.tensors.Functional_Nda_Spec.__spock_feature_0_7_closure26b65d3e05ab895990
ut.tensors.Functional_Nda_Spec.__spock_feature_0_7_closure27e57b07b10fcd8772
ut.tensors.Functional_Nda_Spec._setup_closure1a2373f13a9e192ac
ut.tensors.Functional_Nda_Spec._setup_closure2338250c3be787601
ut.tensors.Functional_Tensor_Spec2790b9d0fa963944
ut.tensors.Functional_Tensor_Spec.__spock_feature_0_0_closure547a67ae36033453f
ut.tensors.Functional_Tensor_Spec.__spock_feature_0_1_closure6e5dae0a593480ac6
ut.tensors.Functional_Tensor_Spec.__spock_feature_0_2_closure205d42001c321c2c5b
ut.tensors.Functional_Tensor_Spec.__spock_feature_0_3_closure21822f2f2257e8a7aa
ut.tensors.Functional_Tensor_Spec.__spock_feature_0_4_closure22261eb91734670fe4
ut.tensors.Functional_Tensor_Spec.__spock_feature_0_5_closure25a4d79284d2c5d292
ut.tensors.Functional_Tensor_Spec.__spock_feature_0_6_closure28db3e684efea4612a
ut.tensors.Functional_Tensor_Spec.__spock_feature_0_6_closure29a5e7b6502da40a6e
ut.tensors.Functional_Tensor_Spec.__spock_feature_0_6_closure30e337853b71459b42
ut.tensors.Functional_Tensor_Spec._cleanup_closure44ec21e97bab48184
ut.tensors.Functional_Tensor_Spec._setup_closure188ccacdb70029c86
ut.tensors.Functional_Tensor_Spec._setup_closure250069d8f8339c45a
ut.tensors.Functional_Tensor_Spec._setup_closure33284d1684e10c80e
ut.tensors.Reshape_Specd626b8e4683bc791
ut.tensors.Tensor_As_Container_Spec7423d0b3597d30a4
ut.tensors.Tensor_As_Container_Spec.ComplexNumber2e62737bf9033911
ut.tensors.Tensor_As_Container_Spec.__spock_feature_0_2_closure2d47707bec2e3f669
ut.tensors.Tensor_As_Container_Spec.__spock_feature_0_2_closure369ce4052d61db3ef
ut.tensors.Tensor_As_Container_Spec.__spock_feature_0_3_closure10fda041b9f12712a8
ut.tensors.Tensor_As_Container_Spec.__spock_feature_0_3_closure11452ac740b3af8eb0
ut.tensors.Tensor_As_Container_Spec.__spock_feature_0_3_closure1238bea9652d87e28f
ut.tensors.Tensor_As_Container_Spec.__spock_feature_0_3_closure13413e13fc443e812b
ut.tensors.Tensor_As_Container_Spec.__spock_feature_0_3_closure1445471be04bdbcf21
ut.tensors.Tensor_As_Container_Spec.__spock_feature_0_3_closure152c2cf2cf4a9c10f4
ut.tensors.Tensor_As_Container_Spec.__spock_feature_0_3_closure4d67fff274a5224f8
ut.tensors.Tensor_As_Container_Spec.__spock_feature_0_3_closure5c92e724d0ad3e7d6
ut.tensors.Tensor_As_Container_Spec.__spock_feature_0_3_closure6e1ce4f73fe5bcab0
ut.tensors.Tensor_As_Container_Spec.__spock_feature_0_3_closure752a41566e7053baa
ut.tensors.Tensor_As_Container_Spec.__spock_feature_0_3_closure8c0f7f78fdc024396
ut.tensors.Tensor_As_Container_Spec.__spock_feature_0_3_closure9414dbed256dcedc6
ut.tensors.Tensor_As_Container_Spec._setupSpec_closure114e2579fe28116a5
ut.tensors.Tensor_Assign_Spec77aee2f1b882e4bd
ut.tensors.Tensor_Conversion_Spec5c860bea4e9cd5ba
ut.tensors.Tensor_Conversion_Spec.__spock_feature_0_2_closure1aebb43514404788a
ut.tensors.Tensor_Convolution_Spec411f8ab0ee28b1b2
ut.tensors.Tensor_Convolution_Spec.__spock_feature_0_0_closure3689e33b98a9be9ed
ut.tensors.Tensor_Convolution_Spec.__spock_feature_0_1_closure45121c125c5bb4a01
ut.tensors.Tensor_Convolution_Spec.__spock_feature_0_2_closure5972932fdb40ba94d
ut.tensors.Tensor_Convolution_Spec.__spock_feature_0_3_closure6785380041cc7c96e
ut.tensors.Tensor_Convolution_Spec.__spock_feature_0_6_closure7007133fb8cad3244
ut.tensors.Tensor_Convolution_Spec.__spock_feature_0_6_closure834993b19b8670203
ut.tensors.Tensor_Convolution_Spec._setup_closure171a726d5d5549756
ut.tensors.Tensor_Convolution_Spec._setup_closure27da58ed02fb4cda5
ut.tensors.Tensor_Device_Spec81d4efc8785cf7ee
ut.tensors.Tensor_Device_Spec.__spock_feature_0_0_closure244a83d36234ad670
ut.tensors.Tensor_Device_Spec.__spock_feature_0_1_closure381a7ac9c55c809ee
ut.tensors.Tensor_Device_Spec.__spock_feature_0_2_closure432c807fe42f00dd5
ut.tensors.Tensor_Device_Spec._setup_closure1a6e82f07858f3139
ut.tensors.Tensor_Dot_Product_Spec3f9d0acfae25eced
ut.tensors.Tensor_Dot_Product_Spec.__spock_feature_0_3_closure1c881eb92dd417bd1
ut.tensors.Tensor_Dot_Product_Spec.__spock_feature_0_4_closure2050aebba456281cd
ut.tensors.Tensor_Dot_Product_Spec.__spock_feature_0_5_closure346a8a7725eac0e5a
ut.tensors.Tensor_Dot_Product_Spec.__spock_feature_0_6_closure49363c2006cfeb76e
ut.tensors.Tensor_Generics_Spec65c72832f6502fcc
ut.tensors.Tensor_Generics_Spec._setup_closure142b8bfd2e4a4d075
ut.tensors.Tensor_Gradient_Specf802a0a3cc001132
ut.tensors.Tensor_Gradient_Spec._setupSpec_closure11ce8c45649ee1836
ut.tensors.Tensor_IO_Spec00f21e73e6df6a40
ut.tensors.Tensor_IO_Spec.__spock_feature_0_2_closure2cb1087bb1f12168e
ut.tensors.Tensor_IO_Spec.__spock_feature_0_3_closure344a90f54fa7071b4
ut.tensors.Tensor_IO_Spec.__spock_feature_0_4_closure4ea060d806919605b
ut.tensors.Tensor_IO_Spec._setup_closure13182bc8d27cb4560
ut.tensors.Tensor_Instantiation_Spec30c91286bcb86b7a
ut.tensors.Tensor_Instantiation_Spec._setup_closure12bf33efe1d5649da
ut.tensors.Tensor_Interop_Spec141b950746c54e8c
ut.tensors.Tensor_Layout_Spec921984b85a52a0ae
ut.tensors.Tensor_Layout_Spec.__spock_feature_0_1_closure3fb0934d32ca1f96f
ut.tensors.Tensor_Layout_Spec.__spock_feature_0_1_closure4bf8da94d75cbfb1b
ut.tensors.Tensor_Layout_Spec.__spock_feature_0_1_closure5ddabaf4e6eec4da0
ut.tensors.Tensor_Layout_Spec.__spock_feature_0_1_closure6c8207dffc14db146
ut.tensors.Tensor_Layout_Spec.__spock_feature_0_1_closure7d67815780fddc438
ut.tensors.Tensor_Layout_Spec.__spock_feature_0_1_closure8f28491bcba73a87a
ut.tensors.Tensor_Layout_Spec._setup_closure14bc56e5d739a5d7c
ut.tensors.Tensor_Layout_Spec._setup_closure25dcc1f3457edc7bd
ut.tensors.Tensor_Operation_Specbe20a7d5a2eb92cf
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_10_closure704dae0eee70d0a26
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_10_closure8682e3bae7135c8f4
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_10_closure9e6a6fddf22f580c6
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_11_closure1086d805e7a92ce567
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_11_closure110e52a312dfc60835
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_11_closure121164fa7c7d4de1ca
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_11_closure13804f803a83637c4e
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_11_closure14566f69620cc7489a
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_11_closure15b976ade29674a195
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_11_closure164f3156b843d3fdb3
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_11_closure17cb21100b4e0f2986
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_11prov4_closure18457ae795b6ffc399
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_11prov4_closure1936f48b40ca454a22
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_11prov4_closure20105e82fcb74664e2
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_11prov4_closure2163d0ee29cbfced59
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_11prov4_closure2206fe9fbc1375d80d
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_11prov4_closure23c84f8f364974c5f1
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_11prov4_closure24f154407204f0fa08
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_11prov4_closure25ffefb27270b33efd
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_11prov4_closure26b17225b5607b53f3
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_11prov4_closure27def8afbb7290db0d
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_11prov4_closure28082a2db0ce23f3fe
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_11prov4_closure2933e3e1658b2b6c9e
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_11prov4_closure307325d81392c30c15
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_11prov4_closure31f8a89bfa1c995e2d
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_11prov4_closure32883de76205e71e26
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_11prov4_closure33fbb38bb7795d979d
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_11prov4_closure34511ed3e4ebb99cba
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_11prov4_closure352290bf3197031501
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_11prov4_closure36d2097f5a45fe3b04
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_11prov4_closure3759843cb3cba4693c
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_11prov4_closure387755918485f79a4c
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_11prov4_closure39bcde8c14ee6c7f73
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_11prov4_closure40feb197acc3f0927f
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_11prov4_closure41c15ba25b74a1566d
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_11prov4_closure42f176672e14171f4f
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_11prov4_closure43ce9c52d9a346db5d
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_11prov4_closure44950d12f83b425de5
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_11prov4_closure4512e2564a9b32f573
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_11prov4_closure46aadf276eb02062c6
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_11prov4_closure476d363da5f5917d57
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_11prov4_closure4823e2158aeb10b6a1
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_11prov4_closure491c08207d5c4172b3
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_11prov4_closure50b9f7c25a01330214
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_11prov4_closure51861df7adb662c606
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_11prov4_closure5222246052f32150f6
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_11prov4_closure53e5cd7a99b6904f67
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_11prov4_closure54e25e821aa5047f9d
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_11prov4_closure558596f07377fcdd23
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_12_closure563ad3997710acc7a3
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_12prov2_closure577d299d88caf58c47
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_12prov2_closure5821a9fa97be67ba9d
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_12prov2_closure595287eee05095fcd6
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_12prov2_closure60982a61a159dd0ba2
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_12prov2_closure610f03bc312b9e972f
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_4_closure326b03fdbab20209f
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_4_closure47a652525681b3b7a
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_4_closure5de80aa4489697f2f
ut.tensors.Tensor_Operation_Spec.__spock_feature_0_8_closure6d3a5da428292ded0
ut.tensors.Tensor_Operation_Spec._setup_closure17fc7dd41f0690d82
ut.tensors.Tensor_Operation_Spec._setup_closure2112db64876fa95f8
ut.tensors.Tensor_Slicing_Specf0545154be74486d
ut.tensors.Tensor_State_Specdcece4d769ba5375
ut.tensors.Tensor_State_Spec.__spock_feature_0_1_closure2b7aa480d3701f489
ut.tensors.Tensor_State_Spec.__spock_feature_0_1_closure3f2bc75bba660670e
ut.tensors.Tensor_State_Spec.__spock_feature_0_1_closure49a6f7fe5dab31d3e
ut.tensors.Tensor_State_Spec.__spock_feature_0_1_closure5420e64675836ab22
ut.tensors.Tensor_State_Spec.__spock_feature_0_1_closure62b7f3b0f3a26de11
ut.tensors.Tensor_State_Spec.__spock_feature_0_2_closure7fcd36f7e2e03cd2b
ut.tensors.Tensor_State_Spec.__spock_feature_0_2_closure8a63bc80a05be6f63
ut.tensors.Tensor_State_Spec.__spock_feature_0_2_closure9dbc662c3671ac185
ut.tensors.Tensor_State_Spec.__spock_feature_0_6_closure10a43d1422866192ac
ut.tensors.Tensor_State_Spec._setup_closure116b45a3f31be9dab
ut.tensors.Tensor_Stats_Specc554355795a385e7
ut.tensors.Tensor_Stats_Spec.__spock_feature_0_0_closure14fbe300d00ad7762
ut.tensors.Tensor_Stats_Spec.__spock_feature_0_0_closure283b4a8f980fe177f
ut.tensors.Tensor_Stats_Spec.__spock_feature_0_2_closure37875de611babc536
ut.tensors.Tensor_Stats_Spec.__spock_feature_0_2_closure47cdc1b083fb79707
ut.tensors.Tensor_Stats_Spec.__spock_feature_0_4_closure5d0caf5f1e5f93156
ut.tensors.Tensor_Stats_Spec.__spock_feature_0_4_closure65816545aa927a154
ut.tensors.Tensor_Version_Spec369243f21f02df19
ut.tensors.Tensor_Version_Spec.__spock_feature_0_3_closure24e51af45577d5562
ut.tensors.Tensor_Version_Spec._setup_closure1d5ed6fba7952dae9
ut.tensors.exceptions.Tensor_Delete_Exception_Specb6de45a8073578e7
ut.tensors.exceptions.Tensor_Exception_Spece86b3787c3524dd3
ut.tensors.exceptions.Tensor_Exception_Spec.__spock_feature_0_1prov1_closure148aa1c91ce5e3a86
ut.tensors.exceptions.Tensor_Exception_Spec.__spock_feature_0_1prov1_closure101efbb8928f1a0227
ut.tensors.exceptions.Tensor_Exception_Spec.__spock_feature_0_1prov1_closure11f703dbf7d8419719
ut.tensors.exceptions.Tensor_Exception_Spec.__spock_feature_0_1prov1_closure12f1109fd9a9da0280
ut.tensors.exceptions.Tensor_Exception_Spec.__spock_feature_0_1prov1_closure13a79280f813e50c27
ut.tensors.exceptions.Tensor_Exception_Spec.__spock_feature_0_1prov1_closure14de6635dbf4910f13
ut.tensors.exceptions.Tensor_Exception_Spec.__spock_feature_0_1prov1_closure153696cc4233536da3
ut.tensors.exceptions.Tensor_Exception_Spec.__spock_feature_0_1prov1_closure1619cb66ce922d6486
ut.tensors.exceptions.Tensor_Exception_Spec.__spock_feature_0_1prov1_closure175584c564f3ce4552
ut.tensors.exceptions.Tensor_Exception_Spec.__spock_feature_0_1prov1_closure180d93ccda4357c8e4
ut.tensors.exceptions.Tensor_Exception_Spec.__spock_feature_0_1prov1_closure263e869a533277a7d
ut.tensors.exceptions.Tensor_Exception_Spec.__spock_feature_0_1prov1_closure3f58e6bad60180b05
ut.tensors.exceptions.Tensor_Exception_Spec.__spock_feature_0_1prov1_closure4ace749b6d578f3ef
ut.tensors.exceptions.Tensor_Exception_Spec.__spock_feature_0_1prov1_closure574e587ebf6af7ab5
ut.tensors.exceptions.Tensor_Exception_Spec.__spock_feature_0_1prov1_closure61c5becec6bed2030
ut.tensors.exceptions.Tensor_Exception_Spec.__spock_feature_0_1prov1_closure7b3ef5881da213ad5
ut.tensors.exceptions.Tensor_Exception_Spec.__spock_feature_0_1prov1_closure89cef07e48dea0e8e
ut.tensors.exceptions.Tensor_Exception_Spec.__spock_feature_0_1prov1_closure99859af71db77ad19
ut.utility.Cleaner_Testingb1a758d742a575bd
ut.utility.Cleaner_Testing.__spock_feature_0_0_closure1d206d43eb6a14e43
ut.utility.Cleaner_Testing.__spock_feature_0_0_closure108fc4b2d642803399
ut.utility.Cleaner_Testing.__spock_feature_0_0_closure11e2fb1770dbae58d5
ut.utility.Cleaner_Testing.__spock_feature_0_0_closure12a3e0324e0e3d4df9
ut.utility.Cleaner_Testing.__spock_feature_0_0_closure258b71e29cd9da8fc
ut.utility.Cleaner_Testing.__spock_feature_0_0_closure314c583ddfb4b2e94
ut.utility.Cleaner_Testing.__spock_feature_0_0_closure45e224612adaba98c
ut.utility.Cleaner_Testing.__spock_feature_0_0_closure51250dbe69b7d2fe4
ut.utility.Cleaner_Testing.__spock_feature_0_0_closure6d97d69f916c7b15d
ut.utility.Cleaner_Testing.__spock_feature_0_0_closure7950ff40d20113735
ut.utility.Cleaner_Testing.__spock_feature_0_0_closure85308f6646dc7ab6c
ut.utility.Cleaner_Testing.__spock_feature_0_0_closure91f7a6b905b112d04
ut.utility.DataConverter_Specfa3bb332ba3d9faf
ut.utility.FileHandle_Speceae45c6ad7eebcf6
ut.utility.FileHandle_Spec.__spock_feature_0_0_closure28d2f11379307b46e
ut.utility.FileHandle_Spec.__spock_feature_0_1_closure368511350c3823680
ut.utility.FileHandle_Spec.__spock_feature_0_2_closure4a4441160234b702e
ut.utility.FileHandle_Spec.__spock_feature_0_2_closure5686b7fd184b3b543
ut.utility.FileHandle_Spec.__spock_feature_0_2_closure65bf33b86d38fa173
ut.utility.FileHandle_Spec.__spock_feature_0_2_closure7be8f8d5fb656e654
ut.utility.FileHandle_Spec.__spock_feature_0_3_closure8411750250ad620b7
ut.utility.FileHandle_Spec.__spock_feature_0_3_closure92d008bf740a4cbc0
ut.utility.FileHandle_Spec._setup_closure1fcddee8707b066cf
ut.utility.ListReader_Exception_Spec1b2d37efe4862211
ut.utility.ListReader_Exception_Spec.__spock_feature_0_0_closure1911c45ecb920bffa
ut.utility.ListReader_Exception_Spec.__spock_feature_0_1_closure209fa39c8c2cce207
ut.utility.ListReader_Specc795d27ef2c37772
ut.utility.ListReader_Spec.__spock_feature_0_0_closure1c592fbecbcea8d6c
ut.utility.ListReader_Spec.__spock_feature_0_1_closure2826ceea4417998e2
ut.utility.ListReader_Spec.__spock_feature_0_2_closure31b4874a7e9b96992
ut.utility.Utility_Spec70f6ca175dad3feb
ut.utility.Utility_Spec.__spock_feature_0_0prov1_closure1bbbbb17af308186c
ut.utility.Utility_Spec.__spock_feature_0_0prov1_closure2af0a9a464fc485be
ut.utility.Utility_Spec.__spock_feature_0_0prov1_closure3539047dc5c4333d2
ut.utility.Utility_Spec.__spock_feature_0_0prov1_closure408869a9a1a95dc9a
ut.utility.Utility_Spec.__spock_feature_0_0prov1_closure5f034538c2e604ee8
ut.utility.Utility_Spec.__spock_feature_0_0prov1_closure6c4325230fb60090c
ut.utility.Utility_Spec.__spock_feature_0_0prov1_closure79f5316f911a2e72d
ut.utility.Utility_Spec.__spock_feature_0_0prov1_closure88fe58577acd30b84
worker.org.gradle.api.JavaVersionaaef7cd2313e04d9
worker.org.gradle.api.internal.jvm.JavaVersionParser1206b4dd1a2e9827
worker.org.gradle.internal.classloader.ClassLoaderSpeccb374b01ccbebc0b
worker.org.gradle.internal.classloader.ClassLoaderUtils8203100709821636
worker.org.gradle.internal.classloader.ClassLoaderUtils.ReflectionClassDefinercebb02b0ac8570bb
worker.org.gradle.internal.classloader.ClassLoaderUtils.ReflectionPackagesFetcherefee7d71a11cc546
worker.org.gradle.internal.classloader.FilteringClassLoader685f3dec8c07e429
worker.org.gradle.internal.classloader.FilteringClassLoader.RetrieveSystemPackagesClassLoaderf37f538880fb8032
worker.org.gradle.internal.classloader.FilteringClassLoader.Spec66254ecaab39094b
worker.org.gradle.internal.classloader.FilteringClassLoader.TrieSet9ca6d89930a3c026
worker.org.gradle.internal.reflect.JavaMethod1b88500ab18c562b
worker.org.gradle.internal.util.Trie19fbee069a29feb3
worker.org.gradle.internal.util.Trie.Builder3ff89b3303eddda1
worker.org.gradle.process.internal.worker.GradleWorkerMain232767ef46e8d7ca
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.autograd/ADAction.html b/docs/coverage/test/html/neureka.autograd/ADAction.html
index 9e67bf72b..e75cf5beb 100644
--- a/docs/coverage/test/html/neureka.autograd/ADAction.html
+++ b/docs/coverage/test/html/neureka.autograd/ADAction.html
@@ -1 +1 @@
-ADAction

ADAction

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total25 of 8369%2 of 875%3871624
partialDerivative()160%20%224411
of(Tensor, ADAction)60%n/a111111
findCaptured()35294%6100%0421001
of(ADAction)6100%n/a010101
\ No newline at end of file
+ADAction

ADAction

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total25 of 8369%2 of 875%3871624
partialDerivative()160%20%224411
of(Tensor, ADAction)60%n/a111111
findCaptured()35294%6100%0421001
of(ADAction)6100%n/a010101
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.autograd/ADAction.java.html b/docs/coverage/test/html/neureka.autograd/ADAction.java.html
index b04469979..50cd67bd2 100644
--- a/docs/coverage/test/html/neureka.autograd/ADAction.java.html
+++ b/docs/coverage/test/html/neureka.autograd/ADAction.java.html
@@ -70,4 +70,4 @@
         return Optional.empty();
     }
 }
-
\ No newline at end of file
+
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.autograd/ADTarget.html b/docs/coverage/test/html/neureka.autograd/ADTarget.html
index 657fcd1ca..3c950fc6f 100644
--- a/docs/coverage/test/html/neureka.autograd/ADTarget.html
+++ b/docs/coverage/test/html/neureka.autograd/ADTarget.html
@@ -1 +1 @@
-ADTarget

ADTarget

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total3 of 2185%0 of 0n/a141814
node()30%n/a111111
ADTarget(int, GraphNode, Tensor)12100%n/a010501
inputIndex()3100%n/a010101
error()3100%n/a010101
\ No newline at end of file
+ADTarget

ADTarget

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total3 of 2185%0 of 0n/a141814
node()30%n/a111111
ADTarget(int, GraphNode, Tensor)12100%n/a010501
inputIndex()3100%n/a010101
error()3100%n/a010101
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.autograd/ADTarget.java.html b/docs/coverage/test/html/neureka.autograd/ADTarget.java.html
index de09ee92b..99df71c4f 100644
--- a/docs/coverage/test/html/neureka.autograd/ADTarget.java.html
+++ b/docs/coverage/test/html/neureka.autograd/ADTarget.java.html
@@ -35,4 +35,4 @@
     public Tensor<V> error() { return _error; }
 }
-
\ No newline at end of file
+
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.autograd/BackPropTargetCollector$Value.html b/docs/coverage/test/html/neureka.autograd/BackPropTargetCollector$Value.html
index ffba2169a..47ef388c0 100644
--- a/docs/coverage/test/html/neureka.autograd/BackPropTargetCollector$Value.html
+++ b/docs/coverage/test/html/neureka.autograd/BackPropTargetCollector$Value.html
@@ -1 +1 @@
-BackPropTargetCollector.Value

BackPropTargetCollector.Value

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 22100%0 of 0n/a030703
BackPropTargetCollector.Value(int, ADAction)16100%n/a010501
index()3100%n/a010101
agents()3100%n/a010101
\ No newline at end of file
+BackPropTargetCollector.Value

BackPropTargetCollector.Value

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 22100%0 of 0n/a030703
BackPropTargetCollector.Value(int, ADAction)16100%n/a010501
index()3100%n/a010101
agents()3100%n/a010101
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.autograd/BackPropTargetCollector.html b/docs/coverage/test/html/neureka.autograd/BackPropTargetCollector.html
index 64d2f18e3..e863babf2 100644
--- a/docs/coverage/test/html/neureka.autograd/BackPropTargetCollector.html
+++ b/docs/coverage/test/html/neureka.autograd/BackPropTargetCollector.html
@@ -1 +1 @@
-BackPropTargetCollector

BackPropTargetCollector

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 75100%0 of 6100%0801105
put(int, GraphNode, ADAction)36100%4100%030501
getTargets()15100%2100%020501
lambda$getTargets$1(Map.Entry)15100%n/a010101
lambda$put$0(GraphNode, GraphNode)6100%n/a010101
BackPropTargetCollector()3100%n/a010101
\ No newline at end of file
+BackPropTargetCollector

BackPropTargetCollector

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 75100%0 of 6100%0801105
put(int, GraphNode, ADAction)36100%4100%030501
getTargets()15100%2100%020501
lambda$getTargets$1(Map.Entry)15100%n/a010101
lambda$put$0(GraphNode, GraphNode)6100%n/a010101
BackPropTargetCollector()3100%n/a010101
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.autograd/BackPropTargetCollector.java.html b/docs/coverage/test/html/neureka.autograd/BackPropTargetCollector.java.html
index 1bfcaaa51..4bfdc1c0a 100644
--- a/docs/coverage/test/html/neureka.autograd/BackPropTargetCollector.java.html
+++ b/docs/coverage/test/html/neureka.autograd/BackPropTargetCollector.java.html
@@ -48,4 +48,4 @@
     }
 }
-
\ No newline at end of file
+
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.autograd/BackPropTargets.html b/docs/coverage/test/html/neureka.autograd/BackPropTargets.html
index 8100ff5d0..27610b3b1 100644
--- a/docs/coverage/test/html/neureka.autograd/BackPropTargets.html
+++ b/docs/coverage/test/html/neureka.autograd/BackPropTargets.html
@@ -1 +1 @@
-BackPropTargets

BackPropTargets

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 22100%0 of 0n/a040804
BackPropTargets(int, GraphNode, List)13100%n/a010501
node()3100%n/a010101
index()3100%n/a010101
actions()3100%n/a010101
\ No newline at end of file
+BackPropTargets

BackPropTargets

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 22100%0 of 0n/a040804
BackPropTargets(int, GraphNode, List)13100%n/a010501
node()3100%n/a010101
index()3100%n/a010101
actions()3100%n/a010101
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.autograd/BackPropTargets.java.html b/docs/coverage/test/html/neureka.autograd/BackPropTargets.java.html
index 1c6687477..7bcca28ed 100644
--- a/docs/coverage/test/html/neureka.autograd/BackPropTargets.java.html
+++ b/docs/coverage/test/html/neureka.autograd/BackPropTargets.java.html
@@ -21,4 +21,4 @@
     public List<ADAction> actions() { return _actions; }
 }
-
\ No newline at end of file
+
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.autograd/DefaultADAction.html b/docs/coverage/test/html/neureka.autograd/DefaultADAction.html
index 81866407f..45cc36156 100644
--- a/docs/coverage/test/html/neureka.autograd/DefaultADAction.html
+++ b/docs/coverage/test/html/neureka.autograd/DefaultADAction.html
@@ -1 +1 @@
-DefaultADAction

DefaultADAction

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total25 of 6863%3 of 862%3831304
act(ADTarget)15834%1150%122401
toString()6650%1150%120201
partialDerivative()42083%1375%131601
DefaultADAction(ADAction, Tensor)9100%n/a010101
\ No newline at end of file
+DefaultADAction

DefaultADAction

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total25 of 6863%3 of 862%3831304
act(ADTarget)15834%1150%122401
toString()6650%1150%120201
partialDerivative()42083%1375%131601
DefaultADAction(ADAction, Tensor)9100%n/a010101
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.autograd/DefaultADAction.java.html b/docs/coverage/test/html/neureka.autograd/DefaultADAction.java.html
index 4f77103ad..ad6926918 100644
--- a/docs/coverage/test/html/neureka.autograd/DefaultADAction.java.html
+++ b/docs/coverage/test/html/neureka.autograd/DefaultADAction.java.html
@@ -72,4 +72,4 @@
     }
 }
-
\ No newline at end of file
+
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.autograd/GraphNode$Print.html b/docs/coverage/test/html/neureka.autograd/GraphNode$Print.html
index 4e2d9be6f..00aaee6a4 100644
--- a/docs/coverage/test/html/neureka.autograd/GraphNode$Print.html
+++ b/docs/coverage/test/html/neureka.autograd/GraphNode$Print.html
@@ -1 +1 @@
-GraphNode.Print

GraphNode.Print

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 21100%0 of 0n/a010101
static {...}21100%n/a010101
\ No newline at end of file
+GraphNode.Print

GraphNode.Print

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 34100%0 of 0n/a010101
static {...}34100%n/a010101
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.autograd/GraphNode.html b/docs/coverage/test/html/neureka.autograd/GraphNode.html
index 10fcd0055..1d21c4fb3 100644
--- a/docs/coverage/test/html/neureka.autograd/GraphNode.html
+++ b/docs/coverage/test/html/neureka.autograd/GraphNode.html
@@ -1 +1 @@
-GraphNode

GraphNode

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total472 of 2,08577%67 of 25673%64221673731392
_verifyErrorAccumulation(Set)134128%7112%45243001
_findRootPathsRecursively(List, List, List, GraphNode)720%140%88151511
lambda$_verifyErrorAccumulation$10(GraphNode)520%n/a117711
lambda$_verifyErrorAccumulation$12(List, int)230%n/a111111
lambda$_verifyErrorAccumulation$11(List, int)230%n/a111111
_toString(String, boolean, GraphNode.Print, int)2114387%31583%21011401
_registerADActions(Result, Function, ExecutionCall)1818591%63083%61913201
_backward(Tensor, Set, boolean)144977%21083%2711401
lambda$_verifyErrorAccumulation$8(Function)120%n/a115511
lambda$_migrateAndOrApplyError$2(Tensor, Consumer, Tensor)112367%2675%154901
_backwardJIT(Tensor, GraphNode)82071%4233%342801
lambda$has$20(GraphNode, BackPropTargets)80%20%221111
lambda$_verifyErrorAccumulation$5(GraphNode)80%20%221111
has(GraphNode)70%n/a111111
lambda$_verifyErrorAccumulation$6(Function)70%20%221111
GraphNode(Function, ExecutionCall, Supplier)513296%11995%11102701
_checkConstructorArgValidity(Function, ExecutionCall)54990%11392%1811201
_attachChild(GraphNode)51676%1150%121501
lambda$new$0()50%n/a111111
isGraphLeave()42485%2466%241401
toString(GraphNode.Print)42184%1375%141601
getPayloadDataType()40%n/a111111
lambda$_verifyErrorAccumulation$9(WeakReference)40%n/a111111
lambda$_verifyErrorAccumulation$7(Function)40%n/a111111
canBeDeleted()32990%2675%250701
lambda$_carryPendingBackPropToGradients$15(Set, Tensor)32488%2466%241601
_deleteDerivativesRecursively()31986%2675%250401
_simpleToString()4897%1375%130401
forEachDerivative(BiConsumer)1392%1150%120301
_forEachTargetActionPair(BiConsumer)1392%1150%120401
hasDerivatives()787%1150%120101
lambda$_numberOfDerivativeUsages$30(Tensor, Tensor)685%1150%120101
_checkInputValidity(Tensor[], Function)62100%8100%050901
backward(Tensor)62100%8100%0501301
_forEachBackRef(Tensor, BiConsumer)52100%6100%040601
lambda$_registerADActions$1(Tensor, Result, Function, ExecutionCall, int, BackPropTargetCollector, BackPropTargets, ADAction)48100%n/a0101001
_compactToString()47100%2100%020701
type()43100%2100%020801
_numberOfReverseModeADChildren()35100%4866%470801
lambda$_compactToString$38(NDPrintSettings)29100%n/a0101401
lambda$_informPartialDerivative$19(Tensor)24100%2100%020401
lambda$_numberOfDerivativeUsages$31(Tensor, GraphNode)20100%n/a010801
lambda$_backwardJIT$16(GraphNode, Tensor)18100%1375%130501
_fancyToString(int)16100%n/a010101
lambda$_numberOfExistingAncestors$27(GraphNode)15100%n/a010201
_carryPendingBackPropToGradients(Set)14100%n/a010401
forEachTarget(Consumer)14100%2100%020301
_numberOfDerivativeUsages(Tensor)12100%n/a010601
lambda$_parentsToString$37(GraphNode[])12100%n/a010401
_numberOfExistingAncestors()11100%n/a010601
isLeave()10100%1375%130101
_parentsToString()10100%n/a010301
lambda$_parentsToString$35(Tensor)10100%n/a010101
getParents()9100%2100%020101
lambda$_parentsToString$36(Optional)9100%n/a010301
getAndRemovePendingError()8100%n/a010301
lambda$_numberOfExistingAncestors$26(Tensor)8100%2100%020101
_migrateAndOrApplyError(Tensor, Consumer)7100%n/a010201
usesAD()7100%2100%020101
usesForwardAD()7100%2100%020101
usesReverseAD()7100%2100%020101
backwardJIT(Tensor)7100%n/a010301
isUsedAsDerivative()7100%2100%020101
lambda$_parentsToString$34(Tensor)7100%2100%020101
lambda$type$33(Tensor)7100%2100%020101
lambda$type$32(Tensor)7100%2100%020101
lambda$_forEachTargetActionPair$25(BiConsumer, BackPropTargets)7100%n/a010101
lambda$forEachDerivative$22(BiConsumer, BackPropTargets)7100%n/a010101
lambda$_numberOfDerivativeUsages$29(ADAction)6100%n/a010101
lambda$forEachDerivative$21(BiConsumer, BackPropTargets, ADAction)6100%n/a010101
lambda$_backward$13(Set, GraphNode, Tensor)6100%n/a010101
getPayload()5100%n/a010101
update(Component.OwnerChangeRequest)5100%n/a010201
_informPartialDerivative(ADAction)5100%n/a010301
lambda$_forEachTargetActionPair$24(BiConsumer, BackPropTargets, ADAction)5100%n/a010101
lambda$forEachTarget$23(Consumer, BackPropTargets)5100%n/a010101
lambda$_backwardJIT$17(GraphNode, GraphNode, Tensor)5100%n/a010101
getPayloadShape()4100%n/a010101
size()4100%n/a010101
getPendingError()4100%n/a010101
getFunction()4100%n/a010101
getPayloadReferenceVersion()4100%n/a010101
getChildren()4100%n/a010101
toString()4100%n/a010101
lambda$_compactToString$39(Tensor)4100%n/a010101
lambda$_numberOfDerivativeUsages$28(BackPropTargets)4100%n/a010101
lambda$_carryPendingBackPropToGradients$14(Set, GraphNode)4100%n/a010101
lambda$backward$4(GraphNode)4100%n/a010101
lambda$backward$3(Set, GraphNode)4100%n/a010101
getMode()3100%n/a010101
isReliesOnJustInTimeProp()3100%n/a010101
lambda$_informPartialDerivative$18(Tensor)3100%n/a010101
\ No newline at end of file
+GraphNode

GraphNode

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total478 of 2,08577%69 of 25673%66221673731392
_verifyErrorAccumulation(Set)134128%7112%45243001
_findRootPathsRecursively(List, List, List, GraphNode)720%140%88151511
lambda$_verifyErrorAccumulation$10(GraphNode)520%n/a117711
lambda$_verifyErrorAccumulation$12(List, int)230%n/a111111
lambda$_verifyErrorAccumulation$11(List, int)230%n/a111111
_toString(String, boolean, GraphNode.Print, int)2114387%31583%21011401
_registerADActions(Result, Function, ExecutionCall)1818591%63083%61913201
lambda$_migrateAndOrApplyError$2(Tensor, Consumer, Tensor)161852%3562%254901
_backward(Tensor, Set, boolean)144977%21083%2711401
lambda$_verifyErrorAccumulation$8(Function)120%n/a115511
_backwardJIT(Tensor, GraphNode)82071%4233%342801
lambda$has$20(GraphNode, BackPropTargets)80%20%221111
lambda$_verifyErrorAccumulation$5(GraphNode)80%20%221111
has(GraphNode)70%n/a111111
lambda$_verifyErrorAccumulation$6(Function)70%20%221111
GraphNode(Function, ExecutionCall, Supplier)513296%11995%11102701
_checkConstructorArgValidity(Function, ExecutionCall)54990%11392%1811201
_attachChild(GraphNode)51676%1150%121501
lambda$new$0()50%n/a111111
isGraphLeave()42485%2466%241401
toString(GraphNode.Print)42184%1375%141601
getPayloadDataType()40%n/a111111
lambda$_verifyErrorAccumulation$9(WeakReference)40%n/a111111
lambda$_verifyErrorAccumulation$7(Function)40%n/a111111
canBeDeleted()32990%2675%250701
lambda$_carryPendingBackPropToGradients$15(Set, Tensor)32488%2466%241601
_deleteDerivativesRecursively()31986%2675%250401
_simpleToString()4897%1375%130401
forEachDerivative(BiConsumer)1392%1150%120301
_forEachTargetActionPair(BiConsumer)1392%1150%120401
hasDerivatives()787%1150%120101
lambda$type$32(Tensor)685%1150%120101
lambda$_numberOfDerivativeUsages$30(Tensor, Tensor)685%1150%120101
_checkInputValidity(Tensor[], Function)62100%8100%050901
backward(Tensor)62100%8100%0501301
_forEachBackRef(Tensor, BiConsumer)52100%6100%040601
lambda$_registerADActions$1(Tensor, Result, Function, ExecutionCall, int, BackPropTargetCollector, BackPropTargets, ADAction)48100%n/a0101001
_compactToString()47100%2100%020701
type()43100%2100%020801
_numberOfReverseModeADChildren()35100%4866%470801
lambda$_compactToString$38(NDPrintSettings)29100%n/a0101401
lambda$_informPartialDerivative$19(Tensor)24100%2100%020401
lambda$_numberOfDerivativeUsages$31(Tensor, GraphNode)20100%n/a010801
lambda$_backwardJIT$16(GraphNode, Tensor)18100%1375%130501
_fancyToString(int)16100%n/a010101
lambda$_numberOfExistingAncestors$27(GraphNode)15100%n/a010201
_carryPendingBackPropToGradients(Set)14100%n/a010401
forEachTarget(Consumer)14100%2100%020301
_numberOfDerivativeUsages(Tensor)12100%n/a010601
lambda$_parentsToString$37(GraphNode[])12100%n/a010401
_numberOfExistingAncestors()11100%n/a010601
isLeave()10100%1375%130101
_parentsToString()10100%n/a010301
lambda$_parentsToString$35(Tensor)10100%n/a010101
getParents()9100%2100%020101
lambda$_parentsToString$36(Optional)9100%n/a010301
getAndRemovePendingError()8100%n/a010301
lambda$_numberOfExistingAncestors$26(Tensor)8100%2100%020101
_migrateAndOrApplyError(Tensor, Consumer)7100%n/a010201
usesAD()7100%2100%020101
usesForwardAD()7100%2100%020101
usesReverseAD()7100%2100%020101
backwardJIT(Tensor)7100%n/a010301
isUsedAsDerivative()7100%2100%020101
lambda$_parentsToString$34(Tensor)7100%2100%020101
lambda$type$33(Tensor)7100%2100%020101
lambda$_forEachTargetActionPair$25(BiConsumer, BackPropTargets)7100%n/a010101
lambda$forEachDerivative$22(BiConsumer, BackPropTargets)7100%n/a010101
lambda$_numberOfDerivativeUsages$29(ADAction)6100%n/a010101
lambda$forEachDerivative$21(BiConsumer, BackPropTargets, ADAction)6100%n/a010101
lambda$_backward$13(Set, GraphNode, Tensor)6100%n/a010101
getPayload()5100%n/a010101
update(Component.OwnerChangeRequest)5100%n/a010201
_informPartialDerivative(ADAction)5100%n/a010301
lambda$_forEachTargetActionPair$24(BiConsumer, BackPropTargets, ADAction)5100%n/a010101
lambda$forEachTarget$23(Consumer, BackPropTargets)5100%n/a010101
lambda$_backwardJIT$17(GraphNode, GraphNode, Tensor)5100%n/a010101
getPayloadShape()4100%n/a010101
size()4100%n/a010101
getPendingError()4100%n/a010101
getFunction()4100%n/a010101
getPayloadReferenceVersion()4100%n/a010101
getChildren()4100%n/a010101
toString()4100%n/a010101
lambda$_compactToString$39(Tensor)4100%n/a010101
lambda$_numberOfDerivativeUsages$28(BackPropTargets)4100%n/a010101
lambda$_carryPendingBackPropToGradients$14(Set, GraphNode)4100%n/a010101
lambda$backward$4(GraphNode)4100%n/a010101
lambda$backward$3(Set, GraphNode)4100%n/a010101
getMode()3100%n/a010101
isReliesOnJustInTimeProp()3100%n/a010101
lambda$_informPartialDerivative$18(Tensor)3100%n/a010101
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.autograd/GraphNode.java.html b/docs/coverage/test/html/neureka.autograd/GraphNode.java.html
index 3f9c5a1aa..9bdf35e28 100644
--- a/docs/coverage/test/html/neureka.autograd/GraphNode.java.html
+++ b/docs/coverage/test/html/neureka.autograd/GraphNode.java.html
@@ -288,7 +288,7 @@
         this.getPayload().ifPresent( payload -> {
             // It was not garbage collected:
             try {
-                if ( payload.isOutsourced() ) payload.getDevice().store( e );
+                if ( payload.isOutsourced() ) payload.getDevice().store( e );
             } catch ( Exception exception ) {
                 if ( payload.isUndefined() )
                     throw new IllegalStateException(
@@ -911,7 +911,7 @@
         if ( this.isLeave() ) type += "LEAVE";
         else type += "BRANCH";
         type += this.getPayload()
-                    .filter( p -> !p.isDeleted() )
+                    .filter( p -> !p.isDeleted() )
                    .map( p -> p.rqsGradient() ? " RQS GRADIENT" : "" )
                    .orElse(" DELETED");
         return type;
@@ -1022,4 +1022,4 @@
     }
 }
-
\ No newline at end of file
+
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.autograd/GraphNodeUtility.html b/docs/coverage/test/html/neureka.autograd/GraphNodeUtility.html
index e63646b8c..286c7d722 100644
--- a/docs/coverage/test/html/neureka.autograd/GraphNodeUtility.html
+++ b/docs/coverage/test/html/neureka.autograd/GraphNodeUtility.html
@@ -1 +1 @@
-GraphNodeUtility

GraphNodeUtility

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 92100%0 of 18100%01001601
modeOf(AutoDiffMode, ExecutionCall)92100%18100%01001601
\ No newline at end of file
+GraphNodeUtility

GraphNodeUtility

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 92100%0 of 18100%01001601
modeOf(AutoDiffMode, ExecutionCall)92100%18100%01001601
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.autograd/GraphNodeUtility.java.html b/docs/coverage/test/html/neureka.autograd/GraphNodeUtility.java.html
index ca8803473..9bcf4da80 100644
--- a/docs/coverage/test/html/neureka.autograd/GraphNodeUtility.java.html
+++ b/docs/coverage/test/html/neureka.autograd/GraphNodeUtility.java.html
@@ -49,4 +49,4 @@
     }
 }
-
\ No newline at end of file
+
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.autograd/JITProp.html b/docs/coverage/test/html/neureka.autograd/JITProp.html
index 8b5536444..4d4d701db 100644
--- a/docs/coverage/test/html/neureka.autograd/JITProp.html
+++ b/docs/coverage/test/html/neureka.autograd/JITProp.html
@@ -1 +1 @@
-JITProp

JITProp

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total55 of 21173%20 of 3441%182693729
addPending(Set)220%40%336611
lambda$execute$0(GraphNode)101458%4233%341601
finishedCount()90%20%221111
execute()61571%2250%231601
toString()54389%2250%230301
pendingCount()3666%1150%120101
noteFinished(GraphNode)55100%4450%450901
JITProp(Set)13100%n/a010401
isDone()10100%1375%130101
\ No newline at end of file
+JITProp

JITProp

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total55 of 21173%20 of 3441%182693729
addPending(Set)220%40%336611
lambda$execute$0(GraphNode)101458%4233%341601
finishedCount()90%20%221111
execute()61571%2250%231601
toString()54389%2250%230301
pendingCount()3666%1150%120101
noteFinished(GraphNode)55100%4450%450901
JITProp(Set)13100%n/a010401
isDone()10100%1375%130101
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.autograd/JITProp.java.html b/docs/coverage/test/html/neureka.autograd/JITProp.java.html
index 3cbc52d7c..d7e1a25bf 100644
--- a/docs/coverage/test/html/neureka.autograd/JITProp.java.html
+++ b/docs/coverage/test/html/neureka.autograd/JITProp.java.html
@@ -96,4 +96,4 @@
     }
 }
-
\ No newline at end of file
+
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.autograd/NodePayload.html b/docs/coverage/test/html/neureka.autograd/NodePayload.html
index 54a3f582c..b84ea30c8 100644
--- a/docs/coverage/test/html/neureka.autograd/NodePayload.html
+++ b/docs/coverage/test/html/neureka.autograd/NodePayload.html
@@ -1 +1 @@
-NodePayload

NodePayload

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total11 of 7585%3 of 862%41011716
NodePayload(Tensor)44191%1375%1301201
payloadDataType()30%n/a111111
getPayloadShape()21184%1150%120101
getPayload()2880%1150%120101
payloadReferenceVersion()3100%n/a010101
static {...}1100%n/a010101
\ No newline at end of file
+NodePayload

NodePayload

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total11 of 7585%3 of 862%41011716
NodePayload(Tensor)44191%1375%1301201
payloadDataType()30%n/a111111
getPayloadShape()21184%1150%120101
getPayload()2880%1150%120101
payloadReferenceVersion()3100%n/a010101
static {...}1100%n/a010101
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.autograd/NodePayload.java.html b/docs/coverage/test/html/neureka.autograd/NodePayload.java.html
index 2a71852c5..5a1688a9c 100644
--- a/docs/coverage/test/html/neureka.autograd/NodePayload.java.html
+++ b/docs/coverage/test/html/neureka.autograd/NodePayload.java.html
@@ -64,4 +64,4 @@
     }
 }
-
\ No newline at end of file
+
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.autograd/PendingError.html b/docs/coverage/test/html/neureka.autograd/PendingError.html
index 99c18ca5f..3673f8d35 100644
--- a/docs/coverage/test/html/neureka.autograd/PendingError.html
+++ b/docs/coverage/test/html/neureka.autograd/PendingError.html
@@ -1 +1 @@
-PendingError

PendingError

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total55 of 12054%1 of 475%51152049
toString()260%n/a111111
accumulate(Tensor)203060%1150%121601
getGeneration()30%n/a111111
getReceived()30%n/a111111
getExpectedToBeReceived()30%n/a111111
PendingError(Tensor, int, int)15100%n/a010601
isFullyAccumulated()9100%2100%020101
lambda$accumulate$0(Tensor)8100%n/a010201
getAccumulatedError()3100%n/a010101
\ No newline at end of file
+PendingError

PendingError

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total55 of 12054%1 of 475%51152049
toString()260%n/a111111
accumulate(Tensor)203060%1150%121601
getGeneration()30%n/a111111
getReceived()30%n/a111111
getExpectedToBeReceived()30%n/a111111
PendingError(Tensor, int, int)15100%n/a010601
isFullyAccumulated()9100%2100%020101
lambda$accumulate$0(Tensor)8100%n/a010201
getAccumulatedError()3100%n/a010101
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.autograd/PendingError.java.html b/docs/coverage/test/html/neureka.autograd/PendingError.java.html
index 22b8d8cdc..17d5593aa 100644
--- a/docs/coverage/test/html/neureka.autograd/PendingError.java.html
+++ b/docs/coverage/test/html/neureka.autograd/PendingError.java.html
@@ -65,4 +65,4 @@
     public Tensor<V> getAccumulatedError() { return _accumulatedError; }
 }
-
\ No newline at end of file
+
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.autograd/index.html b/docs/coverage/test/html/neureka.autograd/index.html
index 9d3e75794..c2fc4baa3 100644
--- a/docs/coverage/test/html/neureka.autograd/index.html
+++ b/docs/coverage/test/html/neureka.autograd/index.html
@@ -1 +1 @@
-neureka.autograd

neureka.autograd

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total646 of 2,89577%96 of 34271%983149352723142012
GraphNode4721,61377%6718973%6422167373139201
JITProp5515673%201441%18269372901
PendingError556554%375%5115204901
ADAction255869%675%387162401
DefaultADAction254363%3562%383130401
NodePayload6485%3562%4101171601
ADTarget1885%n/a14181401
GraphNodeUtility92100%18100%0100160101
BackPropTargetCollector75100%6100%080110501
BackPropTargetCollector.Value22100%n/a03070301
BackPropTargets22100%n/a04080401
GraphNode.Print21100%n/a01010101
\ No newline at end of file
+neureka.autograd

neureka.autograd

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total652 of 2,90877%98 of 34271%1003149352723142012
GraphNode4781,60777%6918773%6622167373139201
JITProp5515673%201441%18269372901
PendingError556554%375%5115204901
ADAction255869%675%387162401
DefaultADAction254363%3562%383130401
NodePayload6485%3562%4101171601
ADTarget1885%n/a14181401
GraphNodeUtility92100%18100%0100160101
BackPropTargetCollector75100%6100%080110501
GraphNode.Print34100%n/a01010101
BackPropTargetCollector.Value22100%n/a03070301
BackPropTargets22100%n/a04080401
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.autograd/index.source.html b/docs/coverage/test/html/neureka.autograd/index.source.html
index 20075bd86..f52fcfd96 100644
--- a/docs/coverage/test/html/neureka.autograd/index.source.html
+++ b/docs/coverage/test/html/neureka.autograd/index.source.html
@@ -1 +1 @@
-neureka.autograd

neureka.autograd

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total646 of 2,89577%96 of 34271%983149352723142012
GraphNode.java4721,63477%6718973%6422267374139302
JITProp.java5515673%201441%18269372901
PendingError.java556554%375%5115204901
ADAction.java255869%675%387162401
DefaultADAction.java254363%3562%383130401
NodePayload.java6485%3562%4101171601
ADTarget.java1885%n/a14181401
BackPropTargetCollector.java97100%6100%0110180802
GraphNodeUtility.java92100%18100%0100160101
BackPropTargets.java22100%n/a04080401
\ No newline at end of file
+neureka.autograd

neureka.autograd

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total652 of 2,90877%98 of 34271%1003149352723142012
GraphNode.java4781,64177%6918773%6622267374139302
JITProp.java5515673%201441%18269372901
PendingError.java556554%375%5115204901
ADAction.java255869%675%387162401
DefaultADAction.java254363%3562%383130401
NodePayload.java6485%3562%4101171601
ADTarget.java1885%n/a14181401
BackPropTargetCollector.java97100%6100%0110180802
GraphNodeUtility.java92100%18100%0100160101
BackPropTargets.java22100%n/a04080401
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.backend.api.ini/BackendRegistry$1$1.html b/docs/coverage/test/html/neureka.backend.api.ini/BackendRegistry$1$1.html
index c67f63d3a..f9cfbdfa8 100644
--- a/docs/coverage/test/html/neureka.backend.api.ini/BackendRegistry$1$1.html
+++ b/docs/coverage/test/html/neureka.backend.api.ini/BackendRegistry$1$1.html
@@ -1 +1 @@
-BackendRegistry.1.new ReceiveForOperation() {...}

BackendRegistry.1.new ReceiveForOperation() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 23100%0 of 0n/a020302
set(Class, Function)14100%n/a010201
{...}9100%n/a010101
\ No newline at end of file
+BackendRegistry.1.new ReceiveForOperation() {...}

BackendRegistry.1.new ReceiveForOperation() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 23100%0 of 0n/a020302
set(Class, Function)14100%n/a010201
{...}9100%n/a010101
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.backend.api.ini/BackendRegistry$1.html b/docs/coverage/test/html/neureka.backend.api.ini/BackendRegistry$1.html
index e49808762..0f70bbc5d 100644
--- a/docs/coverage/test/html/neureka.backend.api.ini/BackendRegistry$1.html
+++ b/docs/coverage/test/html/neureka.backend.api.ini/BackendRegistry$1.html
@@ -1 +1 @@
-BackendRegistry.new ReceiveForDevice() {...}

BackendRegistry.new ReceiveForDevice() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total11 of 2657%0 of 0n/a132413
set(Class, Class, Function)110%n/a112211
{...}9100%n/a010101
forOperation(Class)6100%n/a010101
\ No newline at end of file
+BackendRegistry.new ReceiveForDevice() {...}

BackendRegistry.new ReceiveForDevice() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total11 of 2657%0 of 0n/a132413
set(Class, Class, Function)110%n/a112211
{...}9100%n/a010101
forOperation(Class)6100%n/a010101
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.backend.api.ini/BackendRegistry.html b/docs/coverage/test/html/neureka.backend.api.ini/BackendRegistry.html
index 5c68f55fc..555c1435d 100644
--- a/docs/coverage/test/html/neureka.backend.api.ini/BackendRegistry.html
+++ b/docs/coverage/test/html/neureka.backend.api.ini/BackendRegistry.html
@@ -1 +1 @@
-BackendRegistry

BackendRegistry

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 17100%0 of 0n/a030503
BackendRegistry(ImplementationReceiver)6100%n/a010301
forDevice(Class)6100%n/a010101
of(ImplementationReceiver)5100%n/a010101
\ No newline at end of file
+BackendRegistry

BackendRegistry

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 17100%0 of 0n/a030503
BackendRegistry(ImplementationReceiver)6100%n/a010301
forDevice(Class)6100%n/a010101
of(ImplementationReceiver)5100%n/a010101
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.backend.api.ini/BackendRegistry.java.html b/docs/coverage/test/html/neureka.backend.api.ini/BackendRegistry.java.html
index 1d06f59b7..e0c909b8f 100644
--- a/docs/coverage/test/html/neureka.backend.api.ini/BackendRegistry.java.html
+++ b/docs/coverage/test/html/neureka.backend.api.ini/BackendRegistry.java.html
@@ -51,4 +51,4 @@ } } - \ No newline at end of file + \ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.backend.api.ini/index.html b/docs/coverage/test/html/neureka.backend.api.ini/index.html
index 6b4e09dee..00b6cddfb 100644
--- a/docs/coverage/test/html/neureka.backend.api.ini/index.html
+++ b/docs/coverage/test/html/neureka.backend.api.ini/index.html
@@ -1 +1 @@
-neureka.backend.api.ini

neureka.backend.api.ini

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total11 of 6683%0 of 0n/a182101803
BackendRegistry.new ReceiveForDevice() {...}111557%n/a13241301
BackendRegistry.1.new ReceiveForOperation() {...}23100%n/a02030201
BackendRegistry17100%n/a03050301
\ No newline at end of file
+neureka.backend.api.ini

neureka.backend.api.ini

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total11 of 6683%0 of 0n/a182101803
BackendRegistry.new ReceiveForDevice() {...}111557%n/a13241301
BackendRegistry.1.new ReceiveForOperation() {...}23100%n/a02030201
BackendRegistry17100%n/a03050301
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.backend.api.ini/index.source.html b/docs/coverage/test/html/neureka.backend.api.ini/index.source.html
index 9985a4123..79bde3e94 100644
--- a/docs/coverage/test/html/neureka.backend.api.ini/index.source.html
+++ b/docs/coverage/test/html/neureka.backend.api.ini/index.source.html
@@ -1 +1 @@
-neureka.backend.api.ini

neureka.backend.api.ini

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total11 of 6683%0 of 0n/a182101803
BackendRegistry.java115583%n/a182101803
\ No newline at end of file
+neureka.backend.api.ini

neureka.backend.api.ini

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total11 of 6683%0 of 0n/a182101803
BackendRegistry.java115583%n/a182101803
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.backend.api.template.algorithms/AbstractAlgorithm.html b/docs/coverage/test/html/neureka.backend.api.template.algorithms/AbstractAlgorithm.html
index 9e864d36d..01cdaad8f 100644
--- a/docs/coverage/test/html/neureka.backend.api.template.algorithms/AbstractAlgorithm.html
+++ b/docs/coverage/test/html/neureka.backend.api.template.algorithms/AbstractAlgorithm.html
@@ -1 +1 @@
-AbstractAlgorithm

AbstractAlgorithm

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 9100%0 of 0n/a020202
AbstractAlgorithm(String)6100%n/a010101
getName()3100%n/a010101
\ No newline at end of file
+AbstractAlgorithm

AbstractAlgorithm

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 9100%0 of 0n/a020202
AbstractAlgorithm(String)6100%n/a010101
getName()3100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.api.template.algorithms/AbstractAlgorithm.java.html b/docs/coverage/test/html/neureka.backend.api.template.algorithms/AbstractAlgorithm.java.html index 3a0c1c862..afcb5e44f 100644 --- a/docs/coverage/test/html/neureka.backend.api.template.algorithms/AbstractAlgorithm.java.html +++ b/docs/coverage/test/html/neureka.backend.api.template.algorithms/AbstractAlgorithm.java.html @@ -27,4 +27,4 @@ public String getName() { return _name; } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.api.template.algorithms/AbstractDeviceAlgorithm.html b/docs/coverage/test/html/neureka.backend.api.template.algorithms/AbstractDeviceAlgorithm.html index 7325dc9cc..572544713 100644 --- a/docs/coverage/test/html/neureka.backend.api.template.algorithms/AbstractDeviceAlgorithm.html +++ b/docs/coverage/test/html/neureka.backend.api.template.algorithms/AbstractDeviceAlgorithm.html @@ -1 +1 @@ -AbstractDeviceAlgorithm

AbstractDeviceAlgorithm

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total166 of 1,28887%32 of 16280%3011318211132
lambda$_deepDerivative$6(ExecutionCall, Function[], FinalExecutor)5819677%111963%91654501
_deepActivation(ExecutionCall, Function[], boolean, boolean, FinalExecutor)3513178%61672%51242701
executeDeviceAlgorithm(ExecutionCall)206676%4866%4731601
_couldNotFindSuitableAlgorithmFor(Class)200%n/a113311
executeOnCommonDevice(ExecutionCall, Supplier)147784%51168%5911401
setImplementationFor(Class, ImplementationFor)131350%1150%122501
executeFor(Function, ExecutionCall, FinalExecutor)43990%1375%130801
_indexOfFoundDerivative(Tensor[])7197%11794%11001301
lambda$_flatten$1(Function[], ExecutionCall, ExecutionCall)109100%14100%0801601
_innerTimesOuter(Tensor, Tensor[], ExecutionCall)75100%8100%0501001
toString()46100%n/a010401
_deepDerivative(ExecutionCall, Function[], FinalExecutor)41100%8100%050801
_prepareForExecution(ExecutionCall)37100%6100%040601
getImplementationFor(Class)33100%6100%040601
_deleteIfNotIn(Tensor[], Tensor)27100%2675%250501
_flatten(ExecutionCall, Function[], boolean)25100%2100%020301
_couldNotFindSuitableImplementationFor(Operation, Algorithm, Class)20100%n/a010401
_delete(Tensor)15100%4100%030401
lambda$_deepActivation$4(String, boolean, Tensor[])11100%n/a010101
lambda$_deepActivation$2(int)11100%n/a010101
prepareAndExecute(ExecutionCall, FinalExecutor)10100%n/a010201
flatten(Function, ExecutionCall)10100%n/a010101
flattenForIndexer(Function, ExecutionCall)10100%n/a010101
lambda$prepareAndExecute$0(FinalExecutor, ExecutionCall)10100%1150%120401
AbstractDeviceAlgorithm(String)9100%n/a010201
lambda$_deepDerivative$7(ExecutionCall, Function[], FinalExecutor)9100%n/a010101
_flatten(ExecutionCall, Function[])5100%n/a010101
lambda$executeOnCommonDevice$8(Device, Tensor)5100%n/a010101
static {...}4100%n/a010101
lambda$_deepDerivative$5(ExecutionCall)3100%n/a010101
lambda$_deepActivation$3(int)3100%n/a010101
lambda$executeOnCommonDevice$9(Tensor)100%n/a010101
\ No newline at end of file
+AbstractDeviceAlgorithm

AbstractDeviceAlgorithm

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total167 of 1,28887%34 of 16279%3211318211132
lambda$_deepDerivative$6(ExecutionCall, Function[], FinalExecutor)5819677%111963%91654501
_deepActivation(ExecutionCall, Function[], boolean, boolean, FinalExecutor)3513178%71568%61242701
executeDeviceAlgorithm(ExecutionCall)206676%4866%4731601
_couldNotFindSuitableAlgorithmFor(Class)200%n/a113311
executeOnCommonDevice(ExecutionCall, Supplier)147784%51168%5911401
setImplementationFor(Class, ImplementationFor)131350%1150%122501
executeFor(Function, ExecutionCall, FinalExecutor)43990%1375%130801
_indexOfFoundDerivative(Tensor[])7197%11794%11001301
_deleteIfNotIn(Tensor[], Tensor)2696%3562%350501
lambda$_flatten$1(Function[], ExecutionCall, ExecutionCall)109100%14100%0801601
_innerTimesOuter(Tensor, Tensor[], ExecutionCall)75100%8100%0501001
toString()46100%n/a010401
_deepDerivative(ExecutionCall, Function[], FinalExecutor)41100%8100%050801
_prepareForExecution(ExecutionCall)37100%6100%040601
getImplementationFor(Class)33100%6100%040601
_flatten(ExecutionCall, Function[], boolean)25100%2100%020301
_couldNotFindSuitableImplementationFor(Operation, Algorithm, Class)20100%n/a010401
_delete(Tensor)15100%4100%030401
lambda$_deepActivation$4(String, boolean, Tensor[])11100%n/a010101
lambda$_deepActivation$2(int)11100%n/a010101
prepareAndExecute(ExecutionCall, FinalExecutor)10100%n/a010201
flatten(Function, ExecutionCall)10100%n/a010101
flattenForIndexer(Function, ExecutionCall)10100%n/a010101
lambda$prepareAndExecute$0(FinalExecutor, ExecutionCall)10100%1150%120401
AbstractDeviceAlgorithm(String)9100%n/a010201
lambda$_deepDerivative$7(ExecutionCall, Function[], FinalExecutor)9100%n/a010101
_flatten(ExecutionCall, Function[])5100%n/a010101
lambda$executeOnCommonDevice$8(Device, Tensor)5100%n/a010101
static {...}4100%n/a010101
lambda$_deepDerivative$5(ExecutionCall)3100%n/a010101
lambda$_deepActivation$3(int)3100%n/a010101
lambda$executeOnCommonDevice$9(Tensor)100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.api.template.algorithms/AbstractDeviceAlgorithm.java.html b/docs/coverage/test/html/neureka.backend.api.template.algorithms/AbstractDeviceAlgorithm.java.html index 2894a5be0..c4dd72782 100644 --- a/docs/coverage/test/html/neureka.backend.api.template.algorithms/AbstractDeviceAlgorithm.java.html +++ b/docs/coverage/test/html/neureka.backend.api.template.algorithms/AbstractDeviceAlgorithm.java.html @@ -216,7 +216,7 @@ ExecutionCall<?> flattenedCall = _flatten( call.withArgs( Arg.VarIdx.of(j) ), nodes ); - if ( + if ( !isFlat && j < 0 && ( call.getOperation().isOperator() || @@ -412,7 +412,7 @@ private static void _deleteIfNotIn(Tensor<?>[] array, Tensor<?> tensor ) { if ( Neureka.get().settings().debug().isDeletingIntermediateTensors() ) { for ( int i = 1; i < array.length; i++ ) - if ( array[i] == tensor ) return; + if ( array[i] == tensor ) return; if ( !tensor.isDeleted() ) tensor.mut().delete(); } @@ -470,4 +470,4 @@ } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.api.template.algorithms/AbstractFunAlgorithm.html b/docs/coverage/test/html/neureka.backend.api.template.algorithms/AbstractFunAlgorithm.html index 5e5780530..cace2574f 100644 --- a/docs/coverage/test/html/neureka.backend.api.template.algorithms/AbstractFunAlgorithm.html +++ b/docs/coverage/test/html/neureka.backend.api.template.algorithms/AbstractFunAlgorithm.html @@ -1 +1 @@ -AbstractFunAlgorithm

AbstractFunAlgorithm

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total103 of 21151%10 of 1844%9211239012
_checked(Object, Object, Class)36716%4233%344701
execute(Function, ExecutionCall)301938%2250%234901
_checkReadiness()21416%1150%122401
buildFunAlgorithm()161446%3350%342501
setIsSuitableFor(SuitabilityPredicate)11100%n/a010201
setAutogradModeFor(ADSupportPredicate)11100%n/a010201
setExecution(Execution)11100%n/a010201
AbstractFunAlgorithm(String)7100%n/a010301
isSuitableFor(ExecutionCall)7100%n/a010201
autoDiffModeFrom(ExecutionCall)7100%n/a010201
lambda$execute$0(Function, ExecutionCall)6100%n/a010101
static {...}4100%n/a010101
\ No newline at end of file
+AbstractFunAlgorithm

AbstractFunAlgorithm

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total103 of 21151%10 of 1844%9211239012
_checked(Object, Object, Class)36716%4233%344701
execute(Function, ExecutionCall)301938%2250%234901
_checkReadiness()21416%1150%122401
buildFunAlgorithm()161446%3350%342501
setIsSuitableFor(SuitabilityPredicate)11100%n/a010201
setAutogradModeFor(ADSupportPredicate)11100%n/a010201
setExecution(Execution)11100%n/a010201
AbstractFunAlgorithm(String)7100%n/a010301
isSuitableFor(ExecutionCall)7100%n/a010201
autoDiffModeFrom(ExecutionCall)7100%n/a010201
lambda$execute$0(Function, ExecutionCall)6100%n/a010101
static {...}4100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.api.template.algorithms/AbstractFunAlgorithm.java.html b/docs/coverage/test/html/neureka.backend.api.template.algorithms/AbstractFunAlgorithm.java.html index 917bd5faa..71bdee67e 100644 --- a/docs/coverage/test/html/neureka.backend.api.template.algorithms/AbstractFunAlgorithm.java.html +++ b/docs/coverage/test/html/neureka.backend.api.template.algorithms/AbstractFunAlgorithm.java.html @@ -165,4 +165,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.api.template.algorithms/AbstractFunDeviceAlgorithm.html b/docs/coverage/test/html/neureka.backend.api.template.algorithms/AbstractFunDeviceAlgorithm.html index c0f74db2b..42bb3ccae 100644 --- a/docs/coverage/test/html/neureka.backend.api.template.algorithms/AbstractFunDeviceAlgorithm.html +++ b/docs/coverage/test/html/neureka.backend.api.template.algorithms/AbstractFunDeviceAlgorithm.html @@ -1 +1 @@ -AbstractFunDeviceAlgorithm

AbstractFunDeviceAlgorithm

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total55 of 31982%8 of 3476%835760018
execute(Function, ExecutionCall)393043%3562%3551301
_checked(Object, Object, Class)162762%1583%142701
buildFunAlgorithm()39100%4866%470501
prepare(ExecutionCall)26100%2100%020901
_checkReadiness()25100%2100%020401
lambda$execute$3(Function, ExecutionCall)18100%2100%020301
setIsSuitableFor(SuitabilityPredicate)11100%n/a010201
setSupplyADActionFor(ADActionSupplier)11100%n/a010201
setCallPreparation(ExecutionPreparation)11100%n/a010201
setAutogradModeFor(ADSupportPredicate)11100%n/a010201
setExecution(Execution)11100%n/a010201
AbstractFunDeviceAlgorithm(String)7100%n/a010201
isSuitableFor(ExecutionCall)7100%n/a010201
autoDiffModeFrom(ExecutionCall)7100%n/a010201
lambda$prepare$0(Tensor, Tensor)7100%2100%020101
lambda$prepare$2(Tensor)6100%n/a010101
lambda$prepare$1(Tensor[], Tensor)6100%n/a010201
static {...}4100%n/a010101
\ No newline at end of file
+AbstractFunDeviceAlgorithm

AbstractFunDeviceAlgorithm

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total55 of 31982%8 of 3476%835760018
execute(Function, ExecutionCall)393043%3562%3551301
_checked(Object, Object, Class)162762%1583%142701
buildFunAlgorithm()39100%4866%470501
prepare(ExecutionCall)26100%2100%020901
_checkReadiness()25100%2100%020401
lambda$execute$3(Function, ExecutionCall)18100%2100%020301
setIsSuitableFor(SuitabilityPredicate)11100%n/a010201
setSupplyADActionFor(ADActionSupplier)11100%n/a010201
setCallPreparation(ExecutionPreparation)11100%n/a010201
setAutogradModeFor(ADSupportPredicate)11100%n/a010201
setExecution(Execution)11100%n/a010201
AbstractFunDeviceAlgorithm(String)7100%n/a010201
isSuitableFor(ExecutionCall)7100%n/a010201
autoDiffModeFrom(ExecutionCall)7100%n/a010201
lambda$prepare$0(Tensor, Tensor)7100%2100%020101
lambda$prepare$2(Tensor)6100%n/a010101
lambda$prepare$1(Tensor[], Tensor)6100%n/a010201
static {...}4100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.api.template.algorithms/AbstractFunDeviceAlgorithm.java.html b/docs/coverage/test/html/neureka.backend.api.template.algorithms/AbstractFunDeviceAlgorithm.java.html index b9c1e12c8..231af881c 100644 --- a/docs/coverage/test/html/neureka.backend.api.template.algorithms/AbstractFunDeviceAlgorithm.java.html +++ b/docs/coverage/test/html/neureka.backend.api.template.algorithms/AbstractFunDeviceAlgorithm.java.html @@ -261,4 +261,4 @@ } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.api.template.algorithms/FallbackAlgorithm.html b/docs/coverage/test/html/neureka.backend.api.template.algorithms/FallbackAlgorithm.html index c00173e89..868cc57fe 100644 --- a/docs/coverage/test/html/neureka.backend.api.template.algorithms/FallbackAlgorithm.html +++ b/docs/coverage/test/html/neureka.backend.api.template.algorithms/FallbackAlgorithm.html @@ -1 +1 @@ -FallbackAlgorithm

FallbackAlgorithm

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total122 of 65581%15 of 5974%125323125122
lambda$new$2(double[], ExecutionCall, Function, int, int)470%40%335511
lambda$new$4(Operation, ExecutionCall)216174%2466%2462301
_tryExecute(ExecutionCall, Class)214467%7436%5841601
_tryExecute(Method, Object[], int)193866%2100%023701
prepare(ExecutionCall)94483%2250%2331501
_findMethod(String, Class)59595%12100%0722101
isSuitableFor(ExecutionCall)45100%10100%060701
lambda$_tryExecute$8(ExecutionCall, Method, int, int)43100%4100%030601
lambda$new$3(ExecutionCall, int, int)37100%4100%030601
ADAction(Function, ExecutionCall)35100%2100%020701
FallbackAlgorithm(String, int, Operation)14100%n/a010501
lambda$ADAction$7(Function, Tensor, ADTarget)14100%n/a010101
lambda$ADAction$5(Function, Tensor, ADTarget)14100%n/a010101
setAt(Tensor, int, Object)10100%n/a010201
execute(Function, ExecutionCall)8100%n/a010101
lambda$ADAction$6(Function, ExecutionCall)7100%n/a010101
dispatch(Function, ExecutionCall)5100%n/a010101
lambda$new$0(Tensor)5100%n/a010101
supplyADActionFor(Function, ExecutionCall)4100%n/a010101
lambda$new$1(Tensor)4100%n/a010101
static {...}4100%n/a010101
autoDiffModeFrom(ExecutionCall)2100%n/a010101
\ No newline at end of file
+FallbackAlgorithm

FallbackAlgorithm

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total122 of 65581%15 of 5974%125323125122
lambda$new$2(double[], ExecutionCall, Function, int, int)470%40%335511
lambda$new$4(Operation, ExecutionCall)216174%2466%2462301
_tryExecute(ExecutionCall, Class)214467%7436%5841601
_tryExecute(Method, Object[], int)193866%2100%023701
prepare(ExecutionCall)94483%2250%2331501
_findMethod(String, Class)59595%12100%0722101
isSuitableFor(ExecutionCall)45100%10100%060701
lambda$_tryExecute$8(ExecutionCall, Method, int, int)43100%4100%030601
lambda$new$3(ExecutionCall, int, int)37100%4100%030601
ADAction(Function, ExecutionCall)35100%2100%020701
FallbackAlgorithm(String, int, Operation)14100%n/a010501
lambda$ADAction$7(Function, Tensor, ADTarget)14100%n/a010101
lambda$ADAction$5(Function, Tensor, ADTarget)14100%n/a010101
setAt(Tensor, int, Object)10100%n/a010201
execute(Function, ExecutionCall)8100%n/a010101
lambda$ADAction$6(Function, ExecutionCall)7100%n/a010101
dispatch(Function, ExecutionCall)5100%n/a010101
lambda$new$0(Tensor)5100%n/a010101
supplyADActionFor(Function, ExecutionCall)4100%n/a010101
lambda$new$1(Tensor)4100%n/a010101
static {...}4100%n/a010101
autoDiffModeFrom(ExecutionCall)2100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.api.template.algorithms/FallbackAlgorithm.java.html b/docs/coverage/test/html/neureka.backend.api.template.algorithms/FallbackAlgorithm.java.html index 7e60ef85a..4710ceea8 100644 --- a/docs/coverage/test/html/neureka.backend.api.template.algorithms/FallbackAlgorithm.java.html +++ b/docs/coverage/test/html/neureka.backend.api.template.algorithms/FallbackAlgorithm.java.html @@ -245,4 +245,4 @@ return Result.of(this.dispatch(caller, call)).withAutoDiff(this); } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.api.template.algorithms/FunAlgorithm.html b/docs/coverage/test/html/neureka.backend.api.template.algorithms/FunAlgorithm.html index c9a4a86cc..5719b1359 100644 --- a/docs/coverage/test/html/neureka.backend.api.template.algorithms/FunAlgorithm.html +++ b/docs/coverage/test/html/neureka.backend.api.template.algorithms/FunAlgorithm.html @@ -1 +1 @@ -FunAlgorithm

FunAlgorithm

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 4100%0 of 0n/a010101
FunAlgorithm(String)4100%n/a010101
\ No newline at end of file
+FunAlgorithm

FunAlgorithm

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 4100%0 of 0n/a010101
FunAlgorithm(String)4100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.api.template.algorithms/FunAlgorithm.java.html b/docs/coverage/test/html/neureka.backend.api.template.algorithms/FunAlgorithm.java.html index 0611af91d..76abbad16 100644 --- a/docs/coverage/test/html/neureka.backend.api.template.algorithms/FunAlgorithm.java.html +++ b/docs/coverage/test/html/neureka.backend.api.template.algorithms/FunAlgorithm.java.html @@ -4,4 +4,4 @@ { public FunAlgorithm( String name ) { super(name); } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.api.template.algorithms/FunDeviceAlgorithm.html b/docs/coverage/test/html/neureka.backend.api.template.algorithms/FunDeviceAlgorithm.html index 91f654109..86cc707b5 100644 --- a/docs/coverage/test/html/neureka.backend.api.template.algorithms/FunDeviceAlgorithm.html +++ b/docs/coverage/test/html/neureka.backend.api.template.algorithms/FunDeviceAlgorithm.html @@ -1 +1 @@ -FunDeviceAlgorithm

FunDeviceAlgorithm

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 4100%0 of 0n/a010101
FunDeviceAlgorithm(String)4100%n/a010101
\ No newline at end of file
+FunDeviceAlgorithm

FunDeviceAlgorithm

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 4100%0 of 0n/a010101
FunDeviceAlgorithm(String)4100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.api.template.algorithms/FunDeviceAlgorithm.java.html b/docs/coverage/test/html/neureka.backend.api.template.algorithms/FunDeviceAlgorithm.java.html index d5c2ba6c6..fa05d852f 100644 --- a/docs/coverage/test/html/neureka.backend.api.template.algorithms/FunDeviceAlgorithm.java.html +++ b/docs/coverage/test/html/neureka.backend.api.template.algorithms/FunDeviceAlgorithm.java.html @@ -4,4 +4,4 @@ { public FunDeviceAlgorithm( String name ) { super( name ); } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.api.template.algorithms/index.html b/docs/coverage/test/html/neureka.backend.api.template.algorithms/index.html index 700ca34a2..823748a4a 100644 --- a/docs/coverage/test/html/neureka.backend.api.template.algorithms/index.html +++ b/docs/coverage/test/html/neureka.backend.api.template.algorithms/index.html @@ -1 +1 @@ -neureka.backend.api.template.algorithms

neureka.backend.api.template.algorithms

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total446 of 2,49082%65 of 27376%592266043928807
AbstractDeviceAlgorithm1661,12287%3213080%301131821113201
FallbackAlgorithm12253381%154474%12532312512201
AbstractFunAlgorithm10310851%10844%921123901201
AbstractFunDeviceAlgorithm5526482%82676%83576001801
AbstractAlgorithm100%n/a02020201
FunDeviceAlgorithm100%n/a01010101
FunAlgorithm100%n/a01010101
\ No newline at end of file
+neureka.backend.api.template.algorithms

neureka.backend.api.template.algorithms

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total447 of 2,49082%67 of 27375%612266043928807
AbstractDeviceAlgorithm1671,12187%3412879%321131821113201
FallbackAlgorithm12253381%154474%12532312512201
AbstractFunAlgorithm10310851%10844%921123901201
AbstractFunDeviceAlgorithm5526482%82676%83576001801
AbstractAlgorithm100%n/a02020201
FunDeviceAlgorithm100%n/a01010101
FunAlgorithm100%n/a01010101
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.backend.api.template.algorithms/index.source.html b/docs/coverage/test/html/neureka.backend.api.template.algorithms/index.source.html
index f62968357..38859d1d1 100644
--- a/docs/coverage/test/html/neureka.backend.api.template.algorithms/index.source.html
+++ b/docs/coverage/test/html/neureka.backend.api.template.algorithms/index.source.html
@@ -1 +1 @@
-neureka.backend.api.template.algorithms

neureka.backend.api.template.algorithms

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total446 of 2,49082%65 of 27376%592266043928807
AbstractDeviceAlgorithm.java1661,12287%3213080%301131821113201
FallbackAlgorithm.java12253381%154474%12532312512201
AbstractFunAlgorithm.java10310851%10844%921123901201
AbstractFunDeviceAlgorithm.java5526482%82676%83576001801
AbstractAlgorithm.java100%n/a02020201
FunAlgorithm.java100%n/a01010101
FunDeviceAlgorithm.java100%n/a01010101
\ No newline at end of file
+neureka.backend.api.template.algorithms

neureka.backend.api.template.algorithms

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total447 of 2,49082%67 of 27375%612266043928807
AbstractDeviceAlgorithm.java1671,12187%3412879%321131821113201
FallbackAlgorithm.java12253381%154474%12532312512201
AbstractFunAlgorithm.java10310851%10844%921123901201
AbstractFunDeviceAlgorithm.java5526482%82676%83576001801
AbstractAlgorithm.java100%n/a02020201
FunAlgorithm.java100%n/a01010101
FunDeviceAlgorithm.java100%n/a01010101
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.backend.api.template.implementations/AbstractImplementationFor.html b/docs/coverage/test/html/neureka.backend.api.template.implementations/AbstractImplementationFor.html
index c9cf4c093..e7f818039 100644
--- a/docs/coverage/test/html/neureka.backend.api.template.implementations/AbstractImplementationFor.html
+++ b/docs/coverage/test/html/neureka.backend.api.template.implementations/AbstractImplementationFor.html
@@ -1 +1 @@
-AbstractImplementationFor

AbstractImplementationFor

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total34 of 6245%3 of 650%2511002
run(ExecutionCall)341935%3350%241601
AbstractImplementationFor(ImplementationFor, int)9100%n/a010401
\ No newline at end of file
+AbstractImplementationFor

AbstractImplementationFor

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total34 of 6245%3 of 650%2511002
run(ExecutionCall)341935%3350%241601
AbstractImplementationFor(ImplementationFor, int)9100%n/a010401
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.api.template.implementations/AbstractImplementationFor.java.html b/docs/coverage/test/html/neureka.backend.api.template.implementations/AbstractImplementationFor.java.html index 27672a23c..dfe585459 100644 --- a/docs/coverage/test/html/neureka.backend.api.template.implementations/AbstractImplementationFor.java.html +++ b/docs/coverage/test/html/neureka.backend.api.template.implementations/AbstractImplementationFor.java.html @@ -30,4 +30,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.api.template.implementations/index.html b/docs/coverage/test/html/neureka.backend.api.template.implementations/index.html index 29560314c..30dbfba9f 100644 --- a/docs/coverage/test/html/neureka.backend.api.template.implementations/index.html +++ b/docs/coverage/test/html/neureka.backend.api.template.implementations/index.html @@ -1 +1 @@ -neureka.backend.api.template.implementations

neureka.backend.api.template.implementations

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total34 of 6245%3 of 650%251100201
AbstractImplementationFor342845%3350%251100201
\ No newline at end of file
+neureka.backend.api.template.implementations

neureka.backend.api.template.implementations

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total34 of 6245%3 of 650%251100201
AbstractImplementationFor342845%3350%251100201
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.backend.api.template.implementations/index.source.html b/docs/coverage/test/html/neureka.backend.api.template.implementations/index.source.html
index 3abe6d791..6d1a04454 100644
--- a/docs/coverage/test/html/neureka.backend.api.template.implementations/index.source.html
+++ b/docs/coverage/test/html/neureka.backend.api.template.implementations/index.source.html
@@ -1 +1 @@
-neureka.backend.api.template.implementations

neureka.backend.api.template.implementations

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total34 of 6245%3 of 650%251100201
AbstractImplementationFor.java342845%3350%251100201
\ No newline at end of file
+neureka.backend.api.template.implementations

neureka.backend.api.template.implementations

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total34 of 6245%3 of 650%251100201
AbstractImplementationFor.java342845%3350%251100201
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.backend.api.template.operations/AbstractOperation.html b/docs/coverage/test/html/neureka.backend.api.template.operations/AbstractOperation.html
index 2ba8b0566..3c50b0011 100644
--- a/docs/coverage/test/html/neureka.backend.api.template.operations/AbstractOperation.html
+++ b/docs/coverage/test/html/neureka.backend.api.template.operations/AbstractOperation.html
@@ -1 +1 @@
-AbstractOperation

AbstractOperation

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total54 of 39686%4 of 2684%635777222
getAlgorithmFor(ExecutionCall)196577%1990%1631801
setAlgorithm(Class, Algorithm)151346%1150%122501
asDerivative(Function[], int)150%n/a111111
isDifferentiable()30%n/a111111
toString()24395%1150%120501
stringify(String[])88100%1990%1601401
AbstractOperation(OperationBuilder)52100%n/a0101201
getAlgorithm(Class)24100%2100%020901
getAllAlgorithms()8100%n/a010101
lambda$getAlgorithm$0(Class, Map.Entry)6100%n/a010101
supportsAlgorithm(Class)5100%n/a010101
supports(Class)5100%n/a010101
operationName()4100%n/a010101
lambda$getAlgorithm$1(Map.Entry)4100%n/a010101
static {...}4100%n/a010101
isOperator()3100%n/a010101
getIdentifier()3100%n/a010101
getOperator()3100%n/a010101
getArity()3100%n/a010101
isIndexer()3100%n/a010101
isInline()3100%n/a010101
getDefaultAlgorithm()3100%n/a010101
\ No newline at end of file
+AbstractOperation

AbstractOperation

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total59 of 39685%4 of 2684%735877322
getAlgorithmFor(ExecutionCall)196577%1990%1631801
setAlgorithm(Class, Algorithm)151346%1150%122501
asDerivative(Function[], int)150%n/a111111
supportsAlgorithm(Class)50%n/a111111
isDifferentiable()30%n/a111111
toString()24395%1150%120501
stringify(String[])88100%1990%1601401
AbstractOperation(OperationBuilder)52100%n/a0101201
getAlgorithm(Class)24100%2100%020901
getAllAlgorithms()8100%n/a010101
lambda$getAlgorithm$0(Class, Map.Entry)6100%n/a010101
supports(Class)5100%n/a010101
operationName()4100%n/a010101
lambda$getAlgorithm$1(Map.Entry)4100%n/a010101
static {...}4100%n/a010101
isOperator()3100%n/a010101
getIdentifier()3100%n/a010101
getOperator()3100%n/a010101
getArity()3100%n/a010101
isIndexer()3100%n/a010101
isInline()3100%n/a010101
getDefaultAlgorithm()3100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.api.template.operations/AbstractOperation.java.html b/docs/coverage/test/html/neureka.backend.api.template.operations/AbstractOperation.java.html index f6bee2a9f..3aa649438 100644 --- a/docs/coverage/test/html/neureka.backend.api.template.operations/AbstractOperation.java.html +++ b/docs/coverage/test/html/neureka.backend.api.template.operations/AbstractOperation.java.html @@ -150,7 +150,7 @@ */ @Override public final <T extends Algorithm> boolean supportsAlgorithm( Class<T> type ) { - return _algorithms.containsKey( type ); + return _algorithms.containsKey( type ); } /** @@ -280,4 +280,4 @@ return this.getClass().getSimpleName(); } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.api.template.operations/OperationBuilder$1.html b/docs/coverage/test/html/neureka.backend.api.template.operations/OperationBuilder$1.html index 731117e88..156a49e3b 100644 --- a/docs/coverage/test/html/neureka.backend.api.template.operations/OperationBuilder$1.html +++ b/docs/coverage/test/html/neureka.backend.api.template.operations/OperationBuilder$1.html @@ -1 +1 @@ -OperationBuilder.new AbstractOperation() {...}

OperationBuilder.new AbstractOperation() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total27 of 4641%3 of 425%475925
asDerivative(Function[], int)160%20%223311
calculate(double[], int, int, Function[])70%n/a111111
stringify(String[])41071%1150%121301
{...}7100%n/a010101
operationName()2100%n/a010101
\ No newline at end of file
+OperationBuilder.new AbstractOperation() {...}

OperationBuilder.new AbstractOperation() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total29 of 4636%3 of 425%576935
asDerivative(Function[], int)160%20%223311
calculate(double[], int, int, Function[])70%n/a111111
stringify(String[])41071%1150%121301
operationName()20%n/a111111
{...}7100%n/a010101
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.backend.api.template.operations/OperationBuilder.html b/docs/coverage/test/html/neureka.backend.api.template.operations/OperationBuilder.html
index 2801c4345..382e4829a 100644
--- a/docs/coverage/test/html/neureka.backend.api.template.operations/OperationBuilder.html
+++ b/docs/coverage/test/html/neureka.backend.api.template.operations/OperationBuilder.html
@@ -1 +1 @@
-OperationBuilder

OperationBuilder

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total153 of 34655%29 of 5446%3049771322
_listOfMissingProperties()282749%7750%780901
setDerivation(OperationBuilder.Derivation)210%40%334411
build()181545%2250%231501
arity(int)101254%2250%230401
isOperator(boolean)101254%2250%230401
isIndexer(boolean)101254%2250%230401
isDifferentiable(boolean)101254%2250%230401
isInline(boolean)101254%2250%230401
stringifier(OperationBuilder.Stringifier)101152%2250%230401
identifier(String)101152%2250%230401
operator(String)101152%2250%230401
getStringifier()30%n/a111111
getDerivator()30%n/a111111
OperationBuilder()33100%n/a0101101
dispose()4100%n/a010101
getIdentifier()3100%n/a010101
getOperator()3100%n/a010101
getArity()3100%n/a010101
getIsOperator()3100%n/a010101
getIsIndexer()3100%n/a010101
getIsDifferentiable()3100%n/a010101
getIsInline()3100%n/a010101
\ No newline at end of file
+OperationBuilder

OperationBuilder

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total153 of 34655%29 of 5446%3049771322
_listOfMissingProperties()282749%7750%780901
setDerivation(OperationBuilder.Derivation)210%40%334411
build()181545%2250%231501
arity(int)101254%2250%230401
isOperator(boolean)101254%2250%230401
isIndexer(boolean)101254%2250%230401
isDifferentiable(boolean)101254%2250%230401
isInline(boolean)101254%2250%230401
stringifier(OperationBuilder.Stringifier)101152%2250%230401
identifier(String)101152%2250%230401
operator(String)101152%2250%230401
getStringifier()30%n/a111111
getDerivator()30%n/a111111
OperationBuilder()33100%n/a0101101
dispose()4100%n/a010101
getIdentifier()3100%n/a010101
getOperator()3100%n/a010101
getArity()3100%n/a010101
getIsOperator()3100%n/a010101
getIsIndexer()3100%n/a010101
getIsDifferentiable()3100%n/a010101
getIsInline()3100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.api.template.operations/OperationBuilder.java.html b/docs/coverage/test/html/neureka.backend.api.template.operations/OperationBuilder.java.html index 3ef5d42a2..16de03ea7 100644 --- a/docs/coverage/test/html/neureka.backend.api.template.operations/OperationBuilder.java.html +++ b/docs/coverage/test/html/neureka.backend.api.template.operations/OperationBuilder.java.html @@ -188,11 +188,11 @@ return src[ 0 ].call( inputs, j ); } - @Override protected String operationName() { return "OptimizedOperation"; } + @Override protected String operationName() { return "OptimizedOperation"; } }; } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.api.template.operations/index.html b/docs/coverage/test/html/neureka.backend.api.template.operations/index.html index bb9bbc555..e77334c2d 100644 --- a/docs/coverage/test/html/neureka.backend.api.template.operations/index.html +++ b/docs/coverage/test/html/neureka.backend.api.template.operations/index.html @@ -1 +1 @@ -neureka.backend.api.template.operations

neureka.backend.api.template.operations

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total234 of 78870%36 of 8457%40911915674903
OperationBuilder15319355%292546%304977132201
AbstractOperation5434286%42284%63577722201
OperationBuilder.new AbstractOperation() {...}271941%3125%47592501
\ No newline at end of file
+neureka.backend.api.template.operations

neureka.backend.api.template.operations

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total241 of 78869%36 of 8457%42912115694903
OperationBuilder15319355%292546%304977132201
AbstractOperation5933785%42284%73587732201
OperationBuilder.new AbstractOperation() {...}291736%3125%57693501
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.backend.api.template.operations/index.source.html b/docs/coverage/test/html/neureka.backend.api.template.operations/index.source.html
index a99874097..a057fcd90 100644
--- a/docs/coverage/test/html/neureka.backend.api.template.operations/index.source.html
+++ b/docs/coverage/test/html/neureka.backend.api.template.operations/index.source.html
@@ -1 +1 @@
-neureka.backend.api.template.operations

neureka.backend.api.template.operations

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total234 of 78870%36 of 8457%40911915674903
OperationBuilder.java18021254%322644%3456127952702
AbstractOperation.java5434286%42284%63577722201
\ No newline at end of file
+neureka.backend.api.template.operations

neureka.backend.api.template.operations

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total241 of 78869%36 of 8457%42912115694903
OperationBuilder.java18221053%322644%3556137962702
AbstractOperation.java5933785%42284%73587732201
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.backend.api/Algorithm.html b/docs/coverage/test/html/neureka.backend.api/Algorithm.html
index 0ff8d01a0..767d65863 100644
--- a/docs/coverage/test/html/neureka.backend.api/Algorithm.html
+++ b/docs/coverage/test/html/neureka.backend.api/Algorithm.html
@@ -1 +1 @@
-Algorithm

Algorithm

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 11100%0 of 0n/a010201
withName(String)11100%n/a010201
\ No newline at end of file
+Algorithm

Algorithm

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 11100%0 of 0n/a010201
withName(String)11100%n/a010201
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.api/Algorithm.java.html b/docs/coverage/test/html/neureka.backend.api/Algorithm.java.html index 2480e20a0..b90d80787 100644 --- a/docs/coverage/test/html/neureka.backend.api/Algorithm.java.html +++ b/docs/coverage/test/html/neureka.backend.api/Algorithm.java.html @@ -85,4 +85,4 @@ String getName(); } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.api/AutoDiffMode.html b/docs/coverage/test/html/neureka.backend.api/AutoDiffMode.html index 11c308c39..a68cc05da 100644 --- a/docs/coverage/test/html/neureka.backend.api/AutoDiffMode.html +++ b/docs/coverage/test/html/neureka.backend.api/AutoDiffMode.html @@ -1 +1 @@ -AutoDiffMode

AutoDiffMode

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 47100%1 of 887%170703
static {...}27100%n/a010501
allowsForward()10100%1375%130101
allowsBackward()10100%4100%030101
\ No newline at end of file
+AutoDiffMode

AutoDiffMode

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 64100%1 of 887%170703
static {...}44100%n/a010501
allowsForward()10100%1375%130101
allowsBackward()10100%4100%030101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.api/AutoDiffMode.java.html b/docs/coverage/test/html/neureka.backend.api/AutoDiffMode.java.html index 0ee92ea1d..ece26fd25 100644 --- a/docs/coverage/test/html/neureka.backend.api/AutoDiffMode.java.html +++ b/docs/coverage/test/html/neureka.backend.api/AutoDiffMode.java.html @@ -16,4 +16,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.api/BackendContext$1.html b/docs/coverage/test/html/neureka.backend.api/BackendContext$1.html index 19f07b0a1..21340a75d 100644 --- a/docs/coverage/test/html/neureka.backend.api/BackendContext$1.html +++ b/docs/coverage/test/html/neureka.backend.api/BackendContext$1.html @@ -1 +1 @@ -BackendContext.new ImplementationReceiver() {...}

BackendContext.new ImplementationReceiver() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 24100%0 of 0n/a020302
accept(Class, Class, Class, Function)15100%n/a010201
{...}9100%n/a010101
\ No newline at end of file
+BackendContext.new ImplementationReceiver() {...}

BackendContext.new ImplementationReceiver() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 24100%0 of 0n/a020302
accept(Class, Class, Class, Function)15100%n/a010201
{...}9100%n/a010101
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.backend.api/BackendContext$2.html b/docs/coverage/test/html/neureka.backend.api/BackendContext$2.html
index ae62e3195..a974c4e42 100644
--- a/docs/coverage/test/html/neureka.backend.api/BackendContext$2.html
+++ b/docs/coverage/test/html/neureka.backend.api/BackendContext$2.html
@@ -1 +1 @@
-BackendContext.new LoadingContext() {...}

BackendContext.new LoadingContext() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total4 of 2080%0 of 0n/a131313
getAlgorithmName()40%n/a111111
{...}12100%n/a010101
getOperationIdentidier()4100%n/a010101
\ No newline at end of file
+BackendContext.new LoadingContext() {...}

BackendContext.new LoadingContext() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total8 of 2060%0 of 0n/a232323
getAlgorithmName()40%n/a111111
getOperationIdentidier()40%n/a111111
{...}12100%n/a010101
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.backend.api/BackendContext$Registered.html b/docs/coverage/test/html/neureka.backend.api/BackendContext$Registered.html
index 9d08757ea..3805a8204 100644
--- a/docs/coverage/test/html/neureka.backend.api/BackendContext$Registered.html
+++ b/docs/coverage/test/html/neureka.backend.api/BackendContext$Registered.html
@@ -1 +1 @@
-BackendContext.Registered

BackendContext.Registered

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 18100%0 of 0n/a010601
BackendContext.Registered(BackendContext, Class, Class, Class, Function)18100%n/a010601
\ No newline at end of file
+BackendContext.Registered

BackendContext.Registered

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 18100%0 of 0n/a010601
BackendContext.Registered(BackendContext, Class, Class, Class, Function)18100%n/a010601
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.backend.api/BackendContext$Runner.html b/docs/coverage/test/html/neureka.backend.api/BackendContext$Runner.html
index b9fca75d6..c30adb780 100644
--- a/docs/coverage/test/html/neureka.backend.api/BackendContext$Runner.html
+++ b/docs/coverage/test/html/neureka.backend.api/BackendContext$Runner.html
@@ -1 +1 @@
-BackendContext.Runner

BackendContext.Runner

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total3 of 4893%1 of 250%1601505
BackendContext.Runner(BackendContext, BackendContext)31280%1150%120501
runAndGet(Supplier)13100%n/a010401
run(Runnable)12100%n/a010401
call(Supplier)4100%n/a010101
invoke(Supplier)4100%n/a010101
\ No newline at end of file
+BackendContext.Runner

BackendContext.Runner

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total3 of 4893%1 of 250%1601505
BackendContext.Runner(BackendContext, BackendContext)31280%1150%120501
runAndGet(Supplier)13100%n/a010401
run(Runnable)12100%n/a010401
call(Supplier)4100%n/a010101
invoke(Supplier)4100%n/a010101
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.backend.api/BackendContext.html b/docs/coverage/test/html/neureka.backend.api/BackendContext.html
index 689fc0fbf..3d68ddaef 100644
--- a/docs/coverage/test/html/neureka.backend.api/BackendContext.html
+++ b/docs/coverage/test/html/neureka.backend.api/BackendContext.html
@@ -1 +1 @@
-BackendContext

BackendContext

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total55 of 39686%6 of 2272%835772224
set(BackendExtension)216375%2466%2421401
reset()141856%2100%022701
addOperation(Operation)85086%2250%2301001
hasOperation(String)50%n/a111111
has(Class)50%n/a111111
_register(BackendContext.Registered)26496%2880%2611101
BackendContext()34100%n/a010901
clone()21100%n/a010501
toString()16100%n/a010101
runner()8100%n/a010101
getOperation(String)7100%n/a010101
hasOperation(Operation)6100%n/a010101
getOperation(int)6100%n/a010101
getFunction()5100%n/a010101
getAutogradFunction()5100%n/a010101
find(Class)5100%n/a010101
getExtensions()5100%n/a010101
lambda$new$1()5100%n/a010101
lambda$new$0()5100%n/a010101
getOperationLookupMap()4100%n/a010101
getOperations()4100%n/a010101
static {...}4100%n/a010101
size()3100%n/a010101
getFunctionCache()3100%n/a010101
\ No newline at end of file
+BackendContext

BackendContext

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total61 of 39684%6 of 2272%935872324
set(BackendExtension)216375%2466%2421401
reset()141856%2100%022701
addOperation(Operation)85086%2250%2301001
hasOperation(Operation)60%n/a111111
hasOperation(String)50%n/a111111
has(Class)50%n/a111111
_register(BackendContext.Registered)26496%2880%2611101
BackendContext()34100%n/a010901
clone()21100%n/a010501
toString()16100%n/a010101
runner()8100%n/a010101
getOperation(String)7100%n/a010101
getOperation(int)6100%n/a010101
getFunction()5100%n/a010101
getAutogradFunction()5100%n/a010101
find(Class)5100%n/a010101
getExtensions()5100%n/a010101
lambda$new$1()5100%n/a010101
lambda$new$0()5100%n/a010101
getOperationLookupMap()4100%n/a010101
getOperations()4100%n/a010101
static {...}4100%n/a010101
size()3100%n/a010101
getFunctionCache()3100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.api/BackendContext.java.html b/docs/coverage/test/html/neureka.backend.api/BackendContext.java.html index 1875ff799..6de2abc4f 100644 --- a/docs/coverage/test/html/neureka.backend.api/BackendContext.java.html +++ b/docs/coverage/test/html/neureka.backend.api/BackendContext.java.html @@ -186,7 +186,7 @@ * @return The truth value determining if the provided {@link Operation} is part of this {@link BackendContext}. */ public boolean hasOperation( Operation operation ) { - return _lookup.containsKey( operation.getIdentifier() ); + return _lookup.containsKey( operation.getIdentifier() ); } /** @@ -333,7 +333,7 @@ registered.deviceType, registered.function.apply(new LoadingContext() { @Override public String getAlgorithmName() { return da.getName(); } - @Override public String getOperationIdentidier() { return o.getIdentifier(); } + @Override public String getOperationIdentidier() { return o.getIdentifier(); } }) ); return true; @@ -445,4 +445,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.api/BackendExtension$DeviceOption.html b/docs/coverage/test/html/neureka.backend.api/BackendExtension$DeviceOption.html index dcd6fc52b..23b9a7ca2 100644 --- a/docs/coverage/test/html/neureka.backend.api/BackendExtension$DeviceOption.html +++ b/docs/coverage/test/html/neureka.backend.api/BackendExtension$DeviceOption.html @@ -1 +1 @@ -BackendExtension.DeviceOption

BackendExtension.DeviceOption

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 15100%0 of 0n/a030603
BackendExtension.DeviceOption(Device, double)9100%n/a010401
device()3100%n/a010101
confidence()3100%n/a010101
\ No newline at end of file
+BackendExtension.DeviceOption

BackendExtension.DeviceOption

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 15100%0 of 0n/a030603
BackendExtension.DeviceOption(Device, double)9100%n/a010401
device()3100%n/a010101
confidence()3100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.api/BackendExtension.html b/docs/coverage/test/html/neureka.backend.api/BackendExtension.html index 308cc9aa8..88d0272cb 100644 --- a/docs/coverage/test/html/neureka.backend.api/BackendExtension.html +++ b/docs/coverage/test/html/neureka.backend.api/BackendExtension.html @@ -1 +1 @@ -BackendExtension

BackendExtension

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 1100%0 of 0n/a010101
reset()1100%n/a010101
\ No newline at end of file
+BackendExtension

BackendExtension

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 1100%0 of 0n/a010101
reset()1100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.api/BackendExtension.java.html b/docs/coverage/test/html/neureka.backend.api/BackendExtension.java.html index 29b035afe..f1fe64030 100644 --- a/docs/coverage/test/html/neureka.backend.api/BackendExtension.java.html +++ b/docs/coverage/test/html/neureka.backend.api/BackendExtension.java.html @@ -68,4 +68,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.api/Call$Builder.html b/docs/coverage/test/html/neureka.backend.api/Call$Builder.html index 7f9e510b0..f3bcbea5a 100644 --- a/docs/coverage/test/html/neureka.backend.api/Call$Builder.html +++ b/docs/coverage/test/html/neureka.backend.api/Call$Builder.html @@ -1 +1 @@ -Call.Builder

Call.Builder

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 82100%0 of 2100%0601105
andArgs(List)24100%2100%020301
Call.Builder(Device)21100%n/a010201
andArgs(Arg[])14100%n/a010201
get()12100%n/a010101
with(Tensor[])11100%n/a010301
\ No newline at end of file
+Call.Builder

Call.Builder

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 82100%0 of 2100%0601105
andArgs(List)24100%2100%020301
Call.Builder(Device)21100%n/a010201
andArgs(Arg[])14100%n/a010201
get()12100%n/a010101
with(Tensor[])11100%n/a010301
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.backend.api/Call$Validator$Estimator.html b/docs/coverage/test/html/neureka.backend.api/Call$Validator$Estimator.html
index 1e79f670c..b72464bf1 100644
--- a/docs/coverage/test/html/neureka.backend.api/Call$Validator$Estimator.html
+++ b/docs/coverage/test/html/neureka.backend.api/Call$Validator$Estimator.html
@@ -1 +1 @@
-Call.Validator.Estimator

Call.Validator.Estimator

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total41 of 12867%9 of 2259%924416413
goodIfAll(Call.TensorCondition)100%20%221111
badIfAll(Call.TensorCondition)100%20%221111
goodIfAll(Call.TensorCompare)100%20%221111
badIfAll(Call.TensorCompare)100%20%221111
Call.Validator.Estimator(Call.Validator, boolean)11292%1150%120301
_mod(float)22100%n/a010401
goodIfAny(Call.TensorCondition)10100%2100%020101
badIfAny(Call.TensorCondition)10100%2100%020101
lambda$badIfAnyNonNull$1(Call.TensorCondition, Tensor)10100%4100%030101
lambda$goodIfAnyNonNull$0(Call.TensorCondition, Tensor)10100%4100%030101
goodIfAnyNonNull(Call.TensorCondition)5100%n/a010101
badIfAnyNonNull(Call.TensorCondition)5100%n/a010101
getEstimation()3100%n/a010101
\ No newline at end of file
+Call.Validator.Estimator

Call.Validator.Estimator

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total41 of 12867%11 of 2250%1124416413
goodIfAll(Call.TensorCondition)100%20%221111
badIfAll(Call.TensorCondition)100%20%221111
goodIfAll(Call.TensorCompare)100%20%221111
badIfAll(Call.TensorCompare)100%20%221111
Call.Validator.Estimator(Call.Validator, boolean)11292%1150%120301
_mod(float)22100%n/a010401
goodIfAny(Call.TensorCondition)10100%2100%020101
badIfAny(Call.TensorCondition)10100%2100%020101
lambda$badIfAnyNonNull$1(Call.TensorCondition, Tensor)10100%1375%130101
lambda$goodIfAnyNonNull$0(Call.TensorCondition, Tensor)10100%1375%130101
goodIfAnyNonNull(Call.TensorCondition)5100%n/a010101
badIfAnyNonNull(Call.TensorCondition)5100%n/a010101
getEstimation()3100%n/a010101
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.backend.api/Call$Validator.html b/docs/coverage/test/html/neureka.backend.api/Call$Validator.html
index f94426092..6fc68901e 100644
--- a/docs/coverage/test/html/neureka.backend.api/Call$Validator.html
+++ b/docs/coverage/test/html/neureka.backend.api/Call$Validator.html
@@ -1 +1 @@
-Call.Validator

Call.Validator

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total118 of 47475%29 of 8867%23681677524
_anyNotNullMatch(Call.TensorCondition)340%80%554411
last(Call.TensorCondition)250%40%333311
first(Call.TensorCondition)210%40%333311
any(Call.TensorCondition)180%40%333311
anyNotNull(Call.TensorCondition)150%20%223311
all(Call.TensorCompare)31583%2250%230301
_allMatch(Call.TensorCompare)23394%1583%140601
allShare(Function)52100%3975%3701101
_allHaveSame(Call.TensorProperty)48100%8100%0501101
_allNotNullMatch(Call.TensorCondition)34100%1787%150401
_anyMatch(Call.TensorCondition)32100%6100%040301
_allMatch(Call.TensorCondition)32100%6100%040301
tensors(Call.TensorsCondition)20100%4100%030301
allNotNull(Call.TensorCondition)18100%4100%030301
all(Call.TensorCondition)15100%2100%020301
allNotNullHaveSame(Call.TensorProperty)15100%2100%020301
Call.Validator(Call)9100%n/a010201
ifValid(Object)8100%2100%020201
suitabilityIfValid(float)7100%2100%020101
getEstimator()7100%n/a010101
basicSuitability()4100%n/a010101
isValid()3100%n/a010101
lambda$ifValid$1(Object)2100%n/a010101
lambda$ifValid$0(Object, Object)2100%n/a010101
\ No newline at end of file
+Call.Validator

Call.Validator

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total118 of 47475%29 of 8867%23681677524
_anyNotNullMatch(Call.TensorCondition)340%80%554411
last(Call.TensorCondition)250%40%333311
first(Call.TensorCondition)210%40%333311
any(Call.TensorCondition)180%40%333311
anyNotNull(Call.TensorCondition)150%20%223311
all(Call.TensorCompare)31583%2250%230301
_allMatch(Call.TensorCompare)23394%1583%140601
allShare(Function)52100%3975%3701101
_allHaveSame(Call.TensorProperty)48100%8100%0501101
_allNotNullMatch(Call.TensorCondition)34100%1787%150401
_anyMatch(Call.TensorCondition)32100%6100%040301
_allMatch(Call.TensorCondition)32100%6100%040301
tensors(Call.TensorsCondition)20100%4100%030301
allNotNull(Call.TensorCondition)18100%4100%030301
all(Call.TensorCondition)15100%2100%020301
allNotNullHaveSame(Call.TensorProperty)15100%2100%020301
Call.Validator(Call)9100%n/a010201
ifValid(Object)8100%2100%020201
suitabilityIfValid(float)7100%2100%020101
getEstimator()7100%n/a010101
basicSuitability()4100%n/a010101
isValid()3100%n/a010101
lambda$ifValid$1(Object)2100%n/a010101
lambda$ifValid$0(Object, Object)2100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.api/Call.html b/docs/coverage/test/html/neureka.backend.api/Call.html index fa49e7690..aca731dc6 100644 --- a/docs/coverage/test/html/neureka.backend.api/Call.html +++ b/docs/coverage/test/html/neureka.backend.api/Call.html @@ -1 +1 @@ -Call

Call

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total38 of 21982%2 of 1080%220438015
input(Class, int)382236%2466%2441001
Call(Tensor[], Object, List)50100%2100%020901
rearrangeInputs(int[])29100%2100%020501
get(Class)12100%n/a010201
allMetaArgs()11100%n/a010101
getValOf(Class)11100%n/a010201
getDeviceFor(Class)10100%n/a010201
to(Device)6100%n/a010101
getDerivativeIndex()6100%n/a010101
inputs()5100%n/a010101
input(int)5100%n/a010101
validate()5100%n/a010101
arity()4100%n/a010101
getDevice()3100%n/a010101
lambda$allMetaArgs$0(Arg)2100%n/a010101
\ No newline at end of file +Call

Call

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total38 of 21982%2 of 1080%220438015
input(Class, int)382236%2466%2441001
Call(Tensor[], Object, List)50100%2100%020901
rearrangeInputs(int[])29100%2100%020501
get(Class)12100%n/a010201
allMetaArgs()11100%n/a010101
getValOf(Class)11100%n/a010201
getDeviceFor(Class)10100%n/a010201
to(Device)6100%n/a010101
getDerivativeIndex()6100%n/a010101
inputs()5100%n/a010101
input(int)5100%n/a010101
validate()5100%n/a010101
arity()4100%n/a010101
getDevice()3100%n/a010101
lambda$allMetaArgs$0(Arg)2100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.api/Call.java.html b/docs/coverage/test/html/neureka.backend.api/Call.java.html index 73ee65c01..58a36a8a1 100644 --- a/docs/coverage/test/html/neureka.backend.api/Call.java.html +++ b/docs/coverage/test/html/neureka.backend.api/Call.java.html @@ -341,11 +341,11 @@ public Estimator badIfAll( TensorCondition condition ) { if ( _allMatch( condition ) ) _mod(-0.5f); return this; } - public Estimator goodIfAnyNonNull( TensorCondition condition ) { return goodIfAny( t -> t != null && condition.check(t) ); } + public Estimator goodIfAnyNonNull( TensorCondition condition ) { return goodIfAny( t -> t != null && condition.check(t) ); } public Estimator goodIfAny( TensorCondition condition ) { if ( _anyMatch( condition ) ) _mod(0.5f); return this; } - public Estimator badIfAnyNonNull( TensorCondition condition ) { return badIfAny( t -> t != null && condition.check(t) ); } + public Estimator badIfAnyNonNull( TensorCondition condition ) { return badIfAny( t -> t != null && condition.check(t) ); } public Estimator badIfAny( TensorCondition condition ) { if ( _anyMatch( condition ) ) _mod(-0.5f); return this; } @@ -366,4 +366,4 @@ public interface OperationCondition { boolean check( Operation type ); } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.api/DeviceAlgorithm.html b/docs/coverage/test/html/neureka.backend.api/DeviceAlgorithm.html index 2ed609432..661076cd2 100644 --- a/docs/coverage/test/html/neureka.backend.api/DeviceAlgorithm.html +++ b/docs/coverage/test/html/neureka.backend.api/DeviceAlgorithm.html @@ -1 +1 @@ -DeviceAlgorithm

DeviceAlgorithm

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total9 of 2564%2 of 20%241413
hasImplementationFor(Device)90%20%221111
withName(String)11100%n/a010201
getImplementationFor(Device)5100%n/a010101
\ No newline at end of file +DeviceAlgorithm

DeviceAlgorithm

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total9 of 2564%2 of 20%241413
hasImplementationFor(Device)90%20%221111
withName(String)11100%n/a010201
getImplementationFor(Device)5100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.api/DeviceAlgorithm.java.html b/docs/coverage/test/html/neureka.backend.api/DeviceAlgorithm.java.html index eaca2e174..cd0c87465 100644 --- a/docs/coverage/test/html/neureka.backend.api/DeviceAlgorithm.java.html +++ b/docs/coverage/test/html/neureka.backend.api/DeviceAlgorithm.java.html @@ -70,4 +70,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.api/ExecutionCall$Builder.html b/docs/coverage/test/html/neureka.backend.api/ExecutionCall$Builder.html index cbe2d6482..5d032cef7 100644 --- a/docs/coverage/test/html/neureka.backend.api/ExecutionCall$Builder.html +++ b/docs/coverage/test/html/neureka.backend.api/ExecutionCall$Builder.html @@ -1 +1 @@ -ExecutionCall.Builder

ExecutionCall.Builder

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total16 of 11385%1 of 250%1611405
running(Operation)162863%1150%121501
ExecutionCall.Builder(Tensor[])24100%n/a010201
on(Device)18100%n/a010201
andArgs(Arg[])14100%n/a010201
andArgs(List)13100%n/a010301
\ No newline at end of file +ExecutionCall.Builder

ExecutionCall.Builder

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total16 of 11385%1 of 250%1611405
running(Operation)162863%1150%121501
ExecutionCall.Builder(Tensor[])24100%n/a010201
on(Device)18100%n/a010201
andArgs(Arg[])14100%n/a010201
andArgs(List)13100%n/a010301
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.api/ExecutionCall.html b/docs/coverage/test/html/neureka.backend.api/ExecutionCall.html index 1d0ef1c7e..c007ba263 100644 --- a/docs/coverage/test/html/neureka.backend.api/ExecutionCall.html +++ b/docs/coverage/test/html/neureka.backend.api/ExecutionCall.html @@ -1 +1 @@ -ExecutionCall

ExecutionCall

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total37 of 39590%6 of 1457%625350018
lambda$new$0()17934%1150%122501
lambda$new$1()161344%2250%231501
checkArity()25696%2466%240801
toString()25296%1150%120301
withArgs(Arg[])40100%n/a010501
withInputAt(int, Tensor)30100%n/a010301
withAddedInputAt(int, Tensor)29100%n/a010301
withRemovedInputAt(int)29100%n/a010301
withInputs(Tensor[])20100%n/a010201
withOperation(Operation)20100%n/a010201
ExecutionCall(Device, Operation, Tensor[], List)19100%n/a010501
of(Tensor[])12100%n/a010201
lambda$withArgs$3(Arg[], Arg)6100%n/a010101
lambda$withArgs$2(Arg, Arg)6100%n/a010101
getAlgorithm()5100%n/a010101
autogradMode()5100%n/a010101
static {...}4100%n/a010101
getOperation()3100%n/a010101
\ No newline at end of file +ExecutionCall

ExecutionCall

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total37 of 39590%6 of 1457%625350018
lambda$new$0()17934%1150%122501
lambda$new$1()161344%2250%231501
checkArity()25696%2466%240801
toString()25296%1150%120301
withArgs(Arg[])40100%n/a010501
withInputAt(int, Tensor)30100%n/a010301
withAddedInputAt(int, Tensor)29100%n/a010301
withRemovedInputAt(int)29100%n/a010301
withInputs(Tensor[])20100%n/a010201
withOperation(Operation)20100%n/a010201
ExecutionCall(Device, Operation, Tensor[], List)19100%n/a010501
of(Tensor[])12100%n/a010201
lambda$withArgs$3(Arg[], Arg)6100%n/a010101
lambda$withArgs$2(Arg, Arg)6100%n/a010101
getAlgorithm()5100%n/a010101
autogradMode()5100%n/a010101
static {...}4100%n/a010101
getOperation()3100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.api/ExecutionCall.java.html b/docs/coverage/test/html/neureka.backend.api/ExecutionCall.java.html index af4e65bba..4bc437c1d 100644 --- a/docs/coverage/test/html/neureka.backend.api/ExecutionCall.java.html +++ b/docs/coverage/test/html/neureka.backend.api/ExecutionCall.java.html @@ -262,4 +262,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.api/Extensions.html b/docs/coverage/test/html/neureka.backend.api/Extensions.html index 434eb7803..b42acaa28 100644 --- a/docs/coverage/test/html/neureka.backend.api/Extensions.html +++ b/docs/coverage/test/html/neureka.backend.api/Extensions.html @@ -1 +1 @@ -Extensions

Extensions

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total2 of 771%0 of 0n/a131313
_removeOrReject(Component)20%n/a111111
Extensions()3100%n/a010101
_setOrReject(Component)2100%n/a010101
\ No newline at end of file +Extensions

Extensions

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total2 of 771%0 of 0n/a131313
_removeOrReject(Component)20%n/a111111
Extensions()3100%n/a010101
_setOrReject(Component)2100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.api/Extensions.java.html b/docs/coverage/test/html/neureka.backend.api/Extensions.java.html index 0c2bcb2e6..487506daf 100644 --- a/docs/coverage/test/html/neureka.backend.api/Extensions.java.html +++ b/docs/coverage/test/html/neureka.backend.api/Extensions.java.html @@ -18,4 +18,4 @@ return newComponent; } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.api/LazyRef.html b/docs/coverage/test/html/neureka.backend.api/LazyRef.html index ef0acadf3..8af89a328 100644 --- a/docs/coverage/test/html/neureka.backend.api/LazyRef.html +++ b/docs/coverage/test/html/neureka.backend.api/LazyRef.html @@ -1 +1 @@ -LazyRef

LazyRef

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total32 of 7959%1 of 475%1641304
toString()321633%1150%124601
get()17100%2100%020401
LazyRef(Supplier)9100%n/a010201
of(Supplier)5100%n/a010101
\ No newline at end of file +LazyRef

LazyRef

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total32 of 7959%1 of 475%1641304
toString()321633%1150%124601
get()17100%2100%020401
LazyRef(Supplier)9100%n/a010201
of(Supplier)5100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.api/LazyRef.java.html b/docs/coverage/test/html/neureka.backend.api/LazyRef.java.html index 252457fb2..86410638f 100644 --- a/docs/coverage/test/html/neureka.backend.api/LazyRef.java.html +++ b/docs/coverage/test/html/neureka.backend.api/LazyRef.java.html @@ -41,4 +41,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.api/Operation.html b/docs/coverage/test/html/neureka.backend.api/Operation.html index 45714dbec..d06f06013 100644 --- a/docs/coverage/test/html/neureka.backend.api/Operation.html +++ b/docs/coverage/test/html/neureka.backend.api/Operation.html @@ -1 +1 @@ -Operation

Operation

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total63 of 18165%3 of 2085%31662306
lambda$execute$1(ExecutionCall, Function)634240%3770%3661201
execute(Function, ExecutionCall)60100%10100%060901
setAlgorithm(Algorithm)6100%n/a010101
builder()4100%n/a010101
lambda$execute$2(Tensor)3100%n/a010101
lambda$execute$0(Function)3100%n/a010101
\ No newline at end of file +Operation

Operation

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total63 of 18365%3 of 2085%31662406
lambda$execute$1(ExecutionCall, Function)634441%3770%3661301
execute(Function, ExecutionCall)60100%10100%060901
setAlgorithm(Algorithm)6100%n/a010101
builder()4100%n/a010101
lambda$execute$2(Tensor)3100%n/a010101
lambda$execute$0(Function)3100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.api/Operation.java.html b/docs/coverage/test/html/neureka.backend.api/Operation.java.html index d4c6ddbe2..0437f3b1c 100644 --- a/docs/coverage/test/html/neureka.backend.api/Operation.java.html +++ b/docs/coverage/test/html/neureka.backend.api/Operation.java.html @@ -43,9 +43,11 @@ import neureka.autograd.GraphNode; import neureka.backend.api.fun.Execution; import neureka.backend.api.template.operations.OperationBuilder; +import neureka.devices.Device; import neureka.math.Function; import neureka.math.implementations.FunctionConstant; -import neureka.devices.Device; + +import java.util.List; /** * This interface is part of the backend API, and it embodies the top layer of the 3 tier backend architecture. @@ -64,7 +66,7 @@ */ public interface Operation { - static OperationBuilder builder() { return new OperationBuilder(); } + static OperationBuilder builder() { return new OperationBuilder(); } Algorithm[] getAllAlgorithms(); @@ -95,7 +97,7 @@ <T extends Algorithm> Operation setAlgorithm( Class<T> type, T instance ); default <T extends Algorithm> Operation setAlgorithm( T instance ) { - return setAlgorithm( (Class<T>) instance.getClass(), instance ); + return setAlgorithm( (Class<T>) instance.getClass(), instance ); } /** @@ -197,39 +199,41 @@ default Result execute( Function caller, ExecutionCall<?> call ) { - LazyRef<Result> ref = LazyRef.of(()->{ - int d = call.getDerivativeIndex(); - if ( d >= 0 && !caller.dependsOn(d) ) - throw new IllegalArgumentException("Cannot derive w.r.t. to input index " + d + " in function '" + caller + "', because there is no input with index "+d+"!"); - - if ( caller.getSubFunctions().stream().allMatch( f -> f instanceof FunctionConstant) ) { - if ( d < 0 ) return Result.of(Tensor.like((Tensor<Number>)call.input(0)).all(caller.call(new double[0])).mut().setIsIntermediate(true)); - else return Result.of(Tensor.like((Tensor<Number>)call.input(0)).all(0).mut().setIsIntermediate(true)); + LazyRef<Result> ref = LazyRef.of(()->{ + int d = call.getDerivativeIndex(); + if ( d >= 0 && !caller.dependsOn(d) ) + throw new IllegalArgumentException("Cannot derive w.r.t. 
to input index " + d + " in function '" + caller + "', because there is no input with index "+d+"!"); + + List<Function> subFunctions = caller.getSubFunctions(); + + if ( subFunctions.stream().allMatch( f -> f instanceof FunctionConstant) ) { + if ( d < 0 ) return Result.of(Tensor.like((Tensor<Number>)call.input(0)).all(caller.call(new double[0])).mut().setIsIntermediate(true)); + else return Result.of(Tensor.like((Tensor<Number>)call.input(0)).all(0).mut().setIsIntermediate(true)); } - Result result = call.getAlgorithm().execute( caller, call ); - if ( result != null ) return result; - throw new IllegalStateException( - "Missing return value of " + Execution.class.getSimpleName() + " in algorithm '" + - call.getAlgorithm().getClass().getSimpleName() + "' in operation '" + - call.getOperation().getClass().getName()+"'" + Result result = call.getAlgorithm().execute( caller, call ); + if ( result != null ) return result; + throw new IllegalStateException( + "Missing return value of " + Execution.class.getSimpleName() + " in algorithm '" + + call.getAlgorithm().getClass().getSimpleName() + "' in operation '" + + call.getOperation().getClass().getName()+"'" ); }); - for ( Tensor<?> t : call.inputs() ) - if ( !t.graphNode().isPresent() ) - new GraphNode<>( caller, null, () -> Result.of(t) ); + for ( Tensor<?> t : call.inputs() ) + if ( !t.graphNode().isPresent() ) + new GraphNode<>( caller, null, () -> Result.of(t) ); - if ( caller.isFlat() ) + if ( caller.isFlat() ) { - call.checkArity(); + call.checkArity(); /* The following code is reached in flat functions only: Autograd-Graph will be generated below for the new GraphNode: only flat functions can be executed directly */ - if ( call.getDerivativeIndex() < 0 && caller.isDoingAD() ) - new GraphNode<>( caller, (ExecutionCall<Device<?>>) call, ref::get ); + if ( call.getDerivativeIndex() < 0 && caller.isDoingAD() ) + new GraphNode<>( caller, (ExecutionCall<Device<?>>) call, ref::get ); } - return ref.get(); + return ref.get(); } /** @@ -261,4 +265,4 @@ double calculate( double[] inputs, int j, int d, Function[] src ); } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.api/Result.html b/docs/coverage/test/html/neureka.backend.api/Result.html index 6b717d001..4c25e6429 100644 --- a/docs/coverage/test/html/neureka.backend.api/Result.html +++ b/docs/coverage/test/html/neureka.backend.api/Result.html @@ -1 +1 @@ -Result

Result

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total5 of 6091%1 of 250%1811307
withAutoDiff(ADActionSupplier)51676%1150%121401
of(Tensor)16100%n/a010201
Result(Tensor, ADActionSupplier)9100%n/a010401
withADAction(ADAction)5100%n/a010101
get()3100%n/a010101
getAgentSupplier()3100%n/a010101
lambda$withADAction$0(ADAction, Function, ExecutionCall)3100%n/a010101
\ No newline at end of file +Result

Result

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total5 of 6091%1 of 250%1811307
withAutoDiff(ADActionSupplier)51676%1150%121401
of(Tensor)16100%n/a010201
Result(Tensor, ADActionSupplier)9100%n/a010401
withADAction(ADAction)5100%n/a010101
get()3100%n/a010101
getAgentSupplier()3100%n/a010101
lambda$withADAction$0(ADAction, Function, ExecutionCall)3100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.api/Result.java.html b/docs/coverage/test/html/neureka.backend.api/Result.java.html index 08f24e593..be39c5256 100644 --- a/docs/coverage/test/html/neureka.backend.api/Result.java.html +++ b/docs/coverage/test/html/neureka.backend.api/Result.java.html @@ -41,4 +41,4 @@ public ADActionSupplier getAgentSupplier() { return _agent; } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.api/index.html b/docs/coverage/test/html/neureka.backend.api/index.html index f271ea5b7..e7ac91193 100644 --- a/docs/coverage/test/html/neureka.backend.api/index.html +++ b/docs/coverage/test/html/neureka.backend.api/index.html @@ -1 +1 @@ -neureka.backend.api

neureka.backend.api

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total423 of 2,34381%62 of 19868%602454937614146020
Call.Validator11835675%295967%2368167752401
Operation6311865%31785%3166230601
BackendContext5534186%61672%83577222401
Call.Validator.Estimator418767%91359%92441641301
Call3818182%2880%22043801501
ExecutionCall3735890%6857%62535001801
LazyRef324759%1375%164130401
ExecutionCall.Builder169785%1150%161140501
DeviceAlgorithm91664%20%24141301
Result55591%1150%181130701
BackendContext.new LoadingContext() {...}41680%n/a13131301
BackendContext.Runner4593%1150%160150501
Extensions571%n/a13131301
Call.Builder82100%2100%060110501
AutoDiffMode47100%1787%17070301
BackendContext.new ImplementationReceiver() {...}24100%n/a02030201
BackendContext.Registered18100%n/a01060101
BackendExtension.DeviceOption15100%n/a03060301
Algorithm11100%n/a01020101
BackendExtension100%n/a01010101
\ No newline at end of file +neureka.backend.api

neureka.backend.api

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total433 of 2,36281%64 of 19867%642455137716146020
Call.Validator11835675%295967%2368167752401
Operation6312065%31785%3166240601
BackendContext6133584%61672%93587232401
Call.Validator.Estimator418767%111150%112441641301
Call3818182%2880%22043801501
ExecutionCall3735890%6857%62535001801
LazyRef324759%1375%164130401
ExecutionCall.Builder169785%1150%161140501
DeviceAlgorithm91664%20%24141301
BackendContext.new LoadingContext() {...}81260%n/a23232301
Result55591%1150%181130701
BackendContext.Runner4593%1150%160150501
Extensions571%n/a13131301
Call.Builder82100%2100%060110501
AutoDiffMode64100%1787%17070301
BackendContext.new ImplementationReceiver() {...}24100%n/a02030201
BackendContext.Registered18100%n/a01060101
BackendExtension.DeviceOption15100%n/a03060301
Algorithm11100%n/a01020101
BackendExtension100%n/a01010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.api/index.source.html b/docs/coverage/test/html/neureka.backend.api/index.source.html index 80f7d99f7..69b1b72f2 100644 --- a/docs/coverage/test/html/neureka.backend.api/index.source.html +++ b/docs/coverage/test/html/neureka.backend.api/index.source.html @@ -1 +1 @@ -neureka.backend.api

neureka.backend.api

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total423 of 2,34381%62 of 19868%602454937614146020
Call.java19770678%408267%341182414295704
Operation.java6311865%31785%3166230601
BackendContext.java6244487%71770%104789833505
ExecutionCall.java5345589%7956%73146402302
LazyRef.java324759%375%164130401
DeviceAlgorithm.java91664%20%24141301
Result.java5591%50%181130701
Extensions.java71%n/a13131301
AutoDiffMode.java47100%787%17070301
BackendExtension.java16100%n/a04070402
Algorithm.java11100%n/a01020101
\ No newline at end of file +neureka.backend.api

neureka.backend.api

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total433 of 2,36281%64 of 19867%642455137716146020
Call.java19770678%428065%361182414295704
BackendContext.java7243485%71770%1247109853505
Operation.java6312065%31785%3166240601
ExecutionCall.java5345589%7956%73146402302
LazyRef.java324759%375%164130401
DeviceAlgorithm.java91664%20%24141301
Result.java5591%50%181130701
Extensions.java71%n/a13131301
AutoDiffMode.java64100%787%17070301
BackendExtension.java16100%n/a04070402
Algorithm.java11100%n/a01020101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.cpu/CPUBackend.html b/docs/coverage/test/html/neureka.backend.cpu/CPUBackend.html index 995e33e52..5a83f1b19 100644 --- a/docs/coverage/test/html/neureka.backend.cpu/CPUBackend.html +++ b/docs/coverage/test/html/neureka.backend.cpu/CPUBackend.html @@ -1 +1 @@ -CPUBackend

CPUBackend

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total9 of 75698%1 of 683%2821118179
find(String)63083%1583%140401
dispose()0%n/a111111
_load(ReceiveForDevice)368100%n/a01011101
lambda$getLoader$0(BackendRegistry)6100%n/a010101
lambda$_load$73(LoadingContext)5100%n/a010101
lambda$_load$72(LoadingContext)5100%n/a010101
lambda$_load$71(LoadingContext)5100%n/a010101
lambda$_load$70(LoadingContext)5100%n/a010101
lambda$_load$69(LoadingContext)5100%n/a010101
lambda$_load$68(LoadingContext)5100%n/a010101
lambda$_load$67(LoadingContext)5100%n/a010101
lambda$_load$66(LoadingContext)5100%n/a010101
lambda$_load$65(LoadingContext)5100%n/a010101
lambda$_load$64(LoadingContext)5100%n/a010101
lambda$_load$63(LoadingContext)5100%n/a010101
lambda$_load$62(LoadingContext)5100%n/a010101
lambda$_load$61(LoadingContext)5100%n/a010101
lambda$_load$60(LoadingContext)5100%n/a010101
lambda$_load$59(LoadingContext)5100%n/a010101
lambda$_load$58(LoadingContext)5100%n/a010101
lambda$_load$57(LoadingContext)5100%n/a010101
lambda$_load$56(LoadingContext)5100%n/a010101
lambda$_load$55(LoadingContext)5100%n/a010101
lambda$_load$54(LoadingContext)5100%n/a010101
lambda$_load$53(LoadingContext)5100%n/a010101
lambda$_load$52(LoadingContext)5100%n/a010101
lambda$_load$51(LoadingContext)5100%n/a010101
lambda$_load$50(LoadingContext)5100%n/a010101
lambda$_load$49(LoadingContext)5100%n/a010101
lambda$_load$48(LoadingContext)5100%n/a010101
lambda$_load$47(LoadingContext)5100%n/a010101
lambda$_load$46(LoadingContext)5100%n/a010101
lambda$_load$45(LoadingContext)5100%n/a010101
lambda$_load$44(LoadingContext)5100%n/a010101
lambda$_load$43(LoadingContext)5100%n/a010101
lambda$_load$41(LoadingContext)5100%n/a010101
lambda$_load$40(LoadingContext)5100%n/a010101
lambda$_load$39(LoadingContext)5100%n/a010101
lambda$_load$38(LoadingContext)5100%n/a010101
lambda$_load$37(LoadingContext)5100%n/a010101
lambda$_load$36(LoadingContext)5100%n/a010101
lambda$_load$35(LoadingContext)5100%n/a010101
lambda$_load$34(LoadingContext)5100%n/a010101
lambda$_load$33(LoadingContext)5100%n/a010101
lambda$_load$32(LoadingContext)5100%n/a010101
lambda$_load$31(LoadingContext)5100%n/a010101
lambda$_load$30(LoadingContext)5100%n/a010101
lambda$_load$29(LoadingContext)5100%n/a010101
lambda$_load$28(LoadingContext)5100%n/a010101
lambda$_load$42(LoadingContext)4100%n/a010101
lambda$_load$27(LoadingContext)4100%n/a010101
lambda$_load$26(LoadingContext)4100%n/a010101
lambda$_load$25(LoadingContext)4100%n/a010101
lambda$_load$24(LoadingContext)4100%n/a010101
lambda$_load$23(LoadingContext)4100%n/a010101
lambda$_load$22(LoadingContext)4100%n/a010101
lambda$_load$21(LoadingContext)4100%n/a010101
lambda$_load$20(LoadingContext)4100%n/a010101
lambda$_load$19(LoadingContext)4100%n/a010101
lambda$_load$18(LoadingContext)4100%n/a010101
lambda$_load$17(LoadingContext)4100%n/a010101
lambda$_load$16(LoadingContext)4100%n/a010101
lambda$_load$15(LoadingContext)4100%n/a010101
lambda$_load$14(LoadingContext)4100%n/a010101
lambda$_load$13(LoadingContext)4100%n/a010101
lambda$_load$12(LoadingContext)4100%n/a010101
lambda$_load$11(LoadingContext)4100%n/a010101
lambda$_load$10(LoadingContext)4100%n/a010101
lambda$_load$9(LoadingContext)4100%n/a010101
lambda$_load$8(LoadingContext)4100%n/a010101
lambda$_load$7(LoadingContext)4100%n/a010101
lambda$_load$6(LoadingContext)4100%n/a010101
lambda$_load$5(LoadingContext)4100%n/a010101
lambda$_load$4(LoadingContext)4100%n/a010101
lambda$_load$3(LoadingContext)4100%n/a010101
lambda$_load$2(LoadingContext)4100%n/a010101
lambda$_load$1(LoadingContext)4100%n/a010101
CPUBackend()100%n/a010101
getLoader()100%n/a010101
\ No newline at end of file +CPUBackend

CPUBackend

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total9 of 75698%1 of 683%2821118179
find(String)63083%1583%140401
dispose()0%n/a111111
_load(ReceiveForDevice)368100%n/a01011101
lambda$getLoader$0(BackendRegistry)6100%n/a010101
lambda$_load$73(LoadingContext)5100%n/a010101
lambda$_load$72(LoadingContext)5100%n/a010101
lambda$_load$71(LoadingContext)5100%n/a010101
lambda$_load$70(LoadingContext)5100%n/a010101
lambda$_load$69(LoadingContext)5100%n/a010101
lambda$_load$68(LoadingContext)5100%n/a010101
lambda$_load$67(LoadingContext)5100%n/a010101
lambda$_load$66(LoadingContext)5100%n/a010101
lambda$_load$65(LoadingContext)5100%n/a010101
lambda$_load$64(LoadingContext)5100%n/a010101
lambda$_load$63(LoadingContext)5100%n/a010101
lambda$_load$62(LoadingContext)5100%n/a010101
lambda$_load$61(LoadingContext)5100%n/a010101
lambda$_load$60(LoadingContext)5100%n/a010101
lambda$_load$59(LoadingContext)5100%n/a010101
lambda$_load$58(LoadingContext)5100%n/a010101
lambda$_load$57(LoadingContext)5100%n/a010101
lambda$_load$56(LoadingContext)5100%n/a010101
lambda$_load$55(LoadingContext)5100%n/a010101
lambda$_load$54(LoadingContext)5100%n/a010101
lambda$_load$53(LoadingContext)5100%n/a010101
lambda$_load$52(LoadingContext)5100%n/a010101
lambda$_load$51(LoadingContext)5100%n/a010101
lambda$_load$50(LoadingContext)5100%n/a010101
lambda$_load$49(LoadingContext)5100%n/a010101
lambda$_load$48(LoadingContext)5100%n/a010101
lambda$_load$47(LoadingContext)5100%n/a010101
lambda$_load$46(LoadingContext)5100%n/a010101
lambda$_load$45(LoadingContext)5100%n/a010101
lambda$_load$44(LoadingContext)5100%n/a010101
lambda$_load$43(LoadingContext)5100%n/a010101
lambda$_load$41(LoadingContext)5100%n/a010101
lambda$_load$40(LoadingContext)5100%n/a010101
lambda$_load$39(LoadingContext)5100%n/a010101
lambda$_load$38(LoadingContext)5100%n/a010101
lambda$_load$37(LoadingContext)5100%n/a010101
lambda$_load$36(LoadingContext)5100%n/a010101
lambda$_load$35(LoadingContext)5100%n/a010101
lambda$_load$34(LoadingContext)5100%n/a010101
lambda$_load$33(LoadingContext)5100%n/a010101
lambda$_load$32(LoadingContext)5100%n/a010101
lambda$_load$31(LoadingContext)5100%n/a010101
lambda$_load$30(LoadingContext)5100%n/a010101
lambda$_load$29(LoadingContext)5100%n/a010101
lambda$_load$28(LoadingContext)5100%n/a010101
lambda$_load$42(LoadingContext)4100%n/a010101
lambda$_load$27(LoadingContext)4100%n/a010101
lambda$_load$26(LoadingContext)4100%n/a010101
lambda$_load$25(LoadingContext)4100%n/a010101
lambda$_load$24(LoadingContext)4100%n/a010101
lambda$_load$23(LoadingContext)4100%n/a010101
lambda$_load$22(LoadingContext)4100%n/a010101
lambda$_load$21(LoadingContext)4100%n/a010101
lambda$_load$20(LoadingContext)4100%n/a010101
lambda$_load$19(LoadingContext)4100%n/a010101
lambda$_load$18(LoadingContext)4100%n/a010101
lambda$_load$17(LoadingContext)4100%n/a010101
lambda$_load$16(LoadingContext)4100%n/a010101
lambda$_load$15(LoadingContext)4100%n/a010101
lambda$_load$14(LoadingContext)4100%n/a010101
lambda$_load$13(LoadingContext)4100%n/a010101
lambda$_load$12(LoadingContext)4100%n/a010101
lambda$_load$11(LoadingContext)4100%n/a010101
lambda$_load$10(LoadingContext)4100%n/a010101
lambda$_load$9(LoadingContext)4100%n/a010101
lambda$_load$8(LoadingContext)4100%n/a010101
lambda$_load$7(LoadingContext)4100%n/a010101
lambda$_load$6(LoadingContext)4100%n/a010101
lambda$_load$5(LoadingContext)4100%n/a010101
lambda$_load$4(LoadingContext)4100%n/a010101
lambda$_load$3(LoadingContext)4100%n/a010101
lambda$_load$2(LoadingContext)4100%n/a010101
lambda$_load$1(LoadingContext)4100%n/a010101
CPUBackend()100%n/a010101
getLoader()100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.cpu/CPUBackend.java.html b/docs/coverage/test/html/neureka.backend.cpu/CPUBackend.java.html index 9afe688e9..d3ab93a2a 100644 --- a/docs/coverage/test/html/neureka.backend.cpu/CPUBackend.java.html +++ b/docs/coverage/test/html/neureka.backend.cpu/CPUBackend.java.html @@ -166,4 +166,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.cpu/index.html b/docs/coverage/test/html/neureka.backend.cpu/index.html index efb6fe1b3..b3c9bb387 100644 --- a/docs/coverage/test/html/neureka.backend.cpu/index.html +++ b/docs/coverage/test/html/neureka.backend.cpu/index.html @@ -1 +1 @@ -neureka.backend.cpu

neureka.backend.cpu

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total9 of 75698%1 of 683%282111817901
CPUBackend974798%1583%282111817901
\ No newline at end of file +neureka.backend.cpu

neureka.backend.cpu

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total9 of 75698%1 of 683%282111817901
CPUBackend974798%1583%282111817901
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.cpu/index.source.html b/docs/coverage/test/html/neureka.backend.cpu/index.source.html index e4ba29a0a..53a0bb51c 100644 --- a/docs/coverage/test/html/neureka.backend.cpu/index.source.html +++ b/docs/coverage/test/html/neureka.backend.cpu/index.source.html @@ -1 +1 @@ -neureka.backend.cpu

neureka.backend.cpu

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total9 of 75698%1 of 683%282111817901
CPUBackend.java974798%1583%282111817901
\ No newline at end of file +neureka.backend.cpu

neureka.backend.cpu

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total9 of 75698%1 of 683%282111817901
CPUBackend.java974798%1583%282111817901
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.algorithms/BiElementwise.html b/docs/coverage/test/html/neureka.backend.main.algorithms/BiElementwise.html index 9b01aa0c8..c170ad0c9 100644 --- a/docs/coverage/test/html/neureka.backend.main.algorithms/BiElementwise.html +++ b/docs/coverage/test/html/neureka.backend.main.algorithms/BiElementwise.html @@ -1 +1 @@ -BiElementwise

BiElementwise

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total3 of 10597%1 of 475%1922707
lambda$new$5(ExecutionCall)35694%1375%1321301
BiElementwise()20100%n/a010601
lambda$new$1(ExecutionCall)10100%n/a010601
lambda$new$4(Function, ExecutionCall)6100%n/a010101
lambda$new$0(Tensor)5100%n/a010101
lambda$new$3(ExecutionCall)3100%n/a010101
lambda$new$2(ExecutionCall)2100%n/a010101
\ No newline at end of file +BiElementwise

BiElementwise

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total3 of 10897%1 of 475%1922807
_prepare(ExecutionCall)35895%1375%1321401
BiElementwise()21100%n/a010601
lambda$new$1(ExecutionCall)10100%n/a010601
lambda$new$4(Function, ExecutionCall)6100%n/a010101
lambda$new$0(Tensor)5100%n/a010101
lambda$new$3(ExecutionCall)3100%n/a010101
lambda$new$2(ExecutionCall)2100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.algorithms/BiElementwise.java.html b/docs/coverage/test/html/neureka.backend.main.algorithms/BiElementwise.java.html index 30c020d8d..21f60ebb5 100644 --- a/docs/coverage/test/html/neureka.backend.main.algorithms/BiElementwise.java.html +++ b/docs/coverage/test/html/neureka.backend.main.algorithms/BiElementwise.java.html @@ -2,6 +2,7 @@ import neureka.Tensor; import neureka.backend.api.AutoDiffMode; +import neureka.backend.api.ExecutionCall; import neureka.backend.api.Result; import neureka.backend.api.template.algorithms.AbstractDeviceAlgorithm; import neureka.backend.api.template.algorithms.AbstractFunDeviceAlgorithm; @@ -12,45 +13,52 @@ public final class BiElementwise extends AbstractFunDeviceAlgorithm<BiElementwise> { public BiElementwise() { - super("elementwise"); - setIsSuitableFor( - call -> call - .validate() - .allNotNullHaveSame(NDimensional::size) - .allNotNullHaveSame(NDimensional::shape) - .allNotNull( t -> t.getDataType().typeClassImplements( NumericType.class ) ) - .basicSuitability() + super("elementwise"); + setIsSuitableFor( + call -> call + .validate() + .allNotNullHaveSame(NDimensional::size) + .allNotNullHaveSame(NDimensional::shape) + .allNotNull( t -> t.getDataType().typeClassImplements( NumericType.class ) ) + .basicSuitability() ); - setAutogradModeFor( call -> AutoDiffMode.FORWARD_AND_BACKWARD ); - setExecution( + setAutogradModeFor( call -> AutoDiffMode.FORWARD_AND_BACKWARD ); + setExecution( (outerCaller, outerCall) -> - Result.of(AbstractDeviceAlgorithm.executeFor( + Result.of(AbstractDeviceAlgorithm.executeFor( outerCaller, outerCall, - innerCall -> AbstractDeviceAlgorithm.executeDeviceAlgorithm( innerCall ) + innerCall -> AbstractDeviceAlgorithm.executeDeviceAlgorithm( innerCall ) )) + //(outerCaller, outerCall) -> { + // ExecutionCall<? 
extends Device<?>> finalOuterCall = _prepare(outerCall); + // return Result.of(executeOnCommonDevice(finalOuterCall, ()->{ + // return AbstractDeviceAlgorithm.executeDeviceAlgorithm(finalOuterCall); + // })); + //} ); - setCallPreparation( - call -> { - if ( call.arity() < 3 ) call = call.withAddedInputAt(0, null); - Device<Object> device = (Device<Object>) call.getDevice(); - if ( call.input( 0 ) == null ) // Creating a new tensor: - { - int[] outShape = call.input( 1 ).getNDConf().shape(); + setCallPreparation(this::_prepare); + } - Class<Object> type = (Class<Object>) call.input( 1 ).getItemType(); - Tensor<Object> output = Tensor.of( type ).withShape( outShape ).all( 0.0 ).mut().setIsIntermediate( true ); - output.mut().setIsVirtual( false ); - try { - device.store( output ); - } catch( Exception e ) { - e.printStackTrace(); - } - call = call.withInputAt( 0, output ); - } - return call; - } - ); - } + private ExecutionCall<?> _prepare( final ExecutionCall<?> inputCall ) { + ExecutionCall<?> call = inputCall; + if ( call.arity() < 3 ) call = call.withAddedInputAt(0, null); + Device<Object> device = (Device<Object>) call.getDevice(); + if ( call.input( 0 ) == null ) // Creating a new tensor: + { + int[] outShape = call.input( 1 ).getNDConf().shape(); + + Class<Object> type = (Class<Object>) call.input( 1 ).getItemType(); + Tensor<Object> output = Tensor.of( type ).withShape( outShape ).all( 0.0 ).mut().setIsIntermediate( true ); + output.mut().setIsVirtual( false ); + try { + device.store( output ); + } catch( Exception e ) { + e.printStackTrace(); + } + call = call.withInputAt( 0, output ); + } + return call; + } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.algorithms/BiScalarBroadcast.html b/docs/coverage/test/html/neureka.backend.main.algorithms/BiScalarBroadcast.html index a637c30f8..47a2f4bb3 100644 --- a/docs/coverage/test/html/neureka.backend.main.algorithms/BiScalarBroadcast.html +++ b/docs/coverage/test/html/neureka.backend.main.algorithms/BiScalarBroadcast.html @@ -1 +1 @@ -BiScalarBroadcast

BiScalarBroadcast

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total5 of 15796%6 of 2676%62002507
lambda$new$4(ExecutionCall)46093%1583%1401101
lambda$new$2(Tensor[])15998%51575%5110401
BiScalarBroadcast()16100%n/a010501
lambda$new$3(ExecutionCall)9100%n/a010401
lambda$new$1(Tensor)5100%n/a010101
lambda$new$0(ExecutionCall)2100%n/a010101
static {...}1100%n/a010101
\ No newline at end of file +BiScalarBroadcast

BiScalarBroadcast

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total5 of 15796%6 of 2676%62002507
lambda$new$4(ExecutionCall)46093%1583%1401101
lambda$new$2(Tensor[])15998%51575%5110401
BiScalarBroadcast()16100%n/a010501
lambda$new$3(ExecutionCall)9100%n/a010401
lambda$new$1(Tensor)5100%n/a010101
lambda$new$0(ExecutionCall)2100%n/a010101
static {...}1100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.algorithms/BiScalarBroadcast.java.html b/docs/coverage/test/html/neureka.backend.main.algorithms/BiScalarBroadcast.java.html index a0aacb9d5..0a9372e61 100644 --- a/docs/coverage/test/html/neureka.backend.main.algorithms/BiScalarBroadcast.java.html +++ b/docs/coverage/test/html/neureka.backend.main.algorithms/BiScalarBroadcast.java.html @@ -47,4 +47,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.algorithms/Broadcast.html b/docs/coverage/test/html/neureka.backend.main.algorithms/Broadcast.html index c1af2b38f..f2cb6e789 100644 --- a/docs/coverage/test/html/neureka.backend.main.algorithms/Broadcast.html +++ b/docs/coverage/test/html/neureka.backend.main.algorithms/Broadcast.html @@ -1 +1 @@ -Broadcast

Broadcast

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total27 of 34692%6 of 4887%83345329
lambda$new$3(ExecutionCall)100%n/a114411
lambda$new$6(ExecutionCall)918895%51777%51202401
lambda$new$2(Tensor, Tensor)60%n/a111111
lambda$new$1(ExecutionCall)29797%12596%11401601
Broadcast()20100%n/a010601
lambda$new$5(Function, ExecutionCall)5100%n/a010101
lambda$new$0(Tensor)5100%n/a010101
lambda$new$4(ExecutionCall)3100%n/a010101
static {...}100%n/a010101
\ No newline at end of file +Broadcast

Broadcast

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total27 of 34692%6 of 4887%83345329
lambda$new$3(ExecutionCall)100%n/a114411
lambda$new$6(ExecutionCall)918895%51777%51202401
lambda$new$2(Tensor, Tensor)60%n/a111111
lambda$new$1(ExecutionCall)29797%12596%11401601
Broadcast()20100%n/a010601
lambda$new$5(Function, ExecutionCall)5100%n/a010101
lambda$new$0(Tensor)5100%n/a010101
lambda$new$4(ExecutionCall)3100%n/a010101
static {...}100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.algorithms/Broadcast.java.html b/docs/coverage/test/html/neureka.backend.main.algorithms/Broadcast.java.html index 23892eb8f..f4a9c3257 100644 --- a/docs/coverage/test/html/neureka.backend.main.algorithms/Broadcast.java.html +++ b/docs/coverage/test/html/neureka.backend.main.algorithms/Broadcast.java.html @@ -99,4 +99,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.algorithms/DotProductAlgorithm.html b/docs/coverage/test/html/neureka.backend.main.algorithms/DotProductAlgorithm.html index a62c3f193..aab1fa532 100644 --- a/docs/coverage/test/html/neureka.backend.main.algorithms/DotProductAlgorithm.html +++ b/docs/coverage/test/html/neureka.backend.main.algorithms/DotProductAlgorithm.html @@ -1 +1 @@ -DotProductAlgorithm

DotProductAlgorithm

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total51 of 45588%16 of 4866%1644478120
_withDimTrim(ExecutionCall)175977%5758%470801
_autoClone(ExecutionCall)142058%2466%242701
_toInline(Tensor, NDConfiguration.Layout)63986%1583%141701
lambda$new$7(Function, ExecutionCall)53988%1150%121701
_prepare(ExecutionCall)42787%3350%340601
lambda$new$9(ExecutionCall)20%n/a111111
_checkAndPrepareLayout(ExecutionCall, Tensor)17698%1583%1401901
_isSimpleSymmetric(Tensor)11292%2250%230101
lambda$new$2(Tensor)1990%1150%120101
_withNewOutput(ExecutionCall)37100%n/a010501
DotProductAlgorithm()20100%n/a010601
lambda$new$6(Function, Tensor, ADTarget)14100%n/a010101
lambda$new$4(ExecutionCall)13100%n/a010601
lambda$new$8(Function, ExecutionCall)9100%n/a010401
lambda$new$1(Integer)8100%2100%020101
isSymmetric(NDConfiguration.Layout)7100%2100%020101
lambda$new$0(Tensor)5100%n/a010101
lambda$new$3(Tensor)4100%n/a010101
static {...}4100%n/a010101
lambda$new$5(ExecutionCall)2100%n/a010101
\ No newline at end of file +DotProductAlgorithm

DotProductAlgorithm

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total51 of 45588%16 of 4866%1644478120
_withDimTrim(ExecutionCall)175977%5758%470801
_autoClone(ExecutionCall)142058%2466%242701
_toInline(Tensor, NDConfiguration.Layout)63986%1583%141701
lambda$new$7(Function, ExecutionCall)53988%1150%121701
_prepare(ExecutionCall)42787%3350%340601
lambda$new$9(ExecutionCall)20%n/a111111
_checkAndPrepareLayout(ExecutionCall, Tensor)17698%1583%1401901
_isSimpleSymmetric(Tensor)11292%2250%230101
lambda$new$2(Tensor)1990%1150%120101
_withNewOutput(ExecutionCall)37100%n/a010501
DotProductAlgorithm()20100%n/a010601
lambda$new$6(Function, Tensor, ADTarget)14100%n/a010101
lambda$new$4(ExecutionCall)13100%n/a010601
lambda$new$8(Function, ExecutionCall)9100%n/a010401
lambda$new$1(Integer)8100%2100%020101
isSymmetric(NDConfiguration.Layout)7100%2100%020101
lambda$new$0(Tensor)5100%n/a010101
lambda$new$3(Tensor)4100%n/a010101
static {...}4100%n/a010101
lambda$new$5(ExecutionCall)2100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.algorithms/DotProductAlgorithm.java.html b/docs/coverage/test/html/neureka.backend.main.algorithms/DotProductAlgorithm.java.html index 6ef6bbabd..9ba30dcb2 100644 --- a/docs/coverage/test/html/neureka.backend.main.algorithms/DotProductAlgorithm.java.html +++ b/docs/coverage/test/html/neureka.backend.main.algorithms/DotProductAlgorithm.java.html @@ -174,4 +174,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.algorithms/ElementwiseAlgorithm.html b/docs/coverage/test/html/neureka.backend.main.algorithms/ElementwiseAlgorithm.html index 8c4c5893e..949a9c6f7 100644 --- a/docs/coverage/test/html/neureka.backend.main.algorithms/ElementwiseAlgorithm.html +++ b/docs/coverage/test/html/neureka.backend.main.algorithms/ElementwiseAlgorithm.html @@ -1 +1 @@ -ElementwiseAlgorithm

ElementwiseAlgorithm

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total6 of 11094%2 of 450%31002518
lambda$new$2(Tensor, Tensor)60%n/a111111
lambda$new$6(ExecutionCall)55100%2250%2301001
ElementwiseAlgorithm()20100%n/a010601
lambda$new$3(ExecutionCall)10100%n/a010401
lambda$new$1(ExecutionCall)6100%n/a010301
lambda$new$5(Function, ExecutionCall)5100%n/a010101
lambda$new$0(Tensor)5100%n/a010101
lambda$new$4(ExecutionCall)3100%n/a010101
\ No newline at end of file +ElementwiseAlgorithm

ElementwiseAlgorithm

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total6 of 11094%2 of 450%31002518
lambda$new$2(Tensor, Tensor)60%n/a111111
lambda$new$6(ExecutionCall)55100%2250%2301001
ElementwiseAlgorithm()20100%n/a010601
lambda$new$3(ExecutionCall)10100%n/a010401
lambda$new$1(ExecutionCall)6100%n/a010301
lambda$new$5(Function, ExecutionCall)5100%n/a010101
lambda$new$0(Tensor)5100%n/a010101
lambda$new$4(ExecutionCall)3100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.algorithms/ElementwiseAlgorithm.java.html b/docs/coverage/test/html/neureka.backend.main.algorithms/ElementwiseAlgorithm.java.html index 7f1868870..9378cb177 100644 --- a/docs/coverage/test/html/neureka.backend.main.algorithms/ElementwiseAlgorithm.java.html +++ b/docs/coverage/test/html/neureka.backend.main.algorithms/ElementwiseAlgorithm.java.html @@ -55,4 +55,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.algorithms/MatMulAlgorithm.html b/docs/coverage/test/html/neureka.backend.main.algorithms/MatMulAlgorithm.html index 98c55fbf4..6bb12ea2a 100644 --- a/docs/coverage/test/html/neureka.backend.main.algorithms/MatMulAlgorithm.html +++ b/docs/coverage/test/html/neureka.backend.main.algorithms/MatMulAlgorithm.html @@ -1 +1 @@ -MatMulAlgorithm

MatMulAlgorithm

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total21 of 44395%9 of 4379%940273018
_prepare(ExecutionCall)91967%3350%340501
lambda$new$7(Function, ExecutionCall)54088%1150%121701
lambda$new$6(int, Function, Tensor, ADTarget)53487%1266%131701
_checkAndPrepareLayout(ExecutionCall, Tensor)213898%1990%1602201
_withNewOutput(ExecutionCall)51100%n/a010601
_autoClone(ExecutionCall)39100%1787%150701
MatMulAlgorithm()20100%n/a010601
_isSimpleColumnMajorMatrix(Tensor)13100%1375%130101
_isSimpleRowMajorMatrix(Tensor)13100%1375%130101
lambda$new$3(ExecutionCall)11100%n/a010601
isRMOrCM(NDConfiguration.Layout)10100%4100%030101
lambda$new$8(Function, ExecutionCall)8100%n/a010201
lambda$new$2(Tensor)8100%2100%020101
lambda$new$0(Tensor)5100%n/a010101
lambda$new$1(Tensor)4100%n/a010101
static {...}4100%n/a010101
lambda$new$5(ExecutionCall)3100%n/a010101
lambda$new$4(ExecutionCall)2100%n/a010101
\ No newline at end of file +MatMulAlgorithm

MatMulAlgorithm

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total16 of 43996%9 of 4379%939272017
lambda$new$6(Function, ExecutionCall)54088%1150%121701
lambda$new$5(int, Function, Tensor, ADTarget)53487%1266%131701
_prepare(ExecutionCall)42485%3350%340501
_checkAndPrepareLayout(ExecutionCall, Tensor)213898%1990%1602201
_withNewOutput(ExecutionCall)51100%n/a010601
_autoClone(ExecutionCall)39100%1787%150701
MatMulAlgorithm()20100%n/a010601
_isSimpleColumnMajorMatrix(Tensor)13100%1375%130101
_isSimpleRowMajorMatrix(Tensor)13100%1375%130101
lambda$new$3(ExecutionCall)11100%n/a010601
isRMOrCM(NDConfiguration.Layout)10100%4100%030101
lambda$new$2(Tensor)8100%2100%020101
lambda$new$7(Function, ExecutionCall)7100%n/a010201
lambda$new$0(Tensor)5100%n/a010101
lambda$new$1(Tensor)4100%n/a010101
static {...}4100%n/a010101
lambda$new$4(ExecutionCall)2100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.algorithms/MatMulAlgorithm.java.html b/docs/coverage/test/html/neureka.backend.main.algorithms/MatMulAlgorithm.java.html index 1711ec0bb..970245b73 100644 --- a/docs/coverage/test/html/neureka.backend.main.algorithms/MatMulAlgorithm.java.html +++ b/docs/coverage/test/html/neureka.backend.main.algorithms/MatMulAlgorithm.java.html @@ -33,99 +33,96 @@ setAutogradModeFor( call -> AutoDiffMode.BACKWARD_ONLY ); setExecution( (outerCaller, outerCall) -> - Result.of(AbstractDeviceAlgorithm.executeFor( - outerCaller, outerCall, - innerCall -> AbstractDeviceAlgorithm.executeDeviceAlgorithm( innerCall ) - )) - .withAutoDiff( (Function f, ExecutionCall<? extends Device<?>> adCall ) -> + Result.of(AbstractDeviceAlgorithm.executeDeviceAlgorithm(_prepare(outerCall))) + .withAutoDiff( (Function f, ExecutionCall<? extends Device<?>> adCall ) -> { - if ( adCall.autogradMode().allowsForward() ) - throw new IllegalArgumentException("Matrix multiplication does not support forward-AD!"); - Function matMul = Neureka.get().backend().getFunction().matMul(); - int d = ( 1 + adCall.getValOf( Arg.DerivIdx.class ) ) % 2; - Tensor<?> derivative = Util.transpose(adCall.input( d )).deepCopy().mut().setIsIntermediate( true ); // We need to clone it to make it have a simple nd configuration... - derivative.to(adCall.getDevice()); - return ADAction.of(target -> { + if ( adCall.autogradMode().allowsForward() ) + throw new IllegalArgumentException("Matrix multiplication does not support forward-AD!"); + Function matMul = Neureka.get().backend().getFunction().matMul(); + int d = ( 1 + adCall.getValOf( Arg.DerivIdx.class ) ) % 2; + Tensor<?> derivative = Util.transpose(adCall.input( d )).deepCopy().mut().setIsIntermediate( true ); // We need to clone it to make it have a simple nd configuration... 
+ derivative.to(adCall.getDevice()); + return ADAction.of(target -> { Tensor<?> result; - switch ( d ) { + switch ( d ) { case 0: - result = matMul.execute(derivative, target.error()); - break; + result = matMul.execute(derivative, target.error()); + break; case 1: - result = matMul.execute(target.error(), derivative); - break; + result = matMul.execute(target.error(), derivative); + break; default: - throw new IllegalStateException("This should never happen!"); + throw new IllegalStateException("This should never happen!"); } - return result; + return result; }); }) ); - setCallPreparation(MatMulAlgorithm::_prepare); - } + setCallPreparation(MatMulAlgorithm::_prepare); + } private static ExecutionCall<Device<Object>> _prepare( ExecutionCall<?> call ) { - assert call.arity() <= 3; - if ( call.arity() == 2 ) call = call.withAddedInputAt(0, null); - if ( call.input( 0 ) == null ) // Creating a new tensor: - call = _withNewOutput( call ); + assert call.arity() <= 3; + if ( call.arity() == 2 ) call = call.withAddedInputAt(0, null); + if ( call.input( 0 ) == null ) // Creating a new tensor: + call = _withNewOutput( call ); - return (ExecutionCall<Device<Object>>) _autoClone( call ); + return (ExecutionCall<Device<Object>>) _autoClone( call ); } private static ExecutionCall<?> _withNewOutput( ExecutionCall<?> call ) { - Class<Number> type = (Class<Number>) call.input( 1 ).getDataType().getItemTypeClass(); + Class<Number> type = (Class<Number>) call.input( 1 ).getDataType().getItemTypeClass(); - int[] shp = new int[]{ call.input( 1 ).shape(0), call.input( 2 ).shape(1) }; - Tensor<Number> output = Tensor.of( type ).withShape( shp ).all( 0 ).mut().setIsIntermediate( true ); + int[] shp = new int[]{ call.input( 1 ).shape(0), call.input( 2 ).shape(1) }; + Tensor<Number> output = Tensor.of( type ).withShape( shp ).all( 0 ).mut().setIsIntermediate( true ); - call = _checkAndPrepareLayout( call, output ); + call = _checkAndPrepareLayout( call, output ); - call.getDeviceFor(Number.class).store( output ); - return call.withInputAt( 0, output ); + call.getDeviceFor(Number.class).store( output ); + return call.withInputAt( 0, output ); } private static ExecutionCall<?> _checkAndPrepareLayout( ExecutionCall<?> call, Tensor<?> c ) { - Tensor<?> a = call.input( 1 ); - Tensor<?> b = call.input( 2 ); + Tensor<?> a = call.input( 1 ); + Tensor<?> b = call.input( 2 ); // We need to make sure that the matrices have a common/compatible layout, // ..before we can before the actual a @ b = c matrix multiplication! 
- NDConfiguration.Layout layoutA = a.getNDConf().getLayout(); - NDConfiguration.Layout layoutB = b.getNDConf().getLayout(); - NDConfiguration.Layout layoutC = c.getNDConf().getLayout(); - - boolean aIsCompatible = isRMOrCM( layoutA ); - boolean bIsCompatible = isRMOrCM( layoutB ); - - Function relayout = Neureka.get().backend().getFunction().relayout(); - - if ( aIsCompatible ) { - if ( layoutB != NDConfiguration.Layout.SYMMETRIC ) - b = relayout.with(Arg.Layout.of(layoutA)).call(b); // We choose a valid layout based on a - layoutC = layoutA; - } else if ( bIsCompatible ) { - if ( layoutA != NDConfiguration.Layout.SYMMETRIC ) - a = relayout.with(Arg.Layout.of(layoutB)).call(a); // We choose a valid layout based on b - layoutC = layoutB; + NDConfiguration.Layout layoutA = a.getNDConf().getLayout(); + NDConfiguration.Layout layoutB = b.getNDConf().getLayout(); + NDConfiguration.Layout layoutC = c.getNDConf().getLayout(); + + boolean aIsCompatible = isRMOrCM( layoutA ); + boolean bIsCompatible = isRMOrCM( layoutB ); + + Function relayout = Neureka.get().backend().getFunction().relayout(); + + if ( aIsCompatible ) { + if ( layoutB != NDConfiguration.Layout.SYMMETRIC ) + b = relayout.with(Arg.Layout.of(layoutA)).call(b); // We choose a valid layout based on a + layoutC = layoutA; + } else if ( bIsCompatible ) { + if ( layoutA != NDConfiguration.Layout.SYMMETRIC ) + a = relayout.with(Arg.Layout.of(layoutB)).call(a); // We choose a valid layout based on b + layoutC = layoutB; } else { // Ok so the inputs are unspecific/symmetric/ (not RM or CM) // So we just need to decide on any valid layout really: - layoutC = isRMOrCM(layoutC) ? layoutC : NDConfiguration.Layout.ROW_MAJOR; - a = relayout.with(Arg.Layout.of(layoutC)).call(a); - b = relayout.with(Arg.Layout.of(layoutC)).call(b); + layoutC = isRMOrCM(layoutC) ? layoutC : NDConfiguration.Layout.ROW_MAJOR; + a = relayout.with(Arg.Layout.of(layoutC)).call(a); + b = relayout.with(Arg.Layout.of(layoutC)).call(b); } - c.mut().toLayout( layoutC ); - c.mut().setIsVirtual( false ); // This statement is after the layout conversion for performance reasons (virtual tensors barely need copying). + c.mut().toLayout( layoutC ); + c.mut().setIsVirtual( false ); // This statement is after the layout conversion for performance reasons (virtual tensors barely need copying). - return call.withInputAt( 1, a ).withInputAt( 2, b ); + return call.withInputAt( 1, a ).withInputAt( 2, b ); } private static boolean isRMOrCM(NDConfiguration.Layout layout ) { - return layout == NDConfiguration.Layout.ROW_MAJOR || + return layout == NDConfiguration.Layout.ROW_MAJOR || layout == NDConfiguration.Layout.COLUMN_MAJOR; } @@ -139,14 +136,14 @@ * @param call The execution call whose tensors ought to be cloned based on the complexity of their access patterns. 
*/ private static ExecutionCall<?> _autoClone( ExecutionCall<?> call ) { - for ( int i = 0; i < call.arity(); i++ ) - if ( - (!_isSimpleRowMajorMatrix( call.input( i ) ) && !_isSimpleColumnMajorMatrix( call.input( i ) )) + for ( int i = 0; i < call.arity(); i++ ) + if ( + (!_isSimpleRowMajorMatrix( call.input( i ) ) && !_isSimpleColumnMajorMatrix( call.input( i ) )) || - call.input( i ).isPartialSlice() + call.input( i ).isPartialSlice() ) { - _LOG.debug("Auto cloning a tensor which does not have a simple ND configuration..."); - call = call.withInputAt( i, call.input( i ).deepCopy().mut().setIsIntermediate( true ) ); + _LOG.debug("Auto cloning a tensor which does not have a simple ND configuration..."); + call = call.withInputAt( i, call.input( i ).deepCopy().mut().setIsIntermediate( true ) ); /* The user should do cloning explicitly because using slices will cause the backend to perform auto cloning every time the @@ -154,16 +151,16 @@ */ } - return call; + return call; } private static boolean _isSimpleColumnMajorMatrix( Tensor<?> t ) { - return t.rank() == 2 && t.getNDConf().getLayout() == NDConfiguration.Layout.COLUMN_MAJOR; + return t.rank() == 2 && t.getNDConf().getLayout() == NDConfiguration.Layout.COLUMN_MAJOR; } private static boolean _isSimpleRowMajorMatrix( Tensor<?> t ) { - return t.rank() == 2 && t.getNDConf().getLayout() == NDConfiguration.Layout.ROW_MAJOR; + return t.rank() == 2 && t.getNDConf().getLayout() == NDConfiguration.Layout.ROW_MAJOR; } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.algorithms/NDConvolution.html b/docs/coverage/test/html/neureka.backend.main.algorithms/NDConvolution.html index 7cc695339..7ff8d06e1 100644 --- a/docs/coverage/test/html/neureka.backend.main.algorithms/NDConvolution.html +++ b/docs/coverage/test/html/neureka.backend.main.algorithms/NDConvolution.html @@ -1 +1 @@ -NDConvolution

NDConvolution

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 19100%0 of 0n/a030603
NDConvolution()8100%n/a010301
lambda$new$1(ExecutionCall)6100%n/a010301
lambda$new$0(Tensor)5100%n/a010101
\ No newline at end of file +NDConvolution

NDConvolution

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 19100%0 of 0n/a030603
NDConvolution()8100%n/a010301
lambda$new$1(ExecutionCall)6100%n/a010301
lambda$new$0(Tensor)5100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.algorithms/NDConvolution.java.html b/docs/coverage/test/html/neureka.backend.main.algorithms/NDConvolution.java.html index a1f783882..463f891d7 100644 --- a/docs/coverage/test/html/neureka.backend.main.algorithms/NDConvolution.java.html +++ b/docs/coverage/test/html/neureka.backend.main.algorithms/NDConvolution.java.html @@ -16,4 +16,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.algorithms/ScalarAlgorithm.html b/docs/coverage/test/html/neureka.backend.main.algorithms/ScalarAlgorithm.html index 2b784577e..82f7a011e 100644 --- a/docs/coverage/test/html/neureka.backend.main.algorithms/ScalarAlgorithm.html +++ b/docs/coverage/test/html/neureka.backend.main.algorithms/ScalarAlgorithm.html @@ -1 +1 @@ -ScalarAlgorithm

ScalarAlgorithm

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total80 of 13942%10 of 1637%916132738
lambda$new$4(ExecutionCall)410%20%227711
lambda$new$2(Tensor[])222452%8642%581401
lambda$new$0(ExecutionCall)100%n/a113311
lambda$new$5(Function, ExecutionCall)70%n/a112211
ScalarAlgorithm()20100%n/a010601
lambda$new$3(ExecutionCall)9100%n/a010401
lambda$new$1(Tensor)5100%n/a010101
static {...}1100%n/a010101
\ No newline at end of file +ScalarAlgorithm

ScalarAlgorithm

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total80 of 13942%10 of 1637%916132738
lambda$new$4(ExecutionCall)410%20%227711
lambda$new$2(Tensor[])222452%8642%581401
lambda$new$0(ExecutionCall)100%n/a113311
lambda$new$5(Function, ExecutionCall)70%n/a112211
ScalarAlgorithm()20100%n/a010601
lambda$new$3(ExecutionCall)9100%n/a010401
lambda$new$1(Tensor)5100%n/a010101
static {...}1100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.algorithms/ScalarAlgorithm.java.html b/docs/coverage/test/html/neureka.backend.main.algorithms/ScalarAlgorithm.java.html index ca1bf4eac..1df49e7dc 100644 --- a/docs/coverage/test/html/neureka.backend.main.algorithms/ScalarAlgorithm.java.html +++ b/docs/coverage/test/html/neureka.backend.main.algorithms/ScalarAlgorithm.java.html @@ -53,4 +53,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.algorithms/ScalarBroadcast.html b/docs/coverage/test/html/neureka.backend.main.algorithms/ScalarBroadcast.html index 7f55b46f1..a46054a10 100644 --- a/docs/coverage/test/html/neureka.backend.main.algorithms/ScalarBroadcast.html +++ b/docs/coverage/test/html/neureka.backend.main.algorithms/ScalarBroadcast.html @@ -1 +1 @@ -ScalarBroadcast

ScalarBroadcast

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total76 of 22065%9 of 1850%1018164539
lambda$new$4(ExecutionCall)500%20%22111111
lambda$new$0(ExecutionCall)100%n/a113311
lambda$new$2(Tensor[])73984%6857%580401
lambda$new$5(Function, ExecutionCall)70%n/a112211
lambda$new$6(ScalarFun, ExecutionCall)25696%1150%1201201
ScalarBroadcast(ScalarFun)34100%n/a010801
lambda$new$3(ExecutionCall)9100%n/a010401
lambda$new$1(Tensor)5100%n/a010101
static {...}1100%n/a010101
\ No newline at end of file +ScalarBroadcast

ScalarBroadcast

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total132 of 22040%10 of 1844%1118284549
lambda$new$6(ScalarFun, ExecutionCall)580%20%22121211
lambda$new$4(ExecutionCall)500%20%22111111
lambda$new$0(ExecutionCall)100%n/a113311
lambda$new$2(Tensor[])73984%6857%580401
lambda$new$5(Function, ExecutionCall)70%n/a112211
ScalarBroadcast(ScalarFun)34100%n/a010801
lambda$new$3(ExecutionCall)9100%n/a010401
lambda$new$1(Tensor)5100%n/a010101
static {...}1100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.algorithms/ScalarBroadcast.java.html b/docs/coverage/test/html/neureka.backend.main.algorithms/ScalarBroadcast.java.html index fdfe9a4bb..ef6f68237 100644 --- a/docs/coverage/test/html/neureka.backend.main.algorithms/ScalarBroadcast.java.html +++ b/docs/coverage/test/html/neureka.backend.main.algorithms/ScalarBroadcast.java.html @@ -65,22 +65,22 @@ setImplementationFor( OpenCLDevice.class, call -> { - int d = call.getValOf(Arg.DerivIdx.class); - CPUFun f = d < 0 ? fun.getActivation() : fun.getDerivative(); - double value = f.invoke( call.input( Number.class, 1 ).at(0).get().doubleValue() ); - Tensor<Number> t = call.input( Number.class, 0 ); - int gwz = t.size(); - call.getDevice() - .getKernel("scalar_broadcast") - .passAllOf(t) - .pass((float) value) - .pass(t.rank()) - .call( gwz ); + int d = call.getValOf(Arg.DerivIdx.class); + CPUFun f = d < 0 ? fun.getActivation() : fun.getDerivative(); + double value = f.invoke( call.input( Number.class, 1 ).at(0).get().doubleValue() ); + Tensor<Number> t = call.input( Number.class, 0 ); + int gwz = t.size(); + call.getDevice() + .getKernel("scalar_broadcast") + .passAllOf(t) + .pass((float) value) + .pass(t.rank()) + .call( gwz ); - return call.input(0); + return call.input(0); } ); } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.algorithms/ScalarSumAlgorithm.html b/docs/coverage/test/html/neureka.backend.main.algorithms/ScalarSumAlgorithm.html index 8e4941b33..97a4826be 100644 --- a/docs/coverage/test/html/neureka.backend.main.algorithms/ScalarSumAlgorithm.html +++ b/docs/coverage/test/html/neureka.backend.main.algorithms/ScalarSumAlgorithm.html @@ -1 +1 @@ -ScalarSumAlgorithm

ScalarSumAlgorithm

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total29 of 12676%3 of 862%31242518
lambda$new$4(Shape, ADTarget)220%20%223311
lambda$new$5(Function, ExecutionCall)75688%1150%1211101
ScalarSumAlgorithm()13100%n/a010601
lambda$new$1(Tensor)11100%4100%030101
lambda$new$2(ExecutionCall)9100%n/a010401
lambda$new$0(Tensor)5100%n/a010101
lambda$new$3(ExecutionCall)2100%n/a010101
static {...}1100%n/a010101
\ No newline at end of file +ScalarSumAlgorithm

ScalarSumAlgorithm

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total29 of 12676%3 of 862%31242518
lambda$new$4(Shape, ADTarget)220%20%223311
lambda$new$5(Function, ExecutionCall)75688%1150%1211101
ScalarSumAlgorithm()13100%n/a010601
lambda$new$1(Tensor)11100%4100%030101
lambda$new$2(ExecutionCall)9100%n/a010401
lambda$new$0(Tensor)5100%n/a010101
lambda$new$3(ExecutionCall)2100%n/a010101
static {...}1100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.algorithms/ScalarSumAlgorithm.java.html b/docs/coverage/test/html/neureka.backend.main.algorithms/ScalarSumAlgorithm.java.html index 8c2ebd9e5..81c124788 100644 --- a/docs/coverage/test/html/neureka.backend.main.algorithms/ScalarSumAlgorithm.java.html +++ b/docs/coverage/test/html/neureka.backend.main.algorithms/ScalarSumAlgorithm.java.html @@ -40,4 +40,4 @@ .buildFunAlgorithm(); } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.algorithms/SumAlgorithm.html b/docs/coverage/test/html/neureka.backend.main.algorithms/SumAlgorithm.html index 4c05b1954..557ec23bf 100644 --- a/docs/coverage/test/html/neureka.backend.main.algorithms/SumAlgorithm.html +++ b/docs/coverage/test/html/neureka.backend.main.algorithms/SumAlgorithm.html @@ -1 +1 @@ -SumAlgorithm

SumAlgorithm

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total17 of 9782%3 of 425%31032418
lambda$new$5(ExecutionCall)130%20%223311
lambda$new$3(Shape, ADTarget)41881%1150%120301
lambda$new$4(Function, ExecutionCall)33100%n/a010701
SumAlgorithm()15100%n/a010701
lambda$new$1(ExecutionCall)6100%n/a010301
lambda$new$0(Tensor)5100%n/a010101
lambda$new$2(ExecutionCall)2100%n/a010101
static {...}1100%n/a010101
\ No newline at end of file +SumAlgorithm

SumAlgorithm

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total17 of 9782%3 of 425%31032418
lambda$new$5(ExecutionCall)130%20%223311
lambda$new$3(Shape, ADTarget)41881%1150%120301
lambda$new$4(Function, ExecutionCall)33100%n/a010701
SumAlgorithm()15100%n/a010701
lambda$new$1(ExecutionCall)6100%n/a010301
lambda$new$0(Tensor)5100%n/a010101
lambda$new$2(ExecutionCall)2100%n/a010101
static {...}1100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.algorithms/SumAlgorithm.java.html b/docs/coverage/test/html/neureka.backend.main.algorithms/SumAlgorithm.java.html index f8d2cef56..8be2b8b77 100644 --- a/docs/coverage/test/html/neureka.backend.main.algorithms/SumAlgorithm.java.html +++ b/docs/coverage/test/html/neureka.backend.main.algorithms/SumAlgorithm.java.html @@ -42,4 +42,4 @@ .buildFunAlgorithm(); } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.algorithms/Util.html b/docs/coverage/test/html/neureka.backend.main.algorithms/Util.html index 15a65f422..841dd2197 100644 --- a/docs/coverage/test/html/neureka.backend.main.algorithms/Util.html +++ b/docs/coverage/test/html/neureka.backend.main.algorithms/Util.html @@ -1 +1 @@ -Util

Util

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total3 of 9096%0 of 6100%1511412
Util()30%n/a111111
transpose(Tensor)87100%6100%0401301
\ No newline at end of file +Util

Util

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total3 of 9096%0 of 6100%1511412
Util()30%n/a111111
transpose(Tensor)87100%6100%0401301
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.algorithms/Util.java.html b/docs/coverage/test/html/neureka.backend.main.algorithms/Util.java.html index 8a7aed536..f632d4c82 100644 --- a/docs/coverage/test/html/neureka.backend.main.algorithms/Util.java.html +++ b/docs/coverage/test/html/neureka.backend.main.algorithms/Util.java.html @@ -26,4 +26,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.algorithms/index.html b/docs/coverage/test/html/neureka.backend.main.algorithms/index.html index 839163c2d..08214b43c 100644 --- a/docs/coverage/test/html/neureka.backend.main.algorithms/index.html +++ b/docs/coverage/test/html/neureka.backend.main.algorithms/index.html @@ -1 +1 @@ -neureka.backend.main.algorithms

neureka.backend.main.algorithms

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total318 of 2,30786%65 of 22571%692204942213107012
ScalarAlgorithm805942%10637%91613273801
ScalarBroadcast7614465%9950%101816453901
DotProductAlgorithm5140488%163266%164447812001
ScalarSumAlgorithm299776%3562%3124251801
Broadcast2731992%64287%8334532901
MatMulAlgorithm2142295%93479%94027301801
SumAlgorithm178082%3125%3103241801
ElementwiseAlgorithm610494%2250%3100251801
BiScalarBroadcast515296%62076%6200250701
BiElementwise10297%1375%192270701
Util8796%6100%151141201
NDConvolution19100%n/a03060301
\ No newline at end of file +neureka.backend.main.algorithms

neureka.backend.main.algorithms

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total369 of 2,30683%66 of 22570%702196142214106012
ScalarBroadcast1328840%10844%111828454901
ScalarAlgorithm805942%10637%91613273801
DotProductAlgorithm5140488%163266%164447812001
ScalarSumAlgorithm299776%3562%3124251801
Broadcast2731992%64287%8334532901
SumAlgorithm178082%3125%3103241801
MatMulAlgorithm1642396%93479%93927201701
ElementwiseAlgorithm610494%2250%3100251801
BiScalarBroadcast515296%62076%6200250701
BiElementwise10597%1375%192280701
Util8796%6100%151141201
NDConvolution19100%n/a03060301
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.algorithms/index.source.html b/docs/coverage/test/html/neureka.backend.main.algorithms/index.source.html index 222bdf57f..5f2031673 100644 --- a/docs/coverage/test/html/neureka.backend.main.algorithms/index.source.html +++ b/docs/coverage/test/html/neureka.backend.main.algorithms/index.source.html @@ -1 +1 @@ -neureka.backend.main.algorithms

neureka.backend.main.algorithms

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total318 of 2,30786%65 of 22571%692204942213107012
ScalarAlgorithm.java805942%10637%91613273801
ScalarBroadcast.java7614465%9950%101816453901
DotProductAlgorithm.java5140488%163266%164447812001
ScalarSumAlgorithm.java299776%3562%3124251801
Broadcast.java2731992%64287%8334532901
MatMulAlgorithm.java2142295%93479%94027301801
SumAlgorithm.java178082%3125%3103241801
ElementwiseAlgorithm.java610494%2250%3100251801
BiScalarBroadcast.java515296%62076%6200250701
BiElementwise.java10297%1375%192270701
Util.java8796%6100%151141201
NDConvolution.java19100%n/a03060301
\ No newline at end of file +neureka.backend.main.algorithms

neureka.backend.main.algorithms

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total369 of 2,30683%66 of 22570%702196142214106012
ScalarBroadcast.java1328840%10844%111828454901
ScalarAlgorithm.java805942%10637%91613273801
DotProductAlgorithm.java5140488%163266%164447812001
ScalarSumAlgorithm.java299776%3562%3124251801
Broadcast.java2731992%64287%8334532901
SumAlgorithm.java178082%3125%3103241801
MatMulAlgorithm.java1642396%93479%93927201701
ElementwiseAlgorithm.java610494%2250%3100251801
BiScalarBroadcast.java515296%62076%6200250701
BiElementwise.java10597%1375%192280701
Util.java8796%6100%151141201
NDConvolution.java19100%n/a03060301
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLBroadcast.html b/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLBroadcast.html index 46d20fe3f..54d604e96 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLBroadcast.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLBroadcast.html @@ -1 +1 @@ -CLBroadcast

CLBroadcast

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total6 of 8793%2 of 450%2501503
lambda$new$0(ExecutionCall)66191%2250%2301101
CLBroadcast(String, String, String)13100%n/a010301
lambda$new$1(KernelCode)7100%n/a010101
\ No newline at end of file +CLBroadcast

CLBroadcast

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total87 of 870%4 of 40%55151533
lambda$new$0(ExecutionCall)670%40%33111111
CLBroadcast(String, String, String)130%n/a113311
lambda$new$1(KernelCode)70%n/a111111
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLBroadcast.java.html b/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLBroadcast.java.html index 48a771eca..a0d3d67b6 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLBroadcast.java.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLBroadcast.java.html @@ -8,29 +8,29 @@ public class CLBroadcast extends ParsedCLImplementation { protected CLBroadcast(String postfix, String forward, String backward) { - super( + super( call -> { - int offset = ( call.input( Number.class, 0 ) != null ? 0 : 1 ); - int gwz = ( call.input( Number.class, 0 ) != null ? call.input( Number.class, 0 ).size() : call.input( Number.class, 1 ).size() ); - call.getDevice() - .getKernel(call) - .passAllOf( call.input( Number.class, offset ) ) - .passAllOf( call.input( Number.class, offset + 1 ) ) - .passAllOf( call.input( Number.class, offset + 2 ) ) - .pass( call.input( Number.class, 0 ).rank() ) - .pass( call.getValOf( Arg.DerivIdx.class ) ) - .call( gwz ); + int offset = ( call.input( Number.class, 0 ) != null ? 0 : 1 ); + int gwz = ( call.input( Number.class, 0 ) != null ? call.input( Number.class, 0 ).size() : call.input( Number.class, 1 ).size() ); + call.getDevice() + .getKernel(call) + .passAllOf( call.input( Number.class, offset ) ) + .passAllOf( call.input( Number.class, offset + 1 ) ) + .passAllOf( call.input( Number.class, offset + 2 ) ) + .pass( call.input( Number.class, 0 ).rank() ) + .pass( call.getValOf( Arg.DerivIdx.class ) ) + .call( gwz ); - return call.input( 0 ); + return call.input( 0 ); }, 3, - Neureka.get().utility().readResource("kernels/broadcast_template.cl"), + Neureka.get().utility().readResource("kernels/broadcast_template.cl"), forward, backward, postfix, - kernelCode -> new KernelCode[]{kernelCode} + kernelCode -> new KernelCode[]{kernelCode} ); - } + } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLBroadcastAddition.html b/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLBroadcastAddition.html index b45bd4faf..3a5e6afcb 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLBroadcastAddition.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLBroadcastAddition.html @@ -1 +1 @@ -CLBroadcastAddition

CLBroadcastAddition

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 6100%0 of 0n/a010201
CLBroadcastAddition(String)6100%n/a010201
\ No newline at end of file +CLBroadcastAddition

CLBroadcastAddition

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total6 of 60%0 of 0n/a112211
CLBroadcastAddition(String)60%n/a112211
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLBroadcastAddition.java.html b/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLBroadcastAddition.java.html index bc2f8daa1..0de2d2fb3 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLBroadcastAddition.java.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLBroadcastAddition.java.html @@ -3,7 +3,7 @@ public class CLBroadcastAddition extends CLBroadcast { public CLBroadcastAddition(String id) { - super(id, "value += src1 + src2;\n", "value += 1 * drain;\n"); - } + super(id, "value += src1 + src2;\n", "value += 1 * drain;\n"); + } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLBroadcastDivision.html b/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLBroadcastDivision.html index 124e1afff..9fb4ff333 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLBroadcastDivision.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLBroadcastDivision.html @@ -1 +1 @@ -CLBroadcastDivision

CLBroadcastDivision

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 6100%0 of 0n/a010201
CLBroadcastDivision(String)6100%n/a010201
\ No newline at end of file +CLBroadcastDivision

CLBroadcastDivision

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total6 of 60%0 of 0n/a112211
CLBroadcastDivision(String)60%n/a112211
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLBroadcastDivision.java.html b/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLBroadcastDivision.java.html index 3065771d2..70a675da5 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLBroadcastDivision.java.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLBroadcastDivision.java.html @@ -3,7 +3,7 @@ public class CLBroadcastDivision extends CLBroadcast { public CLBroadcastDivision(String id) { - super( + super( id, "value = ((int)src1) % ((int)src2);\n", "if ( d == 0 ) {\n" + @@ -12,6 +12,6 @@ " value += (-(handle /(float)pow(target, (float)2)) ) * drain;\n" + "}" ); - } + } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLBroadcastModulo.html b/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLBroadcastModulo.html index e10e0fa28..75118a0ee 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLBroadcastModulo.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLBroadcastModulo.html @@ -1 +1 @@ -CLBroadcastModulo

CLBroadcastModulo

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 6100%0 of 0n/a010201
CLBroadcastModulo(String)6100%n/a010201
\ No newline at end of file +CLBroadcastModulo

CLBroadcastModulo

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total6 of 60%0 of 0n/a112211
CLBroadcastModulo(String)60%n/a112211
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLBroadcastModulo.java.html b/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLBroadcastModulo.java.html index 79e417753..c1e9cad78 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLBroadcastModulo.java.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLBroadcastModulo.java.html @@ -3,7 +3,7 @@ public class CLBroadcastModulo extends CLBroadcast { public CLBroadcastModulo(String id) { - super( + super( id, "value = ((int)src1) % ((int)src2);\n", "if ( d == 0 ) {\n" + @@ -12,6 +12,6 @@ " value += (-(handle /(float)pow(target, (float)2)) ) * drain;\n" + "}" ); - } + } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLBroadcastMultiplication.html b/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLBroadcastMultiplication.html index ec2295ce6..f4e8d088d 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLBroadcastMultiplication.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLBroadcastMultiplication.html @@ -1 +1 @@ -CLBroadcastMultiplication

CLBroadcastMultiplication

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 6100%0 of 0n/a010201
CLBroadcastMultiplication(String)6100%n/a010201
\ No newline at end of file +CLBroadcastMultiplication

CLBroadcastMultiplication

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total6 of 60%0 of 0n/a112211
CLBroadcastMultiplication(String)60%n/a112211
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLBroadcastMultiplication.java.html b/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLBroadcastMultiplication.java.html index d52450cb3..d0056b7dd 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLBroadcastMultiplication.java.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLBroadcastMultiplication.java.html @@ -3,7 +3,7 @@ public class CLBroadcastMultiplication extends CLBroadcast { public CLBroadcastMultiplication(String id) { - super(id, "value = src1 * src2;\n", "value += ( d == 0 ? drain : handle );\n"); - } + super(id, "value = src1 * src2;\n", "value += ( d == 0 ? drain : handle );\n"); + } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLBroadcastPower.html b/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLBroadcastPower.html index 7b1963e9a..1dc2352ea 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLBroadcastPower.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLBroadcastPower.html @@ -1 +1 @@ -CLBroadcastPower

CLBroadcastPower

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 6100%0 of 0n/a010201
CLBroadcastPower(String)6100%n/a010201
\ No newline at end of file +CLBroadcastPower

CLBroadcastPower

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total6 of 60%0 of 0n/a112211
CLBroadcastPower(String)60%n/a112211
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLBroadcastPower.java.html b/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLBroadcastPower.java.html index 0c89d17ab..4188b80c9 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLBroadcastPower.java.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLBroadcastPower.java.html @@ -3,7 +3,7 @@ public class CLBroadcastPower extends CLBroadcast { public CLBroadcastPower(String id) { - super( + super( id, "value += pow(src1, src2);", "if ( d == 0 ) {\n" + @@ -12,6 +12,6 @@ " value += (pow(target, handle) * log(handle)) * drain;\n" + "}" ); - } + } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLBroadcastSubtraction.html b/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLBroadcastSubtraction.html index 5a9f7b665..8ff176e29 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLBroadcastSubtraction.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLBroadcastSubtraction.html @@ -1 +1 @@ -CLBroadcastSubtraction

CLBroadcastSubtraction

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 6100%0 of 0n/a010201
CLBroadcastSubtraction(String)6100%n/a010201
\ No newline at end of file +CLBroadcastSubtraction

CLBroadcastSubtraction

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total6 of 60%0 of 0n/a112211
CLBroadcastSubtraction(String)60%n/a112211
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLBroadcastSubtraction.java.html b/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLBroadcastSubtraction.java.html index 4a87ab175..bac64da38 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLBroadcastSubtraction.java.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLBroadcastSubtraction.java.html @@ -3,7 +3,7 @@ public class CLBroadcastSubtraction extends CLBroadcast { public CLBroadcastSubtraction(String id) { - super(id, "value += src1 - src2;\n", "value += src1 + src2 * -((d * 2) -1);\n"); - } + super(id, "value += src1 - src2;\n", "value += src1 + src2 * -((d * 2) -1);\n"); + } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLScalarBroadcast.html b/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLScalarBroadcast.html index 8e78bda65..32ad9f631 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLScalarBroadcast.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLScalarBroadcast.html @@ -1 +1 @@ -CLScalarBroadcast

CLScalarBroadcast

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total49 of 16269%1 of 785%211123115
lambda$new$0(ExecutionCall)460%n/a11111111
lambda$new$1(KernelCode, String)36195%1685%1711301
lambda$new$3(KernelCode)36100%n/a010401
CLScalarBroadcast(String, String, String)13100%n/a010301
lambda$new$2(int)3100%n/a010101
\ No newline at end of file +CLScalarBroadcast

CLScalarBroadcast

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total162 of 1620%7 of 70%1111313155
lambda$new$1(KernelCode, String)640%70%77131311
lambda$new$0(ExecutionCall)460%n/a11111111
lambda$new$3(KernelCode)360%n/a114411
CLScalarBroadcast(String, String, String)130%n/a113311
lambda$new$2(int)30%n/a111111
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLScalarBroadcast.java.html b/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLScalarBroadcast.java.html index 933c1941b..8901702e5 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLScalarBroadcast.java.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLScalarBroadcast.java.html @@ -16,7 +16,7 @@ public CLScalarBroadcast( String postfix, String activation, String derivation ) { - super( + super( call->{ Tensor<Number> t = call.input( Number.class, 0 ); int gwz = t.size(); @@ -32,35 +32,35 @@ return call.input(0); }, 2, - Neureka.get().utility().readResource("kernels/scalarization_template.cl"), + Neureka.get().utility().readResource("kernels/scalarization_template.cl"), activation, derivation, postfix, kernelCode -> { - String[] types = new String[]{ + String[] types = new String[]{ "float", "double", "int", "long", "short", "char" }; - return - Arrays.stream(types).map( type -> { - String newName = kernelCode.getName() + ("_" + type); - String newCode = kernelCode.getCode() - .replace(TYPE, type) - .replace(kernelCode.getName(), newName); + return + Arrays.stream(types).map( type -> { + String newName = kernelCode.getName() + ("_" + type); + String newCode = kernelCode.getCode() + .replace(TYPE, type) + .replace(kernelCode.getName(), newName); DataType<?> dt; - switch (type) { - case "float": dt = DataType.of(Float.class); break; - case "double": dt = DataType.of(Double.class); break; - case "int": dt = DataType.of(Integer.class); break; - case "long": dt = DataType.of(Long.class); break; - case "short": dt = DataType.of(Short.class); break; - case "char": dt = DataType.of(Byte.class); break; + switch (type) { + case "float": dt = DataType.of(Float.class); break; + case "double": dt = DataType.of(Double.class); break; + case "int": dt = DataType.of(Integer.class); break; + case "long": dt = DataType.of(Long.class); break; + case "short": dt = DataType.of(Short.class); break; + case "char": dt = DataType.of(Byte.class); break; default: dt = DataType.of(Float.class); break; } - return new KernelCode(newName, newCode, dt); + return new KernelCode(newName, newCode, dt); }) - .toArray(KernelCode[]::new); + .toArray(KernelCode[]::new); } ); - } + } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLScalarBroadcastAddition.html b/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLScalarBroadcastAddition.html index b9bea87a8..765084d4c 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLScalarBroadcastAddition.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLScalarBroadcastAddition.html @@ -1 +1 @@ -CLScalarBroadcastAddition

CLScalarBroadcastAddition

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total24 of 9273%3 of 650%3621903
run(ExecutionCall)246171%3350%3421601
CLScalarBroadcastAddition(String)6100%n/a010201
static {...}1100%n/a010101
\ No newline at end of file +CLScalarBroadcastAddition

CLScalarBroadcastAddition

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total92 of 920%6 of 60%66191933
run(ExecutionCall)850%60%44161611
CLScalarBroadcastAddition(String)60%n/a112211
static {...}10%n/a111111
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLScalarBroadcastAddition.java.html b/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLScalarBroadcastAddition.java.html index 7b0daa95b..ced929d20 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLScalarBroadcastAddition.java.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLScalarBroadcastAddition.java.html @@ -5,32 +5,32 @@ import neureka.devices.opencl.OpenCLDevice; import neureka.math.args.Arg; -public class CLScalarBroadcastAddition extends CLScalarBroadcast +public class CLScalarBroadcastAddition extends CLScalarBroadcast { public CLScalarBroadcastAddition(String id) { - super( id, "output = input1 + value;\n", "output = 1;\n" ); - } + super( id, "output = input1 + value;\n", "output = 1;\n" ); + } @Override public Tensor<?> run(ExecutionCall<OpenCLDevice> call) { - assert call.arity() == 3; - if ( call.getDerivativeIndex() == 0 ) + assert call.arity() == 3; + if ( call.getDerivativeIndex() == 0 ) return Tensor.of( call.input(1).shape(), 1d ).mut().setIsIntermediate( true ); - else if ( call.getDerivativeIndex() == 1 ) + else if ( call.getDerivativeIndex() == 1 ) return Tensor.of( call.input( 2 ).shape(), 1d ).mut().setIsIntermediate( true ); else { - int gwz = call.input(Number.class, 0).size(); - float value = call.input(Number.class, 2).item(0).floatValue(); - call.getDevice() - .getKernel(call) - .passAllOf(call.input(Number.class, 0)) - .passAllOf(call.input(Number.class, 1)) - .pass(value) - .pass(call.input(Number.class, 0).rank()) - .pass(call.getValOf(Arg.DerivIdx.class)) - .call(gwz); + int gwz = call.input(Number.class, 0).size(); + float value = call.input(Number.class, 2).item(0).floatValue(); + call.getDevice() + .getKernel(call) + .passAllOf(call.input(Number.class, 0)) + .passAllOf(call.input(Number.class, 1)) + .pass(value) + .pass(call.input(Number.class, 0).rank()) + .pass(call.getValOf(Arg.DerivIdx.class)) + .call(gwz); } - return call.input( 0 ); + return call.input( 0 ); } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLScalarBroadcastDivision.html b/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLScalarBroadcastDivision.html index 20ee6c35c..db1ec8b44 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLScalarBroadcastDivision.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLScalarBroadcastDivision.html @@ -1 +1 @@ -CLScalarBroadcastDivision

CLScalarBroadcastDivision

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total72 of 787%4 of 40%34101212
run(ExecutionCall)720%40%33101011
CLScalarBroadcastDivision(String)6100%n/a010201
\ No newline at end of file +CLScalarBroadcastDivision

CLScalarBroadcastDivision

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total78 of 780%4 of 40%44121222
run(ExecutionCall)720%40%33101011
CLScalarBroadcastDivision(String)60%n/a112211
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLScalarBroadcastDivision.java.html b/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLScalarBroadcastDivision.java.html index afa429a81..e2972575b 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLScalarBroadcastDivision.java.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLScalarBroadcastDivision.java.html @@ -8,7 +8,7 @@ public class CLScalarBroadcastDivision extends CLScalarBroadcast { public CLScalarBroadcastDivision( String id ) { - super( + super( id, "output = ("+TYPE+")(((float)input1) / ((float)value));\n", "if ( d == 0 ) { \n" + @@ -17,7 +17,7 @@ " output = -(("+TYPE+")(((float)value) /(float)pow((float)input1, 2.0f))); \n" + "} \n" ); - } + } @Override public Tensor<?> run(ExecutionCall<OpenCLDevice> call) { @@ -34,4 +34,4 @@ return call.input( 0 ); } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLScalarBroadcastIdentity.html b/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLScalarBroadcastIdentity.html index e53e6319b..6369c79ea 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLScalarBroadcastIdentity.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLScalarBroadcastIdentity.html @@ -1 +1 @@ -CLScalarBroadcastIdentity

CLScalarBroadcastIdentity

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 51100%0 of 0n/a0201302
run(ExecutionCall)45100%n/a0101101
CLScalarBroadcastIdentity(String)6100%n/a010201
\ No newline at end of file +CLScalarBroadcastIdentity

CLScalarBroadcastIdentity

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total51 of 510%0 of 0n/a22131322
run(ExecutionCall)450%n/a11111111
CLScalarBroadcastIdentity(String)60%n/a112211
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLScalarBroadcastIdentity.java.html b/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLScalarBroadcastIdentity.java.html index 895efdf22..98298ddbf 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLScalarBroadcastIdentity.java.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLScalarBroadcastIdentity.java.html @@ -8,27 +8,27 @@ public class CLScalarBroadcastIdentity extends CLScalarBroadcast { public CLScalarBroadcastIdentity(String id) { - super( + super( id, "output = value;\n", "output = value;\n" ); - } + } @Override public Tensor<?> run(ExecutionCall<OpenCLDevice> call) { - Tensor<Number> t = call.input( Number.class, 0 ); - int gwz = t.size(); - call.getDevice() - .getKernel(call) - .passAllOf( t ) - .passAllOf( t ) - .pass( call.input( Number.class, 1 ).at(0).get() ) - .pass( t.rank() ) - .pass( call.getValOf( Arg.DerivIdx.class ) ) - .call( gwz ); + Tensor<Number> t = call.input( Number.class, 0 ); + int gwz = t.size(); + call.getDevice() + .getKernel(call) + .passAllOf( t ) + .passAllOf( t ) + .pass( call.input( Number.class, 1 ).at(0).get() ) + .pass( t.rank() ) + .pass( call.getValOf( Arg.DerivIdx.class ) ) + .call( gwz ); - return call.input(0); + return call.input(0); } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLScalarBroadcastModulo.html b/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLScalarBroadcastModulo.html index a741ea7f2..c5b8c72a3 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLScalarBroadcastModulo.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLScalarBroadcastModulo.html @@ -1 +1 @@ -CLScalarBroadcastModulo

CLScalarBroadcastModulo

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total72 of 787%4 of 40%34111312
run(ExecutionCall)720%40%33111111
CLScalarBroadcastModulo(String)6100%n/a010201
\ No newline at end of file +CLScalarBroadcastModulo

CLScalarBroadcastModulo

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total78 of 780%4 of 40%44131322
run(ExecutionCall)720%40%33111111
CLScalarBroadcastModulo(String)60%n/a112211
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLScalarBroadcastModulo.java.html b/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLScalarBroadcastModulo.java.html index da12b7c61..2955f292c 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLScalarBroadcastModulo.java.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLScalarBroadcastModulo.java.html @@ -8,7 +8,7 @@ public class CLScalarBroadcastModulo extends CLScalarBroadcast { public CLScalarBroadcastModulo(String id ) { - super( + super( id, "output = ("+TYPE+")(((int)input1) % ((int)value)); \n", " if ( d == 0 ) { \n" + @@ -17,7 +17,7 @@ " output = ("+TYPE+")(-value /(float)pow((float)input1, 2.0f)); \n" + " }" ); - } + } @Override public Tensor<?> run(ExecutionCall<OpenCLDevice> call) { @@ -35,4 +35,4 @@ return call.input( 0 ); } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLScalarBroadcastMultiplication.html b/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLScalarBroadcastMultiplication.html index f49375582..3f44488f6 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLScalarBroadcastMultiplication.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLScalarBroadcastMultiplication.html @@ -1 +1 @@ -CLScalarBroadcastMultiplication

CLScalarBroadcastMultiplication

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total1 of 10299%1 of 887%1601702
run(ExecutionCall)19598%1787%1501501
CLScalarBroadcastMultiplication(String)6100%n/a010201
\ No newline at end of file +CLScalarBroadcastMultiplication

CLScalarBroadcastMultiplication

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total102 of 1020%8 of 80%66171722
run(ExecutionCall)960%80%55151511
CLScalarBroadcastMultiplication(String)60%n/a112211
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLScalarBroadcastMultiplication.java.html b/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLScalarBroadcastMultiplication.java.html index 76220aeab..b27f1cb31 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLScalarBroadcastMultiplication.java.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLScalarBroadcastMultiplication.java.html @@ -8,28 +8,28 @@ public class CLScalarBroadcastMultiplication extends CLScalarBroadcast { public CLScalarBroadcastMultiplication(String id) { - super( id, "output = input1 * value;\n", "if ( d == 0 ) {output = value;}else{output = input1;}\n" ); - } + super( id, "output = input1 * value;\n", "if ( d == 0 ) {output = value;}else{output = input1;}\n" ); + } @Override public Tensor<?> run(ExecutionCall<OpenCLDevice> call) { - if ( call.getDerivativeIndex() == 0 ) - return call.input( 2 ).shallowCopy().mut().setIsIntermediate( true ); - else if ( call.getDerivativeIndex() == 1 ) - return call.input( 1 ).shallowCopy().mut().setIsIntermediate( true ); + if ( call.getDerivativeIndex() == 0 ) + return call.input( 2 ).shallowCopy().mut().setIsIntermediate( true ); + else if ( call.getDerivativeIndex() == 1 ) + return call.input( 1 ).shallowCopy().mut().setIsIntermediate( true ); else { - int offset = (call.input(Number.class, 2).isVirtual() || call.input(Number.class, 2).size() == 1) ? 1 : 0; - int gwz = call.input(Number.class, 0).size(); - call.getDevice() - .getKernel(call) - .passAllOf(call.input(Number.class, 0)) - .passAllOf(call.input(Number.class, 0 + offset)) - .pass( call.input( Number.class, 1 + offset ).at( 0 ).get() ) - .pass(call.input(Number.class, 0).rank()) - .pass(call.getValOf(Arg.DerivIdx.class)) - .call(gwz); + int offset = (call.input(Number.class, 2).isVirtual() || call.input(Number.class, 2).size() == 1) ? 1 : 0; + int gwz = call.input(Number.class, 0).size(); + call.getDevice() + .getKernel(call) + .passAllOf(call.input(Number.class, 0)) + .passAllOf(call.input(Number.class, 0 + offset)) + .pass( call.input( Number.class, 1 + offset ).at( 0 ).get() ) + .pass(call.input(Number.class, 0).rank()) + .pass(call.getValOf(Arg.DerivIdx.class)) + .call(gwz); } - return call.input( 0 ); + return call.input( 0 ); } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLScalarBroadcastPower.html b/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLScalarBroadcastPower.html index 797e90374..3dddbeee9 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLScalarBroadcastPower.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLScalarBroadcastPower.html @@ -1 +1 @@ -CLScalarBroadcastPower

CLScalarBroadcastPower

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total72 of 787%4 of 40%34111312
run(ExecutionCall)720%40%33111111
CLScalarBroadcastPower(String)6100%n/a010201
\ No newline at end of file +CLScalarBroadcastPower

CLScalarBroadcastPower

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total78 of 780%4 of 40%44131322
run(ExecutionCall)720%40%33111111
CLScalarBroadcastPower(String)60%n/a112211
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLScalarBroadcastPower.java.html b/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLScalarBroadcastPower.java.html index d6fdfddfd..5e3a9d5a5 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLScalarBroadcastPower.java.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLScalarBroadcastPower.java.html @@ -8,7 +8,7 @@ public class CLScalarBroadcastPower extends CLScalarBroadcast { public CLScalarBroadcastPower( String id ) { - super( + super( id, "output = ("+TYPE+") pow( (float) input1, (float) value );", " if ( d == 0 ) \n" + @@ -16,7 +16,7 @@ " else \n" + " output = ("+TYPE+") ( pow( (float) input1, (float) value ) * log( (float) value ) ); \n" ); - } + } @Override public Tensor<?> run(ExecutionCall<OpenCLDevice> call) { int offset = (call.input( Number.class, 2 ).isVirtual() || call.input( Number.class, 2 ).size() == 1)?1:0; @@ -33,4 +33,4 @@ return call.input(0); } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLScalarBroadcastSubtraction.html b/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLScalarBroadcastSubtraction.html index 57d3676e5..cc0ab1a09 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLScalarBroadcastSubtraction.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLScalarBroadcastSubtraction.html @@ -1 +1 @@ -CLScalarBroadcastSubtraction

CLScalarBroadcastSubtraction

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total73 of 797%4 of 40%34111312
run(ExecutionCall)730%40%33111111
CLScalarBroadcastSubtraction(String)6100%n/a010201
\ No newline at end of file +CLScalarBroadcastSubtraction

CLScalarBroadcastSubtraction

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total79 of 790%4 of 40%44131322
run(ExecutionCall)730%40%33111111
CLScalarBroadcastSubtraction(String)60%n/a112211
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLScalarBroadcastSubtraction.java.html b/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLScalarBroadcastSubtraction.java.html index 552c3b7e4..91ffde7be 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLScalarBroadcastSubtraction.java.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CLScalarBroadcastSubtraction.java.html @@ -8,8 +8,8 @@ public class CLScalarBroadcastSubtraction extends CLScalarBroadcast { public CLScalarBroadcastSubtraction(String id) { - super( id, "output = input1 - value;\n", "if (d==0) { output = 1; } else { output = -1; }" ); - } + super( id, "output = input1 - value;\n", "if (d==0) { output = 1; } else { output = -1; }" ); + } @Override public Tensor<?> run(ExecutionCall<OpenCLDevice> call) { @@ -27,4 +27,4 @@ return call.input(0); } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CPUBroadcast.html b/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CPUBroadcast.html index fc3ff75ec..db43f781c 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CPUBroadcast.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CPUBroadcast.html @@ -1 +1 @@ -CPUBroadcast

CPUBroadcast

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total35 of 76295%10 of 9088%1053216008
_newWorkloadFor(ExecutionCall)156781%2880%2621601
_broadcastF64(Tensor, Tensor, Tensor, int, int, int, CPUBiFun)1031196%43690%42106801
_broadcastF32(Tensor, Tensor, Tensor, int, int, int, CPUBiFun)1031196%43690%42106801
run(ExecutionCall)16100%n/a010601
lambda$_newWorkloadFor$1(Tensor, Tensor, Tensor, int, CPUBiFun, int, int)9100%n/a010101
lambda$_newWorkloadFor$0(Tensor, Tensor, Tensor, int, CPUBiFun, int, int)9100%n/a010101
CPUBroadcast()3100%n/a010101
static {...}100%n/a010101
\ No newline at end of file +CPUBroadcast

CPUBroadcast

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total35 of 76295%10 of 9088%1053216008
_newWorkloadFor(ExecutionCall)156781%2880%2621601
_broadcastF64(Tensor, Tensor, Tensor, int, int, int, CPUBiFun)1031196%43690%42106801
_broadcastF32(Tensor, Tensor, Tensor, int, int, int, CPUBiFun)1031196%43690%42106801
run(ExecutionCall)16100%n/a010601
lambda$_newWorkloadFor$1(Tensor, Tensor, Tensor, int, CPUBiFun, int, int)9100%n/a010101
lambda$_newWorkloadFor$0(Tensor, Tensor, Tensor, int, CPUBiFun, int, int)9100%n/a010101
CPUBroadcast()3100%n/a010101
static {...}100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CPUBroadcast.java.html b/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CPUBroadcast.java.html index ed4c11f78..2ee47cfe5 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CPUBroadcast.java.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CPUBroadcast.java.html @@ -261,4 +261,4 @@ } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CPUBroadcastAddition$1.html b/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CPUBroadcastAddition$1.html index 8a22c078d..0169526a7 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CPUBroadcastAddition$1.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CPUBroadcastAddition$1.html @@ -1 +1 @@ -CPUBroadcastAddition.new CPUBiFun() {...}

CPUBroadcastAddition.new CPUBiFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total13 of 2751%4 of 40%472525
invoke(boolean, boolean)80%40%331111
invoke(char, char)50%n/a111111
{...}6100%n/a010101
invoke(double, double)4100%n/a010101
invoke(float, float)4100%n/a010101
\ No newline at end of file +CPUBroadcastAddition.new CPUBiFun() {...}

CPUBroadcastAddition.new CPUBiFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total13 of 2751%4 of 40%472525
invoke(boolean, boolean)80%40%331111
invoke(char, char)50%n/a111111
{...}6100%n/a010101
invoke(double, double)4100%n/a010101
invoke(float, float)4100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CPUBroadcastAddition.html b/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CPUBroadcastAddition.html index 79ae05a8d..42ba1f83c 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CPUBroadcastAddition.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CPUBroadcastAddition.html @@ -1 +1 @@ -CPUBroadcastAddition

CPUBroadcastAddition

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 14100%0 of 0n/a040404
_getFun()5100%n/a010101
CPUBroadcastAddition()3100%n/a010101
_getDeriveAt0()3100%n/a010101
_getDeriveAt1()3100%n/a010101
\ No newline at end of file +CPUBroadcastAddition

CPUBroadcastAddition

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 14100%0 of 0n/a040404
_getFun()5100%n/a010101
CPUBroadcastAddition()3100%n/a010101
_getDeriveAt0()3100%n/a010101
_getDeriveAt1()3100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CPUBroadcastAddition.java.html b/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CPUBroadcastAddition.java.html index 20a3247d2..701d3f80c 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CPUBroadcastAddition.java.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CPUBroadcastAddition.java.html @@ -20,4 +20,4 @@ @Override protected CPUBiFun _getDeriveAt1() { return _getFun(); } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CPUBroadcastDivision$1.html b/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CPUBroadcastDivision$1.html index ef5bc5089..dd886c64a 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CPUBroadcastDivision$1.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CPUBroadcastDivision$1.html @@ -1 +1 @@ -CPUBroadcastDivision.new CPUBiFun() {...}

CPUBroadcastDivision.new CPUBiFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
  Total                    0 of 14 instructions missed (100%), 0 of 0 branches (n/a), 0 of 3 lines and 0 of 3 methods missed
  {...} (initializer)      6 instructions, 100%
  invoke(double, double)   4 instructions, 100%
  invoke(float, float)     4 instructions, 100%

The regenerated report for CPUBroadcastDivision$1 (the "+" side of the hunk) shows the same figures.

docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CPUBroadcastDivision$2.html, CPUBroadcastDivision$3.html and CPUBroadcastDivision.html: regenerated coverage pages; the visible metrics are identical before and after:
  CPUBroadcastDivision$2 (anonymous CPUBiFun): 14 of 14 instructions missed (0%), all 3 methods missed.
  CPUBroadcastDivision$3 (anonymous CPUBiFun): 23 of 23 instructions missed (0%), all 3 methods missed.
  CPUBroadcastDivision: 10 of 18 instructions missed (44%); _getDeriveAt0() and _getDeriveAt1() missed, _getFun() and the constructor covered.
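The three anonymous classes above are small pair functions that the division broadcast hands to the CPU backend. As a rough illustration only (the CPUBiFun interface and its overloads are inferred from the method names in the report, not copied from the Neureka sources), such an anonymous implementation could look like this:

    // Hedged sketch: CPUBiFun and its overloads are assumed from the report above.
    interface CPUBiFun {
        double invoke(double a, double b);
        float invoke(float a, float b);
    }

    class DivisionFunSketch {
        // Roughly what a _getFun() of a division broadcast might return:
        CPUBiFun getFun() {
            return new CPUBiFun() { // an anonymous class like CPUBroadcastDivision$1
                @Override public double invoke(double a, double b) { return a / b; }
                @Override public float invoke(float a, float b) { return a / b; }
            };
        }
    }

The $2 and $3 classes in the report presumably play the same role for the two derivative functions, which is why each lists the same three entries.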
CPUBroadcastDivision.java.html (same coverage folder): regenerated source-listing page; the rendered listing is unchanged.

CPUBroadcastModulo$1.html, CPUBroadcastModulo$2.html, CPUBroadcastModulo$3.html and CPUBroadcastModulo.html: regenerated coverage pages; the visible metrics are identical before and after:
  CPUBroadcastModulo$1 (anonymous CPUBiFun): 4 of 14 instructions missed (71%); invoke(float, float) missed, the initializer and invoke(double, double) covered.
  CPUBroadcastModulo$2 (anonymous CPUBiFun): 14 of 14 instructions missed (0%), all 3 methods missed.
  CPUBroadcastModulo$3 (anonymous CPUBiFun): 23 of 23 instructions missed (0%), all 3 methods missed.
  CPUBroadcastModulo: 10 of 18 instructions missed (44%); _getDeriveAt0() and _getDeriveAt1() missed, _getFun() and the constructor covered.
CPUBroadcastModulo.java.html: regenerated source-listing page; the rendered listing is unchanged.

CPUBroadcastMultiplication$1.html, $2.html, $3.html and CPUBroadcastMultiplication.html: regenerated coverage pages; visible metrics identical before and after:
  CPUBroadcastMultiplication$1 (anonymous CPUBiFun): 0 of 14 instructions missed (100%), all 3 methods covered.
  CPUBroadcastMultiplication$2 (anonymous CPUBiFun): 10 of 10 instructions missed (0%), all 3 methods missed.
  CPUBroadcastMultiplication$3 (anonymous CPUBiFun): 10 of 10 instructions missed (0%), all 3 methods missed.
  CPUBroadcastMultiplication: 10 of 18 instructions missed (44%); _getDeriveAt0() and _getDeriveAt1() missed, _getFun() and the constructor covered.
CPUBroadcastMultiplication.java.html: regenerated source-listing page; the rendered listing is unchanged.

CPUBroadcastPower$1.html, $2.html, $3.html and CPUBroadcastPower.html: regenerated coverage pages; visible metrics identical before and after:
  CPUBroadcastPower$1 (anonymous CPUBiFun): 4 of 17 instructions missed (76%); invoke(double, double) missed, invoke(float, float) and the initializer covered.
  CPUBroadcastPower$2 (anonymous CPUBiFun): 26 of 26 instructions missed (0%), all 3 methods missed.
  CPUBroadcastPower$3 (anonymous CPUBiFun): 24 of 24 instructions missed (0%), all 3 methods missed.
  CPUBroadcastPower: 10 of 18 instructions missed (44%); _getDeriveAt0() and _getDeriveAt1() missed, _getFun() and the constructor covered.
CPUBroadcastPower.java.html: regenerated source-listing page; the rendered listing is unchanged.

CPUBroadcastSubtraction$1.html, $2.html, $3.html and CPUBroadcastSubtraction.html: regenerated coverage pages; visible metrics identical before and after:
  CPUBroadcastSubtraction$1, $2 and $3 (anonymous CPUBiFun): each 0 of 14 instructions missed (100%), all 3 methods covered.
  CPUBroadcastSubtraction: 0 of 18 instructions missed (100%); _getFun(), _getDeriveAt0(), _getDeriveAt1() and the constructor all covered.
CPUBroadcastSubtraction.java.html: regenerated source-listing page; the rendered listing is unchanged.

CPUBroadcastSummation$1.html, $2.html, $3.html and CPUBroadcastSummation.html: regenerated coverage pages; visible metrics identical before and after:
  CPUBroadcastSummation$1 (anonymous CPUBiFun): 35 of 35 instructions and 4 of 4 branches missed (0%); all 7 methods missed (invoke overloads for boolean, char, double, float, int and long plus the initializer).
  CPUBroadcastSummation$2 (anonymous CPUBiFun): 27 of 27 instructions and 4 of 4 branches missed (0%), all 7 methods missed.
  CPUBroadcastSummation$3 (anonymous CPUBiFun): 27 of 27 instructions and 4 of 4 branches missed (0%), all 7 methods missed.
  CPUBroadcastSummation: 18 of 18 instructions missed (0%); _getFun(), _getDeriveAt0(), _getDeriveAt1() and the constructor all missed.
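The summation reports above list seven invoke overloads per anonymous class, and only the boolean overload carries branch counts. A hedged sketch of such a wide pair function (the interface name and the boolean semantics are assumptions made for illustration, not the Neureka code):

    // Hedged sketch: one overload per primitive type, mirroring the seven methods
    // listed in the report; the boolean case uses a short-circuit operator, which
    // is why JaCoCo attributes branches to that overload alone.
    interface WideBiFun {
        double invoke(double a, double b);
        float invoke(float a, float b);
        int invoke(int a, int b);
        long invoke(long a, long b);
        char invoke(char a, char b);
        boolean invoke(boolean a, boolean b);
    }

    class SummationFunSketch {
        WideBiFun fun() {
            return new WideBiFun() {
                @Override public double invoke(double a, double b) { return a + b; }
                @Override public float invoke(float a, float b) { return a + b; }
                @Override public int invoke(int a, int b) { return a + b; }
                @Override public long invoke(long a, long b) { return a + b; }
                @Override public char invoke(char a, char b) { return (char) (a + b); }
                @Override public boolean invoke(boolean a, boolean b) { return a || b; } // assumed semantics
            };
        }
    }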
CPUBroadcastSummation.java.html: regenerated source-listing page; the rendered listing is unchanged.

CPUScalaBroadcastPower$1.html, $2.html, $3.html and CPUScalaBroadcastPower.html: regenerated coverage pages; visible metrics identical before and after:
  CPUScalaBroadcastPower$1 (anonymous CPUBiFun): 32 of 32 instructions missed (0%), all 5 methods missed (invoke overloads for int, float, long and double plus the initializer).
  CPUScalaBroadcastPower$2 (anonymous CPUBiFun): 51 of 51 instructions missed (0%), all 5 methods missed.
  CPUScalaBroadcastPower$3 (anonymous CPUBiFun): 47 of 47 instructions missed (0%), all 5 methods missed.
  CPUScalaBroadcastPower: 15 of 18 instructions missed (16%); _getFun(), _getDeriveAt0() and _getDeriveAt1() missed, only the constructor covered.
CPUScalaBroadcastPower.java.html: regenerated source-listing page; the rendered listing is unchanged.

CPUScalarBroadcast.html: regenerated coverage page; visible metrics identical before and after:
  CPUScalarBroadcast: 18 of 769 instructions missed (97%) and 3 of 44 branches missed (93%); only 1 of 152 lines missed and none of the 12 methods missed. The remaining misses sit in _workloadFor(ExecutionCall) (18 of 417 instructions, 3 of 26 branches); the nine per-type lambdas (lambda$_workloadFor$0 through $8), run(ExecutionCall) and the constructor are fully covered.
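The CPUScalarBroadcast report lists nine per-type lambdas (lambda$_workloadFor$0 through $8), each fully covered with two branches, which matches one simple indexed loop per element type. A minimal sketch of one such workload chunk, using hypothetical names (DoubleBiFun, workloadChunk) that merely stand in for the real signatures:

    // Hedged sketch of a per-type workload lambda: apply a pair function over an
    // index range, combining a tensor's data array with a broadcast scalar.
    final class ScalarBroadcastWorkloadSketch {

        interface DoubleBiFun { double invoke(double a, double b); }

        // The single loop condition is the two-branch decision point that the
        // coverage report counts for each lambda.
        static void workloadChunk(double[] out, double[] in, double scalar,
                                  DoubleBiFun fun, int start, int end) {
            for (int i = start; i < end; i++) {
                out[i] = fun.invoke(in[i], scalar);
            }
        }

        public static void main(String[] args) {
            double[] in = {1, 2, 3, 4};
            double[] out = new double[in.length];
            workloadChunk(out, in, 2.0, (a, b) -> a / b, 0, in.length);
            System.out.println(java.util.Arrays.toString(out)); // [0.5, 1.0, 1.5, 2.0]
        }
    }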
CPUScalarBroadcast.java.html: regenerated source-listing page; the rendered listing is unchanged.

CPUScalarBroadcastAddition$1.html, $2.html and CPUScalarBroadcastAddition.html: regenerated coverage pages; visible metrics identical before and after:
  CPUScalarBroadcastAddition$1 (anonymous CPUBiFun): 13 of 35 instructions and 4 of 4 branches missed (62%); invoke(boolean, boolean) and invoke(char, char) missed, the initializer and the double/float/int/long overloads covered.
  CPUScalarBroadcastAddition$2 (anonymous CPUBiFun): 18 of 18 instructions missed (0%), all 7 methods missed.
  CPUScalarBroadcastAddition: 32 of 57 instructions missed (43%) and 3 of 6 branches missed (50%); run(ExecutionCall) only partially covered (24 of 40 instructions missed), _getDeriveAt0() and _getDeriveAt1() missed, _getFun(), the constructor and the static initializer covered.
CPUScalarBroadcastAddition.java.html: regenerated source-listing page; the rendered listing is unchanged.

CPUScalarBroadcastDivision$1.html, $2.html, $3.html and CPUScalarBroadcastDivision.html: regenerated coverage pages; visible metrics identical before and after:
  CPUScalarBroadcastDivision$1 (anonymous CPUBiFun): 22 of 22 instructions missed (0%), all 5 methods missed.
  CPUScalarBroadcastDivision$2 (anonymous CPUBiFun): 27 of 27 instructions missed (0%), all 5 methods missed.
  CPUScalarBroadcastDivision$3 (anonymous CPUBiFun): 44 of 44 instructions missed (0%), all 5 methods missed.
  CPUScalarBroadcastDivision: 15 of 18 instructions missed (16%); _getFun(), _getDeriveAt0() and _getDeriveAt1() missed, only the constructor covered.
CPUScalarBroadcastDivision.java.html: regenerated source-listing page; the rendered listing is unchanged.

CPUScalarBroadcastIdentity$1.html and CPUScalarBroadcastIdentity.html: regenerated coverage pages; visible metrics identical before and after:
  CPUScalarBroadcastIdentity$1 (anonymous CPUBiFun): 0 of 22 instructions missed (100%), all 9 methods covered (invoke overloads for double, float, byte, int, boolean, char, long and Object plus the initializer).
  CPUScalarBroadcastIdentity: 6 of 14 instructions missed (57%); _getDeriveAt0() and _getDeriveAt1() missed, _getFun() and the constructor covered.
CPUScalarBroadcastIdentity.java.html: regenerated source-listing page; the rendered listing is unchanged.

CPUScalarBroadcastModulo$1.html, $2.html, $3.html and CPUScalarBroadcastModulo.html: regenerated coverage pages; visible metrics identical before and after:
  CPUScalarBroadcastModulo$1 (anonymous CPUBiFun): 22 of 22 instructions missed (0%), all 5 methods missed.
  CPUScalarBroadcastModulo$2 (anonymous CPUBiFun): 27 of 27 instructions missed (0%), all 5 methods missed.
  CPUScalarBroadcastModulo$3 (anonymous CPUBiFun): 44 of 44 instructions missed (0%), all 5 methods missed.
  CPUScalarBroadcastModulo: 15 of 18 instructions missed (16%); _getFun(), _getDeriveAt0() and _getDeriveAt1() missed, only the constructor covered.
CPUScalarBroadcastModulo.java.html: regenerated source-listing page; the rendered listing is unchanged.

CPUScalarBroadcastMultiplication$1.html, $2.html, $3.html and CPUScalarBroadcastMultiplication.html: regenerated coverage pages; visible metrics identical before and after:
  CPUScalarBroadcastMultiplication$1 (anonymous CPUBiFun): 4 of 22 instructions missed (81%); invoke(long, long) missed, the initializer and the double/float/int overloads covered.
  CPUScalarBroadcastMultiplication$2 (anonymous CPUBiFun): 14 of 14 instructions missed (0%), all 5 methods missed.
  CPUScalarBroadcastMultiplication$3 (anonymous CPUBiFun): 14 of 14 instructions missed (0%), all 5 methods missed.
  CPUScalarBroadcastMultiplication: 14 of 55 instructions missed (74%) and 1 of 6 branches missed (83%); _getDeriveAt0() and _getDeriveAt1() missed, run(ExecutionCall) nearly fully covered (4 of 36 instructions and 1 branch missed), _getFun(), the constructor and the static initializer covered.
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CPUScalarBroadcastMultiplication.java.html b/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CPUScalarBroadcastMultiplication.java.html index 73c87e6e8..35cb9733d 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CPUScalarBroadcastMultiplication.java.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CPUScalarBroadcastMultiplication.java.html @@ -48,4 +48,4 @@ }; } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CPUScalarBroadcastSubtraction$1.html b/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CPUScalarBroadcastSubtraction$1.html index 4c962d034..cd060a6e5 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CPUScalarBroadcastSubtraction$1.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CPUScalarBroadcastSubtraction$1.html @@ -1 +1 @@ -CPUScalarBroadcastSubtraction.new CPUBiFun() {...}

CPUScalarBroadcastSubtraction.new CPUBiFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total22 of 220%0 of 0n/a555555
{...}60%n/a111111
invoke(double, double)40%n/a111111
invoke(float, float)40%n/a111111
invoke(int, int)40%n/a111111
invoke(long, long)40%n/a111111
\ No newline at end of file +CPUScalarBroadcastSubtraction.new CPUBiFun() {...}

CPUScalarBroadcastSubtraction.new CPUBiFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total22 of 220%0 of 0n/a555555
{...}60%n/a111111
invoke(double, double)40%n/a111111
invoke(float, float)40%n/a111111
invoke(int, int)40%n/a111111
invoke(long, long)40%n/a111111
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CPUScalarBroadcastSubtraction$2.html b/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CPUScalarBroadcastSubtraction$2.html index 5e638015d..32895e04c 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CPUScalarBroadcastSubtraction$2.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CPUScalarBroadcastSubtraction$2.html @@ -1 +1 @@ -CPUScalarBroadcastSubtraction.new CPUBiFun() {...}

CPUScalarBroadcastSubtraction.new CPUBiFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total14 of 140%0 of 0n/a555555
{...}60%n/a111111
invoke(double, double)20%n/a111111
invoke(float, float)20%n/a111111
invoke(int, int)20%n/a111111
invoke(long, long)20%n/a111111
\ No newline at end of file +CPUScalarBroadcastSubtraction.new CPUBiFun() {...}

CPUScalarBroadcastSubtraction.new CPUBiFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total14 of 140%0 of 0n/a555555
{...}60%n/a111111
invoke(double, double)20%n/a111111
invoke(float, float)20%n/a111111
invoke(int, int)20%n/a111111
invoke(long, long)20%n/a111111
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CPUScalarBroadcastSubtraction$3.html b/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CPUScalarBroadcastSubtraction$3.html index b532b9e48..88ddd0465 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CPUScalarBroadcastSubtraction$3.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CPUScalarBroadcastSubtraction$3.html @@ -1 +1 @@ -CPUScalarBroadcastSubtraction.new CPUBiFun() {...}

CPUScalarBroadcastSubtraction.new CPUBiFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total14 of 140%0 of 0n/a555555
{...}60%n/a111111
invoke(double, double)20%n/a111111
invoke(float, float)20%n/a111111
invoke(int, int)20%n/a111111
invoke(long, long)20%n/a111111
\ No newline at end of file +CPUScalarBroadcastSubtraction.new CPUBiFun() {...}

CPUScalarBroadcastSubtraction.new CPUBiFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total14 of 140%0 of 0n/a555555
{...}60%n/a111111
invoke(double, double)20%n/a111111
invoke(float, float)20%n/a111111
invoke(int, int)20%n/a111111
invoke(long, long)20%n/a111111
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CPUScalarBroadcastSubtraction.html b/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CPUScalarBroadcastSubtraction.html index fb8331e7a..d808ab320 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CPUScalarBroadcastSubtraction.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CPUScalarBroadcastSubtraction.html @@ -1 +1 @@ -CPUScalarBroadcastSubtraction

CPUScalarBroadcastSubtraction

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total15 of 1816%0 of 0n/a343434
_getFun()50%n/a111111
_getDeriveAt0()50%n/a111111
_getDeriveAt1()50%n/a111111
CPUScalarBroadcastSubtraction()3100%n/a010101
\ No newline at end of file +CPUScalarBroadcastSubtraction

CPUScalarBroadcastSubtraction

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total15 of 1816%0 of 0n/a343434
_getFun()50%n/a111111
_getDeriveAt0()50%n/a111111
_getDeriveAt1()50%n/a111111
CPUScalarBroadcastSubtraction()3100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CPUScalarBroadcastSubtraction.java.html b/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CPUScalarBroadcastSubtraction.java.html index 0ad53ef7f..76c779150 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CPUScalarBroadcastSubtraction.java.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/CPUScalarBroadcastSubtraction.java.html @@ -34,4 +34,4 @@ }; } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/index.html b/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/index.html index 94f1623f0..5e6d548ac 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/index.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/index.html @@ -1 +1 @@ -neureka.backend.main.implementations.broadcast

neureka.backend.main.implementations.broadcast

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total: 1,275 of 3,546 instructions missed (64% cov.), 60 of 207 branches missed (71% cov.), Cxty 206/384 missed, Lines 195/670, Methods 164/278, Classes 27/68
CLScalarBroadcastSubtraction737%40%3411131201
CLScalarBroadcastModulo727%40%3411131201
CLScalarBroadcastPower727%40%3411131201
CLScalarBroadcastDivision727%40%3410121201
CPUScalaBroadcastPower.new CPUBiFun() {...}510%n/a55555511
CLScalarBroadcast4911369%1685%21112311501
CPUScalaBroadcastPower.new CPUBiFun() {...}470%n/a55555511
CPUScalarBroadcastDivision.new CPUBiFun() {...}440%n/a55555511
CPUScalarBroadcastModulo.new CPUBiFun() {...}440%n/a55555511
CPUBroadcast3572795%108088%105321600801
CPUBroadcastSummation.new CPUBiFun() {...}350%40%99777711
CPUScalarBroadcastAddition322543%3350%594102601
CPUScalaBroadcastPower.new CPUBiFun() {...}320%n/a55555511
CPUScalarBroadcastDivision.new CPUBiFun() {...}270%n/a55555511
CPUBroadcastSummation.new CPUBiFun() {...}270%40%99777711
CPUBroadcastSummation.new CPUBiFun() {...}270%40%99777711
CPUScalarBroadcastModulo.new CPUBiFun() {...}270%n/a55555511
CPUBroadcastPower.new CPUBiFun() {...}260%n/a33333311
CLScalarBroadcastAddition246873%3350%362190301
CPUBroadcastPower.new CPUBiFun() {...}240%n/a33333311
CPUBroadcastModulo.new CPUBiFun() {...}230%n/a33333311
CPUBroadcastDivision.new CPUBiFun() {...}230%n/a33333311
CPUScalarBroadcastDivision.new CPUBiFun() {...}220%n/a55555511
CPUScalarBroadcastSubtraction.new CPUBiFun() {...}220%n/a55555511
CPUScalarBroadcastModulo.new CPUBiFun() {...}220%n/a55555511
CPUScalarBroadcast1875197%34193%334115201201
CPUBroadcastSummation180%n/a44444411
CPUScalarBroadcastAddition.new CPUBiFun() {...}180%n/a77777711
CPUScalarBroadcastSubtraction1516%n/a34343401
CPUScalarBroadcastDivision1516%n/a34343401
CPUScalaBroadcastPower1516%n/a34343401
CPUScalarBroadcastModulo1516%n/a34343401
CPUScalarBroadcastMultiplication144174%1583%392102601
CPUBroadcastModulo.new CPUBiFun() {...}140%n/a33333311
CPUBroadcastDivision.new CPUBiFun() {...}140%n/a33333311
CPUScalarBroadcastMultiplication.new CPUBiFun() {...}140%n/a55555511
CPUScalarBroadcastMultiplication.new CPUBiFun() {...}140%n/a55555511
CPUScalarBroadcastSubtraction.new CPUBiFun() {...}140%n/a55555511
CPUScalarBroadcastSubtraction.new CPUBiFun() {...}140%n/a55555511
CPUScalarBroadcastAddition.new CPUBiFun() {...}132262%40%49272701
CPUBroadcastAddition.new CPUBiFun() {...}131451%40%47252501
CPUBroadcastPower10844%n/a24242401
CPUBroadcastModulo10844%n/a24242401
CPUBroadcastDivision10844%n/a24242401
CPUBroadcastMultiplication10844%n/a24242401
CPUBroadcastMultiplication.new CPUBiFun() {...}100%n/a33333311
CPUBroadcastMultiplication.new CPUBiFun() {...}100%n/a33333311
CLBroadcast8193%2250%250150301
CPUScalarBroadcastIdentity857%n/a24242401
CPUScalarBroadcastMultiplication.new CPUBiFun() {...}1881%n/a15151501
CPUBroadcastPower.new CPUBiFun() {...}1376%n/a13131301
CPUBroadcastModulo.new CPUBiFun() {...}1071%n/a13131301
CLScalarBroadcastMultiplication10199%1787%160170201
CLScalarBroadcastIdentity51100%n/a020130201
CPUScalarBroadcastIdentity.new CPUBiFun() {...}22100%n/a09090901
CPUBroadcastSubtraction18100%n/a04040401
CPUBroadcastDivision.new CPUBiFun() {...}14100%n/a03030301
CPUBroadcastAddition14100%n/a04040401
CPUBroadcastSubtraction.new CPUBiFun() {...}14100%n/a03030301
CPUBroadcastSubtraction.new CPUBiFun() {...}14100%n/a03030301
CPUBroadcastSubtraction.new CPUBiFun() {...}14100%n/a03030301
CPUBroadcastMultiplication.new CPUBiFun() {...}14100%n/a03030301
CLBroadcastDivision100%n/a01020101
CLBroadcastModulo100%n/a01020101
CLBroadcastSubtraction100%n/a01020101
CLBroadcastAddition100%n/a01020101
CLBroadcastPower100%n/a01020101
CLBroadcastMultiplication100%n/a01020101
\ No newline at end of file +neureka.backend.main.implementations.broadcast

neureka.backend.main.implementations.broadcast

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total: 1,749 of 3,546 instructions missed (50% cov.), 78 of 207 branches missed (62% cov.), Cxty 238/384 missed, Lines 296/670, Methods 188/278, Classes 42/68
CLScalarBroadcast1620%70%111131315511
CLScalarBroadcastMultiplication1020%80%6617172211
CLScalarBroadcastAddition920%60%6619193311
CLBroadcast870%40%5515153311
CLScalarBroadcastSubtraction790%40%4413132211
CLScalarBroadcastModulo780%40%4413132211
CLScalarBroadcastPower780%40%4413132211
CLScalarBroadcastDivision780%40%4412122211
CPUScalaBroadcastPower.new CPUBiFun() {...}510%n/a55555511
CLScalarBroadcastIdentity510%n/a2213132211
CPUScalaBroadcastPower.new CPUBiFun() {...}470%n/a55555511
CPUScalarBroadcastDivision.new CPUBiFun() {...}440%n/a55555511
CPUScalarBroadcastModulo.new CPUBiFun() {...}440%n/a55555511
CPUBroadcast3572795%108088%105321600801
CPUBroadcastSummation.new CPUBiFun() {...}350%40%99777711
CPUScalarBroadcastAddition322543%3350%594102601
CPUScalaBroadcastPower.new CPUBiFun() {...}320%n/a55555511
CPUScalarBroadcastDivision.new CPUBiFun() {...}270%n/a55555511
CPUBroadcastSummation.new CPUBiFun() {...}270%40%99777711
CPUBroadcastSummation.new CPUBiFun() {...}270%40%99777711
CPUScalarBroadcastModulo.new CPUBiFun() {...}270%n/a55555511
CPUBroadcastPower.new CPUBiFun() {...}260%n/a33333311
CPUBroadcastPower.new CPUBiFun() {...}240%n/a33333311
CPUBroadcastModulo.new CPUBiFun() {...}230%n/a33333311
CPUBroadcastDivision.new CPUBiFun() {...}230%n/a33333311
CPUScalarBroadcastDivision.new CPUBiFun() {...}220%n/a55555511
CPUScalarBroadcastSubtraction.new CPUBiFun() {...}220%n/a55555511
CPUScalarBroadcastModulo.new CPUBiFun() {...}220%n/a55555511
CPUScalarBroadcast1875197%34193%334115201201
CPUBroadcastSummation180%n/a44444411
CPUScalarBroadcastAddition.new CPUBiFun() {...}180%n/a77777711
CPUScalarBroadcastSubtraction1516%n/a34343401
CPUScalarBroadcastDivision1516%n/a34343401
CPUScalaBroadcastPower1516%n/a34343401
CPUScalarBroadcastModulo1516%n/a34343401
CPUScalarBroadcastMultiplication144174%1583%392102601
CPUBroadcastModulo.new CPUBiFun() {...}140%n/a33333311
CPUBroadcastDivision.new CPUBiFun() {...}140%n/a33333311
CPUScalarBroadcastMultiplication.new CPUBiFun() {...}140%n/a55555511
CPUScalarBroadcastMultiplication.new CPUBiFun() {...}140%n/a55555511
CPUScalarBroadcastSubtraction.new CPUBiFun() {...}140%n/a55555511
CPUScalarBroadcastSubtraction.new CPUBiFun() {...}140%n/a55555511
CPUScalarBroadcastAddition.new CPUBiFun() {...}132262%40%49272701
CPUBroadcastAddition.new CPUBiFun() {...}131451%40%47252501
CPUBroadcastPower10844%n/a24242401
CPUBroadcastModulo10844%n/a24242401
CPUBroadcastDivision10844%n/a24242401
CPUBroadcastMultiplication10844%n/a24242401
CPUBroadcastMultiplication.new CPUBiFun() {...}100%n/a33333311
CPUBroadcastMultiplication.new CPUBiFun() {...}100%n/a33333311
CPUScalarBroadcastIdentity857%n/a24242401
CLBroadcastDivision0%n/a11221111
CLBroadcastModulo0%n/a11221111
CLBroadcastSubtraction0%n/a11221111
CLBroadcastAddition0%n/a11221111
CLBroadcastPower0%n/a11221111
CLBroadcastMultiplication0%n/a11221111
CPUScalarBroadcastMultiplication.new CPUBiFun() {...}1881%n/a15151501
CPUBroadcastPower.new CPUBiFun() {...}1376%n/a13131301
CPUBroadcastModulo.new CPUBiFun() {...}1071%n/a13131301
CPUScalarBroadcastIdentity.new CPUBiFun() {...}22100%n/a09090901
CPUBroadcastSubtraction18100%n/a04040401
CPUBroadcastDivision.new CPUBiFun() {...}14100%n/a03030301
CPUBroadcastAddition14100%n/a04040401
CPUBroadcastSubtraction.new CPUBiFun() {...}14100%n/a03030301
CPUBroadcastSubtraction.new CPUBiFun() {...}14100%n/a03030301
CPUBroadcastSubtraction.new CPUBiFun() {...}14100%n/a03030301
CPUBroadcastMultiplication.new CPUBiFun() {...}14100%n/a03030301
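The flattened rows above follow JaCoCo's column order: Element, Missed Instructions, Cov., Missed Branches, Cov., then missed/total pairs for Cxty, Lines, Methods and, on package pages, Classes. The Cov. percentage is the covered share of the total, so the package totals can be sanity-checked directly (worked check, not part of the generated report):

    cov = (total - missed) / total
    before: (3,546 - 1,275) / 3,546 ≈ 0.640 → 64%
    after:  (3,546 - 1,749) / 3,546 ≈ 0.507 → 50%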
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/index.source.html b/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/index.source.html index cfd8e596a..15425edf5 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/index.source.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.broadcast/index.source.html @@ -1 +1 @@ -neureka.backend.main.implementations.broadcast

neureka.backend.main.implementations.broadcast

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total: 1,275 of 3,546 instructions missed (64% cov.), 60 of 207 branches missed (71% cov.), Cxty 206/384 missed, Lines 195/670, Methods 164/278, Classes 27/68
CPUScalaBroadcastPower.java1452%n/a18191516181934
CPUScalarBroadcastDivision.java1082%n/a18191516181934
CPUScalarBroadcastModulo.java1082%n/a18191516181934
CPUBroadcastSummation.java1070%120%31312222252544
CLScalarBroadcastSubtraction.java737%40%3411131201
CLScalarBroadcastModulo.java727%40%3411131201
CLScalarBroadcastDivision.java727%40%3410121201
CLScalarBroadcastPower.java727%40%3411131201
CPUScalarBroadcastSubtraction.java654%n/a18191516181934
CPUBroadcastPower.java642124%n/a91371091324
CPUScalarBroadcastAddition.java634742%7330%16251222112013
CPUBroadcastModulo.java511826%n/a91371091324
CLScalarBroadcast.java4911369%1685%21112311501
CPUBroadcastDivision.java472231%n/a81361081324
CPUScalarBroadcastMultiplication.java465956%1583%14241122132124
CPUBroadcast.java3572795%108088%105321600801
CPUBroadcastMultiplication.java302242%n/a81361081324
CLScalarBroadcastAddition.java246873%3350%362190301
CPUScalarBroadcast.java1875197%34193%334115201201
CPUBroadcastAddition.java132868%40%411282902
CLBroadcast.java8193%2250%250150301
CPUScalarBroadcastIdentity.java3083%n/a21321221302
CLScalarBroadcastMultiplication.java10199%1787%160170201
CPUBroadcastSubtraction.java60100%n/a01301001304
CLScalarBroadcastIdentity.java51100%n/a020130201
CLBroadcastPower.java100%n/a01020101
CLBroadcastSubtraction.java100%n/a01020101
CLBroadcastModulo.java100%n/a01020101
CLBroadcastMultiplication.java100%n/a01020101
CLBroadcastDivision.java100%n/a01020101
CLBroadcastAddition.java100%n/a01020101
\ No newline at end of file +neureka.backend.main.implementations.broadcast

neureka.backend.main.implementations.broadcast

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total: 1,749 of 3,546 instructions missed (50% cov.), 78 of 207 branches missed (62% cov.), Cxty 238/384 missed, Lines 296/670, Methods 188/278, Classes 42/68
CLScalarBroadcast.java1620%70%111131315511
CPUScalaBroadcastPower.java1452%n/a18191516181934
CPUScalarBroadcastDivision.java1082%n/a18191516181934
CPUScalarBroadcastModulo.java1082%n/a18191516181934
CPUBroadcastSummation.java1070%120%31312222252544
CLScalarBroadcastMultiplication.java1020%80%6617172211
CLScalarBroadcastAddition.java920%60%6619193311
CLBroadcast.java870%40%5515153311
CLScalarBroadcastSubtraction.java790%40%4413132211
CLScalarBroadcastPower.java780%40%4413132211
CLScalarBroadcastModulo.java780%40%4413132211
CLScalarBroadcastDivision.java780%40%4412122211
CPUScalarBroadcastSubtraction.java654%n/a18191516181934
CPUBroadcastPower.java642124%n/a91371091324
CPUScalarBroadcastAddition.java634742%7330%16251222112013
CPUBroadcastModulo.java511826%n/a91371091324
CLScalarBroadcastIdentity.java510%n/a2213132211
CPUBroadcastDivision.java472231%n/a81361081324
CPUScalarBroadcastMultiplication.java465956%1583%14241122132124
CPUBroadcast.java3572795%108088%105321600801
CPUBroadcastMultiplication.java302242%n/a81361081324
CPUScalarBroadcast.java1875197%34193%334115201201
CPUBroadcastAddition.java132868%40%411282902
CPUScalarBroadcastIdentity.java3083%n/a21321221302
CLBroadcastSubtraction.java0%n/a11221111
CLBroadcastDivision.java0%n/a11221111
CLBroadcastPower.java0%n/a11221111
CLBroadcastModulo.java0%n/a11221111
CLBroadcastMultiplication.java0%n/a11221111
CLBroadcastAddition.java0%n/a11221111
CPUBroadcastSubtraction.java60100%n/a01301001304
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.convolution/AbstractCPUConvolution.html b/docs/coverage/test/html/neureka.backend.main.implementations.convolution/AbstractCPUConvolution.html index dddffffbb..f710f2ea2 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.convolution/AbstractCPUConvolution.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.convolution/AbstractCPUConvolution.html @@ -1 +1 @@ -AbstractCPUConvolution

AbstractCPUConvolution

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total160 of 1,30387%31 of 16481%259529261013
_deConvolve64(Tensor, Tensor, Tensor, int, int, CPUBiFun)7723975%123473%924136401
_deConvolve32(Tensor, Tensor, Tensor, int, int, CPUBiFun)6523077%93177%621136101
_workloadFor(ExecutionCall)147383%2880%2611801
_convolve64(Tensor, Tensor, Tensor, int, int, CPUBiFun)26099%42887%41715301
_convolve32(Tensor, Tensor, Tensor, int, int, CPUBiFun)26099%42887%41715301
run(ExecutionCall)32100%4100%030501
_doNDConvolutionFor(ExecutionCall)13100%n/a010601
lambda$_workloadFor$3(Tensor, Tensor, Tensor, CPUBiFun, int, int)8100%n/a010101
lambda$_workloadFor$2(Tensor, Tensor, Tensor, CPUBiFun, int, int)8100%n/a010101
lambda$_workloadFor$1(Tensor, Tensor, Tensor, CPUBiFun, int, int)8100%n/a010101
lambda$_workloadFor$0(Tensor, Tensor, Tensor, CPUBiFun, int, int)8100%n/a010101
AbstractCPUConvolution()3100%n/a010101
static {...}100%n/a010101
\ No newline at end of file +AbstractCPUConvolution

AbstractCPUConvolution

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total160 of 1,30387%31 of 16481%259529261013
_deConvolve64(Tensor, Tensor, Tensor, int, int, CPUBiFun)7723975%123473%924136401
_deConvolve32(Tensor, Tensor, Tensor, int, int, CPUBiFun)6523077%93177%621136101
_workloadFor(ExecutionCall)147383%2880%2611801
_convolve64(Tensor, Tensor, Tensor, int, int, CPUBiFun)26099%42887%41715301
_convolve32(Tensor, Tensor, Tensor, int, int, CPUBiFun)26099%42887%41715301
run(ExecutionCall)32100%4100%030501
_doNDConvolutionFor(ExecutionCall)13100%n/a010601
lambda$_workloadFor$3(Tensor, Tensor, Tensor, CPUBiFun, int, int)8100%n/a010101
lambda$_workloadFor$2(Tensor, Tensor, Tensor, CPUBiFun, int, int)8100%n/a010101
lambda$_workloadFor$1(Tensor, Tensor, Tensor, CPUBiFun, int, int)8100%n/a010101
lambda$_workloadFor$0(Tensor, Tensor, Tensor, CPUBiFun, int, int)8100%n/a010101
AbstractCPUConvolution()3100%n/a010101
static {...}100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.convolution/AbstractCPUConvolution.java.html b/docs/coverage/test/html/neureka.backend.main.implementations.convolution/AbstractCPUConvolution.java.html index 0fe996737..20ad1daa6 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.convolution/AbstractCPUConvolution.java.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.convolution/AbstractCPUConvolution.java.html @@ -408,4 +408,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.convolution/CLConvolution.html b/docs/coverage/test/html/neureka.backend.main.implementations.convolution/CLConvolution.html index 973b36230..455219452 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.convolution/CLConvolution.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.convolution/CLConvolution.html @@ -1 +1 @@ -CLConvolution

CLConvolution

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total6 of 8793%2 of 450%2501503
lambda$new$0(ExecutionCall)66191%2250%2301101
CLConvolution(String)13100%n/a010301
lambda$new$1(KernelCode)7100%n/a010101
\ No newline at end of file +CLConvolution

CLConvolution

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total87 of 870%4 of 40%55151533
lambda$new$0(ExecutionCall)670%40%33111111
CLConvolution(String)130%n/a113311
lambda$new$1(KernelCode)70%n/a111111
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.convolution/CLConvolution.java.html b/docs/coverage/test/html/neureka.backend.main.implementations.convolution/CLConvolution.java.html index 6223120d2..2d5615d57 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.convolution/CLConvolution.java.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.convolution/CLConvolution.java.html @@ -8,28 +8,28 @@ public class CLConvolution extends ParsedCLImplementation { public CLConvolution( String id ) { - super( call -> { - int offset = ( call.input( Number.class, 0 ) != null ) ? 0 : 1; - int gwz = ( call.input( Number.class, 0 ) != null ) ? call.input( Number.class, 0 ).size() : call.input( Number.class, 1 ).size(); - call.getDevice() - .getKernel(call) - .passAllOf( call.input( Number.class, offset ) ) - .passAllOf( call.input( Number.class, offset + 1 ) ) - .passAllOf( call.input( Number.class, offset + 2 ) ) - .pass( call.input( Number.class, 0 ).rank() ) - .pass( call.getValOf( Arg.DerivIdx.class ) ) - .call( gwz ); + super( call -> { + int offset = ( call.input( Number.class, 0 ) != null ) ? 0 : 1; + int gwz = ( call.input( Number.class, 0 ) != null ) ? call.input( Number.class, 0 ).size() : call.input( Number.class, 1 ).size(); + call.getDevice() + .getKernel(call) + .passAllOf( call.input( Number.class, offset ) ) + .passAllOf( call.input( Number.class, offset + 1 ) ) + .passAllOf( call.input( Number.class, offset + 2 ) ) + .pass( call.input( Number.class, 0 ).rank() ) + .pass( call.getValOf( Arg.DerivIdx.class ) ) + .call( gwz ); - return call.input( 0 ); + return call.input( 0 ); }, 3, - Neureka.get().utility().readResource("kernels/convolution_template.cl"), + Neureka.get().utility().readResource("kernels/convolution_template.cl"), "value = src1 * src2;\n", "value += handle * drain;\n", id, - kernelCode -> new KernelCode[]{kernelCode} + kernelCode -> new KernelCode[]{kernelCode} ); - } + } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.convolution/CPUConvolution$1.html b/docs/coverage/test/html/neureka.backend.main.implementations.convolution/CPUConvolution$1.html index e06be914b..c70eb364a 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.convolution/CPUConvolution$1.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.convolution/CPUConvolution$1.html @@ -1 +1 @@ -CPUConvolution.new CPUBiFun() {...}

CPUConvolution.new CPUBiFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total8 of 2263%0 of 0n/a252525
invoke(int, int)40%n/a111111
invoke(long, long)40%n/a111111
{...}6100%n/a010101
invoke(double, double)4100%n/a010101
invoke(float, float)4100%n/a010101
\ No newline at end of file +CPUConvolution.new CPUBiFun() {...}

CPUConvolution.new CPUBiFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total8 of 2263%0 of 0n/a252525
invoke(int, int)40%n/a111111
invoke(long, long)40%n/a111111
{...}6100%n/a010101
invoke(double, double)4100%n/a010101
invoke(float, float)4100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.convolution/CPUConvolution.html b/docs/coverage/test/html/neureka.backend.main.implementations.convolution/CPUConvolution.html index ffec6fb20..c9fa945f6 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.convolution/CPUConvolution.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.convolution/CPUConvolution.html @@ -1 +1 @@ -CPUConvolution

CPUConvolution

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 8100%0 of 0n/a020202
_getFun()5100%n/a010101
CPUConvolution()3100%n/a010101
\ No newline at end of file +CPUConvolution

CPUConvolution

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 8100%0 of 0n/a020202
_getFun()5100%n/a010101
CPUConvolution()3100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.convolution/CPUConvolution.java.html b/docs/coverage/test/html/neureka.backend.main.implementations.convolution/CPUConvolution.java.html index d1f3dbfe4..c49a2271b 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.convolution/CPUConvolution.java.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.convolution/CPUConvolution.java.html @@ -15,4 +15,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.convolution/SimpleCPUConvolution$ImplF32.html b/docs/coverage/test/html/neureka.backend.main.implementations.convolution/SimpleCPUConvolution$ImplF32.html index f8c2c2ec6..4f6aa50cc 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.convolution/SimpleCPUConvolution$ImplF32.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.convolution/SimpleCPUConvolution$ImplF32.html @@ -1 +1 @@ -SimpleCPUConvolution.ImplF32

SimpleCPUConvolution.ImplF32

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total12 of 16992%2 of 1485%21022903
run()72275%1375%131501
SimpleCPUConvolution.ImplF32(float[], float[], float[], int, int, int, int, int, int, int)54489%1150%1211401
run(int)91100%8100%0501001
\ No newline at end of file +SimpleCPUConvolution.ImplF32

SimpleCPUConvolution.ImplF32

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total12 of 16992%2 of 1485%21022903
run()72275%1375%131501
SimpleCPUConvolution.ImplF32(float[], float[], float[], int, int, int, int, int, int, int)54489%1150%1211401
run(int)91100%8100%0501001
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.convolution/SimpleCPUConvolution$ImplF64.html b/docs/coverage/test/html/neureka.backend.main.implementations.convolution/SimpleCPUConvolution$ImplF64.html index f1a8c45b6..5473ee397 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.convolution/SimpleCPUConvolution$ImplF64.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.convolution/SimpleCPUConvolution$ImplF64.html @@ -1 +1 @@ -SimpleCPUConvolution.ImplF64

SimpleCPUConvolution.ImplF64

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total12 of 16992%2 of 1485%21022903
run()72275%1375%131501
SimpleCPUConvolution.ImplF64(double[], double[], double[], int, int, int, int, int, int, int)54489%1150%1211401
run(int)91100%8100%0501001
\ No newline at end of file +SimpleCPUConvolution.ImplF64

SimpleCPUConvolution.ImplF64

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total12 of 16992%2 of 1485%21022903
run()72275%1375%131501
SimpleCPUConvolution.ImplF64(double[], double[], double[], int, int, int, int, int, int, int)54489%1150%1211401
run(int)91100%8100%0501001
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.convolution/SimpleCPUConvolution.html b/docs/coverage/test/html/neureka.backend.main.implementations.convolution/SimpleCPUConvolution.html index 4e5cffe35..891d219a7 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.convolution/SimpleCPUConvolution.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.convolution/SimpleCPUConvolution.html @@ -1 +1 @@ -SimpleCPUConvolution

SimpleCPUConvolution

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total10 of 24695%3 of 3090%32015005
_tryCreatingImplFor(Tensor, Tensor, Tensor)517197%21688%21013201
run()5758%1150%120301
validate(Tensor)29100%8100%050601
SimpleCPUConvolution(Tensor, Tensor, Tensor)22100%n/a010801
isSuitable()7100%2100%020101
\ No newline at end of file +SimpleCPUConvolution

SimpleCPUConvolution

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total10 of 24695%3 of 3090%32015005
_tryCreatingImplFor(Tensor, Tensor, Tensor)517197%21688%21013201
run()5758%1150%120301
validate(Tensor)29100%8100%050601
SimpleCPUConvolution(Tensor, Tensor, Tensor)22100%n/a010801
isSuitable()7100%2100%020101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.convolution/SimpleCPUConvolution.java.html b/docs/coverage/test/html/neureka.backend.main.implementations.convolution/SimpleCPUConvolution.java.html index 57ce98d42..ab6c5e354 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.convolution/SimpleCPUConvolution.java.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.convolution/SimpleCPUConvolution.java.html @@ -233,4 +233,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.convolution/index.html b/docs/coverage/test/html/neureka.backend.main.implementations.convolution/index.html index bf8a1d64f..2471f85fa 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.convolution/index.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.convolution/index.html @@ -1 +1 @@ -neureka.backend.main.implementations.convolution

neureka.backend.main.implementations.convolution

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total: 208 of 2,004 instructions missed (89% cov.), 40 of 226 branches missed (82% cov.), Cxty 36/147 missed, Lines 36/390, Methods 2/34, Classes 0/7
AbstractCPUConvolution1601,14387%3113381%25952926101301
SimpleCPUConvolution.ImplF641215792%21285%2102290301
SimpleCPUConvolution.ImplF321215792%21285%2102290301
SimpleCPUConvolution23695%32790%3201500501
CPUConvolution.new CPUBiFun() {...}1463%n/a25252501
CLConvolution8193%2250%250150301
CPUConvolution100%n/a02020201
\ No newline at end of file +neureka.backend.main.implementations.convolution

neureka.backend.main.implementations.convolution

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total: 289 of 2,004 instructions missed (85% cov.), 42 of 226 branches missed (81% cov.), Cxty 39/147 missed, Lines 51/390, Methods 5/34, Classes 1/7
AbstractCPUConvolution1601,14387%3113381%25952926101301
CLConvolution870%40%5515153311
SimpleCPUConvolution.ImplF641215792%21285%2102290301
SimpleCPUConvolution.ImplF321215792%21285%2102290301
SimpleCPUConvolution23695%32790%3201500501
CPUConvolution.new CPUBiFun() {...}1463%n/a25252501
CPUConvolution100%n/a02020201
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.convolution/index.source.html b/docs/coverage/test/html/neureka.backend.main.implementations.convolution/index.source.html index 364b6628a..35972a6c5 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.convolution/index.source.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.convolution/index.source.html @@ -1 +1 @@ -neureka.backend.main.implementations.convolution

neureka.backend.main.implementations.convolution

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total: 208 of 2,004 instructions missed (89% cov.), 40 of 226 branches missed (82% cov.), Cxty 36/147 missed, Lines 36/390, Methods 2/34, Classes 0/7
AbstractCPUConvolution.java1601,14387%3113381%25952926101301
SimpleCPUConvolution.java3455094%75187%740510801103
CPUConvolution.java2273%n/a27262702
CLConvolution.java8193%2250%250150301
\ No newline at end of file +neureka.backend.main.implementations.convolution

neureka.backend.main.implementations.convolution

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total: 289 of 2,004 instructions missed (85% cov.), 42 of 226 branches missed (81% cov.), Cxty 39/147 missed, Lines 51/390, Methods 5/34, Classes 1/7
AbstractCPUConvolution.java1601,14387%3113381%25952926101301
CLConvolution.java870%40%5515153311
SimpleCPUConvolution.java3455094%75187%740510801103
CPUConvolution.java2273%n/a27262702
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CLBiElementwise.html b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CLBiElementwise.html index 9bb570e9c..017ab5544 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CLBiElementwise.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CLBiElementwise.html @@ -1 +1 @@ -CLBiElementwise

CLBiElementwise

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total6 of 8592%2 of 450%2501503
lambda$new$0(ExecutionCall)65990%2250%2301101
CLBiElementwise(String, String, String)13100%n/a010301
lambda$new$1(KernelCode)7100%n/a010101
\ No newline at end of file +CLBiElementwise

CLBiElementwise

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total85 of 850%4 of 40%55151533
lambda$new$0(ExecutionCall)650%40%33111111
CLBiElementwise(String, String, String)130%n/a113311
lambda$new$1(KernelCode)70%n/a111111
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CLBiElementwise.java.html b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CLBiElementwise.java.html index 667b05fff..8728a2bc3 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CLBiElementwise.java.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CLBiElementwise.java.html @@ -7,28 +7,28 @@ public class CLBiElementwise extends ParsedCLImplementation { public CLBiElementwise( String postfix, String activationSource, String differentiationSource ) { - super( + super( call -> { - int offset = (call.input( Number.class, 0 ) != null) ? 0 : 1; - int gwz = (call.input( Number.class, 0 ) != null) ? call.input( Number.class, 0 ).size() : call.input( Number.class, 1 ).size(); - call.getDevice() - .getKernel(call) - .passAllOf( call.input( Number.class, offset ) ) - .passAllOf( call.input( Number.class, offset + 1 ) ) - .passAllOf( call.input( Number.class, offset + 2 ) ) - .pass( call.input( Number.class, 0 ).rank() ) - .pass( call.getDerivativeIndex() ) - .call( gwz ); + int offset = (call.input( Number.class, 0 ) != null) ? 0 : 1; + int gwz = (call.input( Number.class, 0 ) != null) ? call.input( Number.class, 0 ).size() : call.input( Number.class, 1 ).size(); + call.getDevice() + .getKernel(call) + .passAllOf( call.input( Number.class, offset ) ) + .passAllOf( call.input( Number.class, offset + 1 ) ) + .passAllOf( call.input( Number.class, offset + 2 ) ) + .pass( call.input( Number.class, 0 ).rank() ) + .pass( call.getDerivativeIndex() ) + .call( gwz ); - return call.input( 0 ); + return call.input( 0 ); }, -1, - Neureka.get().utility().readResource("kernels/elementwise_template.cl"), + Neureka.get().utility().readResource("kernels/elementwise_template.cl"), activationSource, differentiationSource, postfix, - kernelCode -> new KernelCode[]{kernelCode} + kernelCode -> new KernelCode[]{kernelCode} ); - } + } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CLBiElementwiseAddition.html b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CLBiElementwiseAddition.html index ecc9fb962..51e4b0ee8 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CLBiElementwiseAddition.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CLBiElementwiseAddition.html @@ -1 +1 @@ -CLBiElementwiseAddition

CLBiElementwiseAddition

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 6100%0 of 0n/a010201
CLBiElementwiseAddition(String)6100%n/a010201
\ No newline at end of file +CLBiElementwiseAddition

CLBiElementwiseAddition

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total6 of 60%0 of 0n/a112211
CLBiElementwiseAddition(String)60%n/a112211
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CLBiElementwiseAddition.java.html b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CLBiElementwiseAddition.java.html index 305126e18..6d5eab99b 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CLBiElementwiseAddition.java.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CLBiElementwiseAddition.java.html @@ -3,11 +3,11 @@ public class CLBiElementwiseAddition extends CLBiElementwise { public CLBiElementwiseAddition(String postfix) { - super( + super( postfix, "output = input1 + input2;\n", "output = 1;\n" ); - } + } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CLBiElementwiseDivision.html b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CLBiElementwiseDivision.html index f33d3e987..8991e0dd7 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CLBiElementwiseDivision.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CLBiElementwiseDivision.html @@ -1 +1 @@ -CLBiElementwiseDivision

CLBiElementwiseDivision

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 6100%0 of 0n/a010201
CLBiElementwiseDivision(String)6100%n/a010201
\ No newline at end of file +CLBiElementwiseDivision

CLBiElementwiseDivision

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total6 of 60%0 of 0n/a112211
CLBiElementwiseDivision(String)60%n/a112211
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CLBiElementwiseDivision.java.html b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CLBiElementwiseDivision.java.html index d1128f9a4..353f598a4 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CLBiElementwiseDivision.java.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CLBiElementwiseDivision.java.html @@ -3,11 +3,11 @@ public class CLBiElementwiseDivision extends CLBiElementwise { public CLBiElementwiseDivision(String postfix) { - super( + super( postfix, "output = input1 / input2;\n", "output = ( d == 0 ? 1 / input2 : -input2 / (float)pow(input1, 2.0f) ); \n" ); - } + } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CLBiElementwiseModulo.html b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CLBiElementwiseModulo.html index 35a8deee6..310301619 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CLBiElementwiseModulo.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CLBiElementwiseModulo.html @@ -1 +1 @@ -CLBiElementwiseModulo

CLBiElementwiseModulo

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 6100%0 of 0n/a010201
CLBiElementwiseModulo(String)6100%n/a010201
\ No newline at end of file +CLBiElementwiseModulo

CLBiElementwiseModulo

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total6 of 60%0 of 0n/a112211
CLBiElementwiseModulo(String)60%n/a112211
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CLBiElementwiseModulo.java.html b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CLBiElementwiseModulo.java.html index 0c9b2fc46..cc6c2019c 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CLBiElementwiseModulo.java.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CLBiElementwiseModulo.java.html @@ -3,11 +3,11 @@ public class CLBiElementwiseModulo extends CLBiElementwise { public CLBiElementwiseModulo(String postfix) { - super( + super( postfix, "output = ((int)input1) % ((int)input2);\n", "output = ( d == 0 ? 1/input2 : -input2 / (float) pow(input1, 2.0f) );\n" ); - } + } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CLBiElementwiseMultiplication.html b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CLBiElementwiseMultiplication.html index 7eba44d56..c48223586 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CLBiElementwiseMultiplication.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CLBiElementwiseMultiplication.html @@ -1 +1 @@ -CLBiElementwiseMultiplication

CLBiElementwiseMultiplication

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 6100%0 of 0n/a010201
CLBiElementwiseMultiplication(String)6100%n/a010201
\ No newline at end of file +CLBiElementwiseMultiplication

CLBiElementwiseMultiplication

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total6 of 60%0 of 0n/a112211
CLBiElementwiseMultiplication(String)60%n/a112211
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CLBiElementwiseMultiplication.java.html b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CLBiElementwiseMultiplication.java.html index 42a4d69fc..58181d8df 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CLBiElementwiseMultiplication.java.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CLBiElementwiseMultiplication.java.html @@ -3,11 +3,11 @@ public class CLBiElementwiseMultiplication extends CLBiElementwise { public CLBiElementwiseMultiplication(String postfix) { - super( + super( postfix, "output = input1 * input2;\n", "output = input2;\n" ); - } + } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CLBiElementwisePower.html b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CLBiElementwisePower.html index 3cbd3cb30..33de0be96 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CLBiElementwisePower.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CLBiElementwisePower.html @@ -1 +1 @@ -CLBiElementwisePower

CLBiElementwisePower

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 6100%0 of 0n/a010201
CLBiElementwisePower(String)6100%n/a010201
\ No newline at end of file +CLBiElementwisePower

CLBiElementwisePower

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total6 of 60%0 of 0n/a112211
CLBiElementwisePower(String)60%n/a112211
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CLBiElementwisePower.java.html b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CLBiElementwisePower.java.html index b743ba297..c2174f73f 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CLBiElementwisePower.java.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CLBiElementwisePower.java.html @@ -3,7 +3,7 @@ public class CLBiElementwisePower extends CLBiElementwise { public CLBiElementwisePower(String postfix) { - super(postfix, + super(postfix, "output = pow(input1, input2);", "if ( d == 0 ) { \n" + " output = input2 * pow(input1, input2-1.0f); \n" + @@ -11,6 +11,6 @@ " output = pow(input1, input2) * log(input1); \n" + "}" ); - } + } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CLBiElementwiseSubtraction.html b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CLBiElementwiseSubtraction.html index b29e50b5a..b9d386b13 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CLBiElementwiseSubtraction.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CLBiElementwiseSubtraction.html @@ -1 +1 @@ -CLBiElementwiseSubtraction

CLBiElementwiseSubtraction

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 6100%0 of 0n/a010201
CLBiElementwiseSubtraction(String)6100%n/a010201
\ No newline at end of file +CLBiElementwiseSubtraction

CLBiElementwiseSubtraction

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total6 of 60%0 of 0n/a112211
CLBiElementwiseSubtraction(String)60%n/a112211
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CLBiElementwiseSubtraction.java.html b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CLBiElementwiseSubtraction.java.html index ca34f6aad..7a1a4bae6 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CLBiElementwiseSubtraction.java.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CLBiElementwiseSubtraction.java.html @@ -3,11 +3,11 @@ public class CLBiElementwiseSubtraction extends CLBiElementwise { public CLBiElementwiseSubtraction(String postfix) { - super( + super( postfix, "output = input1 - input2;\n", "output = 1;\n" ); - } + } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CLElementwiseFunction.html b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CLElementwiseFunction.html index 2f893b65e..b3a1303d5 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CLElementwiseFunction.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CLElementwiseFunction.html @@ -1 +1 @@ -CLElementwiseFunction

CLElementwiseFunction

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total6 of 9993%3 of 650%3601803
_run(ExecutionCall)67092%3350%3401101
CLElementwiseFunction(ScalarFun)16100%n/a010601
lambda$new$0(KernelCode)7100%n/a010101
\ No newline at end of file +CLElementwiseFunction

CLElementwiseFunction

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total99 of 990%6 of 60%66181833
_run(ExecutionCall)760%60%44111111
CLElementwiseFunction(ScalarFun)160%n/a116611
lambda$new$0(KernelCode)70%n/a111111
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CLElementwiseFunction.java.html b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CLElementwiseFunction.java.html index ddfdd11e3..14e392629 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CLElementwiseFunction.java.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CLElementwiseFunction.java.html @@ -13,33 +13,33 @@ { public CLElementwiseFunction( ScalarFun fun ) { - super( + super( CLElementwiseFunction::_run, 2, - Neureka.get().utility().readResource("kernels/activation_template.cl"), - fun.activationCode(), - fun.derivationCode(), - fun.id(), - kernelCode -> new KernelCode[]{kernelCode} + Neureka.get().utility().readResource("kernels/activation_template.cl"), + fun.activationCode(), + fun.derivationCode(), + fun.id(), + kernelCode -> new KernelCode[]{kernelCode} ); - } + } private static Tensor<?> _run(ExecutionCall<OpenCLDevice> call ) { - int offset = call.input( Number.class, 0 ) != null ? 0 : 1; - int gwz = call.input( Number.class, 0 ) != null ? call.input( Number.class, 0 ).size() : call.input( Number.class, 1 ).size(); + int offset = call.input( Number.class, 0 ) != null ? 0 : 1; + int gwz = call.input( Number.class, 0 ) != null ? call.input( Number.class, 0 ).size() : call.input( Number.class, 1 ).size(); // Drain tensor needs to be 'actual'! : - if ( !call.input( Number.class, offset + 1).isVirtual() ) call.input( Number.class, offset).mut().setIsVirtual( false ); - call.getDevice() - .getKernel(call) - .passAllOf( call.input( Number.class, offset ) ) - .passAllOf( call.input( Number.class, offset + 1 ) ) - .pass( call.input( Number.class, 0 ).rank() ) - .pass( call.getValOf( Arg.DerivIdx.class ) ) - .call( gwz ); + if ( !call.input( Number.class, offset + 1).isVirtual() ) call.input( Number.class, offset).mut().setIsVirtual( false ); + call.getDevice() + .getKernel(call) + .passAllOf( call.input( Number.class, offset ) ) + .passAllOf( call.input( Number.class, offset + 1 ) ) + .pass( call.input( Number.class, 0 ).rank() ) + .pass( call.getValOf( Arg.DerivIdx.class ) ) + .call( gwz ); - return call.input( 0 ); + return call.input( 0 ); } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CLRandomization.html b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CLRandomization.html index 8bf7b8883..c7e3a05b7 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CLRandomization.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CLRandomization.html @@ -1 +1 @@ -CLRandomization

CLRandomization

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total5 of 837%0 of 0n/a121212
run(ExecutionCall)50%n/a111111
CLRandomization()3100%n/a010101
\ No newline at end of file +CLRandomization

CLRandomization

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total8 of 80%0 of 0n/a222222
run(ExecutionCall)50%n/a111111
CLRandomization()30%n/a111111
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CLRandomization.java.html b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CLRandomization.java.html index a127251f7..aedc956d5 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CLRandomization.java.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CLRandomization.java.html @@ -5,10 +5,10 @@ import neureka.backend.api.ImplementationFor; import neureka.devices.opencl.OpenCLDevice; -public class CLRandomization implements ImplementationFor<OpenCLDevice> { +public class CLRandomization implements ImplementationFor<OpenCLDevice> { @Override public Tensor<?> run(ExecutionCall<OpenCLDevice> call) { throw new IllegalStateException("Not yet implemented"); } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWise.html b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWise.html index b0bbfda19..891cf37fb 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWise.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWise.html @@ -1 +1 @@ -CPUBiElementWise

CPUBiElementWise

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total118 of 64281%28 of 8667%25595109316
_newWorkloadI32(Tensor, Tensor, Tensor, CPUBiFun)247475%91155%71111401
_newWorkloadF64(Tensor, Tensor, Tensor, CPUBiFun)247375%81260%61111401
_newWorkloadF32(Tensor, Tensor, Tensor, CPUBiFun)247375%91155%71111401
lambda$_newWorkloadI32$6(int[], CPUBiFun, int[], int[], int, int)120%n/a111111
lambda$_newWorkloadF32$3(float[], CPUBiFun, float[], float[], int, int)120%n/a111111
lambda$_newWorkloadF64$0(double[], CPUBiFun, double[], double[], int, int)120%n/a111111
_workloadFor(ExecutionCall)57393%11191%1711301
run(ExecutionCall)52080%1150%121801
lambda$_newWorkloadI32$8(Tensor, Tensor, Tensor, int[], CPUBiFun, int[], int[], int, int)50100%2100%0201301
lambda$_newWorkloadF32$5(Tensor, Tensor, Tensor, float[], CPUBiFun, float[], float[], int, int)50100%2100%0201301
lambda$_newWorkloadF64$2(Tensor, Tensor, Tensor, double[], CPUBiFun, double[], double[], int, int)50100%2100%0201301
lambda$_newWorkloadI32$7(int[], CPUBiFun, int[], int[], int, int)19100%2100%020201
lambda$_newWorkloadF32$4(float[], CPUBiFun, float[], float[], int, int)19100%2100%020201
lambda$_newWorkloadF64$1(double[], CPUBiFun, double[], double[], int, int)19100%2100%020201
CPUBiElementWise()3100%n/a010101
static {...}1100%n/a010101
\ No newline at end of file +CPUBiElementWise

CPUBiElementWise

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total118 of 64281%28 of 8667%25595109316
_newWorkloadI32(Tensor, Tensor, Tensor, CPUBiFun)247475%91155%71111401
_newWorkloadF64(Tensor, Tensor, Tensor, CPUBiFun)247375%81260%61111401
_newWorkloadF32(Tensor, Tensor, Tensor, CPUBiFun)247375%91155%71111401
lambda$_newWorkloadI32$6(int[], CPUBiFun, int[], int[], int, int)120%n/a111111
lambda$_newWorkloadF32$3(float[], CPUBiFun, float[], float[], int, int)120%n/a111111
lambda$_newWorkloadF64$0(double[], CPUBiFun, double[], double[], int, int)120%n/a111111
_workloadFor(ExecutionCall)57393%11191%1711301
run(ExecutionCall)52080%1150%121801
lambda$_newWorkloadI32$8(Tensor, Tensor, Tensor, int[], CPUBiFun, int[], int[], int, int)50100%2100%0201301
lambda$_newWorkloadF32$5(Tensor, Tensor, Tensor, float[], CPUBiFun, float[], float[], int, int)50100%2100%0201301
lambda$_newWorkloadF64$2(Tensor, Tensor, Tensor, double[], CPUBiFun, double[], double[], int, int)50100%2100%0201301
lambda$_newWorkloadI32$7(int[], CPUBiFun, int[], int[], int, int)19100%2100%020201
lambda$_newWorkloadF32$4(float[], CPUBiFun, float[], float[], int, int)19100%2100%020201
lambda$_newWorkloadF64$1(double[], CPUBiFun, double[], double[], int, int)19100%2100%020201
CPUBiElementWise()3100%n/a010101
static {...}1100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWise.java.html b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWise.java.html index 504dff251..d4ac7fc03 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWise.java.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWise.java.html @@ -186,4 +186,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseAddition$1.html b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseAddition$1.html index 237e548fa..5e45ff0b2 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseAddition$1.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseAddition$1.html @@ -1 +1 @@ -CPUBiElementWiseAddition.new CPUBiFun() {...}

CPUBiElementWiseAddition.new CPUBiFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total17 of 3551%4 of 40%593737
invoke(boolean, boolean)80%40%331111
invoke(char, char)50%n/a111111
invoke(long, long)40%n/a111111
{...}6100%n/a010101
invoke(double, double)4100%n/a010101
invoke(float, float)4100%n/a010101
invoke(int, int)4100%n/a010101
\ No newline at end of file +CPUBiElementWiseAddition.new CPUBiFun() {...}

CPUBiElementWiseAddition.new CPUBiFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total17 of 3551%4 of 40%593737
invoke(boolean, boolean)80%40%331111
invoke(char, char)50%n/a111111
invoke(long, long)40%n/a111111
{...}6100%n/a010101
invoke(double, double)4100%n/a010101
invoke(float, float)4100%n/a010101
invoke(int, int)4100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseAddition$2.html b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseAddition$2.html index 911c65c86..08d23480f 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseAddition$2.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseAddition$2.html @@ -1 +1 @@ -CPUBiElementWiseAddition.new CPUBiFun() {...}

CPUBiElementWiseAddition.new CPUBiFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total12 of 1833%0 of 0n/a676767
invoke(double, double)20%n/a111111
invoke(float, float)20%n/a111111
invoke(int, int)20%n/a111111
invoke(long, long)20%n/a111111
invoke(boolean, boolean)20%n/a111111
invoke(char, char)20%n/a111111
{...}6100%n/a010101
\ No newline at end of file +CPUBiElementWiseAddition.new CPUBiFun() {...}

CPUBiElementWiseAddition.new CPUBiFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total12 of 1833%0 of 0n/a676767
invoke(double, double)20%n/a111111
invoke(float, float)20%n/a111111
invoke(int, int)20%n/a111111
invoke(long, long)20%n/a111111
invoke(boolean, boolean)20%n/a111111
invoke(char, char)20%n/a111111
{...}6100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseAddition.html b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseAddition.html index b8caa5cfb..70f056b41 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseAddition.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseAddition.html @@ -1 +1 @@ -CPUBiElementWiseAddition

CPUBiElementWiseAddition

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total3 of 1681%0 of 0n/a141414
_getDeriveAt1()30%n/a111111
_getFun()5100%n/a010101
_getDeriveAt0()5100%n/a010101
CPUBiElementWiseAddition()3100%n/a010101
\ No newline at end of file +CPUBiElementWiseAddition

CPUBiElementWiseAddition

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total3 of 1681%0 of 0n/a141414
_getDeriveAt1()30%n/a111111
_getFun()5100%n/a010101
_getDeriveAt0()5100%n/a010101
CPUBiElementWiseAddition()3100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseAddition.java.html b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseAddition.java.html index 025dab610..6eea7bcd8 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseAddition.java.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseAddition.java.html @@ -34,4 +34,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseDivision$1.html b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseDivision$1.html index e8b87d1d2..d1ee80cd4 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseDivision$1.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseDivision$1.html @@ -1 +1 @@ -CPUBiElementWiseDivision.new CPUBiFun() {...}

CPUBiElementWiseDivision.new CPUBiFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total4 of 2281%0 of 0n/a151515
invoke(long, long)40%n/a111111
{...}6100%n/a010101
invoke(double, double)4100%n/a010101
invoke(float, float)4100%n/a010101
invoke(int, int)4100%n/a010101
\ No newline at end of file +CPUBiElementWiseDivision.new CPUBiFun() {...}

CPUBiElementWiseDivision.new CPUBiFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total4 of 2281%0 of 0n/a151515
invoke(long, long)40%n/a111111
{...}6100%n/a010101
invoke(double, double)4100%n/a010101
invoke(float, float)4100%n/a010101
invoke(int, int)4100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseDivision$2.html b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseDivision$2.html index 61b9d48c8..287679a51 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseDivision$2.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseDivision$2.html @@ -1 +1 @@ -CPUBiElementWiseDivision.new CPUBiFun() {...}

CPUBiElementWiseDivision.new CPUBiFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total21 of 2722%0 of 0n/a454545
invoke(int, int)70%n/a111111
invoke(long, long)60%n/a111111
invoke(double, double)40%n/a111111
invoke(float, float)40%n/a111111
{...}6100%n/a010101
\ No newline at end of file +CPUBiElementWiseDivision.new CPUBiFun() {...}

CPUBiElementWiseDivision.new CPUBiFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total21 of 2722%0 of 0n/a454545
invoke(int, int)70%n/a111111
invoke(long, long)60%n/a111111
invoke(double, double)40%n/a111111
invoke(float, float)40%n/a111111
{...}6100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseDivision$3.html b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseDivision$3.html index 560de1668..d8ea05348 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseDivision$3.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseDivision$3.html @@ -1 +1 @@ -CPUBiElementWiseDivision.new CPUBiFun() {...}

CPUBiElementWiseDivision.new CPUBiFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total44 of 440%0 of 0n/a555555
invoke(int, int)110%n/a111111
invoke(float, float)100%n/a111111
invoke(long, long)100%n/a111111
invoke(double, double)70%n/a111111
{...}60%n/a111111
\ No newline at end of file +CPUBiElementWiseDivision.new CPUBiFun() {...}

CPUBiElementWiseDivision.new CPUBiFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total44 of 440%0 of 0n/a555555
invoke(int, int)110%n/a111111
invoke(float, float)100%n/a111111
invoke(long, long)100%n/a111111
invoke(double, double)70%n/a111111
{...}60%n/a111111
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseDivision.html b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseDivision.html index 73ccd7607..36e72e117 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseDivision.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseDivision.html @@ -1 +1 @@ -CPUBiElementWiseDivision

CPUBiElementWiseDivision

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total5 of 1872%0 of 0n/a141414
_getDeriveAt1()50%n/a111111
_getFun()5100%n/a010101
_getDeriveAt0()5100%n/a010101
CPUBiElementWiseDivision()3100%n/a010101
\ No newline at end of file +CPUBiElementWiseDivision

CPUBiElementWiseDivision

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total5 of 1872%0 of 0n/a141414
_getDeriveAt1()50%n/a111111
_getFun()5100%n/a010101
_getDeriveAt0()5100%n/a010101
CPUBiElementWiseDivision()3100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseDivision.java.html b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseDivision.java.html index 730b9862b..7d093bbc4 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseDivision.java.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseDivision.java.html @@ -34,4 +34,4 @@ }; } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseModulo$1.html b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseModulo$1.html index 98ee6104c..87289b275 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseModulo$1.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseModulo$1.html @@ -1 +1 @@ -CPUBiElementWiseModulo.new CPUBiFun() {...}

CPUBiElementWiseModulo.new CPUBiFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total4 of 2281%0 of 0n/a151515
invoke(long, long)40%n/a111111
{...}6100%n/a010101
invoke(double, double)4100%n/a010101
invoke(float, float)4100%n/a010101
invoke(int, int)4100%n/a010101
\ No newline at end of file +CPUBiElementWiseModulo.new CPUBiFun() {...}

CPUBiElementWiseModulo.new CPUBiFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total4 of 2281%0 of 0n/a151515
invoke(long, long)40%n/a111111
{...}6100%n/a010101
invoke(double, double)4100%n/a010101
invoke(float, float)4100%n/a010101
invoke(int, int)4100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseModulo$2.html b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseModulo$2.html index 68eff0843..058e6b4e7 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseModulo$2.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseModulo$2.html @@ -1 +1 @@ -CPUBiElementWiseModulo.new CPUBiFun() {...}

CPUBiElementWiseModulo.new CPUBiFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total21 of 2722%0 of 0n/a454545
invoke(int, int)70%n/a111111
invoke(long, long)60%n/a111111
invoke(double, double)40%n/a111111
invoke(float, float)40%n/a111111
{...}6100%n/a010101
\ No newline at end of file +CPUBiElementWiseModulo.new CPUBiFun() {...}

CPUBiElementWiseModulo.new CPUBiFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total21 of 2722%0 of 0n/a454545
invoke(int, int)70%n/a111111
invoke(long, long)60%n/a111111
invoke(double, double)40%n/a111111
invoke(float, float)40%n/a111111
{...}6100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseModulo$3.html b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseModulo$3.html index 3caa8e387..0b3b88362 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseModulo$3.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseModulo$3.html @@ -1 +1 @@ -CPUBiElementWiseModulo.new CPUBiFun() {...}

CPUBiElementWiseModulo.new CPUBiFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total44 of 440%0 of 0n/a555555
invoke(int, int)110%n/a111111
invoke(float, float)100%n/a111111
invoke(long, long)100%n/a111111
invoke(double, double)70%n/a111111
{...}60%n/a111111
\ No newline at end of file +CPUBiElementWiseModulo.new CPUBiFun() {...}

CPUBiElementWiseModulo.new CPUBiFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total44 of 440%0 of 0n/a555555
invoke(int, int)110%n/a111111
invoke(float, float)100%n/a111111
invoke(long, long)100%n/a111111
invoke(double, double)70%n/a111111
{...}60%n/a111111
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseModulo.html b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseModulo.html index 5e5c3479b..4b6b61c43 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseModulo.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseModulo.html @@ -1 +1 @@ -CPUBiElementWiseModulo

CPUBiElementWiseModulo

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total5 of 1872%0 of 0n/a141414
_getDeriveAt1()50%n/a111111
_getFun()5100%n/a010101
_getDeriveAt0()5100%n/a010101
CPUBiElementWiseModulo()3100%n/a010101
\ No newline at end of file +CPUBiElementWiseModulo

CPUBiElementWiseModulo

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total5 of 1872%0 of 0n/a141414
_getDeriveAt1()50%n/a111111
_getFun()5100%n/a010101
_getDeriveAt0()5100%n/a010101
CPUBiElementWiseModulo()3100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseModulo.java.html b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseModulo.java.html index bdb9407f0..669b0c59e 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseModulo.java.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseModulo.java.html @@ -34,4 +34,4 @@ }; } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseMultiplication$1.html b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseMultiplication$1.html index 64692c415..2c54762cf 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseMultiplication$1.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseMultiplication$1.html @@ -1 +1 @@ -CPUBiElementWiseMultiplication.new CPUBiFun() {...}

CPUBiElementWiseMultiplication.new CPUBiFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 18100%0 of 0n/a040404
{...}6100%n/a010101
invoke(double, double)4100%n/a010101
invoke(float, float)4100%n/a010101
invoke(int, int)4100%n/a010101
\ No newline at end of file +CPUBiElementWiseMultiplication.new CPUBiFun() {...}

CPUBiElementWiseMultiplication.new CPUBiFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 18100%0 of 0n/a040404
{...}6100%n/a010101
invoke(double, double)4100%n/a010101
invoke(float, float)4100%n/a010101
invoke(int, int)4100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseMultiplication$2.html b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseMultiplication$2.html index 5e6c6976b..6e5909da6 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseMultiplication$2.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseMultiplication$2.html @@ -1 +1 @@ -CPUBiElementWiseMultiplication.new CPUBiFun() {...}

CPUBiElementWiseMultiplication.new CPUBiFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total4 of 1266%0 of 0n/a242424
invoke(float, float)20%n/a111111
invoke(int, int)20%n/a111111
{...}6100%n/a010101
invoke(double, double)2100%n/a010101
\ No newline at end of file +CPUBiElementWiseMultiplication.new CPUBiFun() {...}

CPUBiElementWiseMultiplication.new CPUBiFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total4 of 1266%0 of 0n/a242424
invoke(float, float)20%n/a111111
invoke(int, int)20%n/a111111
{...}6100%n/a010101
invoke(double, double)2100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseMultiplication$3.html b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseMultiplication$3.html index 0177172c1..ff8fab391 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseMultiplication$3.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseMultiplication$3.html @@ -1 +1 @@ -CPUBiElementWiseMultiplication.new CPUBiFun() {...}

CPUBiElementWiseMultiplication.new CPUBiFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total4 of 1266%0 of 0n/a242424
invoke(float, float)20%n/a111111
invoke(int, int)20%n/a111111
{...}6100%n/a010101
invoke(double, double)2100%n/a010101
\ No newline at end of file +CPUBiElementWiseMultiplication.new CPUBiFun() {...}

CPUBiElementWiseMultiplication.new CPUBiFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total4 of 1266%0 of 0n/a242424
invoke(float, float)20%n/a111111
invoke(int, int)20%n/a111111
{...}6100%n/a010101
invoke(double, double)2100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseMultiplication.html b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseMultiplication.html index 0a1a26d5a..3f63fd263 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseMultiplication.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseMultiplication.html @@ -1 +1 @@ -CPUBiElementWiseMultiplication

CPUBiElementWiseMultiplication

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 18100%0 of 0n/a040404
_getFun()5100%n/a010101
_getDeriveAt0()5100%n/a010101
_getDeriveAt1()5100%n/a010101
CPUBiElementWiseMultiplication()3100%n/a010101
\ No newline at end of file +CPUBiElementWiseMultiplication

CPUBiElementWiseMultiplication

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 18100%0 of 0n/a040404
_getFun()5100%n/a010101
_getDeriveAt0()5100%n/a010101
_getDeriveAt1()5100%n/a010101
CPUBiElementWiseMultiplication()3100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseMultiplication.java.html b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseMultiplication.java.html index 75b28a363..4401825b8 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseMultiplication.java.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseMultiplication.java.html @@ -31,4 +31,4 @@ }; } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWisePower$1.html b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWisePower$1.html index adde17123..edda14547 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWisePower$1.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWisePower$1.html @@ -1 +1 @@ -CPUBiElementWisePower.new CPUBiFun() {...}

CPUBiElementWisePower.new CPUBiFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total15 of 3253%0 of 0n/a252525
invoke(int, int)80%n/a111111
invoke(long, long)70%n/a111111
invoke(float, float)7100%n/a010101
{...}6100%n/a010101
invoke(double, double)4100%n/a010101
\ No newline at end of file +CPUBiElementWisePower.new CPUBiFun() {...}

CPUBiElementWisePower.new CPUBiFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total15 of 3253%0 of 0n/a252525
invoke(int, int)80%n/a111111
invoke(long, long)70%n/a111111
invoke(float, float)7100%n/a010101
{...}6100%n/a010101
invoke(double, double)4100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWisePower$2.html b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWisePower$2.html index 64a9d4194..89c6b4ff3 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWisePower$2.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWisePower$2.html @@ -1 +1 @@ -CPUBiElementWisePower.new CPUBiFun() {...}

CPUBiElementWisePower.new CPUBiFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total25 of 5150%0 of 0n/a252525
invoke(int, int)130%n/a111111
invoke(long, long)120%n/a111111
invoke(float, float)12100%n/a010101
invoke(double, double)8100%n/a010101
{...}6100%n/a010101
\ No newline at end of file +CPUBiElementWisePower.new CPUBiFun() {...}

CPUBiElementWisePower.new CPUBiFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total25 of 5150%0 of 0n/a252525
invoke(int, int)130%n/a111111
invoke(long, long)120%n/a111111
invoke(float, float)12100%n/a010101
invoke(double, double)8100%n/a010101
{...}6100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWisePower$3.html b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWisePower$3.html index f340010af..17a9c8881 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWisePower$3.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWisePower$3.html @@ -1 +1 @@ -CPUBiElementWisePower.new CPUBiFun() {...}

CPUBiElementWisePower.new CPUBiFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total23 of 4751%0 of 0n/a252525
invoke(int, int)120%n/a111111
invoke(long, long)110%n/a111111
invoke(float, float)11100%n/a010101
invoke(double, double)7100%n/a010101
{...}6100%n/a010101
\ No newline at end of file +CPUBiElementWisePower.new CPUBiFun() {...}

CPUBiElementWisePower.new CPUBiFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total23 of 4751%0 of 0n/a252525
invoke(int, int)120%n/a111111
invoke(long, long)110%n/a111111
invoke(float, float)11100%n/a010101
invoke(double, double)7100%n/a010101
{...}6100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWisePower.html b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWisePower.html index 34b6ad3c2..157a36b6f 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWisePower.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWisePower.html @@ -1 +1 @@ -CPUBiElementWisePower

CPUBiElementWisePower

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 18100%0 of 0n/a040404
_getFun()5100%n/a010101
_getDeriveAt0()5100%n/a010101
_getDeriveAt1()5100%n/a010101
CPUBiElementWisePower()3100%n/a010101
\ No newline at end of file +CPUBiElementWisePower

CPUBiElementWisePower

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 18100%0 of 0n/a040404
_getFun()5100%n/a010101
_getDeriveAt0()5100%n/a010101
_getDeriveAt1()5100%n/a010101
CPUBiElementWisePower()3100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWisePower.java.html b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWisePower.java.html index ee69bd6ad..880a4b8ae 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWisePower.java.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWisePower.java.html @@ -34,4 +34,4 @@ }; } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseSubtraction$1.html b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseSubtraction$1.html index 0f8a2c1ab..a19a1e331 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseSubtraction$1.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseSubtraction$1.html @@ -1 +1 @@ -CPUBiElementWiseSubtraction.new CPUBiFun() {...}

CPUBiElementWiseSubtraction.new CPUBiFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 18100%0 of 0n/a040404
{...}6100%n/a010101
invoke(double, double)4100%n/a010101
invoke(float, float)4100%n/a010101
invoke(int, int)4100%n/a010101
\ No newline at end of file +CPUBiElementWiseSubtraction.new CPUBiFun() {...}

CPUBiElementWiseSubtraction.new CPUBiFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 18100%0 of 0n/a040404
{...}6100%n/a010101
invoke(double, double)4100%n/a010101
invoke(float, float)4100%n/a010101
invoke(int, int)4100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseSubtraction$2.html b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseSubtraction$2.html index 69dc74c15..d0e3c463a 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseSubtraction$2.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseSubtraction$2.html @@ -1 +1 @@ -CPUBiElementWiseSubtraction.new CPUBiFun() {...}

CPUBiElementWiseSubtraction.new CPUBiFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total2 of 1283%0 of 0n/a141414
invoke(int, int)20%n/a111111
{...}6100%n/a010101
invoke(double, double)2100%n/a010101
invoke(float, float)2100%n/a010101
\ No newline at end of file +CPUBiElementWiseSubtraction.new CPUBiFun() {...}

CPUBiElementWiseSubtraction.new CPUBiFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total2 of 1283%0 of 0n/a141414
invoke(int, int)20%n/a111111
{...}6100%n/a010101
invoke(double, double)2100%n/a010101
invoke(float, float)2100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseSubtraction$3.html b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseSubtraction$3.html index ec42ca6da..4675ba125 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseSubtraction$3.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseSubtraction$3.html @@ -1 +1 @@ -CPUBiElementWiseSubtraction.new CPUBiFun() {...}

CPUBiElementWiseSubtraction.new CPUBiFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total2 of 1283%0 of 0n/a141414
invoke(int, int)20%n/a111111
{...}6100%n/a010101
invoke(double, double)2100%n/a010101
invoke(float, float)2100%n/a010101
\ No newline at end of file +CPUBiElementWiseSubtraction.new CPUBiFun() {...}

CPUBiElementWiseSubtraction.new CPUBiFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total2 of 1283%0 of 0n/a141414
invoke(int, int)20%n/a111111
{...}6100%n/a010101
invoke(double, double)2100%n/a010101
invoke(float, float)2100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseSubtraction.html b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseSubtraction.html index 3eee71c2c..50820edff 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseSubtraction.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseSubtraction.html @@ -1 +1 @@ -CPUBiElementWiseSubtraction

CPUBiElementWiseSubtraction

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 18100%0 of 0n/a040404
_getFun()5100%n/a010101
_getDeriveAt0()5100%n/a010101
_getDeriveAt1()5100%n/a010101
CPUBiElementWiseSubtraction()3100%n/a010101
\ No newline at end of file +CPUBiElementWiseSubtraction

CPUBiElementWiseSubtraction

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 18100%0 of 0n/a040404
_getFun()5100%n/a010101
_getDeriveAt0()5100%n/a010101
_getDeriveAt1()5100%n/a010101
CPUBiElementWiseSubtraction()3100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseSubtraction.java.html b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseSubtraction.java.html index 37615ebfa..14bb9936d 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseSubtraction.java.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUBiElementWiseSubtraction.java.html @@ -31,4 +31,4 @@ }; } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUElementwiseAssignFun.html b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUElementwiseAssignFun.html index c52e53acf..e3e436800 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUElementwiseAssignFun.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUElementwiseAssignFun.html @@ -1 +1 @@ -CPUElementwiseAssignFun

CPUElementwiseAssignFun

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total45 of 39388%12 of 4271%102546604
run(ExecutionCall)4533988%123071%102246401
CPUElementwiseAssignFun()4100%n/a010101
lambda$run$0(Tensor)4100%n/a010101
static {...}100%n/a010101
\ No newline at end of file +CPUElementwiseAssignFun

CPUElementwiseAssignFun

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total45 of 39388%12 of 4271%102546604
run(ExecutionCall)4533988%123071%102246401
CPUElementwiseAssignFun()4100%n/a010101
lambda$run$0(Tensor)4100%n/a010101
static {...}100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUElementwiseAssignFun.java.html b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUElementwiseAssignFun.java.html index baa403d58..78d80be57 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUElementwiseAssignFun.java.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUElementwiseAssignFun.java.html @@ -97,4 +97,4 @@ } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUElementwiseFunction.html b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUElementwiseFunction.html index bd5bca829..0cb3d04e5 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUElementwiseFunction.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUElementwiseFunction.html @@ -1 +1 @@ -CPUElementwiseFunction

CPUElementwiseFunction

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total97 of 91689%12 of 8686%126613208323
_workloadFor(ExecutionCall)4933987%64287%62578201
lambda$_workloadFor$15(char[], CPUFun, char[], int, int)160%20%222211
lambda$_workloadFor$13(boolean[], CPUFun, boolean[], int, int)160%20%222211
lambda$_workloadFor$11(short[], CPUFun, short[], int, int)160%20%222211
lambda$_workloadFor$0(Tensor, Tensor, double[], CPUFun, int[], int, int)37100%2100%0201001
lambda$_workloadFor$18(Tensor, Tensor, Object[], CPUFun, Object[], int, int)36100%2100%0201001
lambda$_workloadFor$16(Tensor, Tensor, char[], CPUFun, char[], int, int)36100%2100%0201001
lambda$_workloadFor$14(Tensor, Tensor, boolean[], CPUFun, boolean[], int, int)36100%2100%0201001
lambda$_workloadFor$12(Tensor, Tensor, short[], CPUFun, short[], int, int)36100%2100%0201001
lambda$_workloadFor$10(Tensor, Tensor, byte[], CPUFun, byte[], int, int)36100%2100%0201001
lambda$_workloadFor$8(Tensor, Tensor, long[], CPUFun, long[], int, int)36100%2100%0201001
lambda$_workloadFor$6(Tensor, Tensor, int[], CPUFun, int[], int, int)36100%2100%0201001
lambda$_workloadFor$4(Tensor, Tensor, float[], CPUFun, float[], int, int)36100%2100%0201001
lambda$_workloadFor$2(Tensor, Tensor, double[], CPUFun, double[], int, int)36100%2100%0201001
run(ExecutionCall)16100%n/a010601
lambda$_workloadFor$17(Object[], CPUFun, Object[], int, int)16100%2100%020201
lambda$_workloadFor$9(byte[], CPUFun, byte[], int, int)16100%2100%020201
lambda$_workloadFor$7(long[], CPUFun, long[], int, int)16100%2100%020201
lambda$_workloadFor$5(int[], CPUFun, int[], int, int)16100%2100%020201
lambda$_workloadFor$3(float[], CPUFun, float[], int, int)16100%2100%020201
lambda$_workloadFor$1(double[], CPUFun, double[], int, int)16100%2100%020201
CPUElementwiseFunction(ScalarFun)6100%n/a010101
static {...}100%n/a010101
\ No newline at end of file +CPUElementwiseFunction

CPUElementwiseFunction

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total97 of 91689%12 of 8686%126613208323
_workloadFor(ExecutionCall)4933987%64287%62578201
lambda$_workloadFor$15(char[], CPUFun, char[], int, int)160%20%222211
lambda$_workloadFor$13(boolean[], CPUFun, boolean[], int, int)160%20%222211
lambda$_workloadFor$11(short[], CPUFun, short[], int, int)160%20%222211
lambda$_workloadFor$0(Tensor, Tensor, double[], CPUFun, int[], int, int)37100%2100%0201001
lambda$_workloadFor$18(Tensor, Tensor, Object[], CPUFun, Object[], int, int)36100%2100%0201001
lambda$_workloadFor$16(Tensor, Tensor, char[], CPUFun, char[], int, int)36100%2100%0201001
lambda$_workloadFor$14(Tensor, Tensor, boolean[], CPUFun, boolean[], int, int)36100%2100%0201001
lambda$_workloadFor$12(Tensor, Tensor, short[], CPUFun, short[], int, int)36100%2100%0201001
lambda$_workloadFor$10(Tensor, Tensor, byte[], CPUFun, byte[], int, int)36100%2100%0201001
lambda$_workloadFor$8(Tensor, Tensor, long[], CPUFun, long[], int, int)36100%2100%0201001
lambda$_workloadFor$6(Tensor, Tensor, int[], CPUFun, int[], int, int)36100%2100%0201001
lambda$_workloadFor$4(Tensor, Tensor, float[], CPUFun, float[], int, int)36100%2100%0201001
lambda$_workloadFor$2(Tensor, Tensor, double[], CPUFun, double[], int, int)36100%2100%0201001
run(ExecutionCall)16100%n/a010601
lambda$_workloadFor$17(Object[], CPUFun, Object[], int, int)16100%2100%020201
lambda$_workloadFor$9(byte[], CPUFun, byte[], int, int)16100%2100%020201
lambda$_workloadFor$7(long[], CPUFun, long[], int, int)16100%2100%020201
lambda$_workloadFor$5(int[], CPUFun, int[], int, int)16100%2100%020201
lambda$_workloadFor$3(float[], CPUFun, float[], int, int)16100%2100%020201
lambda$_workloadFor$1(double[], CPUFun, double[], int, int)16100%2100%020201
CPUElementwiseFunction(ScalarFun)6100%n/a010101
static {...}100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUElementwiseFunction.java.html b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUElementwiseFunction.java.html index aebee139b..b3e7f0174 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUElementwiseFunction.java.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPUElementwiseFunction.java.html @@ -295,4 +295,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPURandomization$1.html b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPURandomization$1.html index 0885a9958..2784b8cb7 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPURandomization$1.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPURandomization$1.html @@ -1 +1 @@ -CPURandomization.new CPURandomization.DataProvider() {...}

CPURandomization.new CPURandomization.DataProvider() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 9100%0 of 0n/a020202
{...}6100%n/a010101
get(Class)3100%n/a010101
\ No newline at end of file +CPURandomization.new CPURandomization.DataProvider() {...}

CPURandomization.new CPURandomization.DataProvider() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 9100%0 of 0n/a020202
{...}6100%n/a010101
get(Class)3100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPURandomization$2.html b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPURandomization$2.html index bb80f7519..ccc614c18 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPURandomization$2.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPURandomization$2.html @@ -1 +1 @@ -CPURandomization.new CPURandomization.DataProvider() {...}

CPURandomization.new CPURandomization.DataProvider() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 12100%0 of 0n/a020202
{...}6100%n/a010101
get(Class)6100%n/a010101
\ No newline at end of file +CPURandomization.new CPURandomization.DataProvider() {...}

CPURandomization.new CPURandomization.DataProvider() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 12100%0 of 0n/a020202
{...}6100%n/a010101
get(Class)6100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPURandomization.html b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPURandomization.html index 324e71787..2c171c238 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPURandomization.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPURandomization.html @@ -1 +1 @@ -CPURandomization

CPURandomization

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total12 of 1,01398%3 of 11097%3911194036
_newWorkloadFor(long, Class, CPURandomization.NDIteratorProvider, CPURandomization.DataProvider)1215892%13397%11814201
fillRandomly(Object, long)109100%18100%01001501
lambda$_newWorkloadFor$4(CPURandomization.NDIteratorProvider, long, float[], int, int)70100%6100%0401101
gaussianFrom(long, double[])67100%1375%1301301
lambda$_newWorkloadFor$3(long, float[], int, int)67100%6100%0401001
lambda$_newWorkloadFor$2(CPURandomization.NDIteratorProvider, long, double[], int, int)67100%6100%0401101
lambda$_newWorkloadFor$1(long, double[], int, int)64100%6100%0401001
_newWorkloadFor(ExecutionCall)38100%2100%020801
seedIndexScramble(long, long)30100%n/a010401
lambda$_newWorkloadFor$16(CPURandomization.NDIteratorProvider, char[], long, int, int)19100%2100%020401
lambda$_newWorkloadFor$14(CPURandomization.NDIteratorProvider, boolean[], long, int, int)19100%2100%020401
lambda$_newWorkloadFor$12(CPURandomization.NDIteratorProvider, short[], long, int, int)19100%2100%020401
lambda$_newWorkloadFor$10(CPURandomization.NDIteratorProvider, byte[], long, int, int)19100%2100%020401
lambda$_newWorkloadFor$8(CPURandomization.NDIteratorProvider, int[], long, int, int)19100%2100%020401
lambda$_newWorkloadFor$6(CPURandomization.NDIteratorProvider, long[], long, int, int)19100%2100%020401
_nextLong(long)18100%n/a010301
_nextSeed(long)18100%1150%120401
run(ExecutionCall)15100%n/a010701
_nextDouble(long, long)15100%n/a010101
lambda$_newWorkloadFor$15(char[], long, int, int)14100%2100%020301
lambda$_newWorkloadFor$13(boolean[], long, int, int)14100%2100%020301
lambda$_newWorkloadFor$11(short[], long, int, int)14100%2100%020301
lambda$_newWorkloadFor$9(byte[], long, int, int)14100%2100%020301
lambda$_newWorkloadFor$7(int[], long, int, int)14100%2100%020301
lambda$_newWorkloadFor$5(long[], long, int, int)14100%2100%020301
lambda$_newWorkloadFor$0(Tensor, int)10100%n/a010301
_nextBoolean(long)9100%2100%020101
fillRandomly(Object, String)8100%n/a010101
fillRandomly(Object, Arg.Seed)7100%n/a010101
_next(int, long)7100%n/a010101
initialScramble(long)6100%n/a010101
_nextInt(long)5100%n/a010101
_nextByte(long)4100%n/a010101
_nextShort(long)4100%n/a010101
_nextChar(long)4100%n/a010101
CPURandomization()3100%n/a010101
\ No newline at end of file +CPURandomization

CPURandomization

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total12 of 1,01398%3 of 11097%3911194036
_newWorkloadFor(long, Class, CPURandomization.NDIteratorProvider, CPURandomization.DataProvider)1215892%13397%11814201
fillRandomly(Object, long)109100%18100%01001501
lambda$_newWorkloadFor$4(CPURandomization.NDIteratorProvider, long, float[], int, int)70100%6100%0401101
gaussianFrom(long, double[])67100%1375%1301301
lambda$_newWorkloadFor$3(long, float[], int, int)67100%6100%0401001
lambda$_newWorkloadFor$2(CPURandomization.NDIteratorProvider, long, double[], int, int)67100%6100%0401101
lambda$_newWorkloadFor$1(long, double[], int, int)64100%6100%0401001
_newWorkloadFor(ExecutionCall)38100%2100%020801
seedIndexScramble(long, long)30100%n/a010401
lambda$_newWorkloadFor$16(CPURandomization.NDIteratorProvider, char[], long, int, int)19100%2100%020401
lambda$_newWorkloadFor$14(CPURandomization.NDIteratorProvider, boolean[], long, int, int)19100%2100%020401
lambda$_newWorkloadFor$12(CPURandomization.NDIteratorProvider, short[], long, int, int)19100%2100%020401
lambda$_newWorkloadFor$10(CPURandomization.NDIteratorProvider, byte[], long, int, int)19100%2100%020401
lambda$_newWorkloadFor$8(CPURandomization.NDIteratorProvider, int[], long, int, int)19100%2100%020401
lambda$_newWorkloadFor$6(CPURandomization.NDIteratorProvider, long[], long, int, int)19100%2100%020401
_nextLong(long)18100%n/a010301
_nextSeed(long)18100%1150%120401
run(ExecutionCall)15100%n/a010701
_nextDouble(long, long)15100%n/a010101
lambda$_newWorkloadFor$15(char[], long, int, int)14100%2100%020301
lambda$_newWorkloadFor$13(boolean[], long, int, int)14100%2100%020301
lambda$_newWorkloadFor$11(short[], long, int, int)14100%2100%020301
lambda$_newWorkloadFor$9(byte[], long, int, int)14100%2100%020301
lambda$_newWorkloadFor$7(int[], long, int, int)14100%2100%020301
lambda$_newWorkloadFor$5(long[], long, int, int)14100%2100%020301
lambda$_newWorkloadFor$0(Tensor, int)10100%n/a010301
_nextBoolean(long)9100%2100%020101
fillRandomly(Object, String)8100%n/a010101
fillRandomly(Object, Arg.Seed)7100%n/a010101
_next(int, long)7100%n/a010101
initialScramble(long)6100%n/a010101
_nextInt(long)5100%n/a010101
_nextByte(long)4100%n/a010101
_nextShort(long)4100%n/a010101
_nextChar(long)4100%n/a010101
CPURandomization()3100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPURandomization.java.html b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPURandomization.java.html index 52968f89b..6f0d2499e 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPURandomization.java.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/CPURandomization.java.html @@ -327,4 +327,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/index.html b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/index.html index b31b2dffc..eec67bbc1 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/index.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/index.html @@ -1 +1 @@ -neureka.backend.main.implementations.elementwise

neureka.backend.main.implementations.elementwise

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total544 of 3,77285%64 of 33881%1023736671851204238
CPUBiElementWise11852481%285867%2559510931601
CPUElementwiseFunction9781989%127486%12661320832301
CPUElementwiseAssignFun4534888%123071%10254660401
CPUBiElementWiseDivision.new CPUBiFun() {...}440%n/a55555511
CPUBiElementWiseModulo.new CPUBiFun() {...}440%n/a55555511
CPUBiElementWisePower.new CPUBiFun() {...}252650%n/a25252501
CPUBiElementWisePower.new CPUBiFun() {...}232451%n/a25252501
CPUBiElementWiseDivision.new CPUBiFun() {...}2122%n/a45454501
CPUBiElementWiseModulo.new CPUBiFun() {...}2122%n/a45454501
CPUBiElementWiseAddition.new CPUBiFun() {...}171851%40%59373701
CPUBiElementWisePower.new CPUBiFun() {...}151753%n/a25252501
CPURandomization121,00198%310797%391119403601
CPUBiElementWiseAddition.new CPUBiFun() {...}1233%n/a67676701
CLElementwiseFunction9393%3350%360180301
CLBiElementwise7992%2250%250150301
CPUBiElementWiseDivision1372%n/a14141401
CPUBiElementWiseModulo1372%n/a14141401
CLRandomization37%n/a12121201
CPUBiElementWiseDivision.new CPUBiFun() {...}1881%n/a15151501
CPUBiElementWiseModulo.new CPUBiFun() {...}1881%n/a15151501
CPUBiElementWiseMultiplication.new CPUBiFun() {...}66%n/a24242401
CPUBiElementWiseMultiplication.new CPUBiFun() {...}66%n/a24242401
CPUBiElementWiseAddition1381%n/a14141401
CPUBiElementWiseSubtraction.new CPUBiFun() {...}1083%n/a14141401
CPUBiElementWiseSubtraction.new CPUBiFun() {...}1083%n/a14141401
CPUBiElementWiseMultiplication18100%n/a04040401
CPUBiElementWiseSubtraction18100%n/a04040401
CPUBiElementWisePower18100%n/a04040401
CPUBiElementWiseMultiplication.new CPUBiFun() {...}18100%n/a04040401
CPUBiElementWiseSubtraction.new CPUBiFun() {...}18100%n/a04040401
CPURandomization.new CPURandomization.DataProvider() {...}12100%n/a02020201
CPURandomization.new CPURandomization.DataProvider() {...}9100%n/a02020201
CLBiElementwiseMultiplication100%n/a01020101
CLBiElementwiseAddition100%n/a01020101
CLBiElementwiseSubtraction100%n/a01020101
CLBiElementwisePower100%n/a01020101
CLBiElementwiseModulo100%n/a01020101
CLBiElementwiseDivision100%n/a01020101
\ No newline at end of file +neureka.backend.main.implementations.elementwise

neureka.backend.main.implementations.elementwise

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total755 of 3,77279%69 of 33879%115373112718642041138
CPUBiElementWise11852481%285867%2559510931601
CLElementwiseFunction990%60%6618183311
CPUElementwiseFunction9781989%127486%12661320832301
CLBiElementwise850%40%5515153311
CPUElementwiseAssignFun4534888%123071%10254660401
CPUBiElementWiseDivision.new CPUBiFun() {...}440%n/a55555511
CPUBiElementWiseModulo.new CPUBiFun() {...}440%n/a55555511
CPUBiElementWisePower.new CPUBiFun() {...}252650%n/a25252501
CPUBiElementWisePower.new CPUBiFun() {...}232451%n/a25252501
CPUBiElementWiseDivision.new CPUBiFun() {...}2122%n/a45454501
CPUBiElementWiseModulo.new CPUBiFun() {...}2122%n/a45454501
CPUBiElementWiseAddition.new CPUBiFun() {...}171851%40%59373701
CPUBiElementWisePower.new CPUBiFun() {...}151753%n/a25252501
CPURandomization121,00198%310797%391119403601
CPUBiElementWiseAddition.new CPUBiFun() {...}1233%n/a67676701
CLRandomization0%n/a22222211
CLBiElementwiseMultiplication0%n/a11221111
CLBiElementwiseAddition0%n/a11221111
CLBiElementwiseSubtraction0%n/a11221111
CLBiElementwisePower0%n/a11221111
CLBiElementwiseModulo0%n/a11221111
CLBiElementwiseDivision0%n/a11221111
CPUBiElementWiseDivision1372%n/a14141401
CPUBiElementWiseModulo1372%n/a14141401
CPUBiElementWiseDivision.new CPUBiFun() {...}1881%n/a15151501
CPUBiElementWiseModulo.new CPUBiFun() {...}1881%n/a15151501
CPUBiElementWiseMultiplication.new CPUBiFun() {...}66%n/a24242401
CPUBiElementWiseMultiplication.new CPUBiFun() {...}66%n/a24242401
CPUBiElementWiseAddition1381%n/a14141401
CPUBiElementWiseSubtraction.new CPUBiFun() {...}1083%n/a14141401
CPUBiElementWiseSubtraction.new CPUBiFun() {...}1083%n/a14141401
CPUBiElementWiseMultiplication18100%n/a04040401
CPUBiElementWiseSubtraction18100%n/a04040401
CPUBiElementWisePower18100%n/a04040401
CPUBiElementWiseMultiplication.new CPUBiFun() {...}18100%n/a04040401
CPUBiElementWiseSubtraction.new CPUBiFun() {...}18100%n/a04040401
CPURandomization.new CPURandomization.DataProvider() {...}12100%n/a02020201
CPURandomization.new CPURandomization.DataProvider() {...}9100%n/a02020201
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/index.source.html b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/index.source.html index fd096d70a..8e80802b2 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/index.source.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.elementwise/index.source.html @@ -1 +1 @@ -neureka.backend.main.implementations.elementwise

neureka.backend.main.implementations.elementwise

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total544 of 3,77285%64 of 33881%1023736671851204238
CPUBiElementWise.java11852481%285867%2559510931601
CPUElementwiseFunction.java9781989%127486%12661320832301
CPUBiElementWiseModulo.java743733%n/a11191016111914
CPUBiElementWiseDivision.java743733%n/a11191016111914
CPUBiElementWisePower.java638557%n/a61961661904
CPUElementwiseAssignFun.java4534888%123071%10254660401
CPUBiElementWiseAddition.java323753%40%12201016101803
CPURandomization.java121,02298%310797%395119804003
CPUBiElementWiseMultiplication.java5286%n/a41641341604
CLElementwiseFunction.java9393%3350%360180301
CLBiElementwise.java7992%2250%250150301
CLRandomization.java37%n/a12121201
CPUBiElementWiseSubtraction.java5693%n/a21621321604
CLBiElementwiseSubtraction.java100%n/a01020101
CLBiElementwiseModulo.java100%n/a01020101
CLBiElementwiseDivision.java100%n/a01020101
CLBiElementwisePower.java100%n/a01020101
CLBiElementwiseMultiplication.java100%n/a01020101
CLBiElementwiseAddition.java100%n/a01020101
\ No newline at end of file +neureka.backend.main.implementations.elementwise

neureka.backend.main.implementations.elementwise

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total755 of 3,77279%69 of 33879%115373112718642041138
CPUBiElementWise.java11852481%285867%2559510931601
CLElementwiseFunction.java990%60%6618183311
CPUElementwiseFunction.java9781989%127486%12661320832301
CLBiElementwise.java850%40%5515153311
CPUBiElementWiseModulo.java743733%n/a11191016111914
CPUBiElementWiseDivision.java743733%n/a11191016111914
CPUBiElementWisePower.java638557%n/a61961661904
CPUElementwiseAssignFun.java4534888%123071%10254660401
CPUBiElementWiseAddition.java323753%40%12201016101803
CPURandomization.java121,02298%310797%395119804003
CPUBiElementWiseMultiplication.java5286%n/a41641341604
CLRandomization.java0%n/a22222211
CLBiElementwiseAddition.java0%n/a11221111
CLBiElementwiseSubtraction.java0%n/a11221111
CLBiElementwiseModulo.java0%n/a11221111
CLBiElementwiseDivision.java0%n/a11221111
CLBiElementwisePower.java0%n/a11221111
CLBiElementwiseMultiplication.java0%n/a11221111
CPUBiElementWiseSubtraction.java5693%n/a21621321604
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun.api/CPUBiFun.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun.api/CPUBiFun.html index 26f05b81a..29bb668fc 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun.api/CPUBiFun.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun.api/CPUBiFun.html @@ -1 +1 @@ -CPUBiFun

CPUBiFun

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total76 of 8611%6 of 60%10117878
invoke(boolean, boolean)190%60%441111
invoke(Object, Object)140%n/a111111
invoke(byte, byte)100%n/a111111
invoke(int, int)90%n/a111111
invoke(float, float)80%n/a111111
invoke(long, long)80%n/a111111
invoke(char, char)80%n/a111111
invoke(short, short)10100%n/a010101
\ No newline at end of file +CPUBiFun

CPUBiFun

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total72 of 8212%6 of 60%10117878
invoke(boolean, boolean)170%60%441111
invoke(Object, Object)140%n/a111111
invoke(byte, byte)100%n/a111111
invoke(int, int)90%n/a111111
invoke(float, float)80%n/a111111
invoke(long, long)80%n/a111111
invoke(char, char)60%n/a111111
invoke(short, short)10100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun.api/CPUBiFun.java.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun.api/CPUBiFun.java.html index 3798f0b70..315b55911 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun.api/CPUBiFun.java.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun.api/CPUBiFun.java.html @@ -4,20 +4,36 @@ { double invoke(double a, double b); - default float invoke(float a, float b) { return (float) invoke( (double) a, (double) b ); } + default float invoke(float a, float b) { + return (float) invoke( a, (double) b ); + } - default int invoke(int a, int b) { return (int) Math.round( invoke( (double) a, (double) b ) ); } + default int invoke(int a, int b) { + return (int) Math.round( invoke( a, (double) b ) ); + } - default long invoke(long a, long b) { return Math.round( invoke( (double) a, (double) b ) ); } + default long invoke(long a, long b) { + return Math.round( invoke( (double) a, (double) b ) ); + } - default byte invoke(byte a, byte b) { return (byte) Math.round( invoke( (double) a, (double) b ) ); } + default byte invoke(byte a, byte b) { + return (byte) Math.round( invoke( a, (double) b ) ); + } - default short invoke(short a, short b) { return (short) Math.round( invoke( (double) a, (double) b ) ); } + default short invoke(short a, short b) { + return (short) Math.round( invoke( a, (double) b ) ); + } - default boolean invoke(boolean a, boolean b) { return Math.round( invoke( a ? 1 : 0, b ? 1 : 0 ) ) != 0; } // Some default behaviors, it might make sense to override this for some activations. + default boolean invoke(boolean a, boolean b) { + return invoke( a ? 1 : 0, b ? 1 : 0 ) != 0; // Some default behaviors, it might make sense to override this for some activations. + } - default char invoke(char a, char b) { return (char) Math.round( invoke( (int) a, (int) b ) ); } // Some default behaviors, it might make sense to override this for some activations. + default char invoke(char a, char b) { + return (char) invoke( a, (int) b ); // Some default behaviors, it might make sense to override this for some activations. + } - default Object invoke(Object a, Object b) { throw new IllegalStateException("Not implemented for operation "+getClass().getSimpleName()); } + default Object invoke(Object a, Object b) { + throw new IllegalStateException("Not implemented for operation "+getClass().getSimpleName()); + } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun.api/CPUFun.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun.api/CPUFun.html index 5ab8d9275..7b4f3e3a4 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun.api/CPUFun.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun.api/CPUFun.html @@ -1 +1 @@ -CPUFun

CPUFun

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total41 of 7041%4 of 40%6104848
invoke(boolean)140%40%331111
invoke(Object)140%n/a111111
invoke(char)70%n/a111111
invoke(long)60%n/a111111
invoke(byte)8100%n/a010101
invoke(short)8100%n/a010101
invoke(int)7100%n/a010101
invoke(float)6100%n/a010101
\ No newline at end of file +CPUFun

CPUFun

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total37 of 6643%4 of 40%6104848
invoke(Object)140%n/a111111
invoke(boolean)120%40%331111
invoke(long)60%n/a111111
invoke(char)50%n/a111111
invoke(byte)8100%n/a010101
invoke(short)8100%n/a010101
invoke(int)7100%n/a010101
invoke(float)6100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun.api/CPUFun.java.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun.api/CPUFun.java.html index a653614af..eca64af10 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun.api/CPUFun.java.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun.api/CPUFun.java.html @@ -4,21 +4,37 @@ double invoke(double x); - default float invoke(float x) { return (float) invoke( (double) x ); } + default float invoke(float x) { + return (float) invoke( (double) x ); + } - default int invoke(int x) { return (int) Math.round( invoke( (double) x ) ); } + default int invoke(int x) { + return (int) Math.round( invoke( (double) x ) ); + } - default long invoke(long x) { return Math.round( invoke( (double) x ) ); } + default long invoke(long x) { + return Math.round( invoke( (double) x ) ); + } - default byte invoke(byte x) { return (byte) Math.round( invoke( (double) x ) ); } + default byte invoke(byte x) { + return (byte) Math.round( invoke( (double) x ) ); + } - default short invoke(short x) { return (short) Math.round( invoke( (double) x ) ); } + default short invoke(short x) { + return (short) Math.round( invoke( (double) x ) ); + } - default boolean invoke(boolean x) { return Math.round( invoke( x ? 1 : 0 ) ) != 0; } // Some default behaviors, it might make sense to override this for some activations. + default boolean invoke(boolean x) { + return invoke( x ? 1 : 0 ) != 0; // Some default behaviors, it might make sense to override this for some activations. + } - default char invoke(char x) { return (char) Math.round( invoke( (int) x ) ); } // Some default behaviors, it might make sense to override this for some activations. + default char invoke(char x) { + return (char) invoke( (int) x ); // Some default behaviors, it might make sense to override this for some activations. + } - default Object invoke(Object x) { throw new IllegalStateException("Not implemented for operation "+getClass().getSimpleName()); } + default Object invoke(Object x) { + throw new IllegalStateException("Not implemented for operation "+getClass().getSimpleName()); + } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun.api/ScalarFun.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun.api/ScalarFun.html index 3dee9da92..607972c02 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun.api/ScalarFun.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun.api/ScalarFun.html @@ -1 +1 @@ -ScalarFun

ScalarFun

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 105100%0 of 2100%0302602
static {...}93100%n/a0102301
calculate(double, boolean)12100%2100%020301
\ No newline at end of file +ScalarFun

ScalarFun

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 105100%0 of 2100%0302602
static {...}93100%n/a0102301
calculate(double, boolean)12100%2100%020301
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun.api/ScalarFun.java.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun.api/ScalarFun.java.html index a155ac0c1..5477dade2 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun.api/ScalarFun.java.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun.api/ScalarFun.java.html @@ -46,4 +46,4 @@ CPUFun getDerivative(); } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun.api/index.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun.api/index.html index 13291aad3..c14b146bc 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun.api/index.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun.api/index.html @@ -1 +1 @@ -neureka.backend.main.implementations.fun.api

neureka.backend.main.implementations.fun.api

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total117 of 26155%10 of 1216%16241142111803
CPUBiFun761011%60%1011787801
CPUFun412941%40%610484801
ScalarFun105100%2100%030260201
\ No newline at end of file +neureka.backend.main.implementations.fun.api

neureka.backend.main.implementations.fun.api

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total109 of 25356%10 of 1216%16241142111803
CPUBiFun721012%60%1011787801
CPUFun372943%40%610484801
ScalarFun105100%2100%030260201
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun.api/index.source.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun.api/index.source.html index 86d7ae217..b952fb1e8 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun.api/index.source.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun.api/index.source.html @@ -1 +1 @@ -neureka.backend.main.implementations.fun.api

neureka.backend.main.implementations.fun.api

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total117 of 26155%10 of 1216%16241142111803
CPUBiFun.java761011%60%1011787801
CPUFun.java412941%40%610484801
ScalarFun.java105100%2100%030260201
\ No newline at end of file +neureka.backend.main.implementations.fun.api

neureka.backend.main.implementations.fun.api

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total109 of 25356%10 of 1216%16241142111803
CPUBiFun.java721012%60%1011787801
CPUFun.java372943%40%610484801
ScalarFun.java105100%2100%030260201
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/FunUtil.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/FunUtil.html index d66e4240c..f662ce3dd 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/FunUtil.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/FunUtil.html @@ -1 +1 @@ -FunUtil

FunUtil

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total3 of 9996%0 of 0n/a1311713
FunUtil()30%n/a111111
invSqrt(double)58100%n/a010901
invSqrt(float)38100%n/a010701
\ No newline at end of file +FunUtil

FunUtil

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total3 of 9996%0 of 0n/a1311713
FunUtil()30%n/a111111
invSqrt(double)58100%n/a010901
invSqrt(float)38100%n/a010701
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/FunUtil.java.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/FunUtil.java.html index e39dc4e4c..002685d2a 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/FunUtil.java.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/FunUtil.java.html @@ -28,4 +28,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarAbsolute$1.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarAbsolute$1.html index 45f81447c..6d5844e01 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarAbsolute$1.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarAbsolute$1.html @@ -1 +1 @@ -ScalarAbsolute.new CPUFun() {...}

ScalarAbsolute.new CPUFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 15100%0 of 0n/a040404
{...}6100%n/a010101
invoke(double)3100%n/a010101
invoke(float)3100%n/a010101
invoke(int)3100%n/a010101
\ No newline at end of file +ScalarAbsolute.new CPUFun() {...}

ScalarAbsolute.new CPUFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 15100%0 of 0n/a040404
{...}6100%n/a010101
invoke(double)3100%n/a010101
invoke(float)3100%n/a010101
invoke(int)3100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarAbsolute$2.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarAbsolute$2.html index 055365abd..65af3943d 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarAbsolute$2.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarAbsolute$2.html @@ -1 +1 @@ -ScalarAbsolute.new CPUFun() {...}

ScalarAbsolute.new CPUFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total14 of 2850%4 of 633%472424
invoke(float)80%20%221111
invoke(int)60%20%221111
invoke(double)8100%2100%020101
{...}6100%n/a010101
\ No newline at end of file +ScalarAbsolute.new CPUFun() {...}

ScalarAbsolute.new CPUFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total14 of 2850%4 of 633%472424
invoke(float)80%20%221111
invoke(int)60%20%221111
invoke(double)8100%2100%020101
{...}6100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarAbsolute.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarAbsolute.html index e79e09740..cbed1579c 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarAbsolute.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarAbsolute.html @@ -1 +1 @@ -ScalarAbsolute

ScalarAbsolute

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 19100%0 of 0n/a060606
getActivation()5100%n/a010101
getDerivative()5100%n/a010101
ScalarAbsolute()3100%n/a010101
id()2100%n/a010101
activationCode()2100%n/a010101
derivationCode()2100%n/a010101
\ No newline at end of file +ScalarAbsolute

ScalarAbsolute

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total4 of 1978%0 of 0n/a262626
activationCode()20%n/a111111
derivationCode()20%n/a111111
getActivation()5100%n/a010101
getDerivative()5100%n/a010101
ScalarAbsolute()3100%n/a010101
id()2100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarAbsolute.java.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarAbsolute.java.html index a03c6d1fe..ace07ab17 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarAbsolute.java.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarAbsolute.java.html @@ -7,9 +7,9 @@ { @Override public String id() { return "abs"; } - @Override public String activationCode() { return "output = fabs( input );\n"; } + @Override public String activationCode() { return "output = fabs( input );\n"; } - @Override public String derivationCode() { return "output = ( input < 0 ) ? -1 : 1;\n"; } + @Override public String derivationCode() { return "output = ( input < 0 ) ? -1 : 1;\n"; } @Override public CPUFun getActivation() { @@ -32,4 +32,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarCbrt$1.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarCbrt$1.html index 9f192ba32..78890c765 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarCbrt$1.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarCbrt$1.html @@ -1 +1 @@ -ScalarCbrt.new CPUFun() {...}

ScalarCbrt.new CPUFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total5 of 1464%0 of 0n/a131313
invoke(float)50%n/a111111
{...}6100%n/a010101
invoke(double)3100%n/a010101
\ No newline at end of file +ScalarCbrt.new CPUFun() {...}

ScalarCbrt.new CPUFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total5 of 1464%0 of 0n/a131313
invoke(float)50%n/a111111
{...}6100%n/a010101
invoke(double)3100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarCbrt$2.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarCbrt$2.html index ca81f2e06..5a82a17e7 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarCbrt$2.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarCbrt$2.html @@ -1 +1 @@ -ScalarCbrt.new CPUFun() {...}

ScalarCbrt.new CPUFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total24 of 240%0 of 0n/a333333
invoke(float)100%n/a111111
invoke(double)80%n/a111111
{...}60%n/a111111
\ No newline at end of file +ScalarCbrt.new CPUFun() {...}

ScalarCbrt.new CPUFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total24 of 240%0 of 0n/a333333
invoke(float)100%n/a111111
invoke(double)80%n/a111111
{...}60%n/a111111
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarCbrt.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarCbrt.html index 19d06d191..667972c52 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarCbrt.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarCbrt.html @@ -1 +1 @@ -ScalarCbrt

ScalarCbrt

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total5 of 1973%0 of 0n/a161616
getDerivative()50%n/a111111
getActivation()5100%n/a010101
ScalarCbrt()3100%n/a010101
id()2100%n/a010101
activationCode()2100%n/a010101
derivationCode()2100%n/a010101
\ No newline at end of file +ScalarCbrt

ScalarCbrt

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total9 of 1952%0 of 0n/a363636
getDerivative()50%n/a111111
activationCode()20%n/a111111
derivationCode()20%n/a111111
getActivation()5100%n/a010101
ScalarCbrt()3100%n/a010101
id()2100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarCbrt.java.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarCbrt.java.html index af2d50a5e..14542759f 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarCbrt.java.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarCbrt.java.html @@ -9,12 +9,12 @@ @Override public String activationCode() { - return "output = cbrt( input );\n"; + return "output = cbrt( input );\n"; } @Override public String derivationCode() { - return "output = 1 / ( 3 * pow( input, 2.0f / 3.0f ) );\n"; + return "output = 1 / ( 3 * pow( input, 2.0f / 3.0f ) );\n"; } @Override @@ -35,4 +35,4 @@ } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarCosinus$1.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarCosinus$1.html index 0c35f1188..18a679904 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarCosinus$1.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarCosinus$1.html @@ -1 +1 @@ -ScalarCosinus.new CPUFun() {...}

ScalarCosinus.new CPUFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 9100%0 of 0n/a020202
{...}6100%n/a010101
invoke(double)3100%n/a010101
\ No newline at end of file +ScalarCosinus.new CPUFun() {...}

ScalarCosinus.new CPUFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 9100%0 of 0n/a020202
{...}6100%n/a010101
invoke(double)3100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarCosinus$2.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarCosinus$2.html index 2b92f827a..6445c7f28 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarCosinus$2.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarCosinus$2.html @@ -1 +1 @@ -ScalarCosinus.new CPUFun() {...}

ScalarCosinus.new CPUFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 10100%0 of 0n/a020202
{...}6100%n/a010101
invoke(double)4100%n/a010101
\ No newline at end of file +ScalarCosinus.new CPUFun() {...}

ScalarCosinus.new CPUFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 10100%0 of 0n/a020202
{...}6100%n/a010101
invoke(double)4100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarCosinus.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarCosinus.html index 607ff951a..eefd85769 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarCosinus.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarCosinus.html @@ -1 +1 @@ -ScalarCosinus

ScalarCosinus

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 19100%0 of 0n/a060606
getActivation()5100%n/a010101
getDerivative()5100%n/a010101
ScalarCosinus()3100%n/a010101
id()2100%n/a010101
activationCode()2100%n/a010101
derivationCode()2100%n/a010101
\ No newline at end of file +ScalarCosinus

ScalarCosinus

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total4 of 1978%0 of 0n/a262626
activationCode()20%n/a111111
derivationCode()20%n/a111111
getActivation()5100%n/a010101
getDerivative()5100%n/a010101
ScalarCosinus()3100%n/a010101
id()2100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarCosinus.java.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarCosinus.java.html index d9323a94e..2265483d2 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarCosinus.java.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarCosinus.java.html @@ -8,10 +8,10 @@ @Override public String id() { return "cos"; } @Override - public String activationCode() { return "output = cos( input );\n"; } + public String activationCode() { return "output = cos( input );\n"; } @Override - public String derivationCode() { return "output = -sin( input );\n"; } + public String derivationCode() { return "output = -sin( input );\n"; } @Override public CPUFun getActivation() { @@ -30,4 +30,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarExp$1.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarExp$1.html index f8ce46f7e..52ea0b0a0 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarExp$1.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarExp$1.html @@ -1 +1 @@ -ScalarExp.new CPUFun() {...}

ScalarExp.new CPUFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 14100%0 of 0n/a030303
{...}6100%n/a010101
invoke(float)5100%n/a010101
invoke(double)3100%n/a010101
\ No newline at end of file +ScalarExp.new CPUFun() {...}

ScalarExp.new CPUFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 14100%0 of 0n/a030303
{...}6100%n/a010101
invoke(float)5100%n/a010101
invoke(double)3100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarExp$2.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarExp$2.html index f39681639..12c8f0133 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarExp$2.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarExp$2.html @@ -1 +1 @@ -ScalarExp.new CPUFun() {...}

ScalarExp.new CPUFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total3 of 1478%0 of 0n/a131313
invoke(double)30%n/a111111
{...}6100%n/a010101
invoke(float)5100%n/a010101
\ No newline at end of file +ScalarExp.new CPUFun() {...}

ScalarExp.new CPUFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total3 of 1478%0 of 0n/a131313
invoke(double)30%n/a111111
{...}6100%n/a010101
invoke(float)5100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarExp.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarExp.html index 1eb65a3c6..d2533135f 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarExp.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarExp.html @@ -1 +1 @@ -ScalarExp

ScalarExp

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 19100%0 of 0n/a060606
getActivation()5100%n/a010101
getDerivative()5100%n/a010101
ScalarExp()3100%n/a010101
id()2100%n/a010101
activationCode()2100%n/a010101
derivationCode()2100%n/a010101
\ No newline at end of file +ScalarExp

ScalarExp

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total4 of 1978%0 of 0n/a262626
activationCode()20%n/a111111
derivationCode()20%n/a111111
getActivation()5100%n/a010101
getDerivative()5100%n/a010101
ScalarExp()3100%n/a010101
id()2100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarExp.java.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarExp.java.html index 9f85b4872..f4b3eaf05 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarExp.java.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarExp.java.html @@ -9,12 +9,12 @@ @Override public String activationCode() { - return "output = exp( input );\n"; + return "output = exp( input );\n"; } @Override public String derivationCode() { - return "output = exp( input );\n"; + return "output = exp( input );\n"; } @Override @@ -34,4 +34,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarGaSU$1.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarGaSU$1.html index d7b94dd10..78a4c7b64 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarGaSU$1.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarGaSU$1.html @@ -1 +1 @@ -ScalarGaSU.new CPUFun() {...}

ScalarGaSU.new CPUFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 20100%0 of 0n/a030303
invoke(double)7100%n/a010101
invoke(float)7100%n/a010101
{...}6100%n/a010101
\ No newline at end of file +ScalarGaSU.new CPUFun() {...}

ScalarGaSU.new CPUFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 20100%0 of 0n/a030303
invoke(double)7100%n/a010101
invoke(float)7100%n/a010101
{...}6100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarGaSU$2.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarGaSU$2.html index 1d4d54f48..359561768 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarGaSU$2.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarGaSU$2.html @@ -1 +1 @@ -ScalarGaSU.new CPUFun() {...}

ScalarGaSU.new CPUFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 56100%0 of 0n/a030703
invoke(double)25100%n/a010301
invoke(float)25100%n/a010301
{...}6100%n/a010101
\ No newline at end of file +ScalarGaSU.new CPUFun() {...}

ScalarGaSU.new CPUFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 56100%0 of 0n/a030703
invoke(double)25100%n/a010301
invoke(float)25100%n/a010301
{...}6100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarGaSU.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarGaSU.html index 3076f2cfb..80f1c49f3 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarGaSU.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarGaSU.html @@ -1 +1 @@ -ScalarGaSU

ScalarGaSU

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 19100%0 of 0n/a060606
getActivation()5100%n/a010101
getDerivative()5100%n/a010101
ScalarGaSU()3100%n/a010101
id()2100%n/a010101
activationCode()2100%n/a010101
derivationCode()2100%n/a010101
\ No newline at end of file +ScalarGaSU

ScalarGaSU

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total4 of 1978%0 of 0n/a262626
activationCode()20%n/a111111
derivationCode()20%n/a111111
getActivation()5100%n/a010101
getDerivative()5100%n/a010101
ScalarGaSU()3100%n/a010101
id()2100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarGaSU.java.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarGaSU.java.html index 09e8d32ee..76e21f4ab 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarGaSU.java.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarGaSU.java.html @@ -17,13 +17,13 @@ @Override public String activationCode() { - return "float cubed = input * input * input; \n" + + return "float cubed = input * input * input; \n" + "output = cubed / ( 1.0f + fabs( cubed ) ); \n"; } @Override public String derivationCode() { - return "float x2 = input * input; \n" + + return "float x2 = input * input; \n" + "float x6 = x2 * x2 * x2; \n" + "output = 3.0f * x2 / ( 2.0f * x2 * fabs( input ) + x6 + 1.0f ); \n"; } @@ -55,4 +55,4 @@ } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarGaTU$1.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarGaTU$1.html index b03f0c257..f0fec6be5 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarGaTU$1.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarGaTU$1.html @@ -1 +1 @@ -ScalarGaTU.new CPUFun() {...}

ScalarGaTU.new CPUFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 20100%0 of 0n/a030303
invoke(double)7100%n/a010101
invoke(float)7100%n/a010101
{...}6100%n/a010101
\ No newline at end of file +ScalarGaTU.new CPUFun() {...}

ScalarGaTU.new CPUFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 20100%0 of 0n/a030303
invoke(double)7100%n/a010101
invoke(float)7100%n/a010101
{...}6100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarGaTU$2.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarGaTU$2.html index d00589623..5c7d70d67 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarGaTU$2.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarGaTU$2.html @@ -1 +1 @@ -ScalarGaTU.new CPUFun() {...}

ScalarGaTU.new CPUFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 56100%0 of 0n/a0301103
invoke(float)26100%n/a010501
invoke(double)24100%n/a010501
{...}6100%n/a010101
\ No newline at end of file +ScalarGaTU.new CPUFun() {...}

ScalarGaTU.new CPUFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 56100%0 of 0n/a0301103
invoke(float)26100%n/a010501
invoke(double)24100%n/a010501
{...}6100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarGaTU.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarGaTU.html index 4a97dab51..89aa5e389 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarGaTU.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarGaTU.html @@ -1 +1 @@ -ScalarGaTU

ScalarGaTU

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 19100%0 of 0n/a060606
getActivation()5100%n/a010101
getDerivative()5100%n/a010101
ScalarGaTU()3100%n/a010101
id()2100%n/a010101
activationCode()2100%n/a010101
derivationCode()2100%n/a010101
\ No newline at end of file +ScalarGaTU

ScalarGaTU

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total4 of 1978%0 of 0n/a262626
activationCode()20%n/a111111
derivationCode()20%n/a111111
getActivation()5100%n/a010101
getDerivative()5100%n/a010101
ScalarGaTU()3100%n/a010101
id()2100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarGaTU.java.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarGaTU.java.html index 438370db3..02a37f9b2 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarGaTU.java.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarGaTU.java.html @@ -16,11 +16,11 @@ @Override public String id() { return "gatu"; } @Override - public String activationCode() { return "output = tanh(input*input*input);\n"; } + public String activationCode() { return "output = tanh(input*input*input);\n"; } @Override public String derivationCode() { - return "float x2 = input * input; \n" + + return "float x2 = input * input; \n" + "float x3 = x2 * input; \n" + "float temp = 3 * x2; \n" + "float tanh2 = pow(tanh(x3), 2); \n" + @@ -57,4 +57,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarGaussian$1.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarGaussian$1.html index 54c781f43..9e054dcc6 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarGaussian$1.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarGaussian$1.html @@ -1 +1 @@ -ScalarGaussian.new CPUFun() {...}

ScalarGaussian.new CPUFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 12100%0 of 0n/a020202
{...}6100%n/a010101
invoke(double)6100%n/a010101
\ No newline at end of file +ScalarGaussian.new CPUFun() {...}

ScalarGaussian.new CPUFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 12100%0 of 0n/a020202
{...}6100%n/a010101
invoke(double)6100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarGaussian$2.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarGaussian$2.html index 2e18e52ff..0eddb7538 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarGaussian$2.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarGaussian$2.html @@ -1 +1 @@ -ScalarGaussian.new CPUFun() {...}

ScalarGaussian.new CPUFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total16 of 160%0 of 0n/a222222
invoke(double)100%n/a111111
{...}60%n/a111111
\ No newline at end of file +ScalarGaussian.new CPUFun() {...}

ScalarGaussian.new CPUFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total16 of 160%0 of 0n/a222222
invoke(double)100%n/a111111
{...}60%n/a111111
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarGaussian.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarGaussian.html index 64b8730f2..f925627c0 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarGaussian.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarGaussian.html @@ -1 +1 @@ -ScalarGaussian

ScalarGaussian

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total5 of 1973%0 of 0n/a161616
getDerivative()50%n/a111111
getActivation()5100%n/a010101
ScalarGaussian()3100%n/a010101
id()2100%n/a010101
activationCode()2100%n/a010101
derivationCode()2100%n/a010101
\ No newline at end of file +ScalarGaussian

ScalarGaussian

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total9 of 1952%0 of 0n/a363636
getDerivative()50%n/a111111
activationCode()20%n/a111111
derivationCode()20%n/a111111
getActivation()5100%n/a010101
ScalarGaussian()3100%n/a010101
id()2100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarGaussian.java.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarGaussian.java.html index 24a2cc975..b0382f41b 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarGaussian.java.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarGaussian.java.html @@ -7,9 +7,9 @@ { @Override public String id() { return "gaus"; } - @Override public String activationCode() { return "output = exp( -( input * input ) );\n"; } + @Override public String activationCode() { return "output = exp( -( input * input ) );\n"; } - @Override public String derivationCode() { return "output = -2 * input * exp( -( input * input ) );\n"; } + @Override public String derivationCode() { return "output = -2 * input * exp( -( input * input ) );\n"; } @Override public CPUFun getActivation() { @@ -26,4 +26,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarGaussianFast$1.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarGaussianFast$1.html index 080459600..eabe39045 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarGaussianFast$1.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarGaussianFast$1.html @@ -1 +1 @@ -ScalarGaussianFast.new CPUFun() {...}

ScalarGaussianFast.new CPUFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 22100%0 of 0n/a030303
invoke(double)8100%n/a010101
invoke(float)8100%n/a010101
{...}6100%n/a010101
\ No newline at end of file +ScalarGaussianFast.new CPUFun() {...}

ScalarGaussianFast.new CPUFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 22100%0 of 0n/a030303
invoke(double)8100%n/a010101
invoke(float)8100%n/a010101
{...}6100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarGaussianFast$2.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarGaussianFast$2.html index bcbb9459c..5ed1b6460 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarGaussianFast$2.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarGaussianFast$2.html @@ -1 +1 @@ -ScalarGaussianFast.new CPUFun() {...}

ScalarGaussianFast.new CPUFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total18 of 4257%0 of 0n/a132513
invoke(float)180%n/a112211
invoke(double)18100%n/a010201
{...}6100%n/a010101
\ No newline at end of file +ScalarGaussianFast.new CPUFun() {...}

ScalarGaussianFast.new CPUFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total18 of 4257%0 of 0n/a132513
invoke(float)180%n/a112211
invoke(double)18100%n/a010201
{...}6100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarGaussianFast.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarGaussianFast.html index 94fdb8663..7ad2986b1 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarGaussianFast.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarGaussianFast.html @@ -1 +1 @@ -ScalarGaussianFast

ScalarGaussianFast

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 19100%0 of 0n/a060606
getActivation()5100%n/a010101
getDerivative()5100%n/a010101
ScalarGaussianFast()3100%n/a010101
id()2100%n/a010101
activationCode()2100%n/a010101
derivationCode()2100%n/a010101
\ No newline at end of file +ScalarGaussianFast

ScalarGaussianFast

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total4 of 1978%0 of 0n/a262626
activationCode()20%n/a111111
derivationCode()20%n/a111111
getActivation()5100%n/a010101
getDerivative()5100%n/a010101
ScalarGaussianFast()3100%n/a010101
id()2100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarGaussianFast.java.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarGaussianFast.java.html index 40ae53b44..599d9de55 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarGaussianFast.java.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarGaussianFast.java.html @@ -7,10 +7,10 @@ { @Override public String id() { return "fast_gaus"; } - @Override public String activationCode() { return "output = 1 / ( 1 + input * input );\n"; } + @Override public String activationCode() { return "output = 1 / ( 1 + input * input );\n"; } @Override public String derivationCode() { - return "float x2 = input * input;\n" + + return "float x2 = input * input;\n" + "output = -2 * input / ( x2 * x2 + 2 * x2 + 1 );\n"; } @@ -38,4 +38,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarGeLU$1.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarGeLU$1.html index bace3101c..6531b7107 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarGeLU$1.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarGeLU$1.html @@ -1 +1 @@ -ScalarGeLU.new CPUFun() {...}

ScalarGeLU.new CPUFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 14100%0 of 0n/a030303
{...}6100%n/a010101
invoke(float)5100%n/a010101
invoke(double)3100%n/a010101
\ No newline at end of file +ScalarGeLU.new CPUFun() {...}

ScalarGeLU.new CPUFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 14100%0 of 0n/a030303
{...}6100%n/a010101
invoke(float)5100%n/a010101
invoke(double)3100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarGeLU$2.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarGeLU$2.html index 9bfbdef27..1e516deab 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarGeLU$2.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarGeLU$2.html @@ -1 +1 @@ -ScalarGeLU.new CPUFun() {...}

ScalarGeLU.new CPUFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 46100%0 of 0n/a030703
invoke(float)21100%n/a010301
invoke(double)19100%n/a010301
{...}6100%n/a010101
\ No newline at end of file +ScalarGeLU.new CPUFun() {...}

ScalarGeLU.new CPUFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 46100%0 of 0n/a030703
invoke(float)21100%n/a010301
invoke(double)19100%n/a010301
{...}6100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarGeLU.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarGeLU.html index fce508894..d8216e94b 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarGeLU.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarGeLU.html @@ -1 +1 @@ -ScalarGeLU

ScalarGeLU

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 26100%0 of 0n/a070707
gelu(double)7100%n/a010101
getActivation()5100%n/a010101
getDerivative()5100%n/a010101
ScalarGeLU()3100%n/a010101
id()2100%n/a010101
activationCode()2100%n/a010101
derivationCode()2100%n/a010101
\ No newline at end of file +ScalarGeLU

ScalarGeLU

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total4 of 2684%0 of 0n/a272727
activationCode()20%n/a111111
derivationCode()20%n/a111111
gelu(double)7100%n/a010101
getActivation()5100%n/a010101
getDerivative()5100%n/a010101
ScalarGeLU()3100%n/a010101
id()2100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarGeLU.java.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarGeLU.java.html index cf167872a..710c79cb3 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarGeLU.java.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarGeLU.java.html @@ -17,10 +17,10 @@ @Override public String id() { return "gelu"; } - @Override public String activationCode() { return "output = input / ( 1 + (float) exp(-input * 1.702) );\n"; } + @Override public String activationCode() { return "output = input / ( 1 + (float) exp(-input * 1.702) );\n"; } @Override public String derivationCode() { - return "float sig = 1.0f / ( 1.0f + exp( -input * 1.702f ) );" + + return "float sig = 1.0f / ( 1.0f + exp( -input * 1.702f ) );" + "float ds = sig * ( 1.0f - sig );" + "output = sig + ds * input * 1.702;\n"; } @@ -54,4 +54,4 @@ public static double gelu(double x) { return x * ScalarSigmoid.sig(x * 1.702); } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarIdentity$1.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarIdentity$1.html index ab39d1a06..b2e829602 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarIdentity$1.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarIdentity$1.html @@ -1 +1 @@ -ScalarIdentity.new CPUFun() {...}

ScalarIdentity.new CPUFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 20100%0 of 0n/a080808
{...}6100%n/a010101
invoke(double)2100%n/a010101
invoke(float)2100%n/a010101
invoke(int)2100%n/a010101
invoke(long)2100%n/a010101
invoke(boolean)2100%n/a010101
invoke(char)2100%n/a010101
invoke(Object)2100%n/a010101
\ No newline at end of file +ScalarIdentity.new CPUFun() {...}

ScalarIdentity.new CPUFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 20100%0 of 0n/a080808
{...}6100%n/a010101
invoke(double)2100%n/a010101
invoke(float)2100%n/a010101
invoke(int)2100%n/a010101
invoke(long)2100%n/a010101
invoke(boolean)2100%n/a010101
invoke(char)2100%n/a010101
invoke(Object)2100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarIdentity$2.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarIdentity$2.html index 363ba16aa..afcc3818e 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarIdentity$2.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarIdentity$2.html @@ -1 +1 @@ -ScalarIdentity.new CPUFun() {...}

ScalarIdentity.new CPUFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total16 of 160%0 of 0n/a666666
{...}60%n/a111111
invoke(double)20%n/a111111
invoke(float)20%n/a111111
invoke(int)20%n/a111111
invoke(long)20%n/a111111
invoke(Object)20%n/a111111
\ No newline at end of file +ScalarIdentity.new CPUFun() {...}

ScalarIdentity.new CPUFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total16 of 160%0 of 0n/a666666
{...}60%n/a111111
invoke(double)20%n/a111111
invoke(float)20%n/a111111
invoke(int)20%n/a111111
invoke(long)20%n/a111111
invoke(Object)20%n/a111111
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarIdentity.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarIdentity.html index 5b040f3eb..c244249e7 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarIdentity.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarIdentity.html @@ -1 +1 @@ -ScalarIdentity

ScalarIdentity

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total5 of 1973%0 of 0n/a161616
getDerivative()50%n/a111111
getActivation()5100%n/a010101
ScalarIdentity()3100%n/a010101
id()2100%n/a010101
activationCode()2100%n/a010101
derivationCode()2100%n/a010101
\ No newline at end of file +ScalarIdentity

ScalarIdentity

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total9 of 1952%0 of 0n/a363636
getDerivative()50%n/a111111
activationCode()20%n/a111111
derivationCode()20%n/a111111
getActivation()5100%n/a010101
ScalarIdentity()3100%n/a010101
id()2100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarIdentity.java.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarIdentity.java.html index 5e2a68334..bf4b73717 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarIdentity.java.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarIdentity.java.html @@ -7,9 +7,9 @@ { @Override public String id() { return "idy"; } - @Override public String activationCode() { return "output = input; \n"; } + @Override public String activationCode() { return "output = input; \n"; } - @Override public String derivationCode() { return "output = 1.0f; \n"; } + @Override public String derivationCode() { return "output = 1.0f; \n"; } @Override public CPUFun getActivation() { @@ -36,4 +36,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarLog10$1.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarLog10$1.html index 7a18fad79..b9708a4e9 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarLog10$1.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarLog10$1.html @@ -1 +1 @@ -ScalarLog10.new CPUFun() {...}

ScalarLog10.new CPUFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total5 of 1464%0 of 0n/a131313
invoke(float)50%n/a111111
{...}6100%n/a010101
invoke(double)3100%n/a010101
\ No newline at end of file +ScalarLog10.new CPUFun() {...}

ScalarLog10.new CPUFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total5 of 1464%0 of 0n/a131313
invoke(float)50%n/a111111
{...}6100%n/a010101
invoke(double)3100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarLog10$2.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarLog10$2.html index ea7e98f73..431402133 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarLog10$2.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarLog10$2.html @@ -1 +1 @@ -ScalarLog10.new CPUFun() {...}

ScalarLog10.new CPUFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total21 of 210%0 of 0n/a333333
invoke(float)80%n/a111111
invoke(double)70%n/a111111
{...}60%n/a111111
\ No newline at end of file +ScalarLog10.new CPUFun() {...}

ScalarLog10.new CPUFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total21 of 210%0 of 0n/a333333
invoke(float)80%n/a111111
invoke(double)70%n/a111111
{...}60%n/a111111
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarLog10.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarLog10.html index e7e3e60f5..a3869034b 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarLog10.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarLog10.html @@ -1 +1 @@ -ScalarLog10

ScalarLog10

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total5 of 1973%0 of 0n/a161616
getDerivative()50%n/a111111
getActivation()5100%n/a010101
ScalarLog10()3100%n/a010101
id()2100%n/a010101
activationCode()2100%n/a010101
derivationCode()2100%n/a010101
\ No newline at end of file +ScalarLog10

ScalarLog10

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total9 of 1952%0 of 0n/a363636
getDerivative()50%n/a111111
activationCode()20%n/a111111
derivationCode()20%n/a111111
getActivation()5100%n/a010101
ScalarLog10()3100%n/a010101
id()2100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarLog10.java.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarLog10.java.html index c4acd4d77..3711fb9ad 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarLog10.java.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarLog10.java.html @@ -9,12 +9,12 @@ @Override public String activationCode() { - return "output = log10( input );\n"; + return "output = log10( input );\n"; } @Override public String derivationCode() { - return "output = 1.0f / ( input * log( 10.0f ) );\n"; + return "output = 1.0f / ( input * log( 10.0f ) );\n"; } @Override @@ -34,4 +34,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarLogarithm$1.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarLogarithm$1.html index c392f0f86..29238a98d 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarLogarithm$1.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarLogarithm$1.html @@ -1 +1 @@ -ScalarLogarithm.new CPUFun() {...}

ScalarLogarithm.new CPUFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 9100%0 of 0n/a020202
{...}6100%n/a010101
invoke(double)3100%n/a010101
\ No newline at end of file +ScalarLogarithm.new CPUFun() {...}

ScalarLogarithm.new CPUFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 9100%0 of 0n/a020202
{...}6100%n/a010101
invoke(double)3100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarLogarithm$2.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarLogarithm$2.html index b011f7653..9a67432f1 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarLogarithm$2.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarLogarithm$2.html @@ -1 +1 @@ -ScalarLogarithm.new CPUFun() {...}

ScalarLogarithm.new CPUFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total4 of 1471%0 of 0n/a131313
invoke(float)40%n/a111111
{...}6100%n/a010101
invoke(double)4100%n/a010101
\ No newline at end of file +ScalarLogarithm.new CPUFun() {...}

ScalarLogarithm.new CPUFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total4 of 1471%0 of 0n/a131313
invoke(float)40%n/a111111
{...}6100%n/a010101
invoke(double)4100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarLogarithm.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarLogarithm.html index 312ff8f1b..111a1ecd0 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarLogarithm.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarLogarithm.html @@ -1 +1 @@ -ScalarLogarithm

ScalarLogarithm

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 19100%0 of 0n/a060606
getActivation()5100%n/a010101
getDerivative()5100%n/a010101
ScalarLogarithm()3100%n/a010101
id()2100%n/a010101
activationCode()2100%n/a010101
derivationCode()2100%n/a010101
\ No newline at end of file +ScalarLogarithm

ScalarLogarithm

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total4 of 1978%0 of 0n/a262626
activationCode()20%n/a111111
derivationCode()20%n/a111111
getActivation()5100%n/a010101
getDerivative()5100%n/a010101
ScalarLogarithm()3100%n/a010101
id()2100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarLogarithm.java.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarLogarithm.java.html index bb45d1544..7ebddafdf 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarLogarithm.java.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarLogarithm.java.html @@ -8,10 +8,10 @@ @Override public String id() { return "ln"; } @Override - public String activationCode() { return "output = log( input );\n"; } + public String activationCode() { return "output = log( input );\n"; } @Override - public String derivationCode() { return "output = 1.0 / ( input );\n"; } + public String derivationCode() { return "output = 1.0 / ( input );\n"; } @Override public CPUFun getActivation() { @@ -29,4 +29,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarQuadratic$1.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarQuadratic$1.html index 0c3dd6ea3..73140ef75 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarQuadratic$1.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarQuadratic$1.html @@ -1 +1 @@ -ScalarQuadratic.new CPUFun() {...}

ScalarQuadratic.new CPUFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total8 of 1855%0 of 0n/a242424
invoke(float)40%n/a111111
invoke(int)40%n/a111111
{...}6100%n/a010101
invoke(double)4100%n/a010101
\ No newline at end of file +ScalarQuadratic.new CPUFun() {...}

ScalarQuadratic.new CPUFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total8 of 1855%0 of 0n/a242424
invoke(float)40%n/a111111
invoke(int)40%n/a111111
{...}6100%n/a010101
invoke(double)4100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarQuadratic$2.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarQuadratic$2.html index ebdf69061..f86ccbd45 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarQuadratic$2.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarQuadratic$2.html @@ -1 +1 @@ -ScalarQuadratic.new CPUFun() {...}

ScalarQuadratic.new CPUFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total8 of 1855%0 of 0n/a242424
invoke(float)40%n/a111111
invoke(int)40%n/a111111
{...}6100%n/a010101
invoke(double)4100%n/a010101
\ No newline at end of file +ScalarQuadratic.new CPUFun() {...}

ScalarQuadratic.new CPUFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total8 of 1855%0 of 0n/a242424
invoke(float)40%n/a111111
invoke(int)40%n/a111111
{...}6100%n/a010101
invoke(double)4100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarQuadratic.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarQuadratic.html index 4f15610fa..9f907f3d7 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarQuadratic.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarQuadratic.html @@ -1 +1 @@ -ScalarQuadratic

ScalarQuadratic

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 19100%0 of 0n/a060606
getActivation()5100%n/a010101
getDerivative()5100%n/a010101
ScalarQuadratic()3100%n/a010101
id()2100%n/a010101
activationCode()2100%n/a010101
derivationCode()2100%n/a010101
\ No newline at end of file +ScalarQuadratic

ScalarQuadratic

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total4 of 1978%0 of 0n/a262626
activationCode()20%n/a111111
derivationCode()20%n/a111111
getActivation()5100%n/a010101
getDerivative()5100%n/a010101
ScalarQuadratic()3100%n/a010101
id()2100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarQuadratic.java.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarQuadratic.java.html index a9ee5b504..0fdf07959 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarQuadratic.java.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarQuadratic.java.html @@ -7,9 +7,9 @@ { @Override public String id() { return "quad"; } - @Override public String activationCode() { return "output = input * input;\n"; } + @Override public String activationCode() { return "output = input * input;\n"; } - @Override public String derivationCode() { return "output = 2 * input;\n"; } + @Override public String derivationCode() { return "output = 2 * input;\n"; } @Override public CPUFun getActivation() { @@ -30,4 +30,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarReLU$1.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarReLU$1.html index bcbe09dad..d3439f15f 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarReLU$1.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarReLU$1.html @@ -1 +1 @@ -ScalarReLU.new CPUFun() {...}

ScalarReLU.new CPUFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 26100%0 of 4100%050303
invoke(double)10100%2100%020101
invoke(float)10100%2100%020101
{...}6100%n/a010101
\ No newline at end of file +ScalarReLU.new CPUFun() {...}

ScalarReLU.new CPUFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 26100%0 of 4100%050303
invoke(double)10100%2100%020101
invoke(float)10100%2100%020101
{...}6100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarReLU$2.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarReLU$2.html index ae991e992..1bc959fab 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarReLU$2.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarReLU$2.html @@ -1 +1 @@ -ScalarReLU.new CPUFun() {...}

ScalarReLU.new CPUFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total8 of 2263%2 of 450%251313
invoke(float)80%20%221111
invoke(double)8100%2100%020101
{...}6100%n/a010101
\ No newline at end of file +ScalarReLU.new CPUFun() {...}

ScalarReLU.new CPUFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total8 of 2263%2 of 450%251313
invoke(float)80%20%221111
invoke(double)8100%2100%020101
{...}6100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarReLU.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarReLU.html index c6eb4429c..bd7f31ef1 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarReLU.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarReLU.html @@ -1 +1 @@ -ScalarReLU

ScalarReLU

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 19100%0 of 0n/a060606
getActivation()5100%n/a010101
getDerivative()5100%n/a010101
ScalarReLU()3100%n/a010101
id()2100%n/a010101
activationCode()2100%n/a010101
derivationCode()2100%n/a010101
\ No newline at end of file +ScalarReLU

ScalarReLU

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total4 of 1978%0 of 0n/a262626
activationCode()20%n/a111111
derivationCode()20%n/a111111
getActivation()5100%n/a010101
getDerivative()5100%n/a010101
ScalarReLU()3100%n/a010101
id()2100%n/a010101
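The ScalarReLU.java.html hunk just below shows that, despite the id "relu", the kernel gives negative inputs a 0.01 slope, i.e. a leaky ReLU. A small stand-alone Java sketch of the same branch logic (class name ReluSketch is invented for illustration):

    // Illustrative sketch of the branch logic in ScalarReLU.java below (leaky slope 0.01).
    public final class ReluSketch {
        static double activate(double x) { return x >= 0 ? x : x * 0.01; }
        static double derive(double x)   { return x >= 0 ? 1.0 : 0.01; }
        public static void main(String[] args) {
            System.out.println(activate(-2.0)); // -0.02
            System.out.println(derive(-2.0));   // 0.01
        }
    }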
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarReLU.java.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarReLU.java.html index 4eaecc54f..e46dbb01a 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarReLU.java.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarReLU.java.html @@ -9,12 +9,12 @@ @Override public String activationCode() { - return "if (input >= 0) { output = input; } else { output = input * (float)0.01; }\n"; + return "if (input >= 0) { output = input; } else { output = input * (float)0.01; }\n"; } @Override public String derivationCode() { - return "if (input >= 0) { output = (float)1; } else { output = (float)0.01; }\n"; + return "if (input >= 0) { output = (float)1; } else { output = (float)0.01; }\n"; } @Override @@ -34,4 +34,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSeLU$1.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSeLU$1.html index baf4ce741..40031dcd3 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSeLU$1.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSeLU$1.html @@ -1 +1 @@ -ScalarSeLU.new CPUFun() {...}

ScalarSeLU.new CPUFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 14100%0 of 0n/a030303
{...}6100%n/a010101
invoke(float)5100%n/a010101
invoke(double)3100%n/a010101
\ No newline at end of file +ScalarSeLU.new CPUFun() {...}

ScalarSeLU.new CPUFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 14100%0 of 0n/a030303
{...}6100%n/a010101
invoke(float)5100%n/a010101
invoke(double)3100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSeLU$2.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSeLU$2.html index a06a43a7d..60e2b9415 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSeLU$2.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSeLU$2.html @@ -1 +1 @@ -ScalarSeLU.new CPUFun() {...}

ScalarSeLU.new CPUFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total15 of 4264%4 of 850%373703
invoke(float)13631%3125%232301
invoke(double)21588%1375%131301
{...}6100%n/a010101
\ No newline at end of file +ScalarSeLU.new CPUFun() {...}

ScalarSeLU.new CPUFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total15 of 4264%4 of 850%373703
invoke(float)13631%3125%232301
invoke(double)21588%1375%131301
{...}6100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSeLU.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSeLU.html index bc98f5ffc..89b0fbce9 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSeLU.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSeLU.html @@ -1 +1 @@ -ScalarSeLU

ScalarSeLU

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total2 of 4095%1 of 475%191907
selu(double)21990%1375%131301
getActivation()5100%n/a010101
getDerivative()5100%n/a010101
ScalarSeLU()3100%n/a010101
id()2100%n/a010101
activationCode()2100%n/a010101
derivationCode()2100%n/a010101
\ No newline at end of file +ScalarSeLU

ScalarSeLU

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total6 of 4085%1 of 475%393927
selu(double)21990%1375%131301
activationCode()20%n/a111111
derivationCode()20%n/a111111
getActivation()5100%n/a010101
getDerivative()5100%n/a010101
ScalarSeLU()3100%n/a010101
id()2100%n/a010101
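The ScalarSeLU.java.html hunk just below builds its kernels from SCALE_F32 and ALPHA_F32; their values are not visible in this hunk, so the sketch assumes the commonly published SELU constants. The class name and constant values are therefore assumptions; only the branch structure comes from the diff:

    // Illustrative sketch; ALPHA and SCALE are assumed standard SELU constants,
    // the hunk below only references them as ALPHA_F32 / SCALE_F32.
    public final class SeluSketch {
        static final double ALPHA = 1.6732632423543772;
        static final double SCALE = 1.0507009873554805;
        static double activate(double x) { return x > 0 ? SCALE * x : SCALE * ALPHA * (Math.exp(x) - 1.0); }
        static double derive(double x)   { return x > 0 ? SCALE     : SCALE * ALPHA * Math.exp(x); }
        public static void main(String[] args) {
            System.out.println(activate(1.0));  // ~1.0507
            System.out.println(activate(-1.0)); // ~-1.1113
        }
    }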
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSeLU.java.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSeLU.java.html index 381b32f74..90c60f70c 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSeLU.java.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSeLU.java.html @@ -25,13 +25,13 @@ @Override public String id() { return "selu"; } @Override public String activationCode() { - return "if ( input > 0 ) output = "+SCALE_F32+"f * input;\n" + + return "if ( input > 0 ) output = "+SCALE_F32+"f * input;\n" + "else if ( input <= 0 ) output = "+SCALE_F32+"f * "+ALPHA_F32+"f * (exp(input) - 1.0f);\n" + "else output = 0.0f;\n"; } @Override public String derivationCode() { - return "if ( input > 0 ) output = "+SCALE_F32+"f;\n" + + return "if ( input > 0 ) output = "+SCALE_F32+"f;\n" + "else if ( input <= 0 ) output = "+SCALE_F32+"f * "+ALPHA_F32+"f * exp(input);\n" + "else output = 1.0f;\n"; } @@ -71,4 +71,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSiLU$1.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSiLU$1.html index fe5454c7b..fd44764e1 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSiLU$1.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSiLU$1.html @@ -1 +1 @@ -ScalarSiLU.new CPUFun() {...}

ScalarSiLU.new CPUFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 14100%0 of 0n/a030303
{...}6100%n/a010101
invoke(float)5100%n/a010101
invoke(double)3100%n/a010101
\ No newline at end of file +ScalarSiLU.new CPUFun() {...}

ScalarSiLU.new CPUFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 14100%0 of 0n/a030303
{...}6100%n/a010101
invoke(float)5100%n/a010101
invoke(double)3100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSiLU$2.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSiLU$2.html index 00396391e..4f6124552 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSiLU$2.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSiLU$2.html @@ -1 +1 @@ -ScalarSiLU.new CPUFun() {...}

ScalarSiLU.new CPUFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 34100%0 of 0n/a030503
invoke(float)15100%n/a010201
invoke(double)13100%n/a010201
{...}6100%n/a010101
\ No newline at end of file +ScalarSiLU.new CPUFun() {...}

ScalarSiLU.new CPUFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 34100%0 of 0n/a030503
invoke(float)15100%n/a010201
invoke(double)13100%n/a010201
{...}6100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSiLU.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSiLU.html index 3aedd29f1..0884e6149 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSiLU.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSiLU.html @@ -1 +1 @@ -ScalarSiLU

ScalarSiLU

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 24100%0 of 0n/a070707
getActivation()5100%n/a010101
getDerivative()5100%n/a010101
silu(double)5100%n/a010101
ScalarSiLU()3100%n/a010101
id()2100%n/a010101
activationCode()2100%n/a010101
derivationCode()2100%n/a010101
\ No newline at end of file +ScalarSiLU

ScalarSiLU

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total4 of 2483%0 of 0n/a272727
activationCode()20%n/a111111
derivationCode()20%n/a111111
getActivation()5100%n/a010101
getDerivative()5100%n/a010101
silu(double)5100%n/a010101
ScalarSiLU()3100%n/a010101
id()2100%n/a010101
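The ScalarSiLU.java.html hunk just below defines silu(x) = x * sigmoid(x) with a derivative of the form sig + x * sig * (1 - sig). A compact Java sketch of the same math (class name SiluSketch invented for illustration):

    // Illustrative sketch of SiLU ("swish") as shown in ScalarSiLU.java below.
    public final class SiluSketch {
        static double sig(double x)      { return 1.0 / (1.0 + Math.exp(-x)); }
        static double activate(double x) { return x * sig(x); }
        static double derive(double x)   { double s = sig(x); return s + x * s * (1.0 - s); }
        public static void main(String[] args) {
            System.out.println(activate(0.0)); // 0.0
            System.out.println(derive(0.0));   // 0.5
        }
    }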
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSiLU.java.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSiLU.java.html index 819acfbb0..a4be7da9e 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSiLU.java.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSiLU.java.html @@ -13,10 +13,10 @@ { @Override public String id() { return "silu"; } - @Override public String activationCode() { return "output = input / ( 1 + (float) exp(-input) );\n"; } + @Override public String activationCode() { return "output = input / ( 1 + (float) exp(-input) );\n"; } @Override public String derivationCode() { - return "float sig = 1.0f / ( 1.0f + exp( -input ) );" + + return "float sig = 1.0f / ( 1.0f + exp( -input ) );" + "output = sig + ( input * sig * ( 1.0f - sig ) );\n"; } @@ -48,4 +48,4 @@ public static double silu(double x) { return x * ScalarSigmoid.sig(x); } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSigmoid$1.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSigmoid$1.html index fa6632705..372e8cbcb 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSigmoid$1.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSigmoid$1.html @@ -1 +1 @@ -ScalarSigmoid.new CPUFun() {...}

ScalarSigmoid.new CPUFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 14100%0 of 0n/a030303
{...}6100%n/a010101
invoke(float)5100%n/a010101
invoke(double)3100%n/a010101
\ No newline at end of file +ScalarSigmoid.new CPUFun() {...}

ScalarSigmoid.new CPUFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 14100%0 of 0n/a030303
{...}6100%n/a010101
invoke(float)5100%n/a010101
invoke(double)3100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSigmoid$2.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSigmoid$2.html index f11b5d9e2..f136785a1 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSigmoid$2.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSigmoid$2.html @@ -1 +1 @@ -ScalarSigmoid.new CPUFun() {...}

ScalarSigmoid.new CPUFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 26100%0 of 0n/a030503
invoke(float)11100%n/a010201
invoke(double)9100%n/a010201
{...}6100%n/a010101
\ No newline at end of file +ScalarSigmoid.new CPUFun() {...}

ScalarSigmoid.new CPUFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 26100%0 of 0n/a030503
invoke(float)11100%n/a010201
invoke(double)9100%n/a010201
{...}6100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSigmoid.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSigmoid.html index 21335650e..e6f5512fb 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSigmoid.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSigmoid.html @@ -1 +1 @@ -ScalarSigmoid

ScalarSigmoid

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 27100%0 of 0n/a070707
sig(double)8100%n/a010101
getActivation()5100%n/a010101
getDerivative()5100%n/a010101
ScalarSigmoid()3100%n/a010101
id()2100%n/a010101
activationCode()2100%n/a010101
derivationCode()2100%n/a010101
\ No newline at end of file +ScalarSigmoid

ScalarSigmoid

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total4 of 2785%0 of 0n/a272727
activationCode()20%n/a111111
derivationCode()20%n/a111111
sig(double)8100%n/a010101
getActivation()5100%n/a010101
getDerivative()5100%n/a010101
ScalarSigmoid()3100%n/a010101
id()2100%n/a010101
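The ScalarSigmoid.java.html hunk just below exposes sig(x) = 1 / (1 + e^-x); its derivation kernel is phrased as input * (1 - input), which equals sig(x) * (1 - sig(x)) when the kernel receives the already-activated value. The sketch below (class name SigmoidSketch invented for illustration) computes the derivative directly from x:

    // Illustrative sketch of the sigmoid math in ScalarSigmoid.java below.
    public final class SigmoidSketch {
        static double sig(double x)    { return 1.0 / (1.0 + Math.exp(-x)); }
        static double derive(double x) { double s = sig(x); return s * (1.0 - s); }
        public static void main(String[] args) {
            System.out.println(sig(0.0));    // 0.5
            System.out.println(derive(0.0)); // 0.25
        }
    }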
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSigmoid.java.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSigmoid.java.html index 35f054602..540f45391 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSigmoid.java.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSigmoid.java.html @@ -7,9 +7,9 @@ { @Override public String id() { return "sig"; } - @Override public String activationCode() { return "output = 1 / ( 1 + (float) exp(-input) );\n"; } + @Override public String activationCode() { return "output = 1 / ( 1 + (float) exp(-input) );\n"; } - @Override public String derivationCode() { return "output = input * ( 1 - input );\n"; } + @Override public String derivationCode() { return "output = input * ( 1 - input );\n"; } @Override public CPUFun getActivation() { @@ -38,4 +38,4 @@ public static double sig(double x) { return 1d / ( 1d + Math.exp( -x ) ); } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSinus$1.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSinus$1.html index ae316c036..55096ca8c 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSinus$1.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSinus$1.html @@ -1 +1 @@ -ScalarSinus.new CPUFun() {...}

ScalarSinus.new CPUFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 9100%0 of 0n/a020202
{...}6100%n/a010101
invoke(double)3100%n/a010101
\ No newline at end of file +ScalarSinus.new CPUFun() {...}

ScalarSinus.new CPUFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 9100%0 of 0n/a020202
{...}6100%n/a010101
invoke(double)3100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSinus$2.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSinus$2.html index c600358a2..11c26c979 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSinus$2.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSinus$2.html @@ -1 +1 @@ -ScalarSinus.new CPUFun() {...}

ScalarSinus.new CPUFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 9100%0 of 0n/a020202
{...}6100%n/a010101
invoke(double)3100%n/a010101
\ No newline at end of file +ScalarSinus.new CPUFun() {...}

ScalarSinus.new CPUFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 9100%0 of 0n/a020202
{...}6100%n/a010101
invoke(double)3100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSinus.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSinus.html index d8d3704a3..e38a5c0fe 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSinus.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSinus.html @@ -1 +1 @@ -ScalarSinus

ScalarSinus

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 19100%0 of 0n/a060606
getActivation()5100%n/a010101
getDerivative()5100%n/a010101
ScalarSinus()3100%n/a010101
id()2100%n/a010101
activationCode()2100%n/a010101
derivationCode()2100%n/a010101
\ No newline at end of file +ScalarSinus

ScalarSinus

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total4 of 1978%0 of 0n/a262626
activationCode()20%n/a111111
derivationCode()20%n/a111111
getActivation()5100%n/a010101
getDerivative()5100%n/a010101
ScalarSinus()3100%n/a010101
id()2100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSinus.java.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSinus.java.html index b65279d60..e0c82f0bf 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSinus.java.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSinus.java.html @@ -7,9 +7,9 @@ { @Override public String id() { return "sin"; } - @Override public String activationCode() { return "output = sin( input );\n"; } + @Override public String activationCode() { return "output = sin( input );\n"; } - @Override public String derivationCode() { return "output = cos( input );\n"; } + @Override public String derivationCode() { return "output = cos( input );\n"; } @Override public CPUFun getActivation() { @@ -26,4 +26,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSoftplus$1.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSoftplus$1.html index 1cc9e1ebc..7e04f6ec1 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSoftplus$1.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSoftplus$1.html @@ -1 +1 @@ -ScalarSoftplus.new CPUFun() {...}

ScalarSoftplus.new CPUFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 12100%0 of 0n/a020202
{...}6100%n/a010101
invoke(double)6100%n/a010101
\ No newline at end of file +ScalarSoftplus.new CPUFun() {...}

ScalarSoftplus.new CPUFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 12100%0 of 0n/a020202
{...}6100%n/a010101
invoke(double)6100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSoftplus$2.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSoftplus$2.html index ec4f6fa45..bb42e4318 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSoftplus$2.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSoftplus$2.html @@ -1 +1 @@ -ScalarSoftplus.new CPUFun() {...}

ScalarSoftplus.new CPUFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 14100%0 of 0n/a020202
invoke(double)8100%n/a010101
{...}6100%n/a010101
\ No newline at end of file +ScalarSoftplus.new CPUFun() {...}

ScalarSoftplus.new CPUFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 14100%0 of 0n/a020202
invoke(double)8100%n/a010101
{...}6100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSoftplus.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSoftplus.html index 61c31eaa5..ac7f1623e 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSoftplus.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSoftplus.html @@ -1 +1 @@ -ScalarSoftplus

ScalarSoftplus

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 19100%0 of 0n/a060606
getActivation()5100%n/a010101
getDerivative()5100%n/a010101
ScalarSoftplus()3100%n/a010101
id()2100%n/a010101
activationCode()2100%n/a010101
derivationCode()2100%n/a010101
\ No newline at end of file +ScalarSoftplus

ScalarSoftplus

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total4 of 1978%0 of 0n/a262626
activationCode()20%n/a111111
derivationCode()20%n/a111111
getActivation()5100%n/a010101
getDerivative()5100%n/a010101
ScalarSoftplus()3100%n/a010101
id()2100%n/a010101
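The ScalarSoftplus.java.html hunk just below encodes softplus(x) = ln(1 + e^x), whose derivative is the sigmoid. A minimal Java sketch (class name SoftplusSketch invented for illustration):

    // Illustrative sketch of the softplus formulas in ScalarSoftplus.java below.
    public final class SoftplusSketch {
        static double activate(double x) { return Math.log(1.0 + Math.exp(x)); }
        static double derive(double x)   { return 1.0 / (1.0 + Math.exp(-x)); }
        public static void main(String[] args) {
            System.out.println(activate(0.0)); // ln 2 ~ 0.6931
            System.out.println(derive(0.0));   // 0.5
        }
    }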
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSoftplus.java.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSoftplus.java.html index 6338d2030..a843e7ce3 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSoftplus.java.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSoftplus.java.html @@ -11,9 +11,9 @@ { @Override public String id() { return "softplus"; } - @Override public String activationCode() { return "output = log( 1.0f + exp( input ) );"; } + @Override public String activationCode() { return "output = log( 1.0f + exp( input ) );"; } - @Override public String derivationCode() { return "output = 1.0f / ( 1.0f + exp( -input ) );\n"; } + @Override public String derivationCode() { return "output = 1.0f / ( 1.0f + exp( -input ) );\n"; } @Override public CPUFun getActivation() { @@ -30,4 +30,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSoftsign$1.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSoftsign$1.html index 45c961067..9e542d2e9 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSoftsign$1.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSoftsign$1.html @@ -1 +1 @@ -ScalarSoftsign.new CPUFun() {...}

ScalarSoftsign.new CPUFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 12100%0 of 0n/a030303
{...}6100%n/a010101
invoke(double)3100%n/a010101
invoke(float)3100%n/a010101
\ No newline at end of file +ScalarSoftsign.new CPUFun() {...}

ScalarSoftsign.new CPUFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 12100%0 of 0n/a030303
{...}6100%n/a010101
invoke(double)3100%n/a010101
invoke(float)3100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSoftsign$2.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSoftsign$2.html index bf36ccd0b..4e37db70d 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSoftsign$2.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSoftsign$2.html @@ -1 +1 @@ -ScalarSoftsign.new CPUFun() {...}

ScalarSoftsign.new CPUFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total13 of 3259%0 of 0n/a131313
invoke(float)130%n/a111111
invoke(double)13100%n/a010101
{...}6100%n/a010101
\ No newline at end of file +ScalarSoftsign.new CPUFun() {...}

ScalarSoftsign.new CPUFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total13 of 3259%0 of 0n/a131313
invoke(float)130%n/a111111
invoke(double)13100%n/a010101
{...}6100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSoftsign.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSoftsign.html index 4538fbd61..103a038db 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSoftsign.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSoftsign.html @@ -1 +1 @@ -ScalarSoftsign

ScalarSoftsign

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 33100%0 of 0n/a080808
softsign(double)7100%n/a010101
softsign(float)7100%n/a010101
getActivation()5100%n/a010101
getDerivative()5100%n/a010101
ScalarSoftsign()3100%n/a010101
id()2100%n/a010101
activationCode()2100%n/a010101
derivationCode()2100%n/a010101
\ No newline at end of file +ScalarSoftsign

ScalarSoftsign

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total4 of 3387%0 of 0n/a282828
activationCode()20%n/a111111
derivationCode()20%n/a111111
softsign(double)7100%n/a010101
softsign(float)7100%n/a010101
getActivation()5100%n/a010101
getDerivative()5100%n/a010101
ScalarSoftsign()3100%n/a010101
id()2100%n/a010101
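The ScalarSoftsign.java.html hunk just below uses softsign(x) = x / (1 + |x|); its derivative 1 / (1 + |x|)^2 appears in expanded form as 1 / (2*|x| + x*x + 1). A short Java sketch (class name SoftsignSketch invented for illustration):

    // Illustrative sketch of the softsign formulas in ScalarSoftsign.java below.
    public final class SoftsignSketch {
        static double activate(double x) { return x / (1.0 + Math.abs(x)); }
        static double derive(double x)   { return 1.0 / (2.0 * Math.abs(x) + x * x + 1.0); }
        public static void main(String[] args) {
            System.out.println(activate(3.0)); // 0.75
            System.out.println(derive(3.0));   // 0.0625 = 1/16
        }
    }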
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSoftsign.java.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSoftsign.java.html index 4e8b8a3d6..b36faab85 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSoftsign.java.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSoftsign.java.html @@ -17,12 +17,12 @@ @Override public String activationCode() { - return "output = input / ( 1.0f + fabs( input ) );\n"; + return "output = input / ( 1.0f + fabs( input ) );\n"; } @Override public String derivationCode() { - return "output = 1.0f / ( 2.0f * fabs( input ) + input * input + 1.0f );\n"; + return "output = 1.0f / ( 2.0f * fabs( input ) + input * input + 1.0f );\n"; } @Override @@ -46,4 +46,4 @@ public static float softsign(float x) { return x / ( 1f + Math.abs( x ) ); } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSqrt$1.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSqrt$1.html index 9d3f178b1..3a4f153d5 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSqrt$1.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSqrt$1.html @@ -1 +1 @@ -ScalarSqrt.new CPUFun() {...}

ScalarSqrt.new CPUFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total5 of 1464%0 of 0n/a131313
invoke(float)50%n/a111111
{...}6100%n/a010101
invoke(double)3100%n/a010101
\ No newline at end of file +ScalarSqrt.new CPUFun() {...}

ScalarSqrt.new CPUFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total5 of 1464%0 of 0n/a131313
invoke(float)50%n/a111111
{...}6100%n/a010101
invoke(double)3100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSqrt$2.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSqrt$2.html index 9efe2e25b..b7578b8d2 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSqrt$2.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSqrt$2.html @@ -1 +1 @@ -ScalarSqrt.new CPUFun() {...}

ScalarSqrt.new CPUFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total16 of 160%0 of 0n/a333333
{...}60%n/a111111
invoke(double)50%n/a111111
invoke(float)50%n/a111111
\ No newline at end of file +ScalarSqrt.new CPUFun() {...}

ScalarSqrt.new CPUFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total16 of 160%0 of 0n/a333333
{...}60%n/a111111
invoke(double)50%n/a111111
invoke(float)50%n/a111111
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSqrt.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSqrt.html index 88bd51b8d..c084f6851 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSqrt.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSqrt.html @@ -1 +1 @@ -ScalarSqrt

ScalarSqrt

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total5 of 1973%0 of 0n/a161616
getDerivative()50%n/a111111
getActivation()5100%n/a010101
ScalarSqrt()3100%n/a010101
id()2100%n/a010101
activationCode()2100%n/a010101
derivationCode()2100%n/a010101
\ No newline at end of file +ScalarSqrt

ScalarSqrt

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total9 of 1952%0 of 0n/a363636
getDerivative()50%n/a111111
activationCode()20%n/a111111
derivationCode()20%n/a111111
getActivation()5100%n/a010101
ScalarSqrt()3100%n/a010101
id()2100%n/a010101
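The ScalarSqrt.java.html hunk just below writes the derivative as 0.5f * fast_inverse_sqrt(input); assuming fast_inverse_sqrt approximates 1/sqrt(x), this is the usual d/dx sqrt(x) = 0.5 / sqrt(x). Plain Math.sqrt stands in for that helper in this sketch (class name SqrtSketch invented for illustration):

    // Illustrative sketch; Math.sqrt replaces the kernel's fast_inverse_sqrt helper.
    public final class SqrtSketch {
        static double activate(double x) { return Math.sqrt(x); }
        static double derive(double x)   { return 0.5 / Math.sqrt(x); }
        public static void main(String[] args) {
            System.out.println(activate(16.0)); // 4.0
            System.out.println(derive(16.0));   // 0.125
        }
    }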
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSqrt.java.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSqrt.java.html index 635f804a1..94be28ad2 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSqrt.java.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarSqrt.java.html @@ -9,12 +9,12 @@ @Override public String activationCode() { - return "output = sqrt( input );\n"; + return "output = sqrt( input );\n"; } @Override public String derivationCode() { - return "output = 0.5f * fast_inverse_sqrt( input );\n"; + return "output = 0.5f * fast_inverse_sqrt( input );\n"; } @Override @@ -34,4 +34,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarTanh$1.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarTanh$1.html index c7a828084..8a0f785a4 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarTanh$1.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarTanh$1.html @@ -1 +1 @@ -ScalarTanh.new CPUFun() {...}

ScalarTanh.new CPUFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 32100%0 of 0n/a030303
invoke(float)14100%n/a010101
invoke(double)12100%n/a010101
{...}6100%n/a010101
\ No newline at end of file +ScalarTanh.new CPUFun() {...}

ScalarTanh.new CPUFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 32100%0 of 0n/a030303
invoke(float)14100%n/a010101
invoke(double)12100%n/a010101
{...}6100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarTanh$2.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarTanh$2.html index 07d453ce6..44a39f29e 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarTanh$2.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarTanh$2.html @@ -1 +1 @@ -ScalarTanh.new CPUFun() {...}

ScalarTanh.new CPUFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 22100%0 of 0n/a030303
invoke(float)9100%n/a010101
invoke(double)7100%n/a010101
{...}6100%n/a010101
\ No newline at end of file +ScalarTanh.new CPUFun() {...}

ScalarTanh.new CPUFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 22100%0 of 0n/a030303
invoke(float)9100%n/a010101
invoke(double)7100%n/a010101
{...}6100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarTanh.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarTanh.html index ca8b5a241..92068baa5 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarTanh.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarTanh.html @@ -1 +1 @@ -ScalarTanh

ScalarTanh

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 45100%0 of 0n/a080808
tanh(float)14100%n/a010101
tanh(double)12100%n/a010101
getActivation()5100%n/a010101
getDerivative()5100%n/a010101
ScalarTanh()3100%n/a010101
id()2100%n/a010101
activationCode()2100%n/a010101
derivationCode()2100%n/a010101
\ No newline at end of file +ScalarTanh

ScalarTanh

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total4 of 4591%0 of 0n/a282828
activationCode()20%n/a111111
derivationCode()20%n/a111111
tanh(float)14100%n/a010101
tanh(double)12100%n/a010101
getActivation()5100%n/a010101
getDerivative()5100%n/a010101
ScalarTanh()3100%n/a010101
id()2100%n/a010101
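The ScalarTanh.java.html hunk just below pairs tanh(x) with the derivative 1 - tanh(x)^2. A tiny Java sketch (class name TanhSketch invented for illustration):

    // Illustrative sketch of the tanh formulas in ScalarTanh.java below.
    public final class TanhSketch {
        static double activate(double x) { return Math.tanh(x); }
        static double derive(double x)   { double t = Math.tanh(x); return 1.0 - t * t; }
        public static void main(String[] args) {
            System.out.println(activate(0.0)); // 0.0
            System.out.println(derive(0.0));   // 1.0
        }
    }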
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarTanh.java.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarTanh.java.html index dca463d0f..40c952d5e 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarTanh.java.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarTanh.java.html @@ -9,12 +9,12 @@ @Override public String activationCode() { - return "output = tanh(input);\n"; + return "output = tanh(input);\n"; } @Override public String derivationCode() { - return "output = 1 - pow( tanh(input), 2.0f );\n"; + return "output = 1 - pow( tanh(input), 2.0f );\n"; } @Override @@ -39,4 +39,4 @@ } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarTanhFast$1.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarTanhFast$1.html index 48f9a3351..2d6366f77 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarTanhFast$1.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarTanhFast$1.html @@ -1 +1 @@ -ScalarTanhFast.new CPUFun() {...}

ScalarTanhFast.new CPUFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 24100%0 of 0n/a030303
invoke(double)9100%n/a010101
invoke(float)9100%n/a010101
{...}6100%n/a010101
\ No newline at end of file +ScalarTanhFast.new CPUFun() {...}

ScalarTanhFast.new CPUFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 24100%0 of 0n/a030303
invoke(double)9100%n/a010101
invoke(float)9100%n/a010101
{...}6100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarTanhFast$2.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarTanhFast$2.html index b201ea058..727ba53ae 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarTanhFast$2.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarTanhFast$2.html @@ -1 +1 @@ -ScalarTanhFast.new CPUFun() {...}

ScalarTanhFast.new CPUFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total19 of 4254%0 of 0n/a133713
invoke(float)190%n/a113311
invoke(double)17100%n/a010301
{...}6100%n/a010101
\ No newline at end of file +ScalarTanhFast.new CPUFun() {...}

ScalarTanhFast.new CPUFun() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total19 of 4254%0 of 0n/a133713
invoke(float)190%n/a113311
invoke(double)17100%n/a010301
{...}6100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarTanhFast.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarTanhFast.html index a742dede8..035c4c91d 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarTanhFast.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarTanhFast.html @@ -1 +1 @@ -ScalarTanhFast

ScalarTanhFast

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 19100%0 of 0n/a060606
getActivation()5100%n/a010101
getDerivative()5100%n/a010101
ScalarTanhFast()3100%n/a010101
id()2100%n/a010101
activationCode()2100%n/a010101
derivationCode()2100%n/a010101
\ No newline at end of file +ScalarTanhFast

ScalarTanhFast

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total4 of 1978%0 of 0n/a262626
activationCode()20%n/a111111
derivationCode()20%n/a111111
getActivation()5100%n/a010101
getDerivative()5100%n/a010101
ScalarTanhFast()3100%n/a010101
id()2100%n/a010101
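The ScalarTanhFast.java.html hunk just below implements the fast tanh approximation x * fast_inverse_sqrt(1 + x*x), i.e. x / sqrt(1 + x^2), with derivative 1 / (x^2*t + t) where t = sqrt(1 + x^2), which simplifies to 1 / (1 + x^2)^(3/2). Assuming fast_inverse_sqrt is 1/sqrt, a Java sketch (class name TanhFastSketch invented for illustration):

    // Illustrative sketch; Math.sqrt replaces the kernel's fast_inverse_sqrt helper.
    public final class TanhFastSketch {
        static double activate(double x) { return x / Math.sqrt(1.0 + x * x); }
        static double derive(double x)   { double t = Math.sqrt(1.0 + x * x); return 1.0 / (x * x * t + t); }
        public static void main(String[] args) {
            System.out.println(activate(1.0)); // ~0.7071
            System.out.println(derive(1.0));   // ~0.3536
        }
    }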
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarTanhFast.java.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarTanhFast.java.html index a7866c396..3915f793e 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarTanhFast.java.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/ScalarTanhFast.java.html @@ -9,12 +9,12 @@ @Override public String activationCode() { - return "output = input * fast_inverse_sqrt( 1.0f + input * input );\n"; + return "output = input * fast_inverse_sqrt( 1.0f + input * input );\n"; } @Override public String derivationCode() { - return "float temp1 = input * input;\n" + + return "float temp1 = input * input;\n" + "float temp2 = sqrt( 1 + temp1 );\n" + "output = 1 / ( temp1 * temp2 + temp2 );\n"; } @@ -47,4 +47,4 @@ } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/index.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/index.html index cdc5fe601..35a715dc0 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/index.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/index.html @@ -1 +1 @@ -neureka.backend.main.implementations.fun

neureka.backend.main.implementations.fun

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total248 of 1,60984%11 of 2657%453034029038290570
ScalarCbrt.new CPUFun() {...}240%n/a33333311
ScalarLog10.new CPUFun() {...}210%n/a33333311
ScalarTanhFast.new CPUFun() {...}192354%n/a13371301
ScalarGaussianFast.new CPUFun() {...}182457%n/a13251301
ScalarSqrt.new CPUFun() {...}160%n/a33333311
ScalarGaussian.new CPUFun() {...}160%n/a22222211
ScalarIdentity.new CPUFun() {...}160%n/a66666611
ScalarSeLU.new CPUFun() {...}152764%4450%37370301
ScalarAbsolute.new CPUFun() {...}141450%4233%47242401
ScalarSoftsign.new CPUFun() {...}131959%n/a13131301
ScalarReLU.new CPUFun() {...}81463%2250%25131301
ScalarQuadratic.new CPUFun() {...}81055%n/a24242401
ScalarQuadratic.new CPUFun() {...}81055%n/a24242401
ScalarLog1051473%n/a16161601
ScalarSqrt51473%n/a16161601
ScalarIdentity51473%n/a16161601
ScalarGaussian51473%n/a16161601
ScalarCbrt51473%n/a16161601
ScalarSqrt.new CPUFun() {...}5964%n/a13131301
ScalarCbrt.new CPUFun() {...}5964%n/a13131301
ScalarLog10.new CPUFun() {...}5964%n/a13131301
ScalarLogarithm.new CPUFun() {...}41071%n/a13131301
FunUtil39696%n/a131171301
ScalarExp.new CPUFun() {...}31178%n/a13131301
ScalarSeLU23895%1375%19190701
ScalarGaTU.new CPUFun() {...}56100%n/a030110301
ScalarGaSU.new CPUFun() {...}56100%n/a03070301
ScalarGeLU.new CPUFun() {...}46100%n/a03070301
ScalarTanh45100%n/a08080801
ScalarSiLU.new CPUFun() {...}34100%n/a03050301
ScalarSoftsign33100%n/a08080801
ScalarTanh.new CPUFun() {...}32100%n/a03030301
ScalarSigmoid27100%n/a07070701
ScalarReLU.new CPUFun() {...}26100%4100%05030301
ScalarSigmoid.new CPUFun() {...}26100%n/a03050301
ScalarGeLU26100%n/a07070701
ScalarSiLU24100%n/a07070701
ScalarTanhFast.new CPUFun() {...}24100%n/a03030301
ScalarTanh.new CPUFun() {...}22100%n/a03030301
ScalarGaussianFast.new CPUFun() {...}22100%n/a03030301
ScalarGaTU.new CPUFun() {...}20100%n/a03030301
ScalarGaSU.new CPUFun() {...}20100%n/a03030301
ScalarIdentity.new CPUFun() {...}20100%n/a08080801
ScalarCosinus19100%n/a06060601
ScalarExp19100%n/a06060601
ScalarQuadratic19100%n/a06060601
ScalarGaTU19100%n/a06060601
ScalarGaussianFast19100%n/a06060601
ScalarTanhFast19100%n/a06060601
ScalarLogarithm19100%n/a06060601
ScalarGaSU19100%n/a06060601
ScalarAbsolute19100%n/a06060601
ScalarSoftplus19100%n/a06060601
ScalarSinus19100%n/a06060601
ScalarReLU19100%n/a06060601
ScalarAbsolute.new CPUFun() {...}15100%n/a04040401
ScalarExp.new CPUFun() {...}14100%n/a03030301
ScalarSoftplus.new CPUFun() {...}14100%n/a02020201
ScalarSiLU.new CPUFun() {...}14100%n/a03030301
ScalarSigmoid.new CPUFun() {...}14100%n/a03030301
ScalarSeLU.new CPUFun() {...}14100%n/a03030301
ScalarGeLU.new CPUFun() {...}14100%n/a03030301
ScalarGaussian.new CPUFun() {...}12100%n/a02020201
ScalarSoftplus.new CPUFun() {...}12100%n/a02020201
ScalarSoftsign.new CPUFun() {...}12100%n/a03030301
ScalarCosinus.new CPUFun() {...}10100%n/a02020201
ScalarSinus.new CPUFun() {...}9100%n/a02020201
ScalarSinus.new CPUFun() {...}9100%n/a02020201
ScalarLogarithm.new CPUFun() {...}9100%n/a02020201
ScalarCosinus.new CPUFun() {...}9100%n/a02020201
\ No newline at end of file +neureka.backend.main.implementations.fun

neureka.backend.main.implementations.fun

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total340 of 1,60978%11 of 2657%913038629084290570
ScalarCbrt.new CPUFun() {...}240%n/a33333311
ScalarLog10.new CPUFun() {...}210%n/a33333311
ScalarTanhFast.new CPUFun() {...}192354%n/a13371301
ScalarGaussianFast.new CPUFun() {...}182457%n/a13251301
ScalarSqrt.new CPUFun() {...}160%n/a33333311
ScalarGaussian.new CPUFun() {...}160%n/a22222211
ScalarIdentity.new CPUFun() {...}160%n/a66666611
ScalarSeLU.new CPUFun() {...}152764%4450%37370301
ScalarAbsolute.new CPUFun() {...}141450%4233%47242401
ScalarSoftsign.new CPUFun() {...}131959%n/a13131301
ScalarLog1091052%n/a36363601
ScalarSqrt91052%n/a36363601
ScalarIdentity91052%n/a36363601
ScalarGaussian91052%n/a36363601
ScalarCbrt91052%n/a36363601
ScalarReLU.new CPUFun() {...}81463%2250%25131301
ScalarQuadratic.new CPUFun() {...}81055%n/a24242401
ScalarQuadratic.new CPUFun() {...}81055%n/a24242401
ScalarSeLU63485%1375%39392701
ScalarSqrt.new CPUFun() {...}5964%n/a13131301
ScalarCbrt.new CPUFun() {...}5964%n/a13131301
ScalarLog10.new CPUFun() {...}5964%n/a13131301
ScalarTanh44191%n/a28282801
ScalarSoftsign42987%n/a28282801
ScalarSigmoid42385%n/a27272701
ScalarGeLU42284%n/a27272701
ScalarSiLU42083%n/a27272701
ScalarCosinus41578%n/a26262601
ScalarExp41578%n/a26262601
ScalarQuadratic41578%n/a26262601
ScalarGaTU41578%n/a26262601
ScalarGaussianFast41578%n/a26262601
ScalarTanhFast41578%n/a26262601
ScalarLogarithm41578%n/a26262601
ScalarGaSU41578%n/a26262601
ScalarAbsolute41578%n/a26262601
ScalarSoftplus41578%n/a26262601
ScalarSinus41578%n/a26262601
ScalarReLU41578%n/a26262601
ScalarLogarithm.new CPUFun() {...}41071%n/a13131301
FunUtil39696%n/a131171301
ScalarExp.new CPUFun() {...}31178%n/a13131301
ScalarGaTU.new CPUFun() {...}56100%n/a030110301
ScalarGaSU.new CPUFun() {...}56100%n/a03070301
ScalarGeLU.new CPUFun() {...}46100%n/a03070301
ScalarSiLU.new CPUFun() {...}34100%n/a03050301
ScalarTanh.new CPUFun() {...}32100%n/a03030301
ScalarReLU.new CPUFun() {...}26100%4100%05030301
ScalarSigmoid.new CPUFun() {...}26100%n/a03050301
ScalarTanhFast.new CPUFun() {...}24100%n/a03030301
ScalarTanh.new CPUFun() {...}22100%n/a03030301
ScalarGaussianFast.new CPUFun() {...}22100%n/a03030301
ScalarGaTU.new CPUFun() {...}20100%n/a03030301
ScalarGaSU.new CPUFun() {...}20100%n/a03030301
ScalarIdentity.new CPUFun() {...}20100%n/a08080801
ScalarAbsolute.new CPUFun() {...}15100%n/a04040401
ScalarExp.new CPUFun() {...}14100%n/a03030301
ScalarSoftplus.new CPUFun() {...}14100%n/a02020201
ScalarSiLU.new CPUFun() {...}14100%n/a03030301
ScalarSigmoid.new CPUFun() {...}14100%n/a03030301
ScalarSeLU.new CPUFun() {...}14100%n/a03030301
ScalarGeLU.new CPUFun() {...}14100%n/a03030301
ScalarGaussian.new CPUFun() {...}12100%n/a02020201
ScalarSoftplus.new CPUFun() {...}12100%n/a02020201
ScalarSoftsign.new CPUFun() {...}12100%n/a03030301
ScalarCosinus.new CPUFun() {...}10100%n/a02020201
ScalarSinus.new CPUFun() {...}9100%n/a02020201
ScalarSinus.new CPUFun() {...}9100%n/a02020201
ScalarLogarithm.new CPUFun() {...}9100%n/a02020201
ScalarCosinus.new CPUFun() {...}9100%n/a02020201
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.fun/index.source.html b/docs/coverage/test/html/neureka.backend.main.implementations.fun/index.source.html index 5a7331404..c3b3b938c 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.fun/index.source.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.fun/index.source.html @@ -1 +1 @@ -neureka.backend.main.implementations.fun

neureka.backend.main.implementations.fun

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total248 of 1,60984%11 of 2657%453034029038290570
ScalarCbrt.java342340%n/a51241051213
ScalarLog10.java312342%n/a51241051213
ScalarSqrt.java262346%n/a51241051213
ScalarIdentity.java213461%n/a72061872013
ScalarGaussian.java212655%n/a3102831013
ScalarTanhFast.java196677%n/a11231411203
ScalarGaussianFast.java186578%n/a11221211203
ScalarSeLU.java177982%5758%41941701303
ScalarQuadratic.java163970%n/a41441241403
ScalarAbsolute.java144877%4233%41721221403
ScalarSoftsign.java136483%n/a11411211403
ScalarReLU.java85988%2675%21611011203
ScalarLogarithm.java43890%n/a1111911103
FunUtil.java39696%n/a131171301
ScalarExp.java34493%n/a11211011203
ScalarTanh.java99100%n/a01401201403
ScalarGaTU.java95100%n/a01201801203
ScalarGaSU.java95100%n/a01201401203
ScalarGeLU.java86100%n/a01301501303
ScalarSiLU.java72100%n/a01301301303
ScalarSigmoid.java67100%n/a01301301303
ScalarSoftplus.java45100%n/a0100801003
ScalarCosinus.java38100%n/a0100801003
ScalarSinus.java37100%n/a0100801003
\ No newline at end of file +neureka.backend.main.implementations.fun

neureka.backend.main.implementations.fun

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total340 of 1,60978%11 of 2657%913038629084290570
ScalarCbrt.java381933%n/a71261071213
ScalarLog10.java351935%n/a71261071213
ScalarSqrt.java301938%n/a71261071213
ScalarIdentity.java253054%n/a92081892013
ScalarGaussian.java252246%n/a5104851013
ScalarTanhFast.java236272%n/a31251431203
ScalarGaussianFast.java226173%n/a31241231203
ScalarSeLU.java217578%5758%61961721303
ScalarQuadratic.java203563%n/a61461261403
ScalarAbsolute.java184470%4233%61741241403
ScalarSoftsign.java176077%n/a31431231403
ScalarReLU.java125582%2675%41631031203
ScalarLogarithm.java83480%n/a3113931103
ScalarExp.java74085%n/a31231031203
ScalarTanh.java49595%n/a21421221403
ScalarGaTU.java49195%n/a21221821203
ScalarGaSU.java49195%n/a21221421203
ScalarGeLU.java48295%n/a21321521303
ScalarSiLU.java46894%n/a21321321303
ScalarSigmoid.java46394%n/a21321321303
ScalarSoftplus.java44191%n/a2102821003
ScalarCosinus.java43489%n/a2102821003
ScalarSinus.java43389%n/a2102821003
FunUtil.java39696%n/a131171301
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.linear/CLDot.html b/docs/coverage/test/html/neureka.backend.main.implementations.linear/CLDot.html index efb78ecc2..15834b869 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.linear/CLDot.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.linear/CLDot.html @@ -1 +1 @@ -CLDot

CLDot

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total10 of 12491%3 of 650%3621903
run(ExecutionCall)1010090%3350%3421701
lambda$run$0(String)11100%n/a010101
CLDot()3100%n/a010101
\ No newline at end of file
+CLDot

CLDot

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total21 of 12483%3 of 650%4631913
lambda$run$0(String)110%n/a111111
run(ExecutionCall)1010090%3350%3421701
CLDot()3100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.linear/CLDot.java.html b/docs/coverage/test/html/neureka.backend.main.implementations.linear/CLDot.java.html index c1a249c01..57d62a543 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.linear/CLDot.java.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.linear/CLDot.java.html @@ -33,7 +33,7 @@ // First we multiply the two vectors: String kernelName = "multiply_arrays_for_dot_product"; Supplier<String> code = () -> - "__kernel void " + kernelName + "(__global const float* a, \n" + + "__kernel void " + kernelName + "(__global const float* a, \n" + " __global const float* b, \n" + " __global float* c,\n" + " const int n) {\n" + @@ -55,4 +55,4 @@ return c; } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.linear/CPUDot.html b/docs/coverage/test/html/neureka.backend.main.implementations.linear/CPUDot.html index d9ff30860..3cbf73dda 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.linear/CPUDot.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.linear/CPUDot.html @@ -1 +1 @@ -CPUDot

CPUDot

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total35 of 29788%7 of 2065%71764807
run(ExecutionCall)3520785%71365%71163901
execute(double[], double[], double[], int)11100%n/a010201
execute(float[], float[], float[], int)11100%n/a010201
execute(long[], long[], long[], int)11100%n/a010201
execute(int[], int[], int[], int)11100%n/a010201
lambda$run$0(Tensor, Tensor)8100%n/a010101
CPUDot()3100%n/a010101
\ No newline at end of file
+CPUDot

CPUDot

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total35 of 29788%7 of 2065%71764807
run(ExecutionCall)3520785%71365%71163901
execute(double[], double[], double[], int)11100%n/a010201
execute(float[], float[], float[], int)11100%n/a010201
execute(long[], long[], long[], int)11100%n/a010201
execute(int[], int[], int[], int)11100%n/a010201
lambda$run$0(Tensor, Tensor)8100%n/a010101
CPUDot()3100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.linear/CPUDot.java.html b/docs/coverage/test/html/neureka.backend.main.implementations.linear/CPUDot.java.html index aa805d2d3..ba17536ec 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.linear/CPUDot.java.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.linear/CPUDot.java.html @@ -86,4 +86,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.linear/index.html b/docs/coverage/test/html/neureka.backend.main.implementations.linear/index.html index d4491a0a0..d1358d47b 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.linear/index.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.linear/index.html @@ -1 +1 @@ -neureka.backend.main.implementations.linear

neureka.backend.main.implementations.linear

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total45 of 42189%10 of 2661%102386701002
CPUDot3526288%71365%7176480701
CLDot1011491%3350%362190301
\ No newline at end of file
+neureka.backend.main.implementations.linear

neureka.backend.main.implementations.linear

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total56 of 42186%10 of 2661%112396711002
CPUDot3526288%71365%7176480701
CLDot2110383%3350%463191301
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.linear/index.source.html b/docs/coverage/test/html/neureka.backend.main.implementations.linear/index.source.html
index 2ca4ba355..8e79379f5 100644
--- a/docs/coverage/test/html/neureka.backend.main.implementations.linear/index.source.html
+++ b/docs/coverage/test/html/neureka.backend.main.implementations.linear/index.source.html
@@ -1 +1 @@
-neureka.backend.main.implementations.linear

neureka.backend.main.implementations.linear

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total45 of 42189%10 of 2661%102386701002
CPUDot.java3526288%71365%7176480701
CLDot.java1011491%3350%362190301
\ No newline at end of file
+neureka.backend.main.implementations.linear

neureka.backend.main.implementations.linear

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total56 of 42186%10 of 2661%112396711002
CPUDot.java3526288%71365%7176480701
CLDot.java2110383%3350%463191301
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.matmul/CLMatMul.html b/docs/coverage/test/html/neureka.backend.main.implementations.matmul/CLMatMul.html
index ddb700ca0..d2d2116c2 100644
--- a/docs/coverage/test/html/neureka.backend.main.implementations.matmul/CLMatMul.html
+++ b/docs/coverage/test/html/neureka.backend.main.implementations.matmul/CLMatMul.html
@@ -1 +1 @@
-CLMatMul

CLMatMul

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 90100%0 of 4100%0501803
lambda$new$1(ExecutionCall)74100%2100%0201601
lambda$new$0(Tensor)9100%2100%020101
CLMatMul()7100%n/a010201
\ No newline at end of file
+CLMatMul

CLMatMul

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total90 of 900%4 of 40%55181833
lambda$new$1(ExecutionCall)740%20%22161611
lambda$new$0(Tensor)90%20%221111
CLMatMul()70%n/a112211
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.matmul/CLMatMul.java.html b/docs/coverage/test/html/neureka.backend.main.implementations.matmul/CLMatMul.java.html index 78a0c16de..bf1798d8f 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.matmul/CLMatMul.java.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.matmul/CLMatMul.java.html @@ -7,27 +7,27 @@ public class CLMatMul extends SimpleCLImplementation { public CLMatMul() { - super( + super( call -> { - if ( - call.validate() - .all( t -> t.getNDConf().getLayout() == NDConfiguration.Layout.COLUMN_MAJOR ) - .isValid() + if ( + call.validate() + .all( t -> t.getNDConf().getLayout() == NDConfiguration.Layout.COLUMN_MAJOR ) + .isValid() ) { - return new CLGEMM().run( call ); + return new CLGEMM().run( call ); } else { - int M = call.input(1).shape(0); - int N = call.input(2).shape(1); - int K = call.input(1).shape(1); - call.getDevice() - .getKernel(call) - .pass(M).pass(N).pass(K) - .pass(call.input(Number.class, 1)) - .pass(call.input(Number.class, 2)) - .pass(call.input(Number.class, 0)) - .call(new long[]{M, N}, null); + int M = call.input(1).shape(0); + int N = call.input(2).shape(1); + int K = call.input(1).shape(1); + call.getDevice() + .getKernel(call) + .pass(M).pass(N).pass(K) + .pass(call.input(Number.class, 1)) + .pass(call.input(Number.class, 2)) + .pass(call.input(Number.class, 0)) + .call(new long[]{M, N}, null); - return call.input(0); + return call.input(0); } }, 3, @@ -50,6 +50,6 @@ " C[ n + m * N ] = acc; \n" + " } \n" ); - } + } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.matmul/CPUMatMul.html b/docs/coverage/test/html/neureka.backend.main.implementations.matmul/CPUMatMul.html index 908829611..bb51b94c6 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.matmul/CPUMatMul.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.matmul/CPUMatMul.html @@ -1 +1 @@ -CPUMatMul

CPUMatMul

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total45 of 37588%7 of 3680%72555507
run(ExecutionCall)4527185%72980%71954601
execute(boolean, double[], double[], double[], int, int, int)12100%n/a010201
execute(boolean, float[], float[], float[], int, int, int)12100%n/a010201
execute(boolean, long[], long[], long[], int, int, int)12100%n/a010201
execute(boolean, int[], int[], int[], int, int, int)12100%n/a010201
lambda$run$0(Tensor, Tensor)8100%n/a010101
CPUMatMul()3100%n/a010101
\ No newline at end of file
+CPUMatMul

CPUMatMul

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total45 of 37588%7 of 3680%72555507
run(ExecutionCall)4527185%72980%71954601
execute(boolean, double[], double[], double[], int, int, int)12100%n/a010201
execute(boolean, float[], float[], float[], int, int, int)12100%n/a010201
execute(boolean, long[], long[], long[], int, int, int)12100%n/a010201
execute(boolean, int[], int[], int[], int, int, int)12100%n/a010201
lambda$run$0(Tensor, Tensor)8100%n/a010101
CPUMatMul()3100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.matmul/CPUMatMul.java.html b/docs/coverage/test/html/neureka.backend.main.implementations.matmul/CPUMatMul.java.html index 7005ecd21..bbbc223e6 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.matmul/CPUMatMul.java.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.matmul/CPUMatMul.java.html @@ -109,4 +109,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.matmul/index.html b/docs/coverage/test/html/neureka.backend.main.implementations.matmul/index.html index cd2cd201f..d85870d9b 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.matmul/index.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.matmul/index.html @@ -1 +1 @@ -neureka.backend.main.implementations.matmul

neureka.backend.main.implementations.matmul

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total45 of 46590%7 of 4082%73057301002
CPUMatMul4533088%72980%7255550701
CLMatMul90100%4100%050180301
\ No newline at end of file
+neureka.backend.main.implementations.matmul

neureka.backend.main.implementations.matmul

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total135 of 46570%11 of 4072%1230237331012
CLMatMul900%40%5518183311
CPUMatMul4533088%72980%7255550701
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.matmul/index.source.html b/docs/coverage/test/html/neureka.backend.main.implementations.matmul/index.source.html
index 4adb1c4bc..b00ac6b22 100644
--- a/docs/coverage/test/html/neureka.backend.main.implementations.matmul/index.source.html
+++ b/docs/coverage/test/html/neureka.backend.main.implementations.matmul/index.source.html
@@ -1 +1 @@
-neureka.backend.main.implementations.matmul

neureka.backend.main.implementations.matmul

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total45 of 46590%7 of 4082%73057301002
CPUMatMul.java4533088%72980%7255550701
CLMatMul.java90100%4100%050180301
\ No newline at end of file
+neureka.backend.main.implementations.matmul

neureka.backend.main.implementations.matmul

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total135 of 46570%11 of 4072%1230237331012
CLMatMul.java900%40%5518183311
CPUMatMul.java4533088%72980%7255550701
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.scalar/CLScalarFunction.html b/docs/coverage/test/html/neureka.backend.main.implementations.scalar/CLScalarFunction.html
index a3d908ff0..3ee41a122 100644
--- a/docs/coverage/test/html/neureka.backend.main.implementations.scalar/CLScalarFunction.html
+++ b/docs/coverage/test/html/neureka.backend.main.implementations.scalar/CLScalarFunction.html
@@ -1 +1 @@
-CLScalarFunction

CLScalarFunction

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total43 of 4912%2 of 20%236912
run(ExecutionCall)430%20%226611
CLScalarFunction(ScalarFun)6100%n/a010301
\ No newline at end of file
+CLScalarFunction

CLScalarFunction

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total49 of 490%2 of 20%339922
run(ExecutionCall)430%20%226611
CLScalarFunction(ScalarFun)60%n/a113311
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.scalar/CLScalarFunction.java.html b/docs/coverage/test/html/neureka.backend.main.implementations.scalar/CLScalarFunction.java.html index b856621bf..1af0feea4 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.scalar/CLScalarFunction.java.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.scalar/CLScalarFunction.java.html @@ -12,9 +12,9 @@ { private final ScalarFun _fun; - public CLScalarFunction(ScalarFun fun) { - _fun = fun; - } + public CLScalarFunction(ScalarFun fun) { + _fun = fun; + } @Override public Tensor<?> run(ExecutionCall<OpenCLDevice> call) { @@ -26,4 +26,4 @@ return call.input(0); } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.scalar/CPUScalarBroadcastFunction.html b/docs/coverage/test/html/neureka.backend.main.implementations.scalar/CPUScalarBroadcastFunction.html index ee0c27737..1ddb498b2 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.scalar/CPUScalarBroadcastFunction.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.scalar/CPUScalarBroadcastFunction.html @@ -1 +1 @@ -CPUScalarBroadcastFunction

CPUScalarBroadcastFunction

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total183 of 31241%12 of 2040%1217437637
_workloadFor(ExecutionCall)907645%6650%67132901
lambda$_workloadFor$3(Tensor, Tensor, Object[], Object, int, int)310%20%22101011
lambda$_workloadFor$2(Tensor, Tensor, int[], int, int, int)310%20%22101011
lambda$_workloadFor$1(Tensor, Tensor, float[], float, int, int)310%20%22101011
lambda$_workloadFor$0(Tensor, Tensor, double[], double, int, int)31100%2100%0201001
run(ExecutionCall)16100%n/a010601
CPUScalarBroadcastFunction(ScalarFun)6100%n/a010101
\ No newline at end of file
+CPUScalarBroadcastFunction

CPUScalarBroadcastFunction

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total183 of 31241%12 of 2040%1217437637
_workloadFor(ExecutionCall)907645%6650%67132901
lambda$_workloadFor$3(Tensor, Tensor, Object[], Object, int, int)310%20%22101011
lambda$_workloadFor$2(Tensor, Tensor, int[], int, int, int)310%20%22101011
lambda$_workloadFor$1(Tensor, Tensor, float[], float, int, int)310%20%22101011
lambda$_workloadFor$0(Tensor, Tensor, double[], double, int, int)31100%2100%0201001
run(ExecutionCall)16100%n/a010601
CPUScalarBroadcastFunction(ScalarFun)6100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.scalar/CPUScalarBroadcastFunction.java.html b/docs/coverage/test/html/neureka.backend.main.implementations.scalar/CPUScalarBroadcastFunction.java.html index eec8fb318..225186289 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.scalar/CPUScalarBroadcastFunction.java.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.scalar/CPUScalarBroadcastFunction.java.html @@ -132,4 +132,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.scalar/CPUScalarFunction.html b/docs/coverage/test/html/neureka.backend.main.implementations.scalar/CPUScalarFunction.html index ffc244d2f..6ae86c90a 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.scalar/CPUScalarFunction.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.scalar/CPUScalarFunction.html @@ -1 +1 @@ -CPUScalarFunction

CPUScalarFunction

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total45 of 5111%2 of 20%237812
run(ExecutionCall)450%20%227711
CPUScalarFunction(ScalarFun)6100%n/a010101
\ No newline at end of file
+CPUScalarFunction

CPUScalarFunction

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total45 of 5111%2 of 20%237812
run(ExecutionCall)450%20%227711
CPUScalarFunction(ScalarFun)6100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.scalar/CPUScalarFunction.java.html b/docs/coverage/test/html/neureka.backend.main.implementations.scalar/CPUScalarFunction.java.html index 0261d0fad..23cd3cd4d 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.scalar/CPUScalarFunction.java.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.scalar/CPUScalarFunction.java.html @@ -26,4 +26,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.scalar/index.html b/docs/coverage/test/html/neureka.backend.main.implementations.scalar/index.html index 4a7c87631..0767b40cf 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations.scalar/index.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations.scalar/index.html @@ -1 +1 @@ -neureka.backend.main.implementations.scalar

neureka.backend.main.implementations.scalar

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total271 of 41234%16 of 2433%1623569351103
CPUScalarBroadcastFunction18312941%12840%121743763701
CPUScalarFunction45611%20%23781201
CLScalarFunction43612%20%23691201
\ No newline at end of file
+neureka.backend.main.implementations.scalar

neureka.backend.main.implementations.scalar

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total277 of 41232%16 of 2433%1723599361113
CPUScalarBroadcastFunction18312941%12840%121743763701
CLScalarFunction490%20%33992211
CPUScalarFunction45611%20%23781201
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.backend.main.implementations.scalar/index.source.html b/docs/coverage/test/html/neureka.backend.main.implementations.scalar/index.source.html
index d30ef441e..21c110522 100644
--- a/docs/coverage/test/html/neureka.backend.main.implementations.scalar/index.source.html
+++ b/docs/coverage/test/html/neureka.backend.main.implementations.scalar/index.source.html
@@ -1 +1 @@
-neureka.backend.main.implementations.scalar

neureka.backend.main.implementations.scalar

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total271 of 41234%16 of 2433%1623569351103
CPUScalarBroadcastFunction.java18312941%12840%121743763701
CPUScalarFunction.java45611%20%23781201
CLScalarFunction.java43612%20%23691201
\ No newline at end of file
+neureka.backend.main.implementations.scalar

neureka.backend.main.implementations.scalar

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total277 of 41232%16 of 2433%1723599361113
CPUScalarBroadcastFunction.java18312941%12840%121743763701
CLScalarFunction.java490%20%33992211
CPUScalarFunction.java45611%20%23781201
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.backend.main.implementations/CLImplementation.html b/docs/coverage/test/html/neureka.backend.main.implementations/CLImplementation.html
index 2e85b7fc2..d1807704f 100644
--- a/docs/coverage/test/html/neureka.backend.main.implementations/CLImplementation.html
+++ b/docs/coverage/test/html/neureka.backend.main.implementations/CLImplementation.html
@@ -1 +1 @@
-CLImplementation

CLImplementation

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 5100%0 of 0n/a010201
CLImplementation(ImplementationFor, int)5100%n/a010201
\ No newline at end of file
+CLImplementation

CLImplementation

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total5 of 50%0 of 0n/a112211
CLImplementation(ImplementationFor, int)50%n/a112211
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations/CLImplementation.java.html b/docs/coverage/test/html/neureka.backend.main.implementations/CLImplementation.java.html index f7cf8b3da..bd2fb9cf7 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations/CLImplementation.java.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations/CLImplementation.java.html @@ -18,7 +18,7 @@ ImplementationFor<OpenCLDevice> execution, int arity ) { - super( execution, arity ); - } + super( execution, arity ); + } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations/CPUImplementation.html b/docs/coverage/test/html/neureka.backend.main.implementations/CPUImplementation.html index 9a5c5641f..48a1420bb 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations/CPUImplementation.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations/CPUImplementation.html @@ -1 +1 @@ -CPUImplementation

CPUImplementation

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 14100%0 of 0n/a030203
lambda$withArity$0(int, ImplementationFor)6100%n/a010101
CPUImplementation(ImplementationFor, int)5100%n/a010101
withArity(int)3100%n/a010101
\ No newline at end of file
+CPUImplementation

CPUImplementation

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 14100%0 of 0n/a030203
lambda$withArity$0(int, ImplementationFor)6100%n/a010101
CPUImplementation(ImplementationFor, int)5100%n/a010101
withArity(int)3100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations/CPUImplementation.java.html b/docs/coverage/test/html/neureka.backend.main.implementations/CPUImplementation.java.html index 3d1eda177..1ef65c7da 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations/CPUImplementation.java.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations/CPUImplementation.java.html @@ -27,4 +27,4 @@ CPUImplementation andImplementation( ImplementationFor<CPU> creator ); } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations/ParsedCLImplementation.html b/docs/coverage/test/html/neureka.backend.main.implementations/ParsedCLImplementation.html index a063824e6..e9970a44e 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations/ParsedCLImplementation.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations/ParsedCLImplementation.html @@ -1 +1 @@ -ParsedCLImplementation

ParsedCLImplementation

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total12 of 30096%8 of 2263%71935508
ParsedCLImplementation(ImplementationFor, int, String, String, String, String, Function)128587%5337%4531801
lambda$_getParsedKernelsFromTemplate$2(String[], String, Map, String, String, String)90100%31178%380901
lambda$new$0(String)41100%n/a0101101
_getParsedKernelsFromTemplate(String, String, String, String, String)31100%n/a010701
getKernelFor(ExecutionCall)19100%n/a010501
lambda$new$1(String)14100%n/a010401
lambda$getKernelFor$3(DataType, KernelCode)5100%n/a010101
getKernelCode()3100%n/a010101
\ No newline at end of file
+ParsedCLImplementation

ParsedCLImplementation

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total300 of 3000%22 of 220%1919555588
ParsedCLImplementation(ImplementationFor, int, String, String, String, String, Function)970%80%55181811
lambda$_getParsedKernelsFromTemplate$2(String[], String, Map, String, String, String)900%140%889911
lambda$new$0(String)410%n/a11111111
_getParsedKernelsFromTemplate(String, String, String, String, String)310%n/a117711
getKernelFor(ExecutionCall)190%n/a115511
lambda$new$1(String)140%n/a114411
lambda$getKernelFor$3(DataType, KernelCode)50%n/a111111
getKernelCode()30%n/a111111
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations/ParsedCLImplementation.java.html b/docs/coverage/test/html/neureka.backend.main.implementations/ParsedCLImplementation.java.html index ba51164e7..60e15f8b6 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations/ParsedCLImplementation.java.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations/ParsedCLImplementation.java.html @@ -14,27 +14,27 @@ public class ParsedCLImplementation extends CLImplementation { - private final java.util.function.Function<String, String> _aliasSwapper = + private final java.util.function.Function<String, String> _aliasSwapper = s -> - "//-=<PARSED>=-//\n" + - s.replace("src1", "src1[_i_of_idx_on_tln(prv_src1_cfg, rank)]") - .replace("src2", "src2[_i_of_idx_on_tln(prv_src2_cfg, rank)]") - .replace("input1", "src1[_i_of_i(i, prv_src1_cfg, rank)]") - .replace("input2", "src2[_i_of_i(i, prv_src2_cfg, rank)]") - .replace("input", "src1[_i_of_i(i, prv_src1_cfg, rank)]") - .replace("output", "drn[_i_of_i(i, prv_drn_cfg, rank)]") - .replace("handle", "src1[_i_of_idx_on_tln(prv_src1_cfg, rank)]") - .replace("drain", "src2[_i_of_idx_on_tln(prv_src2_cfg, rank)]") - .replace("origin", "drn[di]") - .replace("target", "frn[_i_of_idx_on_tln(prv_frn_cfg, rank)]") + + "//-=<PARSED>=-//\n" + + s.replace("src1", "src1[_i_of_idx_on_tln(prv_src1_cfg, rank)]") + .replace("src2", "src2[_i_of_idx_on_tln(prv_src2_cfg, rank)]") + .replace("input1", "src1[_i_of_i(i, prv_src1_cfg, rank)]") + .replace("input2", "src2[_i_of_i(i, prv_src2_cfg, rank)]") + .replace("input", "src1[_i_of_i(i, prv_src1_cfg, rank)]") + .replace("output", "drn[_i_of_i(i, prv_drn_cfg, rank)]") + .replace("handle", "src1[_i_of_idx_on_tln(prv_src1_cfg, rank)]") + .replace("drain", "src2[_i_of_idx_on_tln(prv_src2_cfg, rank)]") + .replace("origin", "drn[di]") + .replace("target", "frn[_i_of_idx_on_tln(prv_frn_cfg, rank)]") + "\n//-=<PARSED>=-//"; - private final java.util.function.Function<String, String> asAdvanced = + private final java.util.function.Function<String, String> asAdvanced = s -> - s.replace("target", "frn[_i_of_idx_on_tln(prv_frn2_cfg, rank)]") - .replace("input3","frn[_i_of_idx_on_tln(prv_frn2_cfg, rank)]") - .replace("//-=<ARGUMENT>=-//", "") - .replace("//-=<CONFIGURATION>=-//", ""); + s.replace("target", "frn[_i_of_idx_on_tln(prv_frn2_cfg, rank)]") + .replace("input3","frn[_i_of_idx_on_tln(prv_frn2_cfg, rank)]") + .replace("//-=<ARGUMENT>=-//", "") + .replace("//-=<CONFIGURATION>=-//", ""); private final KernelCode[] _kernels; @@ -47,33 +47,33 @@ String postfix, Function<KernelCode, KernelCode[]> dataTypeAdapter ) { - super( lambda, arity ); - String parsedCode = null; - String parsedName = null; - if ( activationSource == null && differentiationSource == null ) { + super( lambda, arity ); + String parsedCode = null; + String parsedName = null; + if ( activationSource == null && differentiationSource == null ) { parsedCode = kernelSource; parsedName = postfix; - } else if (kernelSource.contains("__kernel")) { + } else if (kernelSource.contains("__kernel")) { boolean templateFound; - String[] parts = kernelSource.split("__kernel")[ 1 ].split("\\(")[ 0 ].split(" "); + String[] parts = kernelSource.split("__kernel")[ 1 ].split("\\(")[ 0 ].split(" "); - templateFound = parts[parts.length - 1].contains("template"); - if (!templateFound) + templateFound = parts[parts.length - 1].contains("template"); + if (!templateFound) throw new IllegalStateException("Invalid source 
code passed to AbstractCLExecution!"); else { - Map<String, String> map = _getParsedKernelsFromTemplate( + Map<String, String> map = _getParsedKernelsFromTemplate( parts[parts.length - 1], kernelSource, activationSource, differentiationSource, postfix ); - parsedName = map.keySet().toArray(new String[ 0 ])[ 0 ]; - parsedCode = map.values().toArray(new String[ 0 ])[ 0 ]; + parsedName = map.keySet().toArray(new String[ 0 ])[ 0 ]; + parsedCode = map.values().toArray(new String[ 0 ])[ 0 ]; } } - _kernels = dataTypeAdapter.apply( new KernelCode( parsedName, parsedCode ) ); - } + _kernels = dataTypeAdapter.apply( new KernelCode( parsedName, parsedCode ) ); + } private Map<String, String> _getParsedKernelsFromTemplate( String templateName, @@ -82,49 +82,49 @@ String differentiationSource, String postfix ) { - Map<String, String> code = new HashMap<>(); - String preName = templateName.replace("template", ""); - String source = kernelSource.replace("template", ""); - String[] parts = source.split("//-=<OPERATION>=-//"); + Map<String, String> code = new HashMap<>(); + String preName = templateName.replace("template", ""); + String source = kernelSource.replace("template", ""); + String[] parts = source.split("//-=<OPERATION>=-//"); - Parser parser = ( n, f, s ) -> { - String convcode = - parts[ 0 ].replace(preName, preName + n) + - _aliasSwapper.apply(f) + + Parser parser = ( n, f, s ) -> { + String convcode = + parts[ 0 ].replace(preName, preName + n) + + _aliasSwapper.apply(f) + parts[ 2 ] + - _aliasSwapper.apply(s) + + _aliasSwapper.apply(s) + parts[4]; - boolean isAdvanced = s.contains("target")&&s.contains("drain")&&s.contains("handle") - || s.contains("input1")&&s.contains("input2")&&s.contains("input3"); - convcode = (isAdvanced) ? asAdvanced.apply(convcode) : convcode; - code.put(preName + n, convcode); - }; + boolean isAdvanced = s.contains("target")&&s.contains("drain")&&s.contains("handle") + || s.contains("input1")&&s.contains("input2")&&s.contains("input3"); + convcode = (isAdvanced) ? asAdvanced.apply(convcode) : convcode; + code.put(preName + n, convcode); + }; //Tensor t0_origin, Tensor t1_handle, Tensor t2_drain ... 
when d>=0 //Tensor t0_drain, Tensor t1_src1, Tensor t2_src2 //drn[di], src1[_i_of_idx_on_tln(prv_src1_cfg, rank)], src2[_i_of_idx_on_tln(prv_src2_cfg, rank)] //default: src1 o src2 -> drain //inverse: src1/fdrn <-src2 <- drain //=========================================================================== - parser.apply( + parser.apply( postfix, activationSource, differentiationSource ); - return code; + return code; } @Override public KernelCode getKernelFor( ExecutionCall<OpenCLDevice> call ) { - DataType<?> callType = call.input(0 ).getDataType(); - return Arrays.stream(_kernels) - .filter( k -> k.getDataType().equals( callType ) ) - .findFirst() - .orElse(_kernels[0]); + DataType<?> callType = call.input(0 ).getDataType(); + return Arrays.stream(_kernels) + .filter( k -> k.getDataType().equals( callType ) ) + .findFirst() + .orElse(_kernels[0]); } @Override public KernelCode[] getKernelCode() { - return _kernels; + return _kernels; } private interface Parser @@ -133,4 +133,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations/SimpleCLImplementation.html b/docs/coverage/test/html/neureka.backend.main.implementations/SimpleCLImplementation.html index 899102dd2..91e0df23b 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations/SimpleCLImplementation.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations/SimpleCLImplementation.html @@ -1 +1 @@ -SimpleCLImplementation

SimpleCLImplementation

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 23100%0 of 0n/a030503
SimpleCLImplementation(ImplementationFor, int, String, String)12100%n/a010301
getKernelCode()8100%n/a010101
getKernelFor(ExecutionCall)3100%n/a010101
\ No newline at end of file
+SimpleCLImplementation

SimpleCLImplementation

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total23 of 230%0 of 0n/a335533
SimpleCLImplementation(ImplementationFor, int, String, String)120%n/a113311
getKernelCode()80%n/a111111
getKernelFor(ExecutionCall)30%n/a111111
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations/SimpleCLImplementation.java.html b/docs/coverage/test/html/neureka.backend.main.implementations/SimpleCLImplementation.java.html index 156ddd270..aff914292 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations/SimpleCLImplementation.java.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations/SimpleCLImplementation.java.html @@ -15,18 +15,18 @@ String kernelName, String kernelSource ) { - super(execution, arity); - _kernel = new KernelCode( kernelName, kernelSource ); - } + super(execution, arity); + _kernel = new KernelCode( kernelName, kernelSource ); + } @Override public KernelCode getKernelFor( ExecutionCall<OpenCLDevice> call ) { - return _kernel; + return _kernel; } @Override public KernelCode[] getKernelCode() { - return new KernelCode[]{ _kernel }; + return new KernelCode[]{ _kernel }; } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.implementations/index.html b/docs/coverage/test/html/neureka.backend.main.implementations/index.html index 38264b19e..0f83ccce2 100644 --- a/docs/coverage/test/html/neureka.backend.main.implementations/index.html +++ b/docs/coverage/test/html/neureka.backend.main.implementations/index.html @@ -1 +1 @@ -neureka.backend.main.implementations

neureka.backend.main.implementations

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total12 of 34296%8 of 2263%72636401504
ParsedCLImplementation1228896%81463%7193550801
SimpleCLImplementation23100%n/a03050301
CPUImplementation14100%n/a03020301
CLImplementation5100%n/a01020101
\ No newline at end of file
+neureka.backend.main.implementations

neureka.backend.main.implementations

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total328 of 3424%22 of 220%23266264121534
ParsedCLImplementation3000%220%191955558811
SimpleCLImplementation230%n/a33553311
CLImplementation50%n/a11221111
CPUImplementation14100%n/a03020301
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.backend.main.implementations/index.source.html b/docs/coverage/test/html/neureka.backend.main.implementations/index.source.html
index fe4f22620..6a0dd9d27 100644
--- a/docs/coverage/test/html/neureka.backend.main.implementations/index.source.html
+++ b/docs/coverage/test/html/neureka.backend.main.implementations/index.source.html
@@ -1 +1 @@
-neureka.backend.main.implementations

neureka.backend.main.implementations

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total12 of 34296%8 of 2263%72636401504
ParsedCLImplementation.java1228896%81463%7193550801
SimpleCLImplementation.java23100%n/a03050301
CPUImplementation.java14100%n/a03020301
CLImplementation.java5100%n/a01020101
\ No newline at end of file
+neureka.backend.main.implementations

neureka.backend.main.implementations

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total328 of 3424%22 of 220%23266264121534
ParsedCLImplementation.java3000%220%191955558811
SimpleCLImplementation.java230%n/a33553311
CLImplementation.java50%n/a11221111
CPUImplementation.java14100%n/a03020301
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.backend.main.memory/MemUtil.html b/docs/coverage/test/html/neureka.backend.main.memory/MemUtil.html
index 2a4626ef9..2b192ad2d 100644
--- a/docs/coverage/test/html/neureka.backend.main.memory/MemUtil.html
+++ b/docs/coverage/test/html/neureka.backend.main.memory/MemUtil.html
@@ -1 +1 @@
-MemUtil

MemUtil

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 121100%1 of 1291%11401908
autoDelete(Tensor[])42100%1990%160901
keep(Tensor, Tensor, Supplier)28100%n/a010501
keep(Tensor[], Supplier)19100%n/a010501
lambda$autoDelete$0(GraphNode)8100%2100%020101
lambda$keep$4(Tensor)6100%n/a010101
lambda$keep$3(Tensor)6100%n/a010101
lambda$keep$2(Tensor)6100%n/a010101
lambda$keep$1(Tensor)6100%n/a010101
\ No newline at end of file
+MemUtil

MemUtil

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 121100%1 of 1291%11401908
autoDelete(Tensor[])42100%1990%160901
keep(Tensor, Tensor, Supplier)28100%n/a010501
keep(Tensor[], Supplier)19100%n/a010501
lambda$autoDelete$0(GraphNode)8100%2100%020101
lambda$keep$4(Tensor)6100%n/a010101
lambda$keep$3(Tensor)6100%n/a010101
lambda$keep$2(Tensor)6100%n/a010101
lambda$keep$1(Tensor)6100%n/a010101
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.backend.main.memory/MemUtil.java.html b/docs/coverage/test/html/neureka.backend.main.memory/MemUtil.java.html
index 7da7efebe..9c959a648 100644
--- a/docs/coverage/test/html/neureka.backend.main.memory/MemUtil.java.html
+++ b/docs/coverage/test/html/neureka.backend.main.memory/MemUtil.java.html
@@ -101,4 +101,4 @@
 } } - \ No newline at end of file + \ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.backend.main.memory/MemValidator.html b/docs/coverage/test/html/neureka.backend.main.memory/MemValidator.html
index 9ffa40eba..6cdf2014e 100644
--- a/docs/coverage/test/html/neureka.backend.main.memory/MemValidator.html
+++ b/docs/coverage/test/html/neureka.backend.main.memory/MemValidator.html
@@ -1 +1 @@
-MemValidator

MemValidator

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total13 of 20593%5 of 3083%527237012
MemValidator(Tensor[], Supplier)1011892%31583%31023301
lambda$new$1(Tensor)21285%1375%130101
lambda$new$5(Tensor[], Result, int)1292%1150%120101
lambda$new$3(Result, Tensor)11100%2100%020101
lambda$new$6(Tensor[], Result, int)10100%2100%020101
lambda$new$4(Result, Tensor)8100%2100%020101
forInputs(Tensor[], Supplier)6100%n/a010101
isWronglyIntermediate()3100%n/a010101
isWronglyNonIntermediate()3100%n/a010101
getResult()3100%n/a010101
lambda$new$2(int)3100%n/a010101
lambda$new$0(int)3100%n/a010101
\ No newline at end of file
+MemValidator

MemValidator

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total13 of 20593%5 of 3083%527237012
MemValidator(Tensor[], Supplier)1011892%31583%31023301
lambda$new$1(Tensor)21285%1375%130101
lambda$new$5(Tensor[], Result, int)1292%1150%120101
lambda$new$3(Result, Tensor)11100%2100%020101
lambda$new$6(Tensor[], Result, int)10100%2100%020101
lambda$new$4(Result, Tensor)8100%2100%020101
forInputs(Tensor[], Supplier)6100%n/a010101
isWronglyIntermediate()3100%n/a010101
isWronglyNonIntermediate()3100%n/a010101
getResult()3100%n/a010101
lambda$new$2(int)3100%n/a010101
lambda$new$0(int)3100%n/a010101
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.backend.main.memory/MemValidator.java.html b/docs/coverage/test/html/neureka.backend.main.memory/MemValidator.java.html
index 1f13045c9..0f03dfa74 100644
--- a/docs/coverage/test/html/neureka.backend.main.memory/MemValidator.java.html
+++ b/docs/coverage/test/html/neureka.backend.main.memory/MemValidator.java.html
@@ -113,4 +113,4 @@
 public Result getResult() { return _result; } } - \ No newline at end of file + \ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.backend.main.memory/index.html b/docs/coverage/test/html/neureka.backend.main.memory/index.html
index 42534d175..c285ba5a6 100644
--- a/docs/coverage/test/html/neureka.backend.main.memory/index.html
+++ b/docs/coverage/test/html/neureka.backend.main.memory/index.html
@@ -1 +1 @@
-neureka.backend.main.memory

neureka.backend.main.memory

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total13 of 32696%6 of 4285%64125602002
MemValidator1319293%52583%52723701201
MemUtil121100%11191%1140190801
\ No newline at end of file
+neureka.backend.main.memory

neureka.backend.main.memory

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total13 of 32696%6 of 4285%64125602002
MemValidator1319293%52583%52723701201
MemUtil121100%11191%1140190801
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.backend.main.memory/index.source.html b/docs/coverage/test/html/neureka.backend.main.memory/index.source.html
index a23e460e8..f73771157 100644
--- a/docs/coverage/test/html/neureka.backend.main.memory/index.source.html
+++ b/docs/coverage/test/html/neureka.backend.main.memory/index.source.html
@@ -1 +1 @@
-neureka.backend.main.memory

neureka.backend.main.memory

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total13 of 32696%6 of 4285%64125602002
MemValidator.java1319293%52583%52723701201
MemUtil.java121100%11191%1140190801
\ No newline at end of file
+neureka.backend.main.memory

neureka.backend.main.memory

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total13 of 32696%6 of 4285%64125602002
MemValidator.java1319293%52583%52723701201
MemUtil.java121100%11191%1140190801
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.backend.main.operations.functions/Absolute.html b/docs/coverage/test/html/neureka.backend.main.operations.functions/Absolute.html
index 775183476..1d5aa33df 100644
--- a/docs/coverage/test/html/neureka.backend.main.operations.functions/Absolute.html
+++ b/docs/coverage/test/html/neureka.backend.main.operations.functions/Absolute.html
@@ -1 +1 @@
-Absolute

Absolute

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 4100%0 of 0n/a010201
Absolute()4100%n/a010201
\ No newline at end of file
+Absolute

Absolute

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 4100%0 of 0n/a010201
Absolute()4100%n/a010201
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.operations.functions/Absolute.java.html b/docs/coverage/test/html/neureka.backend.main.operations.functions/Absolute.java.html index 858fa6c0b..b212ef891 100644 --- a/docs/coverage/test/html/neureka.backend.main.operations.functions/Absolute.java.html +++ b/docs/coverage/test/html/neureka.backend.main.operations.functions/Absolute.java.html @@ -8,4 +8,4 @@ super(ScalarFun.ABSOLUTE); } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.operations.functions/AbstractActivationOperation.html b/docs/coverage/test/html/neureka.backend.main.operations.functions/AbstractActivationOperation.html index c89510b1d..1cacab21e 100644 --- a/docs/coverage/test/html/neureka.backend.main.operations.functions/AbstractActivationOperation.html +++ b/docs/coverage/test/html/neureka.backend.main.operations.functions/AbstractActivationOperation.html @@ -1 +1 @@ -AbstractActivationOperation

AbstractActivationOperation

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 259100%1 of 1291%11004304
execute(Function, ExecutionCall)139100%4100%0302001
AbstractActivationOperation(ScalarFun)53100%n/a0101701
stringify(String[])36100%1375%130301
calculate(double[], int, int, Function[])31100%4100%030301
\ No newline at end of file
+AbstractActivationOperation

AbstractActivationOperation

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 259100%1 of 1291%11004304
execute(Function, ExecutionCall)139100%4100%0302001
AbstractActivationOperation(ScalarFun)53100%n/a0101701
stringify(String[])36100%1375%130301
calculate(double[], int, int, Function[])31100%4100%030301
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.operations.functions/AbstractActivationOperation.java.html b/docs/coverage/test/html/neureka.backend.main.operations.functions/AbstractActivationOperation.java.html index cd84cf075..fa48708bc 100644 --- a/docs/coverage/test/html/neureka.backend.main.operations.functions/AbstractActivationOperation.java.html +++ b/docs/coverage/test/html/neureka.backend.main.operations.functions/AbstractActivationOperation.java.html @@ -96,4 +96,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.operations.functions/Cbrt.html b/docs/coverage/test/html/neureka.backend.main.operations.functions/Cbrt.html index e69d4d8a2..3a34732cc 100644 --- a/docs/coverage/test/html/neureka.backend.main.operations.functions/Cbrt.html +++ b/docs/coverage/test/html/neureka.backend.main.operations.functions/Cbrt.html @@ -1 +1 @@ -Cbrt

Cbrt

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 4100%0 of 0n/a010201
Cbrt()4100%n/a010201
\ No newline at end of file
+Cbrt

Cbrt

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 4100%0 of 0n/a010201
Cbrt()4100%n/a010201
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.operations.functions/Cbrt.java.html b/docs/coverage/test/html/neureka.backend.main.operations.functions/Cbrt.java.html index 83d61cc1d..41b74c702 100644 --- a/docs/coverage/test/html/neureka.backend.main.operations.functions/Cbrt.java.html +++ b/docs/coverage/test/html/neureka.backend.main.operations.functions/Cbrt.java.html @@ -8,4 +8,4 @@ super(ScalarFun.CBRT); } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.operations.functions/Cosinus.html b/docs/coverage/test/html/neureka.backend.main.operations.functions/Cosinus.html index 3b426668c..24df57642 100644 --- a/docs/coverage/test/html/neureka.backend.main.operations.functions/Cosinus.html +++ b/docs/coverage/test/html/neureka.backend.main.operations.functions/Cosinus.html @@ -1 +1 @@ -Cosinus

Cosinus

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 4100%0 of 0n/a010201
Cosinus()4100%n/a010201
\ No newline at end of file
+Cosinus

Cosinus

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 4100%0 of 0n/a010201
Cosinus()4100%n/a010201
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.operations.functions/Cosinus.java.html b/docs/coverage/test/html/neureka.backend.main.operations.functions/Cosinus.java.html index 85f77b907..cbb0f60ee 100644 --- a/docs/coverage/test/html/neureka.backend.main.operations.functions/Cosinus.java.html +++ b/docs/coverage/test/html/neureka.backend.main.operations.functions/Cosinus.java.html @@ -8,4 +8,4 @@ super(ScalarFun.COSINUS); } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.operations.functions/Exp.html b/docs/coverage/test/html/neureka.backend.main.operations.functions/Exp.html index f65025554..a6b526c2b 100644 --- a/docs/coverage/test/html/neureka.backend.main.operations.functions/Exp.html +++ b/docs/coverage/test/html/neureka.backend.main.operations.functions/Exp.html @@ -1 +1 @@ -Exp

Exp

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 4100%0 of 0n/a010201
Exp()4100%n/a010201
\ No newline at end of file
+Exp

Exp

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 4100%0 of 0n/a010201
Exp()4100%n/a010201
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.operations.functions/Exp.java.html b/docs/coverage/test/html/neureka.backend.main.operations.functions/Exp.java.html index 2b11046d0..b76e6731e 100644 --- a/docs/coverage/test/html/neureka.backend.main.operations.functions/Exp.java.html +++ b/docs/coverage/test/html/neureka.backend.main.operations.functions/Exp.java.html @@ -8,4 +8,4 @@ super(ScalarFun.EXP); } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.operations.functions/GaSU.html b/docs/coverage/test/html/neureka.backend.main.operations.functions/GaSU.html index b1afb820e..601df8f2f 100644 --- a/docs/coverage/test/html/neureka.backend.main.operations.functions/GaSU.html +++ b/docs/coverage/test/html/neureka.backend.main.operations.functions/GaSU.html @@ -1 +1 @@ -GaSU

GaSU

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 4100%0 of 0n/a010201
GaSU()4100%n/a010201
\ No newline at end of file
+GaSU

GaSU

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 4100%0 of 0n/a010201
GaSU()4100%n/a010201
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.operations.functions/GaSU.java.html b/docs/coverage/test/html/neureka.backend.main.operations.functions/GaSU.java.html index b5f2e433b..914d7c294 100644 --- a/docs/coverage/test/html/neureka.backend.main.operations.functions/GaSU.java.html +++ b/docs/coverage/test/html/neureka.backend.main.operations.functions/GaSU.java.html @@ -17,4 +17,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.operations.functions/GaTU.html b/docs/coverage/test/html/neureka.backend.main.operations.functions/GaTU.html index 582e416a1..3d656c76d 100644 --- a/docs/coverage/test/html/neureka.backend.main.operations.functions/GaTU.html +++ b/docs/coverage/test/html/neureka.backend.main.operations.functions/GaTU.html @@ -1 +1 @@ -GaTU

GaTU

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 4100%0 of 0n/a010201
GaTU()4100%n/a010201
\ No newline at end of file
+GaTU

GaTU

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 4100%0 of 0n/a010201
GaTU()4100%n/a010201
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.operations.functions/GaTU.java.html b/docs/coverage/test/html/neureka.backend.main.operations.functions/GaTU.java.html index e866c3c9d..ec28c8ca0 100644 --- a/docs/coverage/test/html/neureka.backend.main.operations.functions/GaTU.java.html +++ b/docs/coverage/test/html/neureka.backend.main.operations.functions/GaTU.java.html @@ -16,4 +16,4 @@ super(ScalarFun.GATU); } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.operations.functions/Gaussian.html b/docs/coverage/test/html/neureka.backend.main.operations.functions/Gaussian.html index e3298e17f..67b349c03 100644 --- a/docs/coverage/test/html/neureka.backend.main.operations.functions/Gaussian.html +++ b/docs/coverage/test/html/neureka.backend.main.operations.functions/Gaussian.html @@ -1 +1 @@ -Gaussian

Gaussian

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 4100%0 of 0n/a010201
Gaussian()4100%n/a010201
\ No newline at end of file
+Gaussian

Gaussian

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 4100%0 of 0n/a010201
Gaussian()4100%n/a010201
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.operations.functions/Gaussian.java.html b/docs/coverage/test/html/neureka.backend.main.operations.functions/Gaussian.java.html index a441c7ab5..df17cc55b 100644 --- a/docs/coverage/test/html/neureka.backend.main.operations.functions/Gaussian.java.html +++ b/docs/coverage/test/html/neureka.backend.main.operations.functions/Gaussian.java.html @@ -8,4 +8,4 @@ super(ScalarFun.GAUSSIAN); } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.operations.functions/GaussianFast.html b/docs/coverage/test/html/neureka.backend.main.operations.functions/GaussianFast.html index f8ab2d606..229ba1707 100644 --- a/docs/coverage/test/html/neureka.backend.main.operations.functions/GaussianFast.html +++ b/docs/coverage/test/html/neureka.backend.main.operations.functions/GaussianFast.html @@ -1 +1 @@ -GaussianFast

GaussianFast

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 4100%0 of 0n/a010201
GaussianFast()4100%n/a010201
\ No newline at end of file
+GaussianFast

GaussianFast

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 4100%0 of 0n/a010201
GaussianFast()4100%n/a010201
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.operations.functions/GaussianFast.java.html b/docs/coverage/test/html/neureka.backend.main.operations.functions/GaussianFast.java.html index 7e9aa0013..ac9fd5304 100644 --- a/docs/coverage/test/html/neureka.backend.main.operations.functions/GaussianFast.java.html +++ b/docs/coverage/test/html/neureka.backend.main.operations.functions/GaussianFast.java.html @@ -8,4 +8,4 @@ super(ScalarFun.GAUSSIAN_FAST); } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.operations.functions/GeLU.html b/docs/coverage/test/html/neureka.backend.main.operations.functions/GeLU.html index 537ffd6f3..97a7bfe6b 100644 --- a/docs/coverage/test/html/neureka.backend.main.operations.functions/GeLU.html +++ b/docs/coverage/test/html/neureka.backend.main.operations.functions/GeLU.html @@ -1 +1 @@ -GeLU

GeLU

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 4100%0 of 0n/a010201
GeLU()4100%n/a010201
\ No newline at end of file
+GeLU

GeLU

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 4100%0 of 0n/a010201
GeLU()4100%n/a010201
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.operations.functions/GeLU.java.html b/docs/coverage/test/html/neureka.backend.main.operations.functions/GeLU.java.html index 907740699..82ddee63b 100644 --- a/docs/coverage/test/html/neureka.backend.main.operations.functions/GeLU.java.html +++ b/docs/coverage/test/html/neureka.backend.main.operations.functions/GeLU.java.html @@ -15,4 +15,4 @@ super(ScalarFun.GELU); } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.operations.functions/Identity.html b/docs/coverage/test/html/neureka.backend.main.operations.functions/Identity.html index fc666b148..d7104973b 100644 --- a/docs/coverage/test/html/neureka.backend.main.operations.functions/Identity.html +++ b/docs/coverage/test/html/neureka.backend.main.operations.functions/Identity.html @@ -1 +1 @@ -Identity

Identity

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 4100%0 of 0n/a010201
Identity()4100%n/a010201
\ No newline at end of file
+Identity

Identity

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 4100%0 of 0n/a010201
Identity()4100%n/a010201
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.operations.functions/Identity.java.html b/docs/coverage/test/html/neureka.backend.main.operations.functions/Identity.java.html index f898d4559..f628e9810 100644 --- a/docs/coverage/test/html/neureka.backend.main.operations.functions/Identity.java.html +++ b/docs/coverage/test/html/neureka.backend.main.operations.functions/Identity.java.html @@ -8,4 +8,4 @@ super(ScalarFun.IDENTITY); } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.operations.functions/Log10.html b/docs/coverage/test/html/neureka.backend.main.operations.functions/Log10.html index a2d879c03..d80f300f8 100644 --- a/docs/coverage/test/html/neureka.backend.main.operations.functions/Log10.html +++ b/docs/coverage/test/html/neureka.backend.main.operations.functions/Log10.html @@ -1 +1 @@ -Log10

Log10

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 4100%0 of 0n/a010201
Log10()4100%n/a010201
\ No newline at end of file
+Log10

Log10

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 4100%0 of 0n/a010201
Log10()4100%n/a010201
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.operations.functions/Log10.java.html b/docs/coverage/test/html/neureka.backend.main.operations.functions/Log10.java.html index 07dbe87b7..c816d940a 100644 --- a/docs/coverage/test/html/neureka.backend.main.operations.functions/Log10.java.html +++ b/docs/coverage/test/html/neureka.backend.main.operations.functions/Log10.java.html @@ -8,4 +8,4 @@ super(ScalarFun.LOG10); } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.operations.functions/Logarithm.html b/docs/coverage/test/html/neureka.backend.main.operations.functions/Logarithm.html index dbe7c2eb6..584ee31bf 100644 --- a/docs/coverage/test/html/neureka.backend.main.operations.functions/Logarithm.html +++ b/docs/coverage/test/html/neureka.backend.main.operations.functions/Logarithm.html @@ -1 +1 @@ -Logarithm

Logarithm

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total5 of 3183%1 of 250%130402
asDerivative(Function[], int)52281%1150%120201
Logarithm()4100%n/a010201
\ No newline at end of file +Logarithm

Logarithm

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total5 of 3183%1 of 250%130402
asDerivative(Function[], int)52281%1150%120201
Logarithm()4100%n/a010201
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.operations.functions/Logarithm.java.html b/docs/coverage/test/html/neureka.backend.main.operations.functions/Logarithm.java.html index fd20535f5..d40dd1191 100644 --- a/docs/coverage/test/html/neureka.backend.main.operations.functions/Logarithm.java.html +++ b/docs/coverage/test/html/neureka.backend.main.operations.functions/Logarithm.java.html @@ -14,4 +14,4 @@ return children[0].getDerivative(derivationIndex)+" / "+children[0].toString(); } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.operations.functions/Quadratic.html b/docs/coverage/test/html/neureka.backend.main.operations.functions/Quadratic.html index dc37d70e5..39067ae84 100644 --- a/docs/coverage/test/html/neureka.backend.main.operations.functions/Quadratic.html +++ b/docs/coverage/test/html/neureka.backend.main.operations.functions/Quadratic.html @@ -1 +1 @@ -Quadratic

Quadratic

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 4100%0 of 0n/a010201
Quadratic()4100%n/a010201
\ No newline at end of file +Quadratic

Quadratic

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 4100%0 of 0n/a010201
Quadratic()4100%n/a010201
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.operations.functions/Quadratic.java.html b/docs/coverage/test/html/neureka.backend.main.operations.functions/Quadratic.java.html index 90088f075..b77cebbe3 100644 --- a/docs/coverage/test/html/neureka.backend.main.operations.functions/Quadratic.java.html +++ b/docs/coverage/test/html/neureka.backend.main.operations.functions/Quadratic.java.html @@ -8,4 +8,4 @@ super(ScalarFun.QUADRATIC); } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.operations.functions/ReLU.html b/docs/coverage/test/html/neureka.backend.main.operations.functions/ReLU.html index 9d7ce1db5..a95eb352f 100644 --- a/docs/coverage/test/html/neureka.backend.main.operations.functions/ReLU.html +++ b/docs/coverage/test/html/neureka.backend.main.operations.functions/ReLU.html @@ -1 +1 @@ -ReLU

ReLU

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 4100%0 of 0n/a010201
ReLU()4100%n/a010201
\ No newline at end of file +ReLU

ReLU

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 4100%0 of 0n/a010201
ReLU()4100%n/a010201
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.operations.functions/ReLU.java.html b/docs/coverage/test/html/neureka.backend.main.operations.functions/ReLU.java.html index b3d2bc134..48d3e5a0d 100644 --- a/docs/coverage/test/html/neureka.backend.main.operations.functions/ReLU.java.html +++ b/docs/coverage/test/html/neureka.backend.main.operations.functions/ReLU.java.html @@ -8,4 +8,4 @@ super(ScalarFun.RELU); } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.operations.functions/SeLU.html b/docs/coverage/test/html/neureka.backend.main.operations.functions/SeLU.html index f83099c44..b5601e7fb 100644 --- a/docs/coverage/test/html/neureka.backend.main.operations.functions/SeLU.html +++ b/docs/coverage/test/html/neureka.backend.main.operations.functions/SeLU.html @@ -1 +1 @@ -SeLU

SeLU

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 4100%0 of 0n/a010201
SeLU()4100%n/a010201
\ No newline at end of file +SeLU

SeLU

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 4100%0 of 0n/a010201
SeLU()4100%n/a010201
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.operations.functions/SeLU.java.html b/docs/coverage/test/html/neureka.backend.main.operations.functions/SeLU.java.html index c7d637127..a85c5a9b2 100644 --- a/docs/coverage/test/html/neureka.backend.main.operations.functions/SeLU.java.html +++ b/docs/coverage/test/html/neureka.backend.main.operations.functions/SeLU.java.html @@ -19,4 +19,4 @@ super(ScalarFun.SELU); } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.operations.functions/SiLU.html b/docs/coverage/test/html/neureka.backend.main.operations.functions/SiLU.html index 888b707e3..656ea6ebd 100644 --- a/docs/coverage/test/html/neureka.backend.main.operations.functions/SiLU.html +++ b/docs/coverage/test/html/neureka.backend.main.operations.functions/SiLU.html @@ -1 +1 @@ -SiLU

SiLU

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 4100%0 of 0n/a010201
SiLU()4100%n/a010201
\ No newline at end of file +SiLU

SiLU

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 4100%0 of 0n/a010201
SiLU()4100%n/a010201
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.operations.functions/SiLU.java.html b/docs/coverage/test/html/neureka.backend.main.operations.functions/SiLU.java.html index 4a6e1682c..0164adc8c 100644 --- a/docs/coverage/test/html/neureka.backend.main.operations.functions/SiLU.java.html +++ b/docs/coverage/test/html/neureka.backend.main.operations.functions/SiLU.java.html @@ -14,4 +14,4 @@ super(ScalarFun.SILU); } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.operations.functions/Sigmoid.html b/docs/coverage/test/html/neureka.backend.main.operations.functions/Sigmoid.html index d96e070e7..07f7723fd 100644 --- a/docs/coverage/test/html/neureka.backend.main.operations.functions/Sigmoid.html +++ b/docs/coverage/test/html/neureka.backend.main.operations.functions/Sigmoid.html @@ -1 +1 @@ -Sigmoid

Sigmoid

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 4100%0 of 0n/a010201
Sigmoid()4100%n/a010201
\ No newline at end of file +Sigmoid

Sigmoid

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 4100%0 of 0n/a010201
Sigmoid()4100%n/a010201
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.operations.functions/Sigmoid.java.html b/docs/coverage/test/html/neureka.backend.main.operations.functions/Sigmoid.java.html index 83bac56b3..1b7c87977 100644 --- a/docs/coverage/test/html/neureka.backend.main.operations.functions/Sigmoid.java.html +++ b/docs/coverage/test/html/neureka.backend.main.operations.functions/Sigmoid.java.html @@ -8,4 +8,4 @@ super(ScalarFun.SIGMOID); } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.operations.functions/Sinus.html b/docs/coverage/test/html/neureka.backend.main.operations.functions/Sinus.html index 5485f0b18..f53761419 100644 --- a/docs/coverage/test/html/neureka.backend.main.operations.functions/Sinus.html +++ b/docs/coverage/test/html/neureka.backend.main.operations.functions/Sinus.html @@ -1 +1 @@ -Sinus

Sinus

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 4100%0 of 0n/a010201
Sinus()4100%n/a010201
\ No newline at end of file +Sinus

Sinus

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 4100%0 of 0n/a010201
Sinus()4100%n/a010201
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.operations.functions/Sinus.java.html b/docs/coverage/test/html/neureka.backend.main.operations.functions/Sinus.java.html index 1be604ac0..c494dcec8 100644 --- a/docs/coverage/test/html/neureka.backend.main.operations.functions/Sinus.java.html +++ b/docs/coverage/test/html/neureka.backend.main.operations.functions/Sinus.java.html @@ -8,4 +8,4 @@ super(ScalarFun.SINUS); } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.operations.functions/Softplus.html b/docs/coverage/test/html/neureka.backend.main.operations.functions/Softplus.html index 37c9c4263..75dfb2fe1 100644 --- a/docs/coverage/test/html/neureka.backend.main.operations.functions/Softplus.html +++ b/docs/coverage/test/html/neureka.backend.main.operations.functions/Softplus.html @@ -1 +1 @@ -Softplus

Softplus

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 4100%0 of 0n/a010201
Softplus()4100%n/a010201
\ No newline at end of file +Softplus

Softplus

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 4100%0 of 0n/a010201
Softplus()4100%n/a010201
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.operations.functions/Softplus.java.html b/docs/coverage/test/html/neureka.backend.main.operations.functions/Softplus.java.html index b30c32065..cf27b5c73 100644 --- a/docs/coverage/test/html/neureka.backend.main.operations.functions/Softplus.java.html +++ b/docs/coverage/test/html/neureka.backend.main.operations.functions/Softplus.java.html @@ -12,4 +12,4 @@ super(ScalarFun.SOFTPLUS); } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.operations.functions/Softsign.html b/docs/coverage/test/html/neureka.backend.main.operations.functions/Softsign.html index c9c044eb6..c494398a0 100644 --- a/docs/coverage/test/html/neureka.backend.main.operations.functions/Softsign.html +++ b/docs/coverage/test/html/neureka.backend.main.operations.functions/Softsign.html @@ -1 +1 @@ -Softsign

Softsign

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 4100%0 of 0n/a010201
Softsign()4100%n/a010201
\ No newline at end of file +Softsign

Softsign

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 4100%0 of 0n/a010201
Softsign()4100%n/a010201
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.operations.functions/Softsign.java.html b/docs/coverage/test/html/neureka.backend.main.operations.functions/Softsign.java.html index 33076e9af..125ad4aef 100644 --- a/docs/coverage/test/html/neureka.backend.main.operations.functions/Softsign.java.html +++ b/docs/coverage/test/html/neureka.backend.main.operations.functions/Softsign.java.html @@ -16,4 +16,4 @@ super(ScalarFun.SOFTSIGN); } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.operations.functions/Sqrt.html b/docs/coverage/test/html/neureka.backend.main.operations.functions/Sqrt.html index 6bd667995..d9e350f64 100644 --- a/docs/coverage/test/html/neureka.backend.main.operations.functions/Sqrt.html +++ b/docs/coverage/test/html/neureka.backend.main.operations.functions/Sqrt.html @@ -1 +1 @@ -Sqrt

Sqrt

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 4100%0 of 0n/a010201
Sqrt()4100%n/a010201
\ No newline at end of file +Sqrt

Sqrt

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 4100%0 of 0n/a010201
Sqrt()4100%n/a010201
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.operations.functions/Sqrt.java.html b/docs/coverage/test/html/neureka.backend.main.operations.functions/Sqrt.java.html index e3c3f87f6..2089bc70a 100644 --- a/docs/coverage/test/html/neureka.backend.main.operations.functions/Sqrt.java.html +++ b/docs/coverage/test/html/neureka.backend.main.operations.functions/Sqrt.java.html @@ -8,4 +8,4 @@ super(ScalarFun.SQRT); } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.operations.functions/Tanh.html b/docs/coverage/test/html/neureka.backend.main.operations.functions/Tanh.html index 9bfb3f841..b9a50e0d4 100644 --- a/docs/coverage/test/html/neureka.backend.main.operations.functions/Tanh.html +++ b/docs/coverage/test/html/neureka.backend.main.operations.functions/Tanh.html @@ -1 +1 @@ -Tanh

Tanh

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 4100%0 of 0n/a010201
Tanh()4100%n/a010201
\ No newline at end of file +Tanh

Tanh

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 4100%0 of 0n/a010201
Tanh()4100%n/a010201
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.operations.functions/Tanh.java.html b/docs/coverage/test/html/neureka.backend.main.operations.functions/Tanh.java.html index 2db6c8a5e..1c734906e 100644 --- a/docs/coverage/test/html/neureka.backend.main.operations.functions/Tanh.java.html +++ b/docs/coverage/test/html/neureka.backend.main.operations.functions/Tanh.java.html @@ -9,4 +9,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.operations.functions/TanhFast.html b/docs/coverage/test/html/neureka.backend.main.operations.functions/TanhFast.html index cf819b2db..6d5cc685d 100644 --- a/docs/coverage/test/html/neureka.backend.main.operations.functions/TanhFast.html +++ b/docs/coverage/test/html/neureka.backend.main.operations.functions/TanhFast.html @@ -1 +1 @@ -TanhFast

TanhFast

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 4100%0 of 0n/a010201
TanhFast()4100%n/a010201
\ No newline at end of file +TanhFast

TanhFast

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 4100%0 of 0n/a010201
TanhFast()4100%n/a010201
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.operations.functions/TanhFast.java.html b/docs/coverage/test/html/neureka.backend.main.operations.functions/TanhFast.java.html index aca946de4..95c8965a6 100644 --- a/docs/coverage/test/html/neureka.backend.main.operations.functions/TanhFast.java.html +++ b/docs/coverage/test/html/neureka.backend.main.operations.functions/TanhFast.java.html @@ -8,4 +8,4 @@ super(ScalarFun.TANH_FAST); } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.operations.functions/index.html b/docs/coverage/test/html/neureka.backend.main.operations.functions/index.html index b4e1b7931..5513fd373 100644 --- a/docs/coverage/test/html/neureka.backend.main.operations.functions/index.html +++ b/docs/coverage/test/html/neureka.backend.main.operations.functions/index.html @@ -1 +1 @@ -neureka.backend.main.operations.functions

neureka.backend.main.operations.functions

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total5 of 37898%2 of 1485%235091028024
Logarithm52683%1150%13040201
AbstractActivationOperation259100%11191%1100430401
TanhFast4100%n/a01020101
GaTU4100%n/a01020101
Identity4100%n/a01020101
GaSU4100%n/a01020101
Tanh4100%n/a01020101
Exp4100%n/a01020101
Sigmoid4100%n/a01020101
Sqrt4100%n/a01020101
Absolute4100%n/a01020101
SiLU4100%n/a01020101
Softplus4100%n/a01020101
Softsign4100%n/a01020101
Log104100%n/a01020101
GaussianFast4100%n/a01020101
SeLU4100%n/a01020101
Cosinus4100%n/a01020101
Gaussian4100%n/a01020101
Cbrt4100%n/a01020101
GeLU4100%n/a01020101
ReLU4100%n/a01020101
Sinus4100%n/a01020101
Quadratic4100%n/a01020101
\ No newline at end of file +neureka.backend.main.operations.functions

neureka.backend.main.operations.functions

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total5 of 37898%2 of 1485%235091028024
Logarithm52683%1150%13040201
AbstractActivationOperation259100%11191%1100430401
TanhFast4100%n/a01020101
GaTU4100%n/a01020101
Identity4100%n/a01020101
GaSU4100%n/a01020101
Tanh4100%n/a01020101
Exp4100%n/a01020101
Sigmoid4100%n/a01020101
Sqrt4100%n/a01020101
Absolute4100%n/a01020101
SiLU4100%n/a01020101
Softplus4100%n/a01020101
Softsign4100%n/a01020101
Log104100%n/a01020101
GaussianFast4100%n/a01020101
SeLU4100%n/a01020101
Cosinus4100%n/a01020101
Gaussian4100%n/a01020101
Cbrt4100%n/a01020101
GeLU4100%n/a01020101
ReLU4100%n/a01020101
Sinus4100%n/a01020101
Quadratic4100%n/a01020101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.operations.functions/index.source.html b/docs/coverage/test/html/neureka.backend.main.operations.functions/index.source.html index 8a5ae0f7a..9943ca1b5 100644 --- a/docs/coverage/test/html/neureka.backend.main.operations.functions/index.source.html +++ b/docs/coverage/test/html/neureka.backend.main.operations.functions/index.source.html @@ -1 +1 @@ -neureka.backend.main.operations.functions

neureka.backend.main.operations.functions

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total5 of 37898%2 of 1485%235091028024
Logarithm.java52683%1150%13040201
AbstractActivationOperation.java259100%11191%1100430401
GaussianFast.java4100%n/a01020101
SiLU.java4100%n/a01020101
Sigmoid.java4100%n/a01020101
Absolute.java4100%n/a01020101
ReLU.java4100%n/a01020101
GaSU.java4100%n/a01020101
GeLU.java4100%n/a01020101
GaTU.java4100%n/a01020101
Sqrt.java4100%n/a01020101
Softsign.java4100%n/a01020101
TanhFast.java4100%n/a01020101
Softplus.java4100%n/a01020101
Cbrt.java4100%n/a01020101
Quadratic.java4100%n/a01020101
Gaussian.java4100%n/a01020101
Cosinus.java4100%n/a01020101
Tanh.java4100%n/a01020101
Exp.java4100%n/a01020101
Sinus.java4100%n/a01020101
Identity.java4100%n/a01020101
Log10.java4100%n/a01020101
SeLU.java4100%n/a01020101
\ No newline at end of file +neureka.backend.main.operations.functions

neureka.backend.main.operations.functions

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total5 of 37898%2 of 1485%235091028024
Logarithm.java52683%1150%13040201
AbstractActivationOperation.java259100%11191%1100430401
GaussianFast.java4100%n/a01020101
ReLU.java4100%n/a01020101
GaTU.java4100%n/a01020101
Softplus.java4100%n/a01020101
Quadratic.java4100%n/a01020101
Cosinus.java4100%n/a01020101
Tanh.java4100%n/a01020101
Exp.java4100%n/a01020101
Log10.java4100%n/a01020101
SiLU.java4100%n/a01020101
Sigmoid.java4100%n/a01020101
Absolute.java4100%n/a01020101
GaSU.java4100%n/a01020101
GeLU.java4100%n/a01020101
Sqrt.java4100%n/a01020101
Softsign.java4100%n/a01020101
TanhFast.java4100%n/a01020101
Cbrt.java4100%n/a01020101
Gaussian.java4100%n/a01020101
Sinus.java4100%n/a01020101
Identity.java4100%n/a01020101
SeLU.java4100%n/a01020101
docs/coverage/test/html/neureka.backend.main.operations.indexer/
Regenerated JaCoCo pages: Product.html, Product.java.html, Summation.html, Summation.java.html, index.html, index.source.html.
Coverage figures are identical before and after; only the trailing non-breaking space and the missing end-of-file newline differ.

Package summary (unchanged):
  Total      96 of 578 instructions missed (83%), 12 of 48 branches missed (75%)
  Product    70 of 398 missed (82%), 7 of 32 branches missed (78%); weakest method: calculate(double[], int, int, Function[]) at 37%
  Summation  26 of 180 missed (85%), 5 of 16 branches missed (68%); weakest method: calculate(double[], int, int, Function[]) at 33%
docs/coverage/test/html/neureka.backend.main.operations.linear.internal.blas/
Regenerated JaCoCo pages: AXPY.html, AXPY.java.html, COPY.html, COPY.java.html, DOT.html, DOT.java.html,
GEMM.html, GEMM.java.html, IAXPY.html, IAXPY.java.html, IDOT.html, IDOT.java.html, IGEMM.html, IGEMM.java.html,
index.html, index.source.html. Coverage figures are identical before and after; only the trailing non-breaking
space and the missing end-of-file newline differ, and index.source.html swaps the order of the GEMM.java/IGEMM.java
and AXPY.java/IAXPY.java rows.

Package summary (unchanged):
  Total  253 of 5,735 instructions missed (95%), 8 of 288 branches missed (97%)
  DOT    119 of 595 missed (80%); the long[] and int[] unrolled04 overloads sit at 58%
  IDOT   119 of 299 missed (60%); the same two unrolled04 overloads at 58%
  GEMM     3 of 2,361 missed (99%); only the private GEMM() constructor is missed
  IGEMM    3 of 2,361 missed (99%); only the private IGEMM() constructor is missed
  AXPY     3 of 49 missed (93%); only the private AXPY() constructor is missed
  IAXPY    3 of 49 missed (93%); only the private IAXPY() constructor is missed
  COPY     3 of 21 missed (85%); only the private COPY() constructor is missed
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.operations.linear.internal.opencl/CLGEMM.html b/docs/coverage/test/html/neureka.backend.main.operations.linear.internal.opencl/CLGEMM.html index 3adf5535a..25c0b0d88 100644 --- a/docs/coverage/test/html/neureka.backend.main.operations.linear.internal.opencl/CLGEMM.html +++ b/docs/coverage/test/html/neureka.backend.main.operations.linear.internal.opencl/CLGEMM.html @@ -1 +1 @@ -CLGEMM

CLGEMM

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total18 of 32094%4 of 1877%51413215
run(ExecutionCall)1321794%41071%4812601
lambda$run$1()50%n/a111111
lambda$run$0(int, int, int, int, int, int, String)81100%4100%030501
CLGEMM()3100%n/a010101
static {...}100%n/a010101
\ No newline at end of file +CLGEMM

CLGEMM

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total28 of 32091%7 of 1861%81413215
run(ExecutionCall)1321794%5964%5812601
lambda$run$0(int, int, int, int, int, int, String)107187%2250%230501
lambda$run$1()50%n/a111111
CLGEMM()3100%n/a010101
static {...}100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.operations.linear.internal.opencl/CLGEMM.java.html b/docs/coverage/test/html/neureka.backend.main.operations.linear.internal.opencl/CLGEMM.java.html index a6037b85e..1b5d2f70a 100644 --- a/docs/coverage/test/html/neureka.backend.main.operations.linear.internal.opencl/CLGEMM.java.html +++ b/docs/coverage/test/html/neureka.backend.main.operations.linear.internal.opencl/CLGEMM.java.html @@ -34,7 +34,7 @@ for ( int s : new int[]{16,8,4,2,1} ) if ( M % s == 0 ) { MW = s; break; } for ( int s : new int[]{8,4,2,1} ) - if ( N % s == 0 && K % s == 0 ) { KW = s; break; } + if ( N % s == 0 && K % s == 0 ) { KW = s; break; } int NW = KW; @@ -49,8 +49,8 @@ " #define KW "+ finalKW +" // K tile Width \n" + " #define MT "+(int)Math.floor(M/ finalMW)+" // MT is max for 'mt' (M tile count) \n" + " #define KT "+(int)Math.floor(K/ finalKW)+" // KT is max for 'kt' (K tile count) \n" + - " #define floatMW "+(finalMW != 1 ? "float"+ finalMW : "float")+" \n" + - " #define floatKW "+(finalKW != 1 ? "float"+ finalKW : "float")+" \n" + + " #define floatMW "+(finalMW != 1 ? "float"+ finalMW : "float")+" \n" + + " #define floatKW "+(finalKW != 1 ? "float"+ finalKW : "float")+" \n" + " __kernel void "+kernelName+"( \n" + " const __global floatMW* restrict A, \n" + " const __global floatKW* restrict B, \n" + @@ -100,4 +100,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.operations.linear.internal.opencl/CLReduce$Type.html b/docs/coverage/test/html/neureka.backend.main.operations.linear.internal.opencl/CLReduce$Type.html index 909be9c9e..b7b44fd05 100644 --- a/docs/coverage/test/html/neureka.backend.main.operations.linear.internal.opencl/CLReduce$Type.html +++ b/docs/coverage/test/html/neureka.backend.main.operations.linear.internal.opencl/CLReduce$Type.html @@ -1 +1 @@ -CLReduce.Type

CLReduce.Type

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 15100%0 of 0n/a010101
static {...}15100%n/a010101
\ No newline at end of file +CLReduce.Type

CLReduce.Type

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 24100%0 of 0n/a010101
static {...}24100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.operations.linear.internal.opencl/CLReduce.html b/docs/coverage/test/html/neureka.backend.main.operations.linear.internal.opencl/CLReduce.html index 1567c6646..9bd48823c 100644 --- a/docs/coverage/test/html/neureka.backend.main.operations.linear.internal.opencl/CLReduce.html +++ b/docs/coverage/test/html/neureka.backend.main.operations.linear.internal.opencl/CLReduce.html @@ -1 +1 @@ -CLReduce

CLReduce

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total64 of 37682%11 of 2556%112155518
_runRecursively(Tensor, OpenCLDevice)2513784%4450%3532501
CLReduce(CLReduce.Type)122062%1266%131801
lambda$_fetch$2(String)120%n/a111111
_fetch(Tensor, Tensor, OpenCLDevice)85988%1150%1211101
run(ExecutionCall)76289%5758%570801
lambda$_runRecursively$0(long, String)20100%n/a010101
lambda$_fetch$1(String)11100%n/a010101
static {...}3100%n/a010101
\ No newline at end of file +CLReduce

CLReduce

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total96 of 37674%13 of 2548%132165528
_runRecursively(Tensor, OpenCLDevice)2513784%4450%3532501
lambda$_runRecursively$0(long, String)200%n/a111111
run(ExecutionCall)195072%7541%670801
CLReduce(CLReduce.Type)122062%1266%131801
lambda$_fetch$2(String)120%n/a111111
_fetch(Tensor, Tensor, OpenCLDevice)85988%1150%1211101
lambda$_fetch$1(String)11100%n/a010101
static {...}3100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.operations.linear.internal.opencl/CLReduce.java.html b/docs/coverage/test/html/neureka.backend.main.operations.linear.internal.opencl/CLReduce.java.html index eb0a1250a..06708f301 100644 --- a/docs/coverage/test/html/neureka.backend.main.operations.linear.internal.opencl/CLReduce.java.html +++ b/docs/coverage/test/html/neureka.backend.main.operations.linear.internal.opencl/CLReduce.java.html @@ -35,7 +35,7 @@ public Tensor<Integer> run(ExecutionCall<OpenCLDevice> call) { CLBackend context = Neureka.get().backend().find(CLBackend.class).orElse(null); CLSettings settings = context == null ? null : context.getSettings(); - boolean autoConvert = context == null || settings.isAutoConvertToFloat(); + boolean autoConvert = context == null || settings.isAutoConvertToFloat(); if ( settings != null ) settings.setAutoConvertToFloat(false); Tensor<Float> in = call.input(0) == null ? call.input(Float.class, 1) : call.input(Float.class, 0); int index = _runRecursively(in, call.getDevice()); @@ -67,7 +67,7 @@ String kernelName = "fast_"+_type.name().toLowerCase()+"_reduce_RTS"+RTS; Supplier<String> code = () -> - " #define RTS "+RTS+" \n" + + " #define RTS "+RTS+" \n" + " __kernel void "+kernelName+"( \n" + " const int size, \n" + " const __global float* in, \n" + @@ -151,4 +151,4 @@ } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.operations.linear.internal.opencl/CLSum.html b/docs/coverage/test/html/neureka.backend.main.operations.linear.internal.opencl/CLSum.html index 04336f49e..284caf499 100644 --- a/docs/coverage/test/html/neureka.backend.main.operations.linear.internal.opencl/CLSum.html +++ b/docs/coverage/test/html/neureka.backend.main.operations.linear.internal.opencl/CLSum.html @@ -1 +1 @@ -CLSum

CLSum

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total4 of 21098%1 of 887%11113407
run(Tensor, OpenCLDevice)414097%1787%1512401
_processPrivate(long, OpenCLDevice)18100%n/a010301
lambda$_processPrivate$0(long, String)15100%n/a010101
lambda$_processLocal$1(String)11100%n/a010101
_processLocal(OpenCLDevice)10100%n/a010301
run(ExecutionCall)9100%n/a010101
CLSum()3100%n/a010101
\ No newline at end of file +CLSum

CLSum

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total30 of 21085%1 of 887%31133427
lambda$_processPrivate$0(long, String)150%n/a111111
lambda$_processLocal$1(String)110%n/a111111
run(Tensor, OpenCLDevice)414097%1787%1512401
_processPrivate(long, OpenCLDevice)18100%n/a010301
_processLocal(OpenCLDevice)10100%n/a010301
run(ExecutionCall)9100%n/a010101
CLSum()3100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.operations.linear.internal.opencl/CLSum.java.html b/docs/coverage/test/html/neureka.backend.main.operations.linear.internal.opencl/CLSum.java.html index 4b22bd383..1b302d78d 100644 --- a/docs/coverage/test/html/neureka.backend.main.operations.linear.internal.opencl/CLSum.java.html +++ b/docs/coverage/test/html/neureka.backend.main.operations.linear.internal.opencl/CLSum.java.html @@ -67,7 +67,7 @@ { String kernelName = "fast_private_sum_reduction_RTS"+RTS; Supplier<String> code = () -> - " #define RTS "+RTS+" \n" + + " #define RTS "+RTS+" \n" + " __kernel void "+kernelName+"( \n" + " const int size, \n" + " const __global float* in, \n" + @@ -95,7 +95,7 @@ ) { String kernelName = "fast_local_mem_based_sum"; Supplier<String> code = () -> - " \n" + + " \n" + " int div(int i) { return i % 2 == 1 ? (i+1) / 2 : i / 2; } \n" + " \n" + " __kernel void "+kernelName+" ( \n" + @@ -132,4 +132,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.operations.linear.internal.opencl/index.html b/docs/coverage/test/html/neureka.backend.main.operations.linear.internal.opencl/index.html index e9828a375..f4f55bd92 100644 --- a/docs/coverage/test/html/neureka.backend.main.operations.linear.internal.opencl/index.html +++ b/docs/coverage/test/html/neureka.backend.main.operations.linear.internal.opencl/index.html @@ -1 +1 @@ -neureka.backend.main.operations.linear.internal.opencl
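The only CLSum.java logic visible in the hunk above is the div helper used by the fast_local_mem_based_sum kernel: it returns the ceiling of i/2, so an odd-length reduction still shrinks toward a single element. Below is a minimal host-side sketch of that halving pattern in plain Java; it is an assumption about the kernel's overall tree-reduction shape, for illustration only, not the actual OpenCL code.

// Host-side sketch of a pairwise tree reduction that halves the problem
// size with ceil(i/2), mirroring the div helper shown in the CLSum hunk.
// Illustrative assumption about the kernel's pattern, not the real kernel.
final class TreeSumSketch {
    static int div(int i) { return i % 2 == 1 ? (i + 1) / 2 : i / 2; }

    static float sum(float[] in) {
        float[] buf = in.clone();
        int size = buf.length;
        while (size > 1) {
            int half = div(size);
            for (int k = 0; k < half; k++) {
                int partner = k + half;                 // element paired with k
                if (partner < size) buf[k] += buf[partner];
            }
            size = half;                                // shrink by ~2x per pass
        }
        return buf[0];
    }

    public static void main(String[] args) {
        System.out.println(sum(new float[]{1, 2, 3, 4, 5})); // 15.0
    }
}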

neureka.backend.main.operations.linear.internal.opencl

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total86 of 92190%16 of 5168%1747712222104
CLReduce6431282%111456%11215551801
CLGEMM1830294%41477%5141321501
CLSum420698%1787%1111340701
CLReduce.Type15100%n/a01010101
\ No newline at end of file +neureka.backend.main.operations.linear.internal.opencl

neureka.backend.main.operations.linear.internal.opencl

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total154 of 93083%21 of 5158%24471012252104
CLReduce9628074%131248%13216552801
CLSum3018085%1787%3113342701
CLGEMM2829291%71161%8141321501
CLReduce.Type24100%n/a01010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.operations.linear.internal.opencl/index.source.html b/docs/coverage/test/html/neureka.backend.main.operations.linear.internal.opencl/index.source.html index 71f927b16..a14d94375 100644 --- a/docs/coverage/test/html/neureka.backend.main.operations.linear.internal.opencl/index.source.html +++ b/docs/coverage/test/html/neureka.backend.main.operations.linear.internal.opencl/index.source.html @@ -1 +1 @@ -neureka.backend.main.operations.linear.internal.opencl

neureka.backend.main.operations.linear.internal.opencl

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total86 of 92190%16 of 5168%1747712222104
CLReduce.java6432783%111456%11225561902
CLGEMM.java1830294%41477%5141321501
CLSum.java420698%1787%1111340701
\ No newline at end of file +neureka.backend.main.operations.linear.internal.opencl

neureka.backend.main.operations.linear.internal.opencl

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total154 of 93083%21 of 5158%24471012252104
CLReduce.java9630476%131248%13226562902
CLSum.java3018085%1787%3113342701
CLGEMM.java2829291%71161%8141321501
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.operations.linear/Convolution.html b/docs/coverage/test/html/neureka.backend.main.operations.linear/Convolution.html index 627d24e47..3baf1460f 100644 --- a/docs/coverage/test/html/neureka.backend.main.operations.linear/Convolution.html +++ b/docs/coverage/test/html/neureka.backend.main.operations.linear/Convolution.html @@ -1 +1 @@ -Convolution

Convolution

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total117 of 47775%25 of 5453%20381274111
lambda$new$3(Function, ExecutionCall)598157%141246%111451701
reducePairwise(Function)33921%3125%234701
lambda$new$0(ExecutionCall)182255%5337%352601
calculate(double[], int, int, Function[])70%n/a111111
execute(Function, ExecutionCall)132100%21285%2801401
lambda$new$5(ExecutionCall)47100%1150%120801
Convolution()35100%n/a0101401
lambda$new$2(Function, Tensor, Shape, Number, ADTarget)23100%n/a010301
lambda$new$4(Function, ExecutionCall)7100%n/a010201
lambda$new$1(ExecutionCall)3100%n/a010101
static {...}100%n/a010101
\ No newline at end of file +Convolution

Convolution

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total117 of 47775%25 of 5453%20381274111
lambda$new$3(Function, ExecutionCall)598157%141246%111451701
reducePairwise(Function)33921%3125%234701
lambda$new$0(ExecutionCall)182255%5337%352601
calculate(double[], int, int, Function[])70%n/a111111
execute(Function, ExecutionCall)132100%21285%2801401
lambda$new$5(ExecutionCall)47100%1150%120801
Convolution()35100%n/a0101401
lambda$new$2(Function, Tensor, Shape, Number, ADTarget)23100%n/a010301
lambda$new$4(Function, ExecutionCall)7100%n/a010201
lambda$new$1(ExecutionCall)3100%n/a010101
static {...}100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.operations.linear/Convolution.java.html b/docs/coverage/test/html/neureka.backend.main.operations.linear/Convolution.java.html index ad996c066..c19b0db75 100644 --- a/docs/coverage/test/html/neureka.backend.main.operations.linear/Convolution.java.html +++ b/docs/coverage/test/html/neureka.backend.main.operations.linear/Convolution.java.html @@ -153,4 +153,4 @@ @Override public double calculate( double[] inputs, int j, int d, Function[] src ) { return src[ 0 ].call( inputs, j ); } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.operations.linear/DotProduct.html b/docs/coverage/test/html/neureka.backend.main.operations.linear/DotProduct.html index 68054c0a5..149912926 100644 --- a/docs/coverage/test/html/neureka.backend.main.operations.linear/DotProduct.html +++ b/docs/coverage/test/html/neureka.backend.main.operations.linear/DotProduct.html @@ -1 +1 @@ -DotProduct

DotProduct

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total4 of 3287%0 of 0n/a1211212
calculate(double[], int, int, Function[])40%n/a111111
DotProduct()28100%n/a0101101
\ No newline at end of file +DotProduct

DotProduct

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total4 of 3287%0 of 0n/a1211212
calculate(double[], int, int, Function[])40%n/a111111
DotProduct()28100%n/a0101101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.operations.linear/DotProduct.java.html b/docs/coverage/test/html/neureka.backend.main.operations.linear/DotProduct.java.html index 80c245b4e..b87315953 100644 --- a/docs/coverage/test/html/neureka.backend.main.operations.linear/DotProduct.java.html +++ b/docs/coverage/test/html/neureka.backend.main.operations.linear/DotProduct.java.html @@ -28,4 +28,4 @@ throw new UnsupportedOperationException(); } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.operations.linear/MatMul.html b/docs/coverage/test/html/neureka.backend.main.operations.linear/MatMul.html index 0c5cfcb1f..a9a9e95d5 100644 --- a/docs/coverage/test/html/neureka.backend.main.operations.linear/MatMul.html +++ b/docs/coverage/test/html/neureka.backend.main.operations.linear/MatMul.html @@ -1 +1 @@ -MatMul

MatMul

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total7 of 3580%0 of 0n/a1211212
calculate(double[], int, int, Function[])70%n/a111111
MatMul()28100%n/a0101101
\ No newline at end of file +MatMul

MatMul

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total73 of 12039%4 of 633%4792514
execute(Function, ExecutionCall)331023%1150%124601
reducePairwise(Function)33921%3125%234701
calculate(double[], int, int, Function[])70%n/a111111
MatMul()28100%n/a0101101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.operations.linear/MatMul.java.html b/docs/coverage/test/html/neureka.backend.main.operations.linear/MatMul.java.html index 2f367da2f..c23a3339c 100644 --- a/docs/coverage/test/html/neureka.backend.main.operations.linear/MatMul.java.html +++ b/docs/coverage/test/html/neureka.backend.main.operations.linear/MatMul.java.html @@ -1,31 +1,68 @@ MatMul.java

MatMul.java

package neureka.backend.main.operations.linear;
 
+import neureka.Neureka;
+import neureka.backend.api.ExecutionCall;
+import neureka.backend.api.Result;
+import neureka.backend.api.template.algorithms.AbstractDeviceAlgorithm;
 import neureka.backend.api.template.operations.AbstractOperation;
 import neureka.backend.api.template.operations.OperationBuilder;
 import neureka.backend.main.algorithms.MatMulAlgorithm;
 import neureka.math.Function;
+import neureka.math.args.Arg;
+import neureka.math.parsing.FunctionParser;
 
 public class MatMul extends AbstractOperation
 {
     public MatMul()
     {
-        super(
+        super(
             new OperationBuilder()
-                .identifier(       "matMul"    )
-                .operator(         "@"         )
-                .arity(            2           )
-                .isOperator(       true        )
-                .isIndexer(        false       )
-                .isDifferentiable( true        )
-                .isInline(         false       )
+                .identifier(       "matMul"    )
+                .operator(         "@"         )
+                .arity(            2           )
+                .isOperator(       true        )
+                .isIndexer(        false       )
+                .isDifferentiable( true        )
+                .isInline(         false       )
         );
 
-        setAlgorithm(
-            new MatMulAlgorithm().buildFunAlgorithm()
+        setAlgorithm(
+            new MatMulAlgorithm().buildFunAlgorithm()
         );
-    }
+    }
 
     @Override
-    public double calculate( double[] inputs, int j, int d, Function[] src ) { return src[ 0 ].call( inputs, j ); }
+    public Result execute( final Function caller, final ExecutionCall<?> call )
+    {
+        if ( !caller.isFlat() ) {
+            Function reducedCaller = reducePairwise(caller);
+            ExecutionCall<?> flatCall = AbstractDeviceAlgorithm.flatten( reducedCaller, call.withArgs(Arg.DerivIdx.of(-1)) );
+            Function flat = new FunctionParser(Neureka.get().backend()).parse( flatCall.getOperation(), flatCall.arity(), true );
+            return super.execute( flat, flatCall );
+        }
+        return super.execute( reducePairwise(caller), call );
+    }
+
+    private Function reducePairwise( final Function fun ) {
+        Function reduced = fun;
+        if ( reduced.getSubFunctions().size() > 2 ) {
+            /*
+                So currently we have something like this: a@b@c@d...
+                However, this is how it is really executed:  ((((a@b)@c)@d)..)
+                ...so let's create a function that is nested like the above:
+            */
+            Function nested = reduced.getSubFunctions().get(0);
+            for ( int i = 1; i < reduced.getSubFunctions().size(); i++ )
+                nested = Function.of( nested + " @ " + reduced.getSubFunctions().get(i), true );
+
+            reduced = nested;
+        }
+        return reduced;
+    }
+
+    @Override
+    public double calculate( double[] inputs, int j, int d, Function[] src ) {
+        return src[ 0 ].call( inputs, j );
+    }
 }
-
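The new MatMul.execute above rewrites a non-flat call such as a @ b @ c @ d into explicitly left-nested binary products before handing it to the flat execution path, exactly as the comment in reducePairwise describes. A standalone sketch of that left-associative pairwise reduction, using plain strings instead of neureka Function objects (class and method names below are illustrative only):

import java.util.Arrays;
import java.util.List;

// Left-associative pairwise reduction: ["a","b","c","d"] -> "(((a @ b) @ c) @ d)".
// Plain-string stand-in for MatMul.reducePairwise, which builds the same
// nesting out of neureka Function objects via Function.of(...).
final class PairwiseReductionSketch {
    static String reducePairwise(List<String> operands) {
        String nested = operands.get(0);
        for (int i = 1; i < operands.size(); i++)
            nested = "(" + nested + " @ " + operands.get(i) + ")";
        return nested;
    }

    public static void main(String[] args) {
        System.out.println(reducePairwise(Arrays.asList("a", "b", "c", "d")));
        // prints: (((a @ b) @ c) @ d)
    }
}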
\ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.operations.linear/XConvLeft.html b/docs/coverage/test/html/neureka.backend.main.operations.linear/XConvLeft.html index 18f2ba0b3..1be2a96f1 100644 --- a/docs/coverage/test/html/neureka.backend.main.operations.linear/XConvLeft.html +++ b/docs/coverage/test/html/neureka.backend.main.operations.linear/XConvLeft.html @@ -1 +1 @@ -XConvLeft

XConvLeft

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total7 of 7290%0 of 4100%1511813
calculate(double[], int, int, Function[])70%n/a111111
stringify(String[])39100%4100%030601
XConvLeft()26100%n/a0101101
\ No newline at end of file +XConvLeft

XConvLeft

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total7 of 7290%0 of 4100%1511813
calculate(double[], int, int, Function[])70%n/a111111
stringify(String[])39100%4100%030601
XConvLeft()26100%n/a0101101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.operations.linear/XConvLeft.java.html b/docs/coverage/test/html/neureka.backend.main.operations.linear/XConvLeft.java.html index 2d1f4202f..67d094ff7 100644 --- a/docs/coverage/test/html/neureka.backend.main.operations.linear/XConvLeft.java.html +++ b/docs/coverage/test/html/neureka.backend.main.operations.linear/XConvLeft.java.html @@ -42,4 +42,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.operations.linear/XConvRight.html b/docs/coverage/test/html/neureka.backend.main.operations.linear/XConvRight.html index 6aab209c0..35390143d 100644 --- a/docs/coverage/test/html/neureka.backend.main.operations.linear/XConvRight.html +++ b/docs/coverage/test/html/neureka.backend.main.operations.linear/XConvRight.html @@ -1 +1 @@ -XConvRight

XConvRight

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total2 of 6797%0 of 4100%1511813
calculate(double[], int, int, Function[])20%n/a111111
stringify(String[])39100%4100%030601
XConvRight()26100%n/a0101101
\ No newline at end of file +XConvRight

XConvRight

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total2 of 6797%0 of 4100%1511813
calculate(double[], int, int, Function[])20%n/a111111
stringify(String[])39100%4100%030601
XConvRight()26100%n/a0101101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.operations.linear/XConvRight.java.html b/docs/coverage/test/html/neureka.backend.main.operations.linear/XConvRight.java.html index 4a08ab9d5..b56fa2616 100644 --- a/docs/coverage/test/html/neureka.backend.main.operations.linear/XConvRight.java.html +++ b/docs/coverage/test/html/neureka.backend.main.operations.linear/XConvRight.java.html @@ -42,4 +42,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.operations.linear/index.html b/docs/coverage/test/html/neureka.backend.main.operations.linear/index.html index 0010ef015..574ab6cf6 100644 --- a/docs/coverage/test/html/neureka.backend.main.operations.linear/index.html +++ b/docs/coverage/test/html/neureka.backend.main.operations.linear/index.html @@ -1 +1 @@ -neureka.backend.main.operations.linear

neureka.backend.main.operations.linear

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total137 of 68379%25 of 6259%24521613452105
Convolution11736075%252953%2038127411101
XConvLeft76590%4100%151181301
MatMul72880%n/a121121201
DotProduct42887%n/a121121201
XConvRight6597%4100%151181301
\ No newline at end of file +neureka.backend.main.operations.linear

neureka.backend.main.operations.linear

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total203 of 76873%29 of 6857%27572414752305
Convolution11736075%252953%2038127411101
MatMul734739%4233%479251401
XConvLeft76590%4100%151181301
DotProduct42887%n/a121121201
XConvRight6597%4100%151181301
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.operations.linear/index.source.html b/docs/coverage/test/html/neureka.backend.main.operations.linear/index.source.html index 9b13d328b..9635ec57a 100644 --- a/docs/coverage/test/html/neureka.backend.main.operations.linear/index.source.html +++ b/docs/coverage/test/html/neureka.backend.main.operations.linear/index.source.html @@ -1 +1 @@ -neureka.backend.main.operations.linear

neureka.backend.main.operations.linear

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total137 of 68379%25 of 6259%24521613452105
Convolution.java11736075%252953%2038127411101
XConvLeft.java76590%4100%151181301
MatMul.java72880%n/a121121201
DotProduct.java42887%n/a121121201
XConvRight.java6597%4100%151181301
\ No newline at end of file +neureka.backend.main.operations.linear

neureka.backend.main.operations.linear

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total203 of 76873%29 of 6857%27572414752305
Convolution.java11736075%252953%2038127411101
MatMul.java734739%4233%479251401
XConvLeft.java76590%4100%151181301
DotProduct.java42887%n/a121121201
XConvRight.java6597%4100%151181301
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.operations.operator/Addition.html b/docs/coverage/test/html/neureka.backend.main.operations.operator/Addition.html index 4eb09e475..2b033e170 100644 --- a/docs/coverage/test/html/neureka.backend.main.operations.operator/Addition.html +++ b/docs/coverage/test/html/neureka.backend.main.operations.operator/Addition.html @@ -1 +1 @@ -Addition

Addition

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total29 of 68295%12 of 6681%11523121019
execute(Function, ExecutionCall)1823292%71770%61323801
lambda$new$3(Function, ExecutionCall)91664%2250%231501
lambda$new$4(Function, ExecutionCall)3995%3770%360601
calculate(double[], int, int, Function[])65100%8100%0501101
Addition()57100%n/a0102001
calculate(double[], int, Function[])55100%6100%0401001
reducePairwise(Function)42100%4100%030701
lambda$_autogradBroadcast$6(Device, Tensor, Tensor, int, ADTarget)39100%n/a010901
_autogradBroadcast(ExecutionCall)31100%2100%020501
asDerivative(Function[], int)25100%n/a010501
lambda$execute$7(int, int, Function)17100%8100%050101
lambda$execute$8(Function, int, int)8100%n/a010101
lambda$new$5(Function, ExecutionCall)8100%n/a010201
lambda$new$1(Function, ExecutionCall)5100%n/a010101
lambda$asDerivative$10(int, Function)4100%n/a010101
lambda$asDerivative$9(int, Function)4100%n/a010101
lambda$new$0(ExecutionCall)3100%n/a010101
lambda$new$2(ExecutionCall)100%n/a010101
static {...}100%n/a010101
\ No newline at end of file +Addition

Addition

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total29 of 68295%12 of 6681%11523121019
execute(Function, ExecutionCall)1823292%71770%61323801
lambda$new$3(Function, ExecutionCall)91664%2250%231501
lambda$new$4(Function, ExecutionCall)3995%3770%360601
calculate(double[], int, int, Function[])65100%8100%0501101
Addition()57100%n/a0102001
calculate(double[], int, Function[])55100%6100%0401001
reducePairwise(Function)42100%4100%030701
lambda$_autogradBroadcast$6(Device, Tensor, Tensor, int, ADTarget)39100%n/a010901
_autogradBroadcast(ExecutionCall)31100%2100%020501
asDerivative(Function[], int)25100%n/a010501
lambda$execute$7(int, int, Function)17100%8100%050101
lambda$execute$8(Function, int, int)8100%n/a010101
lambda$new$5(Function, ExecutionCall)8100%n/a010201
lambda$new$1(Function, ExecutionCall)5100%n/a010101
lambda$asDerivative$10(int, Function)4100%n/a010101
lambda$asDerivative$9(int, Function)4100%n/a010101
lambda$new$0(ExecutionCall)3100%n/a010101
lambda$new$2(ExecutionCall)100%n/a010101
static {...}100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.operations.operator/Addition.java.html b/docs/coverage/test/html/neureka.backend.main.operations.operator/Addition.java.html index 0233bee25..0f99e30b1 100644 --- a/docs/coverage/test/html/neureka.backend.main.operations.operator/Addition.java.html +++ b/docs/coverage/test/html/neureka.backend.main.operations.operator/Addition.java.html @@ -237,4 +237,4 @@ } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.operations.operator/Division.html b/docs/coverage/test/html/neureka.backend.main.operations.operator/Division.html index 856549647..8b6c8cc52 100644 --- a/docs/coverage/test/html/neureka.backend.main.operations.operator/Division.html +++ b/docs/coverage/test/html/neureka.backend.main.operations.operator/Division.html @@ -1 +1 @@ -Division

Division

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total74 of 75990%11 of 5680%13455122317
_asDerivative(Function[], int, int)308974%5758%4721201
lambda$new$1(Function, Tensor, ADTarget)140%n/a111111
execute(Function, ExecutionCall)1015994%41680%41112901
lambda$new$3(Function, ExecutionCall)103075%2250%232901
lambda$new$6(Function, ExecutionCall)80%n/a111111
lambda$new$5(ExecutionCall)20%n/a111111
calculate(double[], int, int, Function[])99100%8100%0501501
calculate(double[], int, Function[])90100%6100%0401601
_deriveB(ExecutionCall, Function, boolean, Tensor, Tensor, Tensor)69100%2100%020901
Division()60100%n/a0102101
reducePairwise(Function)42100%4100%030701
lambda$new$2(Function, Tensor, ADTarget)14100%n/a010101
lambda$new$0(ExecutionCall)10100%n/a010301
asDerivative(Function[], int)9100%n/a010101
lambda$execute$8(Tensor, Tensor)6100%n/a010101
lambda$execute$7(Tensor)6100%n/a010101
lambda$new$4(ExecutionCall)2100%n/a010101
\ No newline at end of file +Division

Division

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total74 of 75990%11 of 5680%13455122317
_asDerivative(Function[], int, int)308974%5758%4721201
lambda$new$1(Function, Tensor, ADTarget)140%n/a111111
execute(Function, ExecutionCall)1015994%41680%41112901
lambda$new$3(Function, ExecutionCall)103075%2250%232901
lambda$new$6(Function, ExecutionCall)80%n/a111111
lambda$new$5(ExecutionCall)20%n/a111111
calculate(double[], int, int, Function[])99100%8100%0501501
calculate(double[], int, Function[])90100%6100%0401601
_deriveB(ExecutionCall, Function, boolean, Tensor, Tensor, Tensor)69100%2100%020901
Division()60100%n/a0102101
reducePairwise(Function)42100%4100%030701
lambda$new$2(Function, Tensor, ADTarget)14100%n/a010101
lambda$new$0(ExecutionCall)10100%n/a010301
asDerivative(Function[], int)9100%n/a010101
lambda$execute$8(Tensor, Tensor)6100%n/a010101
lambda$execute$7(Tensor)6100%n/a010101
lambda$new$4(ExecutionCall)2100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.operations.operator/Division.java.html b/docs/coverage/test/html/neureka.backend.main.operations.operator/Division.java.html index 1ca76f1c6..5c128efeb 100644 --- a/docs/coverage/test/html/neureka.backend.main.operations.operator/Division.java.html +++ b/docs/coverage/test/html/neureka.backend.main.operations.operator/Division.java.html @@ -258,4 +258,4 @@ } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.operations.operator/Modulo.html b/docs/coverage/test/html/neureka.backend.main.operations.operator/Modulo.html index 8ce2b2773..b12607e6e 100644 --- a/docs/coverage/test/html/neureka.backend.main.operations.operator/Modulo.html +++ b/docs/coverage/test/html/neureka.backend.main.operations.operator/Modulo.html @@ -1 +1 @@ -Modulo

Modulo

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total158 of 52870%13 of 4067%16352198615
lambda$new$3(Function, ExecutionCall)400%40%339911
reducePairwise(Function)33921%3125%234701
execute(Function, ExecutionCall)3219786%61672%61233701
lambda$new$2(Function, Tensor, ADTarget)140%n/a111111
lambda$new$1(Function, Tensor, ADTarget)140%n/a111111
lambda$new$5(ExecutionCall)100%n/a114411
lambda$new$6(Function, ExecutionCall)80%n/a111111
asDerivative(Function[], int)70%n/a111111
Modulo()60100%n/a0102101
calculate(double[], int, int, Function[])45100%6100%040801
calculate(double[], int, Function[])35100%4100%030701
lambda$new$0(ExecutionCall)10100%n/a010401
lambda$execute$8(Tensor, Tensor)6100%n/a010101
lambda$execute$7(Tensor)6100%n/a010101
lambda$new$4(ExecutionCall)2100%n/a010101
\ No newline at end of file +Modulo

Modulo

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total158 of 52870%13 of 4067%16352198615
lambda$new$3(Function, ExecutionCall)400%40%339911
reducePairwise(Function)33921%3125%234701
execute(Function, ExecutionCall)3219786%61672%61233701
lambda$new$2(Function, Tensor, ADTarget)140%n/a111111
lambda$new$1(Function, Tensor, ADTarget)140%n/a111111
lambda$new$5(ExecutionCall)100%n/a114411
lambda$new$6(Function, ExecutionCall)80%n/a111111
asDerivative(Function[], int)70%n/a111111
Modulo()60100%n/a0102101
calculate(double[], int, int, Function[])45100%6100%040801
calculate(double[], int, Function[])35100%4100%030701
lambda$new$0(ExecutionCall)10100%n/a010401
lambda$execute$8(Tensor, Tensor)6100%n/a010101
lambda$execute$7(Tensor)6100%n/a010101
lambda$new$4(ExecutionCall)2100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.operations.operator/Modulo.java.html b/docs/coverage/test/html/neureka.backend.main.operations.operator/Modulo.java.html index 8d051a2bf..81d1cae40 100644 --- a/docs/coverage/test/html/neureka.backend.main.operations.operator/Modulo.java.html +++ b/docs/coverage/test/html/neureka.backend.main.operations.operator/Modulo.java.html @@ -200,4 +200,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.operations.operator/Multiplication.html b/docs/coverage/test/html/neureka.backend.main.operations.operator/Multiplication.html index 474fe4c8f..61f161160 100644 --- a/docs/coverage/test/html/neureka.backend.main.operations.operator/Multiplication.html +++ b/docs/coverage/test/html/neureka.backend.main.operations.operator/Multiplication.html @@ -1 +1 @@ -Multiplication

Multiplication

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total83 of 67087%6 of 4686%84210113419
lambda$new$4(Function, ExecutionCall)430%40%339911
lambda$new$3(Function, Tensor, ADTarget)140%n/a111111
lambda$new$1(Function, Tensor, ADTarget)140%n/a111111
lambda$new$2(Function, ExecutionCall, int)60%n/a111111
execute(Function, ExecutionCall)59695%1583%1411901
lambda$asDerivative$11(int, Function[], Function)2996%1150%120601
derive(int[], Tensor[], Function)126100%12100%0701701
calculate(double[], int, int, Function[])89100%8100%0501501
calculate(double[], int, Function[])76100%6100%0401401
Multiplication()56100%n/a0101901
reducePairwise(Function)42100%4100%030701
lambda$execute$8(Function, ExecutionCall, Integer)22100%2100%020201
asDerivative(Function[], int)16100%n/a010501
lambda$execute$7(Function, int, int)8100%n/a010101
lambda$new$5(Function, ExecutionCall)8100%n/a010101
lambda$asDerivative$10(Function, Function)7100%2100%020101
lambda$execute$6(Tensor, Tensor)6100%n/a010101
lambda$asDerivative$9(int, Function)4100%n/a010101
lambda$new$0(ExecutionCall)2100%n/a010101
\ No newline at end of file +Multiplication

Multiplication

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total83 of 75789%8 of 6086%105010125420
lambda$new$4(Function, ExecutionCall)430%40%339911
lambda$new$3(Function, Tensor, ADTarget)140%n/a111111
lambda$new$1(Function, Tensor, ADTarget)140%n/a111111
lambda$new$2(Function, ExecutionCall, int)60%n/a111111
execute(Function, ExecutionCall)514896%21285%2812601
lambda$asDerivative$11(int, Function[], Function)2996%1150%120601
derive(int[], Tensor[], Function)126100%12100%0701701
calculate(double[], int, int, Function[])89100%8100%0501501
calculate(double[], int, Function[])76100%6100%0401401
Multiplication()56100%n/a0101901
reducePairwise(Function)42100%4100%030701
_deleteIfNotIn(Tensor[], Tensor)35100%1583%140501
lambda$execute$8(Function, ExecutionCall, Integer)22100%2100%020201
asDerivative(Function[], int)16100%n/a010501
lambda$execute$7(Function, int, int)8100%n/a010101
lambda$new$5(Function, ExecutionCall)8100%n/a010101
lambda$asDerivative$10(Function, Function)7100%2100%020101
lambda$execute$6(Tensor, Tensor)6100%n/a010101
lambda$asDerivative$9(int, Function)4100%n/a010101
lambda$new$0(ExecutionCall)2100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.operations.operator/Multiplication.java.html b/docs/coverage/test/html/neureka.backend.main.operations.operator/Multiplication.java.html index 6893edaad..2c40edb9c 100644 --- a/docs/coverage/test/html/neureka.backend.main.operations.operator/Multiplication.java.html +++ b/docs/coverage/test/html/neureka.backend.main.operations.operator/Multiplication.java.html @@ -3,6 +3,7 @@ import neureka.Neureka; import neureka.Tensor; import neureka.autograd.ADAction; +import neureka.autograd.GraphNode; import neureka.backend.api.AutoDiffMode; import neureka.backend.api.Call; import neureka.backend.api.ExecutionCall; @@ -29,200 +30,223 @@ { public Multiplication() { - super( + super( new OperationBuilder() - .identifier( "multiply" ) - .operator( "*" ) - .arity( -1 ) - .isOperator( true ) - .isIndexer( false ) - .isDifferentiable( true ) - .isInline( false ) + .identifier( "multiply" ) + .operator( "*" ) + .arity( -1 ) + .isOperator( true ) + .isIndexer( false ) + .isDifferentiable( true ) + .isInline( false ) ); - setAlgorithm( + setAlgorithm( BiElementwise.class, new BiElementwise() - .setSupplyADActionFor( getDefaultAlgorithm() ) - .buildFunAlgorithm() + .setSupplyADActionFor( getDefaultAlgorithm() ) + .buildFunAlgorithm() ); - setAlgorithm( + setAlgorithm( Broadcast.class, new Broadcast() - .setAutogradModeFor( call -> AutoDiffMode.BACKWARD_ONLY ) - .setSupplyADActionFor( + .setAutogradModeFor( call -> AutoDiffMode.BACKWARD_ONLY ) + .setSupplyADActionFor( ( Function f, ExecutionCall<? extends Device<?>> call ) -> { - if ( call.autogradMode().allowsForward() ) - throw new IllegalArgumentException("Broadcast implementation does not support forward-AD!"); - Tensor<?> ctxDerivative = (Tensor<?>) call.getValOf(Arg.Derivative.class); - Function mul = Neureka.get().backend().getFunction().mul(); - if ( ctxDerivative != null ) { - return ADAction.of( target -> mul.execute( target.error(), ctxDerivative ) ); + if ( call.autogradMode().allowsForward() ) + throw new IllegalArgumentException("Broadcast implementation does not support forward-AD!"); + Tensor<?> ctxDerivative = (Tensor<?>) call.getValOf(Arg.Derivative.class); + Function mul = Neureka.get().backend().getFunction().mul(); + if ( ctxDerivative != null ) { + return ADAction.of( target -> mul.execute( target.error(), ctxDerivative ) ); } - int d = call.getDerivativeIndex(); - Tensor<?> derivative = MemUtil.keep( call.inputs(), () -> f.executeDerive( call.inputs(), d ) ); - return ADAction.of( target -> mul.execute( target.error(), derivative ) ); + int d = call.getDerivativeIndex(); + Tensor<?> derivative = MemUtil.keep( call.inputs(), () -> f.executeDerive( call.inputs(), d ) ); + return ADAction.of( target -> mul.execute( target.error(), derivative ) ); } ) - .buildFunAlgorithm() + .buildFunAlgorithm() ); - setAlgorithm( + setAlgorithm( BiScalarBroadcast.class, new BiScalarBroadcast() - .setExecution( (caller, call) -> Result.of(AbstractDeviceAlgorithm.executeFor(caller, call, AbstractDeviceAlgorithm::executeDeviceAlgorithm)).withAutoDiff( FallbackAlgorithm::ADAction )) - .buildFunAlgorithm() + .setExecution( (caller, call) -> Result.of(AbstractDeviceAlgorithm.executeFor(caller, call, AbstractDeviceAlgorithm::executeDeviceAlgorithm)).withAutoDiff( FallbackAlgorithm::ADAction )) + .buildFunAlgorithm() ); - } + } @Override public Result execute( final Function caller, final ExecutionCall<?> call ) { - if ( !caller.isFlat() ) { - int d = call.getDerivativeIndex(); - 
if ( d < 0 ) { - Function reducedCaller = reducePairwise(caller); - ExecutionCall<?> flatCall = AbstractDeviceAlgorithm.flatten( reducedCaller, call.withArgs(Arg.DerivIdx.of(-1)) ); - Function flat = new FunctionParser(Neureka.get().backend()).parse( flatCall.getOperation(), flatCall.arity(), true ); - Result r = super.execute( flat, flatCall ); - //for ( int i = 0; i < flatCall.inputs().length; i++ ) - // _deleteIfNotIn(call.inputs(), flatCall.input(i)); // TODO: Make it possible to delete more stuff - return r; + int d = call.getDerivativeIndex(); + if ( !caller.isFlat() ) { + if ( d < 0 ) { + Function reducedCaller = reducePairwise(caller); + ExecutionCall<?> flatCall = AbstractDeviceAlgorithm.flatten( reducedCaller, call.withArgs(Arg.DerivIdx.of(-1)) ); + for ( Tensor<?> input : flatCall.inputs() ) + input.mut().setIsIntermediate( false ); + Function flat = new FunctionParser(Neureka.get().backend()).parse( flatCall.getOperation(), flatCall.arity(), true ); + Result r = super.execute( flat, flatCall ); + for ( int i = 0; i < flatCall.inputs().length; i++ ) + _deleteIfNotIn(call.inputs(), flatCall.input(i)); + return r; } else { - if ( !call.validate().all( (a, b) -> Util.canBeBroadcast(a.shape(), b.shape()) ).isValid() ) - throw new IllegalArgumentException("The shapes of the operands of the multiplication operation must be equal or broadcast compatible! (when deriving nested functions)"); + if ( !call.validate().all( (a, b) -> Util.canBeBroadcast(a.shape(), b.shape()) ).isValid() ) + throw new IllegalArgumentException("The shapes of the operands of the multiplication operation must be equal or broadcast compatible! (when deriving nested functions)"); - Function noAd = Function.of( caller.toString(), false ); - ExecutionCall<?> flatCall = AbstractDeviceAlgorithm.flatten( noAd, call.withArgs(Arg.DerivIdx.of(-1)) ); + Function noAd = Function.of( caller.toString(), false ); + ExecutionCall<?> flatCall = AbstractDeviceAlgorithm.flatten( noAd, call.withArgs(Arg.DerivIdx.of(-1)) ); - Tensor[] results = flatCall.inputs(); - Function finalCaller = caller; - int[] toBeDerived = IntStream.range(0,caller.getSubFunctions().size()) - .filter( i -> finalCaller.getSubFunctions().get(i).dependsOn(d) ) - .toArray(); + Tensor[] results = flatCall.inputs(); + Function finalCaller = caller; + int[] toBeDerived = IntStream.range(0,caller.getSubFunctions().size()) + .filter( i -> finalCaller.getSubFunctions().get(i).dependsOn(d) ) + .toArray(); - return derive( toBeDerived, results, i->{ - Function noAD = Function.of( caller.getSubFunctions().get( i ).toString(), false ); - return noAD.call( (Call) (noAD.getOperation() == null ? call : call.withOperation(noAD.getOperation())) ); + return derive( toBeDerived, results, i->{ + Function noAD = Function.of( caller.getSubFunctions().get( i ).toString(), false ); + return noAD.call( (Call) (noAD.getOperation() == null ? 
call : call.withOperation(noAD.getOperation())) ); } ); } } - return super.execute( reducePairwise(caller), call ); + + + Function reduced = reducePairwise(caller); + //ExecutionCall<?> flatCall = call; + //Function flat = caller; + //if ( d < 0 && caller.isFlat() && subFunctions.stream().anyMatch( f -> f instanceof FunctionConstant) ) { + // Function noAd = Function.of( caller.toString(), false ); + // ExecutionCall<?> flatCall = AbstractDeviceAlgorithm.flatten( noAd, call.withArgs(Arg.DerivIdx.of(-1)) ); + // return super.execute( reducePairwise(caller), call ); + //} + if ( reduced.equals(caller) && reduced.isFlat() ) + return super.execute( reduced, call ); + else + return this.execute( reduced, call ); } + private void _deleteIfNotIn( Tensor<?>[] inputs, Tensor<?> input ) { + for ( Tensor<?> i : inputs ) { + if ( i == input ) return; + } + if ( input.getGraphNode().map(GraphNode::canBeDeleted).orElse(true) ) + input.mut().delete(); + } + public static Result derive( int[] toBeDerived, Tensor[] results, java.util.function.Function<Integer, Tensor<?>> deriveAt ) { - Tensor[] derivatives = new Tensor[ toBeDerived.length ]; - Function mul = Neureka.get().backend().getFunction().mul(); - Function add = Neureka.get().backend().getFunction().add(); - Tensor<?> finalDerivative = null; - for ( int i = 0; i < derivatives.length; i++ ) { - Tensor<?> deriv = deriveAt.apply( toBeDerived[i] ); - derivatives[ i ] = deriv; - Tensor<?> localDeriv = null; - for ( int j = 0; j < results.length; j++ ) { + Tensor[] derivatives = new Tensor[ toBeDerived.length ]; + Function mul = Neureka.get().backend().getFunction().mul(); + Function add = Neureka.get().backend().getFunction().add(); + Tensor<?> finalDerivative = null; + for ( int i = 0; i < derivatives.length; i++ ) { + Tensor<?> deriv = deriveAt.apply( toBeDerived[i] ); + derivatives[ i ] = deriv; + Tensor<?> localDeriv = null; + for ( int j = 0; j < results.length; j++ ) { // Now we calculate the local derivatives of the multiplication operation: - if ( j == toBeDerived[i] ) { - if ( localDeriv == null ) localDeriv = derivatives[ i ]; - else localDeriv = mul.call( localDeriv, derivatives[ i ] ); + if ( j == toBeDerived[i] ) { + if ( localDeriv == null ) localDeriv = derivatives[ i ]; + else localDeriv = mul.call( localDeriv, derivatives[ i ] ); } else { - if ( localDeriv == null ) localDeriv = results[ j ].mut().setIsIntermediate(false); - else localDeriv = mul.call( localDeriv, results[ j ].mut().setIsIntermediate(false) ); + if ( localDeriv == null ) localDeriv = results[ j ].mut().setIsIntermediate(false); + else localDeriv = mul.call( localDeriv, results[ j ].mut().setIsIntermediate(false) ); } } - if ( finalDerivative == null ) finalDerivative = localDeriv; - else finalDerivative = add.call( (Tensor<Object>) finalDerivative, (Tensor<Object>) localDeriv ); + if ( finalDerivative == null ) finalDerivative = localDeriv; + else finalDerivative = add.call( (Tensor<Object>) finalDerivative, (Tensor<Object>) localDeriv ); } - return Result.of( finalDerivative.mut().setIsIntermediate(true) ); + return Result.of( finalDerivative.mut().setIsIntermediate(true) ); } private Function reducePairwise( final Function fun ) { - Function reduced = fun; - if ( reduced.getSubFunctions().size() > 2 ) { + Function reduced = fun; + if ( reduced.getSubFunctions().size() > 2 ) { /* So currently we have something like this: a*b*c*d... However, this is how it is really executed: ((((a*b)*c)*d)..) 
...so let's create a function that is nested like the above: */ - Function nested = reduced.getSubFunctions().get(0); - for ( int i = 1; i < reduced.getSubFunctions().size(); i++ ) - nested = Function.of( nested + " * " + reduced.getSubFunctions().get(i), true ); + Function nested = reduced.getSubFunctions().get(0); + for ( int i = 1; i < reduced.getSubFunctions().size(); i++ ) + nested = Function.of( nested + " * " + reduced.getSubFunctions().get(i), true ); - reduced = nested; + reduced = nested; } - return reduced; + return reduced; } @Override public String asDerivative( Function[] children, int derivationIndex) { - return Arrays.stream( children ) - .filter( child -> child.dependsOn(derivationIndex) ) - .map( child -> { - String derivative = child.getDerivative(derivationIndex).toString(); - return ( derivative.equals("1.0") ? "" : " * " ) + - Arrays.stream( children ) - .filter( inner -> inner != child ) - .map( Object::toString ) - .collect( Collectors.joining( " * " ) ); + return Arrays.stream( children ) + .filter( child -> child.dependsOn(derivationIndex) ) + .map( child -> { + String derivative = child.getDerivative(derivationIndex).toString(); + return ( derivative.equals("1.0") ? "" : " * " ) + + Arrays.stream( children ) + .filter( inner -> inner != child ) + .map( Object::toString ) + .collect( Collectors.joining( " * " ) ); } ) - .map( Object::toString ) - .collect( Collectors.joining( " + " ) ); + .map( Object::toString ) + .collect( Collectors.joining( " + " ) ); } @Override public double calculate( double[] inputs, int j, int d, Function[] src ) { - if ( j < 0 ) return calculate( inputs, d, src ); - if ( d < 0 ) { - double result = src[ 0 ].call( inputs, j ); - for ( int i = 1; i < src.length; i++ ) { - final double current = src[ i ].call( inputs, j ); - result *= current; + if ( j < 0 ) return calculate( inputs, d, src ); + if ( d < 0 ) { + double result = src[ 0 ].call( inputs, j ); + for ( int i = 1; i < src.length; i++ ) { + final double current = src[ i ].call( inputs, j ); + result *= current; } - return result; + return result; } else { double u, ud, v, vd; - u = src[ 0 ].call( inputs, j ); - ud = src[ 0 ].derive( inputs, d, j ); - - for ( int ji = 1; ji < src.length; ji++ ) { - v = src[ ji ].call( inputs, j ); - vd = src[ ji ].derive( inputs, d, j ); - ud = u * vd + v * ud; - u *= v; + u = src[ 0 ].call( inputs, j ); + ud = src[ 0 ].derive( inputs, d, j ); + + for ( int ji = 1; ji < src.length; ji++ ) { + v = src[ ji ].call( inputs, j ); + vd = src[ ji ].derive( inputs, d, j ); + ud = u * vd + v * ud; + u *= v; } - return ud; + return ud; } } public static double calculate( double[] inputs, int d, Function[] src ) { - if ( d < 0 ) { - double result = src[ 0 ].call( inputs ); - for ( int i = 1; i < src.length; i++ ) { - final double current = src[ i ].call( inputs ); - result *= current; + if ( d < 0 ) { + double result = src[ 0 ].call( inputs ); + for ( int i = 1; i < src.length; i++ ) { + final double current = src[ i ].call( inputs ); + result *= current; } - return result; + return result; } else { double u, ud, v, vd; - u = src[ 0 ].call( inputs ); - ud = src[ 0 ].derive( inputs, d ); - for ( int j = 1; j < src.length; j++ ) { - v = src[ j ].call( inputs ); - vd = src[ j ].derive( inputs, d ); - - ud = u * vd + v * ud; - u *= v; // ...this step can be avoided (TODO optimize) + u = src[ 0 ].call( inputs ); + ud = src[ 0 ].derive( inputs, d ); + for ( int j = 1; j < src.length; j++ ) { + v = src[ j ].call( inputs ); + vd = src[ j ].derive( inputs, d ); + + ud 
= u * vd + v * ud; + u *= v; // ...this step can be avoided (TODO optimize) } - return ud; + return ud; } } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.operations.operator/Power.html b/docs/coverage/test/html/neureka.backend.main.operations.operator/Power.html index b0416b127..023d28ec0 100644 --- a/docs/coverage/test/html/neureka.backend.main.operations.operator/Power.html +++ b/docs/coverage/test/html/neureka.backend.main.operations.operator/Power.html @@ -1 +1 @@ -Power
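Multiplication.calculate above differentiates an n-ary product with the running recurrence ud = u*vd + v*ud followed by u *= v, i.e. the product rule folded left over the operands. A small self-contained check of that recurrence against a closed-form derivative (plain doubles; the class and method names are illustrative):

// Verifies the folded product rule used in Multiplication.calculate:
// for f = u1*u2*...*un, accumulate ud = u*vd + v*ud and u *= v.
final class ProductRuleSketch {
    // values[i] = ui(x) and derivs[i] = ui'(x) at some fixed x
    static double foldedDerivative(double[] values, double[] derivs) {
        double u = values[0];
        double ud = derivs[0];
        for (int i = 1; i < values.length; i++) {
            double v = values[i];
            double vd = derivs[i];
            ud = u * vd + v * ud; // product rule applied to the pair (u, v)
            u *= v;               // running product of the factors seen so far
        }
        return ud;
    }

    public static void main(String[] args) {
        // f(x) = x * x^2 * x^3 = x^6, so f'(x) = 6*x^5; at x = 2 that is 192.
        double x = 2.0;
        double[] values = { x, x * x, x * x * x };
        double[] derivs = { 1.0, 2 * x, 3 * x * x };
        System.out.println(foldedDerivative(values, derivs)); // 192.0
    }
}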

Power

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total118 of 65782%11 of 5680%144212109514
lambda$new$3(Function, ExecutionCall)400%40%339911
asDerivative(Function[], int)3116484%41881%41212601
lambda$new$2(Function, Tensor, ADTarget)140%n/a111111
lambda$new$1(Function, Tensor, ADTarget)140%n/a111111
calculate(double[], int, Function[])611995%21083%2702101
lambda$new$6(Function, ExecutionCall)60%n/a112211
calculate(double[], int, int, Function[])513396%11392%1802201
lambda$new$5(ExecutionCall)20%n/a111111
Power()60100%n/a0102101
reducePairwise(Function)47100%4100%030701
execute(Function, ExecutionCall)7100%n/a010101
lambda$asDerivative$7(Function[], int)5100%n/a010101
lambda$new$4(ExecutionCall)2100%n/a010101
lambda$new$0(ExecutionCall)2100%n/a010101
\ No newline at end of file +Power

Power

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total118 of 65782%11 of 5680%144212109514
lambda$new$3(Function, ExecutionCall)400%40%339911
asDerivative(Function[], int)3116484%41881%41212601
lambda$new$2(Function, Tensor, ADTarget)140%n/a111111
lambda$new$1(Function, Tensor, ADTarget)140%n/a111111
calculate(double[], int, Function[])611995%21083%2702101
lambda$new$6(Function, ExecutionCall)60%n/a112211
calculate(double[], int, int, Function[])513396%11392%1802201
lambda$new$5(ExecutionCall)20%n/a111111
Power()60100%n/a0102101
reducePairwise(Function)47100%4100%030701
execute(Function, ExecutionCall)7100%n/a010101
lambda$asDerivative$7(Function[], int)5100%n/a010101
lambda$new$4(ExecutionCall)2100%n/a010101
lambda$new$0(ExecutionCall)2100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.operations.operator/Power.java.html b/docs/coverage/test/html/neureka.backend.main.operations.operator/Power.java.html index 684dcbe11..35fcdf51b 100644 --- a/docs/coverage/test/html/neureka.backend.main.operations.operator/Power.java.html +++ b/docs/coverage/test/html/neureka.backend.main.operations.operator/Power.java.html @@ -203,4 +203,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.operations.operator/Subtraction.html b/docs/coverage/test/html/neureka.backend.main.operations.operator/Subtraction.html index 58be821c7..efe421827 100644 --- a/docs/coverage/test/html/neureka.backend.main.operations.operator/Subtraction.html +++ b/docs/coverage/test/html/neureka.backend.main.operations.operator/Subtraction.html @@ -1 +1 @@ -Subtraction

Subtraction

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total57 of 55089%7 of 4484%7376101115
reducePairwise(Function)33921%3125%234701
lambda$new$4(Function, ExecutionCall)94382%2466%2411001
lambda$new$1(Function, ExecutionCall)80%n/a111111
execute(Function, ExecutionCall)516297%11392%1812301
asDerivative(Function[], int)22893%1150%120601
calculate(double[], int, int, Function[])72100%10100%0601301
calculate(double[], int, Function[])61100%8100%0501201
Subtraction()58100%n/a0102001
lambda$new$3(Device, Tensor, Tensor, int, ADTarget)39100%n/a010901
lambda$execute$5(Function, int, int)8100%n/a010101
lambda$asDerivative$7(int, Function)4100%n/a010101
lambda$asDerivative$6(int, Function)4100%n/a010101
lambda$new$2(ExecutionCall)2100%n/a010101
lambda$new$0(ExecutionCall)2100%n/a010101
static {...}100%n/a010101
\ No newline at end of file +Subtraction

Subtraction

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total57 of 55089%7 of 4484%7376101115
reducePairwise(Function)33921%3125%234701
lambda$new$4(Function, ExecutionCall)94382%2466%2411001
lambda$new$1(Function, ExecutionCall)80%n/a111111
execute(Function, ExecutionCall)516297%11392%1812301
asDerivative(Function[], int)22893%1150%120601
calculate(double[], int, int, Function[])72100%10100%0601301
calculate(double[], int, Function[])61100%8100%0501201
Subtraction()58100%n/a0102001
lambda$new$3(Device, Tensor, Tensor, int, ADTarget)39100%n/a010901
lambda$execute$5(Function, int, int)8100%n/a010101
lambda$asDerivative$7(int, Function)4100%n/a010101
lambda$asDerivative$6(int, Function)4100%n/a010101
lambda$new$2(ExecutionCall)2100%n/a010101
lambda$new$0(ExecutionCall)2100%n/a010101
static {...}100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.operations.operator/Subtraction.java.html b/docs/coverage/test/html/neureka.backend.main.operations.operator/Subtraction.java.html index 8a61dd849..4a3aae090 100644 --- a/docs/coverage/test/html/neureka.backend.main.operations.operator/Subtraction.java.html +++ b/docs/coverage/test/html/neureka.backend.main.operations.operator/Subtraction.java.html @@ -198,4 +198,4 @@ } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.operations.operator/Util.html b/docs/coverage/test/html/neureka.backend.main.operations.operator/Util.html index e1192047a..ded983228 100644 --- a/docs/coverage/test/html/neureka.backend.main.operations.operator/Util.html +++ b/docs/coverage/test/html/neureka.backend.main.operations.operator/Util.html @@ -1 +1 @@ -Util

Util

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total7 of 4785%3 of 1275%482812
canBeBroadcast(Shape, Shape)44090%3975%371701
Util()30%n/a111111
\ No newline at end of file +Util

Util

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total7 of 4785%3 of 1275%482812
canBeBroadcast(Shape, Shape)44090%3975%371701
Util()30%n/a111111
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.operations.operator/Util.java.html b/docs/coverage/test/html/neureka.backend.main.operations.operator/Util.java.html index ecb1b3bbd..eb5303d1c 100644 --- a/docs/coverage/test/html/neureka.backend.main.operations.operator/Util.java.html +++ b/docs/coverage/test/html/neureka.backend.main.operations.operator/Util.java.html @@ -16,4 +16,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.operations.operator/index.html b/docs/coverage/test/html/neureka.backend.main.operations.operator/index.html index 92d18384e..d74314e7c 100644 --- a/docs/coverage/test/html/neureka.backend.main.operations.operator/index.html +++ b/docs/coverage/test/html/neureka.backend.main.operations.operator/index.html @@ -1 +1 @@ -neureka.backend.main.operations.operator

neureka.backend.main.operations.operator

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total526 of 3,89386%63 of 32080%73261596722010107
Modulo15837070%132767%1635219861501
Power11853982%114580%14421210951401
Multiplication8358787%64086%8421011341901
Division7468590%114580%1345512231701
Subtraction5749389%73784%737610111501
Addition2965395%125481%1152312101901
Util74085%3975%48281201
\ No newline at end of file +neureka.backend.main.operations.operator

neureka.backend.main.operations.operator

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total526 of 3,98086%65 of 33480%75269596842010207
Modulo15837070%132767%1635219861501
Power11853982%114580%14421210951401
Multiplication8367489%85286%10501012542001
Division7468590%114580%1345512231701
Subtraction5749389%73784%737610111501
Addition2965395%125481%1152312101901
Util74085%3975%48281201
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.operations.operator/index.source.html b/docs/coverage/test/html/neureka.backend.main.operations.operator/index.source.html index f4cbb2bda..21b2cc735 100644 --- a/docs/coverage/test/html/neureka.backend.main.operations.operator/index.source.html +++ b/docs/coverage/test/html/neureka.backend.main.operations.operator/index.source.html @@ -1 +1 @@ -neureka.backend.main.operations.operator

neureka.backend.main.operations.operator

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total526 of 3,89386%63 of 32080%73261596722010107
Modulo.java15837070%132767%1635219861501
Power.java11853982%114580%14421210951401
Multiplication.java8358787%64086%8421011341901
Division.java7468590%114580%1345512231701
Subtraction.java5749389%73784%737610111501
Addition.java2965395%125481%1152312101901
Util.java74085%3975%48281201
\ No newline at end of file +neureka.backend.main.operations.operator

neureka.backend.main.operations.operator

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total526 of 3,98086%65 of 33480%75269596842010207
Modulo.java15837070%132767%1635219861501
Power.java11853982%114580%14421210951401
Multiplication.java8367489%85286%10501012542001
Division.java7468590%114580%1345512231701
Subtraction.java5749389%73784%737610111501
Addition.java2965395%125481%1152312101901
Util.java74085%3975%48281201
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.operations.other.internal/CPUReduce$Type.html b/docs/coverage/test/html/neureka.backend.main.operations.other.internal/CPUReduce$Type.html index 0c956d99e..adb1d39c9 100644 --- a/docs/coverage/test/html/neureka.backend.main.operations.other.internal/CPUReduce$Type.html +++ b/docs/coverage/test/html/neureka.backend.main.operations.other.internal/CPUReduce$Type.html @@ -1 +1 @@ -CPUReduce.Type

CPUReduce.Type

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total72 of 23168%6 of 4285%643626019
getFloatComparator()12942%1266%131401
getDoubleComparator()12942%1266%131401
getIntComparator()12942%1266%131401
getLongComparator()12942%1266%131401
getByteComparator()12942%1266%131401
getShortComparator()12942%1266%131401
static {...}15100%n/a010201
lambda$getLongComparator$7(long, long)8100%2100%020101
lambda$getLongComparator$6(long, long)8100%2100%020101
lambda$getDoubleComparator$3(double, double)8100%2100%020101
lambda$getDoubleComparator$2(double, double)8100%2100%020101
lambda$getFloatComparator$1(float, float)8100%2100%020101
lambda$getFloatComparator$0(float, float)8100%2100%020101
lambda$getShortComparator$11(short, short)7100%2100%020101
lambda$getShortComparator$10(short, short)7100%2100%020101
lambda$getByteComparator$9(byte, byte)7100%2100%020101
lambda$getByteComparator$8(byte, byte)7100%2100%020101
lambda$getIntComparator$5(int, int)7100%2100%020101
lambda$getIntComparator$4(int, int)7100%2100%020101
\ No newline at end of file +CPUReduce.Type

CPUReduce.Type

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total72 of 24070%6 of 4285%643626019
getFloatComparator()12942%1266%131401
getDoubleComparator()12942%1266%131401
getIntComparator()12942%1266%131401
getLongComparator()12942%1266%131401
getByteComparator()12942%1266%131401
getShortComparator()12942%1266%131401
static {...}24100%n/a010201
lambda$getLongComparator$7(long, long)8100%2100%020101
lambda$getLongComparator$6(long, long)8100%2100%020101
lambda$getDoubleComparator$3(double, double)8100%2100%020101
lambda$getDoubleComparator$2(double, double)8100%2100%020101
lambda$getFloatComparator$1(float, float)8100%2100%020101
lambda$getFloatComparator$0(float, float)8100%2100%020101
lambda$getShortComparator$11(short, short)7100%2100%020101
lambda$getShortComparator$10(short, short)7100%2100%020101
lambda$getByteComparator$9(byte, byte)7100%2100%020101
lambda$getByteComparator$8(byte, byte)7100%2100%020101
lambda$getIntComparator$5(int, int)7100%2100%020101
lambda$getIntComparator$4(int, int)7100%2100%020101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.main.operations.other.internal/CPUReduce.html b/docs/coverage/test/html/neureka.backend.main.operations.other.internal/CPUReduce.html index 556324a0f..2c55f3e40 100644 --- a/docs/coverage/test/html/neureka.backend.main.operations.other.internal/CPUReduce.html +++ b/docs/coverage/test/html/neureka.backend.main.operations.other.internal/CPUReduce.html @@ -1 +1 @@ -CPUReduce

CPUReduce

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total26 of 77196%6 of 7091%5514136016
_runRecursively(Tensor, CPU)1736195%42686%31636101
run(ExecutionCall)92976%2250%231501
lambda$_runRecursively$10(int, int, byte[], CPUReduce.ComparatorI8, int[], int)42100%4100%0301101
lambda$_runRecursively$8(int, int, short[], CPUReduce.ComparatorI16, int[], int)42100%4100%0301101
lambda$_runRecursively$6(int, int, long[], CPUReduce.ComparatorI64, int[], int)42100%4100%0301101
lambda$_runRecursively$4(int, int, int[], CPUReduce.ComparatorI32, int[], int)42100%4100%0301101
lambda$_runRecursively$2(int, int, double[], CPUReduce.ComparatorF64, int[], int)42100%4100%0301101
lambda$_runRecursively$0(int, int, float[], CPUReduce.ComparatorF32, int[], int)42100%4100%0301101
lambda$_runRecursively$11(byte[], byte[], int[], int, int)16100%2100%020101
lambda$_runRecursively$9(short[], short[], int[], int, int)16100%2100%020101
lambda$_runRecursively$7(long[], long[], int[], int, int)16100%2100%020101
lambda$_runRecursively$5(int[], int[], int[], int, int)16100%2100%020101
lambda$_runRecursively$3(double[], double[], int[], int, int)16100%2100%020101
lambda$_runRecursively$1(float[], float[], int[], int, int)16100%2100%020101
CPUReduce(CPUReduce.Type)6100%n/a010301
static {...}100%n/a010101
\ No newline at end of file +CPUReduce

CPUReduce

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total26 of 77196%6 of 7091%5514136016
_runRecursively(Tensor, CPU)1736195%42686%31636101
run(ExecutionCall)92976%2250%231501
lambda$_runRecursively$10(int, int, byte[], CPUReduce.ComparatorI8, int[], int)42100%4100%0301101
lambda$_runRecursively$8(int, int, short[], CPUReduce.ComparatorI16, int[], int)42100%4100%0301101
lambda$_runRecursively$6(int, int, long[], CPUReduce.ComparatorI64, int[], int)42100%4100%0301101
lambda$_runRecursively$4(int, int, int[], CPUReduce.ComparatorI32, int[], int)42100%4100%0301101
lambda$_runRecursively$2(int, int, double[], CPUReduce.ComparatorF64, int[], int)42100%4100%0301101
lambda$_runRecursively$0(int, int, float[], CPUReduce.ComparatorF32, int[], int)42100%4100%0301101
lambda$_runRecursively$11(byte[], byte[], int[], int, int)16100%2100%020101
lambda$_runRecursively$9(short[], short[], int[], int, int)16100%2100%020101
lambda$_runRecursively$7(long[], long[], int[], int, int)16100%2100%020101
lambda$_runRecursively$5(int[], int[], int[], int, int)16100%2100%020101
lambda$_runRecursively$3(double[], double[], int[], int, int)16100%2100%020101
lambda$_runRecursively$1(float[], float[], int[], int, int)16100%2100%020101
CPUReduce(CPUReduce.Type)6100%n/a010301
static {...}100%n/a010101
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.backend.main.operations.other.internal/CPUReduce.java.html b/docs/coverage/test/html/neureka.backend.main.operations.other.internal/CPUReduce.java.html
index 4a9aca035..5034f1b6a 100644
--- a/docs/coverage/test/html/neureka.backend.main.operations.other.internal/CPUReduce.java.html
+++ b/docs/coverage/test/html/neureka.backend.main.operations.other.internal/CPUReduce.java.html
@@ -253,4 +253,4 @@
 }
 }
-
\ No newline at end of file
+
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.backend.main.operations.other.internal/CPUSum.html b/docs/coverage/test/html/neureka.backend.main.operations.other.internal/CPUSum.html
index fed511388..81566c0aa 100644
--- a/docs/coverage/test/html/neureka.backend.main.operations.other.internal/CPUSum.html
+++ b/docs/coverage/test/html/neureka.backend.main.operations.other.internal/CPUSum.html
@@ -1 +1 @@
-CPUSum

CPUSum

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total27 of 59795%5 of 5090%5353115010
_runRecursively(Tensor, CPU)1832594%32990%31726601
run(ExecutionCall)92270%2250%231501
lambda$_runRecursively$6(int, int, Object[], Number[], int)38100%2100%020701
lambda$_runRecursively$5(int, int, byte[], byte[], int)31100%2100%020601
lambda$_runRecursively$4(int, int, short[], short[], int)31100%2100%020601
lambda$_runRecursively$3(int, int, long[], long[], int)30100%2100%020601
lambda$_runRecursively$2(int, int, int[], int[], int)30100%2100%020601
lambda$_runRecursively$1(int, int, double[], double[], int)30100%2100%020601
lambda$_runRecursively$0(int, int, float[], float[], int)30100%2100%020601
CPUSum()3100%n/a010101
\ No newline at end of file
+CPUSum

CPUSum

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total27 of 59795%5 of 5090%5353115010
_runRecursively(Tensor, CPU)1832594%32990%31726601
run(ExecutionCall)92270%2250%231501
lambda$_runRecursively$6(int, int, Object[], Number[], int)38100%2100%020701
lambda$_runRecursively$5(int, int, byte[], byte[], int)31100%2100%020601
lambda$_runRecursively$4(int, int, short[], short[], int)31100%2100%020601
lambda$_runRecursively$3(int, int, long[], long[], int)30100%2100%020601
lambda$_runRecursively$2(int, int, int[], int[], int)30100%2100%020601
lambda$_runRecursively$1(int, int, double[], double[], int)30100%2100%020601
lambda$_runRecursively$0(int, int, float[], float[], int)30100%2100%020601
CPUSum()3100%n/a010101
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.backend.main.operations.other.internal/CPUSum.java.html b/docs/coverage/test/html/neureka.backend.main.operations.other.internal/CPUSum.java.html
index b62f10f56..4229e879e 100644
--- a/docs/coverage/test/html/neureka.backend.main.operations.other.internal/CPUSum.java.html
+++ b/docs/coverage/test/html/neureka.backend.main.operations.other.internal/CPUSum.java.html
@@ -160,4 +160,4 @@
 }
 }
-
\ No newline at end of file
+
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.backend.main.operations.other.internal/index.html b/docs/coverage/test/html/neureka.backend.main.operations.other.internal/index.html
index 841a59f3d..41b561157 100644
--- a/docs/coverage/test/html/neureka.backend.main.operations.other.internal/index.html
+++ b/docs/coverage/test/html/neureka.backend.main.operations.other.internal/index.html
@@ -1 +1 @@
-neureka.backend.main.operations.other.internal

neureka.backend.main.operations.other.internal

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total125 of 1,59992%17 of 16289%161291327704503
CPUReduce.Type7215968%63685%64362601901
CPUSum2757095%54590%535311501001
CPUReduce2674596%66491%551413601601
\ No newline at end of file
+neureka.backend.main.operations.other.internal

neureka.backend.main.operations.other.internal

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total125 of 1,60892%17 of 16289%161291327704503
CPUReduce.Type7216870%63685%64362601901
CPUSum2757095%54590%535311501001
CPUReduce2674596%66491%551413601601
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.backend.main.operations.other.internal/index.source.html b/docs/coverage/test/html/neureka.backend.main.operations.other.internal/index.source.html
index aff57dc68..c43088723 100644
--- a/docs/coverage/test/html/neureka.backend.main.operations.other.internal/index.source.html
+++ b/docs/coverage/test/html/neureka.backend.main.operations.other.internal/index.source.html
@@ -1 +1 @@
-neureka.backend.main.operations.other.internal

neureka.backend.main.operations.other.internal

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total125 of 1,59992%17 of 16289%161291327704503
CPUReduce.java9890490%1210089%11941016203502
CPUSum.java2757095%54590%535311501001
\ No newline at end of file
+neureka.backend.main.operations.other.internal

neureka.backend.main.operations.other.internal

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total125 of 1,60892%17 of 16289%161291327704503
CPUReduce.java9891390%1210089%11941016203502
CPUSum.java2757095%54590%535311501001
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.backend.main.operations.other/AssignLeft.html b/docs/coverage/test/html/neureka.backend.main.operations.other/AssignLeft.html
index 0c8977398..82e808b22 100644
--- a/docs/coverage/test/html/neureka.backend.main.operations.other/AssignLeft.html
+++ b/docs/coverage/test/html/neureka.backend.main.operations.other/AssignLeft.html
@@ -1 +1 @@
-AssignLeft

AssignLeft

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total164 of 42161%16 of 3452%15342074417
lambda$new$5(ExecutionCall)470%20%227711
reducePairwise(Function)38919%3125%234701
lambda$new$11(ExecutionCall)350%20%224411
calculate(double[], int, int, Function[])220%20%222211
lambda$new$2(ExecutionCall)103577%2675%2521101
execute(Function, ExecutionCall)55892%1375%131701
lambda$new$1(Tensor[])5758%3125%230101
lambda$new$3(ExecutionCall)20%n/a111111
AssignLeft()54100%n/a0102101
stringify(String[])39100%4100%030501
lambda$new$7(Tensor[])12100%1375%130101
lambda$new$10(Function, ExecutionCall)11100%n/a010301
lambda$new$4(Function, ExecutionCall)11100%n/a010301
lambda$new$8(ExecutionCall)9100%n/a010401
lambda$new$6(Tensor)5100%n/a010101
lambda$new$0(Tensor)5100%n/a010101
lambda$new$9(ExecutionCall)2100%n/a010101
\ No newline at end of file
+AssignLeft

AssignLeft

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total164 of 42161%16 of 3452%15342074417
lambda$new$5(ExecutionCall)470%20%227711
reducePairwise(Function)38919%3125%234701
lambda$new$11(ExecutionCall)350%20%224411
calculate(double[], int, int, Function[])220%20%222211
lambda$new$2(ExecutionCall)103577%2675%2521101
execute(Function, ExecutionCall)55892%1375%131701
lambda$new$1(Tensor[])5758%3125%230101
lambda$new$3(ExecutionCall)20%n/a111111
AssignLeft()54100%n/a0102101
stringify(String[])39100%4100%030501
lambda$new$7(Tensor[])12100%1375%130101
lambda$new$10(Function, ExecutionCall)11100%n/a010301
lambda$new$4(Function, ExecutionCall)11100%n/a010301
lambda$new$8(ExecutionCall)9100%n/a010401
lambda$new$6(Tensor)5100%n/a010101
lambda$new$0(Tensor)5100%n/a010101
lambda$new$9(ExecutionCall)2100%n/a010101
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.backend.main.operations.other/AssignLeft.java.html b/docs/coverage/test/html/neureka.backend.main.operations.other/AssignLeft.java.html
index 51f40b809..e678b09c8 100644
--- a/docs/coverage/test/html/neureka.backend.main.operations.other/AssignLeft.java.html
+++ b/docs/coverage/test/html/neureka.backend.main.operations.other/AssignLeft.java.html
@@ -145,4 +145,4 @@
 return d >= 0 ? src[ right ].derive( inputs, d, j ) : src[ right ].call( inputs, j );
 }
 }
-
\ No newline at end of file
+
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.backend.main.operations.other/Cat.html b/docs/coverage/test/html/neureka.backend.main.operations.other/Cat.html
index 8638ea984..cbdcd8d2b 100644
--- a/docs/coverage/test/html/neureka.backend.main.operations.other/Cat.html
+++ b/docs/coverage/test/html/neureka.backend.main.operations.other/Cat.html
@@ -1 +1 @@
-Cat

Cat

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total29 of 59195%10 of 5481%11447106117
_catFrames(Tensor[], Tensor, int)1023395%62278%61525201
calculate(double[], int, int, Function[])70%n/a111111
execute(Function, ExecutionCall)51473%2250%231301
lambda$new$0(ExecutionCall)43990%2675%251801
lambda$new$5(Function, ExecutionCall)317198%12100%0722301
lambda$new$4(List, Integer, ADTarget)35100%2100%020401
Cat()33100%n/a0101501
lambda$_catFrames$7(Map)6100%n/a010101
lambda$new$2(Integer, Tensor)6100%n/a010101
lambda$_catFrames$8(int, List)4100%n/a010101
lambda$_catFrames$6(Tensor)4100%n/a010101
lambda$_catFrames$12(Object)3100%n/a010101
lambda$_catFrames$11(Object)3100%n/a010101
lambda$_catFrames$10(Object)3100%n/a010101
lambda$_catFrames$9(Object)3100%n/a010101
lambda$new$3(Integer)3100%n/a010101
lambda$new$1(ExecutionCall)100%n/a010101
\ No newline at end of file
+Cat

Cat

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total29 of 59195%10 of 5481%11447106117
_catFrames(Tensor[], Tensor, int)1023395%62278%61525201
calculate(double[], int, int, Function[])70%n/a111111
execute(Function, ExecutionCall)51473%2250%231301
lambda$new$0(ExecutionCall)43990%2675%251801
lambda$new$5(Function, ExecutionCall)317198%12100%0722301
lambda$new$4(List, Integer, ADTarget)35100%2100%020401
Cat()33100%n/a0101501
lambda$_catFrames$7(Map)6100%n/a010101
lambda$new$2(Integer, Tensor)6100%n/a010101
lambda$_catFrames$8(int, List)4100%n/a010101
lambda$_catFrames$6(Tensor)4100%n/a010101
lambda$_catFrames$12(Object)3100%n/a010101
lambda$_catFrames$11(Object)3100%n/a010101
lambda$_catFrames$10(Object)3100%n/a010101
lambda$_catFrames$9(Object)3100%n/a010101
lambda$new$3(Integer)3100%n/a010101
lambda$new$1(ExecutionCall)100%n/a010101
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.backend.main.operations.other/Cat.java.html b/docs/coverage/test/html/neureka.backend.main.operations.other/Cat.java.html
index cb25e68ff..a10a48948 100644
--- a/docs/coverage/test/html/neureka.backend.main.operations.other/Cat.java.html
+++ b/docs/coverage/test/html/neureka.backend.main.operations.other/Cat.java.html
@@ -193,4 +193,4 @@
 @Override public double calculate( double[] inputs, int j, int d, Function[] src ) { return src[ 0 ].call( inputs, j ); }
 }
-
\ No newline at end of file
+
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.backend.main.operations.other/DimFit.html b/docs/coverage/test/html/neureka.backend.main.operations.other/DimFit.html
index 578d0131d..8b2d8368a 100644
--- a/docs/coverage/test/html/neureka.backend.main.operations.other/DimFit.html
+++ b/docs/coverage/test/html/neureka.backend.main.operations.other/DimFit.html
@@ -1 +1 @@
-DimFit

DimFit

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total214 of 2140%28 of 280%2020393966
lambda$new$2(Function, ExecutionCall)1700%280%1515222211
DimFit()320%n/a11151511
calculate(double[], int, int, Function[])70%n/a111111
lambda$new$1(ExecutionCall)20%n/a111111
lambda$new$0(ExecutionCall)20%n/a111111
static {...}0%n/a111111
\ No newline at end of file
+DimFit

DimFit

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total214 of 2140%28 of 280%2020393966
lambda$new$2(Function, ExecutionCall)1700%280%1515222211
DimFit()320%n/a11151511
calculate(double[], int, int, Function[])70%n/a111111
lambda$new$1(ExecutionCall)20%n/a111111
lambda$new$0(ExecutionCall)20%n/a111111
static {...}0%n/a111111
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.backend.main.operations.other/DimFit.java.html b/docs/coverage/test/html/neureka.backend.main.operations.other/DimFit.java.html
index 96794a22f..b3c3b111e 100644
--- a/docs/coverage/test/html/neureka.backend.main.operations.other/DimFit.java.html
+++ b/docs/coverage/test/html/neureka.backend.main.operations.other/DimFit.java.html
@@ -77,4 +77,4 @@
 return src[ 0 ].call( inputs, j );
 }
 }
-
\ No newline at end of file
+
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.backend.main.operations.other/DimTrim.html b/docs/coverage/test/html/neureka.backend.main.operations.other/DimTrim.html
index 3fc4f41f2..bfcd44361 100644
--- a/docs/coverage/test/html/neureka.backend.main.operations.other/DimTrim.html
+++ b/docs/coverage/test/html/neureka.backend.main.operations.other/DimTrim.html
@@ -1 +1 @@
-DimTrim

DimTrim

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total73 of 61588%7 of 3479%8379112120
lambda$new$3(Function, ExecutionCall)353146%2250%233901
lambda$new$2(ExecutionCall, Function, ADTarget)193464%1150%123901
calculate(double[], int, int, Function[])70%n/a111111
_pad(Tensor, int[], boolean)618996%2880%2613901
_trim(Tensor, boolean)617196%2880%2613301
endsFrom(int[])50100%8100%050501
DimTrim()32100%n/a0101501
lambda$_trim$13(Integer)3100%n/a010101
lambda$_trim$12(Integer)3100%n/a010101
lambda$_trim$11(Integer)3100%n/a010101
lambda$_trim$10(Integer)3100%n/a010101
lambda$_trim$9(Integer)3100%n/a010101
lambda$_pad$8(Integer)3100%n/a010101
lambda$_pad$7(Integer)3100%n/a010101
lambda$_pad$6(Integer)3100%n/a010101
lambda$_pad$5(Integer)3100%n/a010101
lambda$_pad$4(Integer)3100%n/a010101
lambda$new$1(ExecutionCall)2100%n/a010101
lambda$new$0(ExecutionCall)2100%n/a010101
static {...}100%n/a010101
\ No newline at end of file
+DimTrim

DimTrim

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total73 of 61588%7 of 3479%8379112120
lambda$new$3(Function, ExecutionCall)353146%2250%233901
lambda$new$2(ExecutionCall, Function, ADTarget)193464%1150%123901
calculate(double[], int, int, Function[])70%n/a111111
_pad(Tensor, int[], boolean)618996%2880%2613901
_trim(Tensor, boolean)617196%2880%2613301
endsFrom(int[])50100%8100%050501
DimTrim()32100%n/a0101501
lambda$_trim$13(Integer)3100%n/a010101
lambda$_trim$12(Integer)3100%n/a010101
lambda$_trim$11(Integer)3100%n/a010101
lambda$_trim$10(Integer)3100%n/a010101
lambda$_trim$9(Integer)3100%n/a010101
lambda$_pad$8(Integer)3100%n/a010101
lambda$_pad$7(Integer)3100%n/a010101
lambda$_pad$6(Integer)3100%n/a010101
lambda$_pad$5(Integer)3100%n/a010101
lambda$_pad$4(Integer)3100%n/a010101
lambda$new$1(ExecutionCall)2100%n/a010101
lambda$new$0(ExecutionCall)2100%n/a010101
static {...}100%n/a010101
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.backend.main.operations.other/DimTrim.java.html b/docs/coverage/test/html/neureka.backend.main.operations.other/DimTrim.java.html
index 96fe1d71f..514672c63 100644
--- a/docs/coverage/test/html/neureka.backend.main.operations.other/DimTrim.java.html
+++ b/docs/coverage/test/html/neureka.backend.main.operations.other/DimTrim.java.html
@@ -178,4 +178,4 @@
 return src[ 0 ].call( inputs, j );
 }
 }
-
\ No newline at end of file
+
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.backend.main.operations.other/Max.html b/docs/coverage/test/html/neureka.backend.main.operations.other/Max.html
index c41d3d153..10c4caecd 100644
--- a/docs/coverage/test/html/neureka.backend.main.operations.other/Max.html
+++ b/docs/coverage/test/html/neureka.backend.main.operations.other/Max.html
@@ -1 +1 @@
-Max

Max

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total28 of 19185%4 of 633%51244329
lambda$new$5(ExecutionCall)130%20%223311
calculate(double[], int, int, Function[])70%n/a111111
lambda$new$4(Function, ExecutionCall)47094%1150%1201101
lambda$new$3(Class, Shape, Device, int, ADTarget)43088%1150%120601
Max()49100%n/a0101801
lambda$new$1(ExecutionCall)6100%n/a010301
lambda$new$0(Tensor)5100%n/a010101
lambda$new$2(ExecutionCall)2100%n/a010101
static {...}1100%n/a010101
\ No newline at end of file
+Max

Max

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total28 of 19185%4 of 633%51244329
lambda$new$5(ExecutionCall)130%20%223311
calculate(double[], int, int, Function[])70%n/a111111
lambda$new$4(Function, ExecutionCall)47094%1150%1201101
lambda$new$3(Class, Shape, Device, int, ADTarget)43088%1150%120601
Max()49100%n/a0101801
lambda$new$1(ExecutionCall)6100%n/a010301
lambda$new$0(Tensor)5100%n/a010101
lambda$new$2(ExecutionCall)2100%n/a010101
static {...}1100%n/a010101
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.backend.main.operations.other/Max.java.html b/docs/coverage/test/html/neureka.backend.main.operations.other/Max.java.html
index f1c1b924f..a988c0ed3 100644
--- a/docs/coverage/test/html/neureka.backend.main.operations.other/Max.java.html
+++ b/docs/coverage/test/html/neureka.backend.main.operations.other/Max.java.html
@@ -77,4 +77,4 @@
 @Override public double calculate( double[] inputs, int j, int d, Function[] src ) { return src[ 0 ].call( inputs, j ); }
 }
-
\ No newline at end of file
+
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.backend.main.operations.other/Min.html b/docs/coverage/test/html/neureka.backend.main.operations.other/Min.html
index 064c5f975..9d4a3ab32 100644
--- a/docs/coverage/test/html/neureka.backend.main.operations.other/Min.html
+++ b/docs/coverage/test/html/neureka.backend.main.operations.other/Min.html
@@ -1 +1 @@
-Min

Min

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total28 of 19285%4 of 633%51244329
lambda$new$5(ExecutionCall)130%20%223311
calculate(double[], int, int, Function[])70%n/a111111
lambda$new$4(Function, ExecutionCall)47194%1150%1201101
lambda$new$3(Class, Shape, Device, int, ADTarget)43088%1150%120601
Min()49100%n/a0101801
lambda$new$1(ExecutionCall)6100%n/a010301
lambda$new$0(Tensor)5100%n/a010101
lambda$new$2(ExecutionCall)2100%n/a010101
static {...}1100%n/a010101
\ No newline at end of file
+Min

Min

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total28 of 19285%4 of 633%51244329
lambda$new$5(ExecutionCall)130%20%223311
calculate(double[], int, int, Function[])70%n/a111111
lambda$new$4(Function, ExecutionCall)47194%1150%1201101
lambda$new$3(Class, Shape, Device, int, ADTarget)43088%1150%120601
Min()49100%n/a0101801
lambda$new$1(ExecutionCall)6100%n/a010301
lambda$new$0(Tensor)5100%n/a010101
lambda$new$2(ExecutionCall)2100%n/a010101
static {...}1100%n/a010101
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.backend.main.operations.other/Min.java.html b/docs/coverage/test/html/neureka.backend.main.operations.other/Min.java.html
index 55e22c69b..ee398c85d 100644
--- a/docs/coverage/test/html/neureka.backend.main.operations.other/Min.java.html
+++ b/docs/coverage/test/html/neureka.backend.main.operations.other/Min.java.html
@@ -77,4 +77,4 @@
 @Override public double calculate( double[] inputs, int j, int d, Function[] src ) { return src[ 0 ].call( inputs, j ); }
 }
-
\ No newline at end of file
+
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.backend.main.operations.other/Permute.html b/docs/coverage/test/html/neureka.backend.main.operations.other/Permute.html
index 058497622..d9f985a8a 100644
--- a/docs/coverage/test/html/neureka.backend.main.operations.other/Permute.html
+++ b/docs/coverage/test/html/neureka.backend.main.operations.other/Permute.html
@@ -1 +1 @@
-Permute

Permute

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total53 of 49789%8 of 5284%838895112
_shapeCheck(int[], Tensor)29617%3125%234601
stringify(String[])118488%2880%2621701
calculate(double[], int, int, Function[])70%n/a111111
lambda$new$3(Function, ExecutionCall)56692%1787%1511201
_rearrangeAxisOf(Tensor, int[], boolean)3897%2250%230901
makeFit(Tensor[], boolean)139100%18100%01002001
invert(int[])46100%8100%0501201
Permute()32100%n/a0101501
lambda$new$2(Function, ADTarget)19100%n/a010101
lambda$stringify$4(String)10100%n/a010301
lambda$new$1(ExecutionCall)2100%n/a010101
lambda$new$0(ExecutionCall)2100%n/a010101
\ No newline at end of file
+Permute

Permute

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total53 of 49789%8 of 5284%838895112
_shapeCheck(int[], Tensor)29617%3125%234601
stringify(String[])118488%2880%2621701
calculate(double[], int, int, Function[])70%n/a111111
lambda$new$3(Function, ExecutionCall)56692%1787%1511201
_rearrangeAxisOf(Tensor, int[], boolean)3897%2250%230901
makeFit(Tensor[], boolean)139100%18100%01002001
invert(int[])46100%8100%0501201
Permute()32100%n/a0101501
lambda$new$2(Function, ADTarget)19100%n/a010101
lambda$stringify$4(String)10100%n/a010301
lambda$new$1(ExecutionCall)2100%n/a010101
lambda$new$0(ExecutionCall)2100%n/a010101
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.backend.main.operations.other/Permute.java.html b/docs/coverage/test/html/neureka.backend.main.operations.other/Permute.java.html
index d67a5efec..2f14266e0 100644
--- a/docs/coverage/test/html/neureka.backend.main.operations.other/Permute.java.html
+++ b/docs/coverage/test/html/neureka.backend.main.operations.other/Permute.java.html
@@ -182,4 +182,4 @@
 }
 }
-
\ No newline at end of file
+
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.backend.main.operations.other/Randomization.html b/docs/coverage/test/html/neureka.backend.main.operations.other/Randomization.html
index 6b14d55ec..7014799eb 100644
--- a/docs/coverage/test/html/neureka.backend.main.operations.other/Randomization.html
+++ b/docs/coverage/test/html/neureka.backend.main.operations.other/Randomization.html
@@ -1 +1 @@
-Randomization

Randomization

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total15 of 13288%2 of 1080%31223017
lambda$new$4(ExecutionCall)75087%1375%131801
calculate(double[], int, int, Function[])70%n/a111111
lambda$new$0(Tensor)11694%1583%140301
Randomization()36100%n/a0101501
lambda$new$3(Function, ExecutionCall)7100%n/a010101
lambda$new$1(ExecutionCall)6100%n/a010301
lambda$new$2(ExecutionCall)2100%n/a010101
\ No newline at end of file
+Randomization

Randomization

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total15 of 13288%2 of 1080%31223017
lambda$new$4(ExecutionCall)75087%1375%131801
calculate(double[], int, int, Function[])70%n/a111111
lambda$new$0(Tensor)11694%1583%140301
Randomization()36100%n/a0101501
lambda$new$3(Function, ExecutionCall)7100%n/a010101
lambda$new$1(ExecutionCall)6100%n/a010301
lambda$new$2(ExecutionCall)2100%n/a010101
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.backend.main.operations.other/Randomization.java.html b/docs/coverage/test/html/neureka.backend.main.operations.other/Randomization.java.html
index 7c7285126..eebc67781 100644
--- a/docs/coverage/test/html/neureka.backend.main.operations.other/Randomization.java.html
+++ b/docs/coverage/test/html/neureka.backend.main.operations.other/Randomization.java.html
@@ -78,4 +78,4 @@
 }
 }
-
\ No newline at end of file
+
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.backend.main.operations.other/ReLayout.html b/docs/coverage/test/html/neureka.backend.main.operations.other/ReLayout.html
index db913c365..6a900c03c 100644
--- a/docs/coverage/test/html/neureka.backend.main.operations.other/ReLayout.html
+++ b/docs/coverage/test/html/neureka.backend.main.operations.other/ReLayout.html
@@ -1 +1 @@
-ReLayout

ReLayout

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total61 of 29579%7 of 2875%1030971316
_checkLayoutConversion(NDConfiguration, NDConfiguration, NDConfiguration.Layout)29923%2250%234701
toLayout(Tensor, NDConfiguration.Layout)104481%31178%3821401
lambda$new$2(NDConfiguration.Layout, ADTarget)90%n/a112211
calculate(double[], int, int, Function[])70%n/a111111
_createNewNDCFrom(NDConfiguration, int[])41578%1150%120301
lambda$new$1(ExecutionCall)20%n/a111111
ReLayout()32100%n/a0101501
lambda$new$3(Function, ExecutionCall)32100%n/a010701
_fromRMToCM(Tensor)31100%2100%020801
_fromCMToRM(Tensor)29100%1375%130801
lambda$_assignIfActual$6(Tensor, Tensor)16100%n/a010101
_assignIfActual(Tensor, Supplier)15100%2100%020401
lambda$_fromRMToCM$5(Tensor)6100%n/a010101
lambda$_fromCMToRM$4(Tensor)2100%n/a010101
lambda$new$0(ExecutionCall)2100%n/a010101
static {...}1100%n/a010101
\ No newline at end of file
+ReLayout

ReLayout

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total61 of 29579%7 of 2875%1030971316
_checkLayoutConversion(NDConfiguration, NDConfiguration, NDConfiguration.Layout)29923%2250%234701
toLayout(Tensor, NDConfiguration.Layout)104481%31178%3821401
lambda$new$2(NDConfiguration.Layout, ADTarget)90%n/a112211
calculate(double[], int, int, Function[])70%n/a111111
_createNewNDCFrom(NDConfiguration, int[])41578%1150%120301
lambda$new$1(ExecutionCall)20%n/a111111
ReLayout()32100%n/a0101501
lambda$new$3(Function, ExecutionCall)32100%n/a010701
_fromRMToCM(Tensor)31100%2100%020801
_fromCMToRM(Tensor)29100%1375%130801
lambda$_assignIfActual$6(Tensor, Tensor)16100%n/a010101
_assignIfActual(Tensor, Supplier)15100%2100%020401
lambda$_fromRMToCM$5(Tensor)6100%n/a010101
lambda$_fromCMToRM$4(Tensor)2100%n/a010101
lambda$new$0(ExecutionCall)2100%n/a010101
static {...}1100%n/a010101
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.backend.main.operations.other/ReLayout.java.html b/docs/coverage/test/html/neureka.backend.main.operations.other/ReLayout.java.html
index d6ac5eda2..662c5e075 100644
--- a/docs/coverage/test/html/neureka.backend.main.operations.other/ReLayout.java.html
+++ b/docs/coverage/test/html/neureka.backend.main.operations.other/ReLayout.java.html
@@ -155,4 +155,4 @@
 }
 }
-
\ No newline at end of file
+
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.backend.main.operations.other/Reshape.html b/docs/coverage/test/html/neureka.backend.main.operations.other/Reshape.html
index 3b6bd8587..e66e27362 100644
--- a/docs/coverage/test/html/neureka.backend.main.operations.other/Reshape.html
+++ b/docs/coverage/test/html/neureka.backend.main.operations.other/Reshape.html
@@ -1 +1 @@
-Reshape

Reshape

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total17 of 18890%2 of 1283%31335517
calculate(double[], int, int, Function[])70%n/a111111
lambda$new$3(Function, ExecutionCall)57093%1375%1311901
_resolveNewShape(int, int[])55391%1787%1511501
Reshape()32100%n/a0101501
lambda$new$2(NDConfiguration, ADTarget)12100%n/a010501
lambda$new$1(ExecutionCall)2100%n/a010101
lambda$new$0(ExecutionCall)2100%n/a010101
\ No newline at end of file
+Reshape

Reshape

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total22 of 18888%3 of 1275%41345517
lambda$new$3(Function, ExecutionCall)106586%2250%2321901
calculate(double[], int, int, Function[])70%n/a111111
_resolveNewShape(int, int[])55391%1787%1511501
Reshape()32100%n/a0101501
lambda$new$2(NDConfiguration, ADTarget)12100%n/a010501
lambda$new$1(ExecutionCall)2100%n/a010101
lambda$new$0(ExecutionCall)2100%n/a010101
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.backend.main.operations.other/Reshape.java.html b/docs/coverage/test/html/neureka.backend.main.operations.other/Reshape.java.html
index 58780fe85..8970ea970 100644
--- a/docs/coverage/test/html/neureka.backend.main.operations.other/Reshape.java.html
+++ b/docs/coverage/test/html/neureka.backend.main.operations.other/Reshape.java.html
@@ -57,8 +57,8 @@
 parent.addChild( reshaped );
 input.set( parent );
- if ( input.isOutsourced() )
- input.getDevice().store( reshaped );
+ if ( input.isOutsourced() )
+ input.getDevice().store( reshaped );
 NDConfiguration originalConfig = input.getNDConf();
@@ -120,4 +120,4 @@
 return src[ 0 ].call( inputs, j );
 }
 }
-
\ No newline at end of file
+
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.backend.main.operations.other/Slice.html b/docs/coverage/test/html/neureka.backend.main.operations.other/Slice.html
index a933581cd..8f9ae3cca 100644
--- a/docs/coverage/test/html/neureka.backend.main.operations.other/Slice.html
+++ b/docs/coverage/test/html/neureka.backend.main.operations.other/Slice.html
@@ -1 +1 @@
-Slice

Slice

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total29 of 56394%6 of 5088%7363106111
_slice(int[], int[], int[], Tensor)1625394%43489%42014401
calculate(double[], int, int, Function[])70%n/a111111
lambda$_sliceFrame$5(int[], int[], int[], Tensor, NDFrame)67692%2880%2611801
lambda$new$2(Class, Shape, boolean, Device, int[], int[], int[], ADTarget)76100%n/a0101001
lambda$new$3(Function, ExecutionCall)59100%n/a0101301
Slice()33100%n/a0101501
_sliceFrame(Tensor, Tensor, int[], int[], int[])27100%2100%020401
static {...}4100%n/a010101
lambda$_slice$4(Relation)100%n/a010101
lambda$new$1(ExecutionCall)100%n/a010101
lambda$new$0(ExecutionCall)100%n/a010101
\ No newline at end of file
+Slice

Slice

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total34 of 56393%7 of 5086%8364106111
_slice(int[], int[], int[], Tensor)2124892%53386%52024401
calculate(double[], int, int, Function[])70%n/a111111
lambda$_sliceFrame$5(int[], int[], int[], Tensor, NDFrame)67692%2880%2611801
lambda$new$2(Class, Shape, boolean, Device, int[], int[], int[], ADTarget)76100%n/a0101001
lambda$new$3(Function, ExecutionCall)59100%n/a0101301
Slice()33100%n/a0101501
_sliceFrame(Tensor, Tensor, int[], int[], int[])27100%2100%020401
static {...}4100%n/a010101
lambda$_slice$4(Relation)100%n/a010101
lambda$new$1(ExecutionCall)100%n/a010101
lambda$new$0(ExecutionCall)100%n/a010101
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.backend.main.operations.other/Slice.java.html b/docs/coverage/test/html/neureka.backend.main.operations.other/Slice.java.html
index 87246dd7c..fdad455d9 100644
--- a/docs/coverage/test/html/neureka.backend.main.operations.other/Slice.java.html
+++ b/docs/coverage/test/html/neureka.backend.main.operations.other/Slice.java.html
@@ -174,8 +174,8 @@
 parent.addChild( subset );
 input.set( parent );
- if ( input.isOutsourced() )
- input.getDevice().store( subset );
+ if ( input.isOutsourced() )
+ input.getDevice().store( subset );
 if ( input.isVirtual() ) subset.mut().setIsVirtual( true );
@@ -218,4 +218,4 @@
 public double calculate( double[] inputs, int j, int d, Function[] src ) { return src[ 0 ].call( inputs, j ); }
 }
-
\ No newline at end of file
+
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.backend.main.operations.other/Sum.html b/docs/coverage/test/html/neureka.backend.main.operations.other/Sum.html
index 2a576c429..65a98e1a1 100644
--- a/docs/coverage/test/html/neureka.backend.main.operations.other/Sum.html
+++ b/docs/coverage/test/html/neureka.backend.main.operations.other/Sum.html
@@ -1 +1 @@
-Sum

Sum

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total7 of 3982%0 of 0n/a1211212
calculate(double[], int, int, Function[])70%n/a111111
Sum()32100%n/a0101101
\ No newline at end of file
+Sum

Sum

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total7 of 3982%0 of 0n/a1211212
calculate(double[], int, int, Function[])70%n/a111111
Sum()32100%n/a0101101
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.backend.main.operations.other/Sum.java.html b/docs/coverage/test/html/neureka.backend.main.operations.other/Sum.java.html
index 79010da94..984ffbcb7 100644
--- a/docs/coverage/test/html/neureka.backend.main.operations.other/Sum.java.html
+++ b/docs/coverage/test/html/neureka.backend.main.operations.other/Sum.java.html
@@ -27,4 +27,4 @@
 @Override public double calculate( double[] inputs, int j, int d, Function[] src ) { return src[ 0 ].call( inputs, j ); }
 }
-
\ No newline at end of file
+
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.backend.main.operations.other/index.html b/docs/coverage/test/html/neureka.backend.main.operations.other/index.html
index 35cd02096..eee140e1b 100644
--- a/docs/coverage/test/html/neureka.backend.main.operations.other/index.html
+++ b/docs/coverage/test/html/neureka.backend.main.operations.other/index.html
@@ -1 +1 @@
-neureka.backend.main.operations.other

neureka.backend.main.operations.other

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total718 of 3,93881%94 of 31470%9629010978624133112
DimFit2140%280%202039396611
AssignLeft16425761%161852%1534207441701
DimTrim7354288%72779%837911212001
ReLayout6123479%72175%103097131601
Permute5344489%84484%83889511201
Cat2956295%104481%1144710611701
Slice2953494%64488%736310611101
Min2816485%4233%5124432901
Max2816385%4233%5124432901
Reshape1717190%21083%3133551701
Randomization1511788%2880%3122301701
Sum73282%n/a121121201
\ No newline at end of file
+neureka.backend.main.operations.other

neureka.backend.main.operations.other

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total728 of 3,93881%96 of 31469%9829011178624133112
DimFit2140%280%202039396611
AssignLeft16425761%161852%1534207441701
DimTrim7354288%72779%837911212001
ReLayout6123479%72175%103097131601
Permute5344489%84484%83889511201
Slice3452993%74386%836410611101
Cat2956295%104481%1144710611701
Min2816485%4233%5124432901
Max2816385%4233%5124432901
Reshape2216688%3975%4134551701
Randomization1511788%2880%3122301701
Sum73282%n/a121121201
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.backend.main.operations.other/index.source.html b/docs/coverage/test/html/neureka.backend.main.operations.other/index.source.html
index 415b475ef..04daffe82 100644
--- a/docs/coverage/test/html/neureka.backend.main.operations.other/index.source.html
+++ b/docs/coverage/test/html/neureka.backend.main.operations.other/index.source.html
@@ -1 +1 @@
-neureka.backend.main.operations.other

neureka.backend.main.operations.other

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total718 of 3,93881%94 of 31470%9629010978624133112
DimFit.java2140%280%202039396611
AssignLeft.java16425761%161852%1534207441701
DimTrim.java7354288%72779%837911212001
ReLayout.java6123479%72175%103097131601
Permute.java5344489%84484%83889511201
Cat.java2956295%104481%1144710611701
Slice.java2953494%64488%736310611101
Min.java2816485%4233%5124432901
Max.java2816385%4233%5124432901
Reshape.java1717190%21083%3133551701
Randomization.java1511788%2880%3122301701
Sum.java73282%n/a121121201
\ No newline at end of file
+neureka.backend.main.operations.other

neureka.backend.main.operations.other

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total728 of 3,93881%96 of 31469%9829011178624133112
DimFit.java2140%280%202039396611
AssignLeft.java16425761%161852%1534207441701
DimTrim.java7354288%72779%837911212001
ReLayout.java6123479%72175%103097131601
Permute.java5344489%84484%83889511201
Slice.java3452993%74386%836410611101
Cat.java2956295%104481%1144710611701
Min.java2816485%4233%5124432901
Max.java2816385%4233%5124432901
Reshape.java2216688%3975%4134551701
Randomization.java1511788%2880%3122301701
Sum.java73282%n/a121121201
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.backend.main.operations/ConvUtil.html b/docs/coverage/test/html/neureka.backend.main.operations/ConvUtil.html
index fa0e6e1c2..ef7426e7b 100644
--- a/docs/coverage/test/html/neureka.backend.main.operations/ConvUtil.html
+++ b/docs/coverage/test/html/neureka.backend.main.operations/ConvUtil.html
@@ -1 +1 @@
-ConvUtil

ConvUtil

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total83 of 27469%14 of 2646%11231444210
executeRecursively(String, ExecutionCall)403345%5337%3571201
lambda$createDeconvolutionFor$0(ExecutionCall)33717%7112%455601
lambda$createDeconvolutionFor$3(Function, ExecutionCall)50%n/a111111
ConvUtil()30%n/a111111
lambda$createDeconvolutionFor$2(String, ExecutionCall)27897%1375%130901
shapeOfCon(int[], int[])37100%1375%130401
createDeconvolutionFor(String)13100%n/a010501
lambda$createDeconvolutionFor$5(ExecutionCall)10100%2100%020301
lambda$createDeconvolutionFor$4(String, Function, ExecutionCall)9100%n/a010201
lambda$createDeconvolutionFor$1(String, ExecutionCall)4100%n/a010101
\ No newline at end of file
+ConvUtil

ConvUtil

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total83 of 27469%14 of 2646%11231444210
executeRecursively(String, ExecutionCall)403345%5337%3571201
lambda$createDeconvolutionFor$0(ExecutionCall)33717%7112%455601
lambda$createDeconvolutionFor$3(Function, ExecutionCall)50%n/a111111
ConvUtil()30%n/a111111
lambda$createDeconvolutionFor$2(String, ExecutionCall)27897%1375%130901
shapeOfCon(int[], int[])37100%1375%130401
createDeconvolutionFor(String)13100%n/a010501
lambda$createDeconvolutionFor$5(ExecutionCall)10100%2100%020301
lambda$createDeconvolutionFor$4(String, Function, ExecutionCall)9100%n/a010201
lambda$createDeconvolutionFor$1(String, ExecutionCall)4100%n/a010101
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.backend.main.operations/ConvUtil.java.html b/docs/coverage/test/html/neureka.backend.main.operations/ConvUtil.java.html
index ed37f0cdf..1f54c4011 100644
--- a/docs/coverage/test/html/neureka.backend.main.operations/ConvUtil.java.html
+++ b/docs/coverage/test/html/neureka.backend.main.operations/ConvUtil.java.html
@@ -88,4 +88,4 @@
 }
 }
-
\ No newline at end of file
+
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.backend.main.operations/ElemWiseUtil.html b/docs/coverage/test/html/neureka.backend.main.operations/ElemWiseUtil.html
index 0cc86f204..863bd025d 100644
--- a/docs/coverage/test/html/neureka.backend.main.operations/ElemWiseUtil.html
+++ b/docs/coverage/test/html/neureka.backend.main.operations/ElemWiseUtil.html
@@ -1 +1 @@
-ElemWiseUtil

ElemWiseUtil

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total10 of 5682%0 of 2100%1541614
newTensorLike(Class, Shape, boolean, Device, double)72980%2100%023901
ElemWiseUtil()30%n/a111111
newTensorLike(Tensor, double)13100%n/a010501
static {...}4100%n/a010101
\ No newline at end of file
+ElemWiseUtil

ElemWiseUtil

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total10 of 5682%0 of 2100%1541614
newTensorLike(Class, Shape, boolean, Device, double)72980%2100%023901
ElemWiseUtil()30%n/a111111
newTensorLike(Tensor, double)13100%n/a010501
static {...}4100%n/a010101
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.backend.main.operations/ElemWiseUtil.java.html b/docs/coverage/test/html/neureka.backend.main.operations/ElemWiseUtil.java.html
index fd1f389a9..dac7c93c8 100644
--- a/docs/coverage/test/html/neureka.backend.main.operations/ElemWiseUtil.java.html
+++ b/docs/coverage/test/html/neureka.backend.main.operations/ElemWiseUtil.java.html
@@ -42,4 +42,4 @@
 }
 }
-
\ No newline at end of file
+
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.backend.main.operations/index.html b/docs/coverage/test/html/neureka.backend.main.operations/index.html
index 6b483221b..dcddc4303 100644
--- a/docs/coverage/test/html/neureka.backend.main.operations/index.html
+++ b/docs/coverage/test/html/neureka.backend.main.operations/index.html
@@ -1 +1 @@
-neureka.backend.main.operations

neureka.backend.main.operations

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total93 of 33071%14 of 2850%1228186031402
ConvUtil8319169%141246%1123144421001
ElemWiseUtil104682%2100%154161401
\ No newline at end of file
+neureka.backend.main.operations

neureka.backend.main.operations

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total93 of 33071%14 of 2850%1228186031402
ConvUtil8319169%141246%1123144421001
ElemWiseUtil104682%2100%154161401
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.backend.main.operations/index.source.html b/docs/coverage/test/html/neureka.backend.main.operations/index.source.html
index 3a0d11ea9..6279181b4 100644
--- a/docs/coverage/test/html/neureka.backend.main.operations/index.source.html
+++ b/docs/coverage/test/html/neureka.backend.main.operations/index.source.html
@@ -1 +1 @@
-neureka.backend.main.operations

neureka.backend.main.operations

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total93 of 33071%14 of 2850%1228186031402
ConvUtil.java8319169%141246%1123144421001
ElemWiseUtil.java104682%2100%154161401
\ No newline at end of file
+neureka.backend.main.operations

neureka.backend.main.operations

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total93 of 33071%14 of 2850%1228186031402
ConvUtil.java8319169%141246%1123144421001
ElemWiseUtil.java104682%2100%154161401
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.backend.ocl/CLBackend.html b/docs/coverage/test/html/neureka.backend.ocl/CLBackend.html
index 4e6f64370..470a97ae4 100644
--- a/docs/coverage/test/html/neureka.backend.ocl/CLBackend.html
+++ b/docs/coverage/test/html/neureka.backend.ocl/CLBackend.html
@@ -1 +1 @@
-CLBackend

CLBackend

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total67 of 1,11293%8 of 2669%710410177091
_findLoadAndCompileForAllPlatforms()657051%6650%57102501
getTotalNumberOfDevices()1386%1150%120301
_load(ReceiveForDevice)368100%n/a01011101
find(String)84100%1787%1501701
dispose()32100%4100%030601
toString()29100%n/a010201
CLBackend()13100%n/a010301
update(Component.OwnerChangeRequest)13100%n/a010401
lambda$_load$27(LoadingContext)6100%n/a010101
lambda$_load$26(LoadingContext)6100%n/a010101
lambda$_load$25(LoadingContext)6100%n/a010101
lambda$_load$23(LoadingContext)6100%n/a010101
lambda$_load$22(LoadingContext)6100%n/a010101
lambda$_load$21(LoadingContext)6100%n/a010101
lambda$_load$20(LoadingContext)6100%n/a010101
lambda$_load$19(LoadingContext)6100%n/a010101
lambda$_load$18(LoadingContext)6100%n/a010101
lambda$_load$17(LoadingContext)6100%n/a010101
lambda$_load$16(LoadingContext)6100%n/a010101
lambda$_load$15(LoadingContext)6100%n/a010101
lambda$_load$14(LoadingContext)6100%n/a010101
lambda$_load$13(LoadingContext)6100%n/a010101
lambda$_load$12(LoadingContext)6100%n/a010101
lambda$_load$11(LoadingContext)6100%n/a010101
lambda$_load$10(LoadingContext)6100%n/a010101
lambda$_load$9(LoadingContext)6100%n/a010101
lambda$_load$8(LoadingContext)6100%n/a010101
lambda$_load$7(LoadingContext)6100%n/a010101
lambda$_load$6(LoadingContext)6100%n/a010101
lambda$_load$5(LoadingContext)6100%n/a010101
lambda$getLoader$4(BackendRegistry)6100%n/a010101
lambda$_load$77(LoadingContext)5100%n/a010101
lambda$_load$76(LoadingContext)5100%n/a010101
lambda$_load$75(LoadingContext)5100%n/a010101
lambda$_load$74(LoadingContext)5100%n/a010101
lambda$_load$73(LoadingContext)5100%n/a010101
lambda$_load$72(LoadingContext)5100%n/a010101
lambda$_load$71(LoadingContext)5100%n/a010101
lambda$_load$70(LoadingContext)5100%n/a010101
lambda$_load$69(LoadingContext)5100%n/a010101
lambda$_load$68(LoadingContext)5100%n/a010101
lambda$_load$67(LoadingContext)5100%n/a010101
lambda$_load$66(LoadingContext)5100%n/a010101
lambda$_load$65(LoadingContext)5100%n/a010101
lambda$_load$64(LoadingContext)5100%n/a010101
lambda$_load$63(LoadingContext)5100%n/a010101
lambda$_load$62(LoadingContext)5100%n/a010101
lambda$_load$61(LoadingContext)5100%n/a010101
lambda$_load$60(LoadingContext)5100%n/a010101
lambda$_load$59(LoadingContext)5100%n/a010101
lambda$_load$58(LoadingContext)5100%n/a010101
lambda$_load$57(LoadingContext)5100%n/a010101
lambda$_load$56(LoadingContext)5100%n/a010101
lambda$_load$55(LoadingContext)5100%n/a010101
lambda$_load$54(LoadingContext)5100%n/a010101
lambda$_load$53(LoadingContext)5100%n/a010101
lambda$_load$52(LoadingContext)5100%n/a010101
lambda$_load$51(LoadingContext)5100%n/a010101
lambda$_load$50(LoadingContext)5100%n/a010101
lambda$_load$49(LoadingContext)5100%n/a010101
lambda$_load$48(LoadingContext)5100%n/a010101
lambda$_load$47(LoadingContext)5100%n/a010101
lambda$_load$46(LoadingContext)5100%n/a010101
lambda$_load$45(LoadingContext)5100%n/a010101
lambda$_load$44(LoadingContext)5100%n/a010101
lambda$_load$43(LoadingContext)5100%n/a010101
lambda$_load$42(LoadingContext)5100%n/a010101
lambda$_load$41(LoadingContext)5100%n/a010101
lambda$_load$40(LoadingContext)5100%n/a010101
lambda$_load$39(LoadingContext)5100%n/a010101
lambda$_load$38(LoadingContext)5100%n/a010101
lambda$_load$37(LoadingContext)5100%n/a010101
lambda$_load$36(LoadingContext)5100%n/a010101
lambda$_load$35(LoadingContext)5100%n/a010101
lambda$_load$34(LoadingContext)5100%n/a010101
lambda$_load$33(LoadingContext)5100%n/a010101
lambda$_load$32(LoadingContext)5100%n/a010101
lambda$_load$24(LoadingContext)5100%n/a010101
getPlatforms()4100%n/a010101
reset()4100%n/a010201
lambda$_load$31(LoadingContext)4100%n/a010101
lambda$_load$30(LoadingContext)4100%n/a010101
lambda$_load$29(LoadingContext)4100%n/a010101
lambda$_load$28(LoadingContext)4100%n/a010101
lambda$find$3(String, String)4100%n/a010101
lambda$find$2(String)4100%n/a010101
lambda$_findLoadAndCompileForAllPlatforms$1(OpenCLPlatform)4100%n/a010101
lambda$getTotalNumberOfDevices$0(OpenCLPlatform)4100%n/a010101
static {...}4100%n/a010101
getSettings()100%n/a010101
getLoader()100%n/a010101
\ No newline at end of file
+CLBackend

CLBackend

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total718 of 1,11235%26 of 260%99104611778691
_findLoadAndCompileForAllPlatforms()1350%120%77252511
find(String)840%80%55171711
dispose()320%40%336611
toString()290%n/a112211
getTotalNumberOfDevices()150%20%223311
update(Component.OwnerChangeRequest)130%n/a114411
lambda$_load$27(LoadingContext)60%n/a111111
lambda$_load$26(LoadingContext)60%n/a111111
lambda$_load$25(LoadingContext)60%n/a111111
lambda$_load$23(LoadingContext)60%n/a111111
lambda$_load$22(LoadingContext)60%n/a111111
lambda$_load$21(LoadingContext)60%n/a111111
lambda$_load$20(LoadingContext)60%n/a111111
lambda$_load$19(LoadingContext)60%n/a111111
lambda$_load$18(LoadingContext)60%n/a111111
lambda$_load$17(LoadingContext)60%n/a111111
lambda$_load$16(LoadingContext)60%n/a111111
lambda$_load$15(LoadingContext)60%n/a111111
lambda$_load$14(LoadingContext)60%n/a111111
lambda$_load$13(LoadingContext)60%n/a111111
lambda$_load$12(LoadingContext)60%n/a111111
lambda$_load$11(LoadingContext)60%n/a111111
lambda$_load$10(LoadingContext)60%n/a111111
lambda$_load$9(LoadingContext)60%n/a111111
lambda$_load$8(LoadingContext)60%n/a111111
lambda$_load$7(LoadingContext)60%n/a111111
lambda$_load$6(LoadingContext)60%n/a111111
lambda$_load$5(LoadingContext)60%n/a111111
lambda$_load$77(LoadingContext)50%n/a111111
lambda$_load$76(LoadingContext)50%n/a111111
lambda$_load$75(LoadingContext)50%n/a111111
lambda$_load$74(LoadingContext)50%n/a111111
lambda$_load$73(LoadingContext)50%n/a111111
lambda$_load$72(LoadingContext)50%n/a111111
lambda$_load$71(LoadingContext)50%n/a111111
lambda$_load$70(LoadingContext)50%n/a111111
lambda$_load$69(LoadingContext)50%n/a111111
lambda$_load$68(LoadingContext)50%n/a111111
lambda$_load$67(LoadingContext)50%n/a111111
lambda$_load$66(LoadingContext)50%n/a111111
lambda$_load$65(LoadingContext)50%n/a111111
lambda$_load$64(LoadingContext)50%n/a111111
lambda$_load$63(LoadingContext)50%n/a111111
lambda$_load$62(LoadingContext)50%n/a111111
lambda$_load$61(LoadingContext)50%n/a111111
lambda$_load$60(LoadingContext)50%n/a111111
lambda$_load$59(LoadingContext)50%n/a111111
lambda$_load$58(LoadingContext)50%n/a111111
lambda$_load$57(LoadingContext)50%n/a111111
lambda$_load$56(LoadingContext)50%n/a111111
lambda$_load$55(LoadingContext)50%n/a111111
lambda$_load$54(LoadingContext)50%n/a111111
lambda$_load$53(LoadingContext)50%n/a111111
lambda$_load$52(LoadingContext)50%n/a111111
lambda$_load$51(LoadingContext)50%n/a111111
lambda$_load$50(LoadingContext)50%n/a111111
lambda$_load$49(LoadingContext)50%n/a111111
lambda$_load$48(LoadingContext)50%n/a111111
lambda$_load$47(LoadingContext)50%n/a111111
lambda$_load$46(LoadingContext)50%n/a111111
lambda$_load$45(LoadingContext)50%n/a111111
lambda$_load$44(LoadingContext)50%n/a111111
lambda$_load$43(LoadingContext)50%n/a111111
lambda$_load$42(LoadingContext)50%n/a111111
lambda$_load$41(LoadingContext)50%n/a111111
lambda$_load$40(LoadingContext)50%n/a111111
lambda$_load$39(LoadingContext)50%n/a111111
lambda$_load$38(LoadingContext)50%n/a111111
lambda$_load$37(LoadingContext)50%n/a111111
lambda$_load$36(LoadingContext)50%n/a111111
lambda$_load$35(LoadingContext)50%n/a111111
lambda$_load$34(LoadingContext)50%n/a111111
lambda$_load$33(LoadingContext)50%n/a111111
lambda$_load$32(LoadingContext)50%n/a111111
lambda$_load$24(LoadingContext)50%n/a111111
getPlatforms()40%n/a111111
reset()40%n/a112211
lambda$_load$31(LoadingContext)40%n/a111111
lambda$_load$30(LoadingContext)40%n/a111111
lambda$_load$29(LoadingContext)40%n/a111111
lambda$_load$28(LoadingContext)40%n/a111111
lambda$find$3(String, String)40%n/a111111
lambda$find$2(String)40%n/a111111
lambda$_findLoadAndCompileForAllPlatforms$1(OpenCLPlatform)40%n/a111111
lambda$getTotalNumberOfDevices$0(OpenCLPlatform)40%n/a111111
getSettings()0%n/a111111
_load(ReceiveForDevice)368100%n/a01011101
CLBackend()13100%n/a010301
lambda$getLoader$4(BackendRegistry)6100%n/a010101
static {...}4100%n/a010101
getLoader()100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.ocl/CLBackend.java.html b/docs/coverage/test/html/neureka.backend.ocl/CLBackend.java.html index 3e774b990..714e5ecaa 100644 --- a/docs/coverage/test/html/neureka.backend.ocl/CLBackend.java.html +++ b/docs/coverage/test/html/neureka.backend.ocl/CLBackend.java.html @@ -70,20 +70,20 @@ * @return The number of all {@link OpenCLDevice} instances across all {@link OpenCLPlatform}s. */ public int getTotalNumberOfDevices() { - List<OpenCLPlatform> platforms = getPlatforms(); - if ( getPlatforms().isEmpty() ) return 0; - return platforms.stream().mapToInt( p -> p.getDevices().size() ).sum(); + List<OpenCLPlatform> platforms = getPlatforms(); + if ( getPlatforms().isEmpty() ) return 0; + return platforms.stream().mapToInt( p -> p.getDevices().size() ).sum(); } /** * @return A list of context specific {@link OpenCLPlatform} instances possible containing {@link OpenCLDevice}s. */ - public List<OpenCLPlatform> getPlatforms() { return Collections.unmodifiableList( _platforms ); } + public List<OpenCLPlatform> getPlatforms() { return Collections.unmodifiableList( _platforms ); } /** * @return A container for OpenCL specific settings. */ - public CLSettings getSettings() { return _settings; } + public CLSettings getSettings() { return _settings; } /** * Updating the CLContext will cause the list of existing {@link OpenCLPlatform} instances to be @@ -94,17 +94,17 @@ */ @Override public boolean update( OwnerChangeRequest<Extensions> changeRequest ) { - _platforms.clear(); - _platforms.addAll( _findLoadAndCompileForAllPlatforms() ); - changeRequest.executeChange(); // This can be an 'add', 'remove' or 'transfer' of this component! - return true; + _platforms.clear(); + _platforms.addAll( _findLoadAndCompileForAllPlatforms() ); + changeRequest.executeChange(); // This can be an 'add', 'remove' or 'transfer' of this component! 
+ return true; } @Override public String toString() { - return this.getClass().getSimpleName()+"@"+Integer.toHexString(hashCode())+"[" + + return this.getClass().getSimpleName()+"@"+Integer.toHexString(hashCode())+"[" + "platforms=["+ - _platforms.stream().map(Object::toString).collect(Collectors.joining(","))+ + _platforms.stream().map(Object::toString).collect(Collectors.joining(","))+ "]" + "]"; } @@ -115,68 +115,68 @@ private static List<OpenCLPlatform> _findLoadAndCompileForAllPlatforms() { // Obtain the number of platforms - int[] numPlatforms = new int[ 1 ]; - clGetPlatformIDs( 0, null, numPlatforms ); + int[] numPlatforms = new int[ 1 ]; + clGetPlatformIDs( 0, null, numPlatforms ); // Obtain the platform IDs - cl_platform_id[] platforms = new cl_platform_id[ numPlatforms[ 0 ] ]; - clGetPlatformIDs( platforms.length, platforms, null ); + cl_platform_id[] platforms = new cl_platform_id[ numPlatforms[ 0 ] ]; + clGetPlatformIDs( platforms.length, platforms, null ); - List<OpenCLPlatform> loadedPlatforms = new ArrayList<>(); - List<String> failures = new ArrayList<>(); - for ( cl_platform_id id : platforms ) { - OpenCLPlatform newPlatform = null; + List<OpenCLPlatform> loadedPlatforms = new ArrayList<>(); + List<String> failures = new ArrayList<>(); + for ( cl_platform_id id : platforms ) { + OpenCLPlatform newPlatform = null; try { - newPlatform = new OpenCLPlatform( id ); + newPlatform = new OpenCLPlatform( id ); } catch ( Exception e ) { String message = "Failed to instantiate '"+OpenCLPlatform.class.getSimpleName()+"' " + "with id '0x"+Long.toHexString(id.getNativePointer())+"'!"; _LOG.error( message, e ); failures.add( message + " Reason: " + e.getMessage() ); - } - if ( newPlatform != null ) - loadedPlatforms.add( newPlatform ); + } + if ( newPlatform != null ) + loadedPlatforms.add( newPlatform ); } - if ( loadedPlatforms.isEmpty() || loadedPlatforms.stream().allMatch( p -> p.getDevices().isEmpty() ) ) + if ( loadedPlatforms.isEmpty() || loadedPlatforms.stream().allMatch( p -> p.getDevices().isEmpty() ) ) _LOG.info( Messages.clContextCouldNotFindAnyDevices() ); - if ( loadedPlatforms.isEmpty() && platforms.length > 0 ) + if ( loadedPlatforms.isEmpty() && platforms.length > 0 ) // There should be at least one platform with at least one device! 
throw new RuntimeException( "Failed to instantiate any '"+OpenCLPlatform.class.getSimpleName()+"' instance!\n" + "Reasons: \n " + failures.stream().collect(Collectors.joining("\n ")) ); - return loadedPlatforms; + return loadedPlatforms; } @Override public DeviceOption find( String searchKey ) { - Device<Number> result = null; - double score = 0; - for ( OpenCLPlatform p : _platforms ) { - for ( OpenCLDevice d : p.getDevices() ) { - double similarity = Stream.of("opencl",d.type().name(),d.name(),d.vendor()) - .map( word -> word.trim().toLowerCase() ) - .mapToDouble( word -> ParseUtil.similarity( word, searchKey ) ) - .max() - .orElse(0); - if ( similarity > score ) { - result = d; - score = similarity; - if ( score == 1 ) - return new DeviceOption( result, score ); + Device<Number> result = null; + double score = 0; + for ( OpenCLPlatform p : _platforms ) { + for ( OpenCLDevice d : p.getDevices() ) { + double similarity = Stream.of("opencl",d.type().name(),d.name(),d.vendor()) + .map( word -> word.trim().toLowerCase() ) + .mapToDouble( word -> ParseUtil.similarity( word, searchKey ) ) + .max() + .orElse(0); + if ( similarity > score ) { + result = d; + score = similarity; + if ( score == 1 ) + return new DeviceOption( result, score ); } - } - } - return new DeviceOption( result, score ); + } + } + return new DeviceOption( result, score ); } @Override public void reset() { - _settings.reset(); - } + _settings.reset(); + } /** * This method will free all the resources occupied by this context, @@ -185,12 +185,12 @@ */ @Override public void dispose() { - for ( OpenCLPlatform platform : _platforms ) { - for ( OpenCLDevice device : platform.getDevices() ) device.dispose(); - platform.dispose(); - } - _platforms.clear(); - } + for ( OpenCLPlatform platform : _platforms ) { + for ( OpenCLDevice device : platform.getDevices() ) device.dispose(); + platform.dispose(); + } + _platforms.clear(); + } @Override public BackendLoader getLoader() { @@ -200,129 +200,129 @@ private void _load( ReceiveForDevice<OpenCLDevice> receive ) { receive.forOperation( Power.class ) - .set( BiScalarBroadcast.class, context -> new CLScalarBroadcastPower( context.getOperationIdentidier() ) ) - .set( Broadcast.class, context -> new CLBroadcastPower( context.getOperationIdentidier() ) ) - .set( BiElementwise.class, context -> new CLBiElementwisePower( context.getOperationIdentidier() ) ); + .set( BiScalarBroadcast.class, context -> new CLScalarBroadcastPower( context.getOperationIdentidier() ) ) + .set( Broadcast.class, context -> new CLBroadcastPower( context.getOperationIdentidier() ) ) + .set( BiElementwise.class, context -> new CLBiElementwisePower( context.getOperationIdentidier() ) ); receive.forOperation( Addition.class ) - .set( BiScalarBroadcast.class, context -> new CLScalarBroadcastAddition(context.getOperationIdentidier()) ) - .set( Broadcast.class, context -> new CLBroadcastAddition( context.getOperationIdentidier() ) ) - .set( BiElementwise.class, context -> new CLBiElementwiseAddition( context.getOperationIdentidier() )); + .set( BiScalarBroadcast.class, context -> new CLScalarBroadcastAddition(context.getOperationIdentidier()) ) + .set( Broadcast.class, context -> new CLBroadcastAddition( context.getOperationIdentidier() ) ) + .set( BiElementwise.class, context -> new CLBiElementwiseAddition( context.getOperationIdentidier() )); receive.forOperation( Subtraction.class ) - .set( BiScalarBroadcast.class, context -> new CLScalarBroadcastSubtraction( context.getOperationIdentidier() ) ) - .set( Broadcast.class, 
context -> new CLBroadcastSubtraction( context.getOperationIdentidier() ) ) - .set( BiElementwise.class, context -> new CLBiElementwiseSubtraction( context.getOperationIdentidier() ) ); + .set( BiScalarBroadcast.class, context -> new CLScalarBroadcastSubtraction( context.getOperationIdentidier() ) ) + .set( Broadcast.class, context -> new CLBroadcastSubtraction( context.getOperationIdentidier() ) ) + .set( BiElementwise.class, context -> new CLBiElementwiseSubtraction( context.getOperationIdentidier() ) ); receive.forOperation( Multiplication.class ) - .set( BiScalarBroadcast.class, context -> new CLScalarBroadcastMultiplication( context.getOperationIdentidier() ) ) - .set( Broadcast.class, context -> new CLBroadcastMultiplication( context.getOperationIdentidier() ) ) - .set( BiElementwise.class, context -> new CLBiElementwiseMultiplication( context.getOperationIdentidier() ) ); + .set( BiScalarBroadcast.class, context -> new CLScalarBroadcastMultiplication( context.getOperationIdentidier() ) ) + .set( Broadcast.class, context -> new CLBroadcastMultiplication( context.getOperationIdentidier() ) ) + .set( BiElementwise.class, context -> new CLBiElementwiseMultiplication( context.getOperationIdentidier() ) ); receive.forOperation( Division.class ) - .set( BiScalarBroadcast.class, context -> new CLScalarBroadcastDivision( context.getOperationIdentidier() ) ) - .set( Broadcast.class, context -> new CLBroadcastDivision( context.getOperationIdentidier() ) ) - .set( BiElementwise.class, context -> new CLBiElementwiseDivision( context.getOperationIdentidier() ) ); + .set( BiScalarBroadcast.class, context -> new CLScalarBroadcastDivision( context.getOperationIdentidier() ) ) + .set( Broadcast.class, context -> new CLBroadcastDivision( context.getOperationIdentidier() ) ) + .set( BiElementwise.class, context -> new CLBiElementwiseDivision( context.getOperationIdentidier() ) ); receive.forOperation( Modulo.class ) - .set( BiScalarBroadcast.class, context -> new CLScalarBroadcastModulo( context.getOperationIdentidier() ) ) - .set( Broadcast.class, context -> new CLBroadcastModulo( context.getOperationIdentidier() ) ) - .set( BiElementwise.class, context -> new CLBiElementwiseModulo( context.getOperationIdentidier() ) ); + .set( BiScalarBroadcast.class, context -> new CLScalarBroadcastModulo( context.getOperationIdentidier() ) ) + .set( Broadcast.class, context -> new CLBroadcastModulo( context.getOperationIdentidier() ) ) + .set( BiElementwise.class, context -> new CLBiElementwiseModulo( context.getOperationIdentidier() ) ); receive.forOperation( AssignLeft.class ) - .set( BiScalarBroadcast.class, context -> new CLScalarBroadcastIdentity( context.getOperationIdentidier() ) ) - .set( ElementwiseAlgorithm.class, context -> new CLElementwiseFunction( ScalarFun.IDENTITY ) ); + .set( BiScalarBroadcast.class, context -> new CLScalarBroadcastIdentity( context.getOperationIdentidier() ) ) + .set( ElementwiseAlgorithm.class, context -> new CLElementwiseFunction( ScalarFun.IDENTITY ) ); receive.forOperation( Convolution.class ) - .set( NDConvolution.class, context -> new CLConvolution( context.getOperationIdentidier() ) ); + .set( NDConvolution.class, context -> new CLConvolution( context.getOperationIdentidier() ) ); receive.forOperation( XConvLeft.class ) - .set( NDConvolution.class, context -> new CLConvolution( context.getOperationIdentidier() ) ); + .set( NDConvolution.class, context -> new CLConvolution( context.getOperationIdentidier() ) ); receive.forOperation( XConvRight.class ) - .set( 
NDConvolution.class, context -> new CLConvolution( context.getOperationIdentidier() ) ); + .set( NDConvolution.class, context -> new CLConvolution( context.getOperationIdentidier() ) ); receive.forOperation( MatMul.class ) - .set( MatMulAlgorithm.class, context -> new CLMatMul() ); + .set( MatMulAlgorithm.class, context -> new CLMatMul() ); receive.forOperation( DotProduct.class ) - .set( DotProductAlgorithm.class, context -> new CLDot() ); + .set( DotProductAlgorithm.class, context -> new CLDot() ); receive.forOperation( Sum.class ) - .set( SumAlgorithm.class, context -> new CLSum() ); + .set( SumAlgorithm.class, context -> new CLSum() ); receive.forOperation( Randomization.class ) - .set( ElementwiseAlgorithm.class, context -> new CLRandomization() ); + .set( ElementwiseAlgorithm.class, context -> new CLRandomization() ); receive.forOperation( Absolute.class ) - .set( ElementwiseAlgorithm.class, context -> new CLElementwiseFunction( ScalarFun.ABSOLUTE) ) - .set( ScalarAlgorithm.class, context -> new CLScalarFunction(ScalarFun.ABSOLUTE) ); + .set( ElementwiseAlgorithm.class, context -> new CLElementwiseFunction( ScalarFun.ABSOLUTE) ) + .set( ScalarAlgorithm.class, context -> new CLScalarFunction(ScalarFun.ABSOLUTE) ); receive.forOperation( Cosinus.class ) - .set( ElementwiseAlgorithm.class, context -> new CLElementwiseFunction( ScalarFun.COSINUS) ) - .set( ScalarAlgorithm.class, context -> new CLScalarFunction(ScalarFun.COSINUS) ); + .set( ElementwiseAlgorithm.class, context -> new CLElementwiseFunction( ScalarFun.COSINUS) ) + .set( ScalarAlgorithm.class, context -> new CLScalarFunction(ScalarFun.COSINUS) ); receive.forOperation( GaSU.class ) - .set( ElementwiseAlgorithm.class, context -> new CLElementwiseFunction( ScalarFun.GASU) ) - .set( ScalarAlgorithm.class, context -> new CLScalarFunction(ScalarFun.GASU) ); + .set( ElementwiseAlgorithm.class, context -> new CLElementwiseFunction( ScalarFun.GASU) ) + .set( ScalarAlgorithm.class, context -> new CLScalarFunction(ScalarFun.GASU) ); receive.forOperation( GaTU.class ) - .set( ElementwiseAlgorithm.class, context -> new CLElementwiseFunction( ScalarFun.GATU) ) - .set( ScalarAlgorithm.class, context -> new CLScalarFunction(ScalarFun.GATU) ); + .set( ElementwiseAlgorithm.class, context -> new CLElementwiseFunction( ScalarFun.GATU) ) + .set( ScalarAlgorithm.class, context -> new CLScalarFunction(ScalarFun.GATU) ); receive.forOperation( Gaussian.class ) - .set( ElementwiseAlgorithm.class, context -> new CLElementwiseFunction( ScalarFun.GAUSSIAN) ) - .set( ScalarAlgorithm.class, context -> new CLScalarFunction(ScalarFun.GAUSSIAN) ); + .set( ElementwiseAlgorithm.class, context -> new CLElementwiseFunction( ScalarFun.GAUSSIAN) ) + .set( ScalarAlgorithm.class, context -> new CLScalarFunction(ScalarFun.GAUSSIAN) ); receive.forOperation( GaussianFast.class ) - .set( ElementwiseAlgorithm.class, context -> new CLElementwiseFunction( ScalarFun.GAUSSIAN_FAST) ) - .set( ScalarAlgorithm.class, context -> new CLScalarFunction(ScalarFun.GAUSSIAN_FAST) ); + .set( ElementwiseAlgorithm.class, context -> new CLElementwiseFunction( ScalarFun.GAUSSIAN_FAST) ) + .set( ScalarAlgorithm.class, context -> new CLScalarFunction(ScalarFun.GAUSSIAN_FAST) ); receive.forOperation( GeLU.class ) - .set( ElementwiseAlgorithm.class, context -> new CLElementwiseFunction( ScalarFun.GELU) ) - .set( ScalarAlgorithm.class, context -> new CLScalarFunction(ScalarFun.GELU) ); + .set( ElementwiseAlgorithm.class, context -> new CLElementwiseFunction( ScalarFun.GELU) ) + .set( 
ScalarAlgorithm.class, context -> new CLScalarFunction(ScalarFun.GELU) ); receive.forOperation( Identity.class ) - .set( ElementwiseAlgorithm.class, context -> new CLElementwiseFunction( ScalarFun.IDENTITY) ) - .set( ScalarAlgorithm.class, context -> new CLScalarFunction(ScalarFun.IDENTITY) ); + .set( ElementwiseAlgorithm.class, context -> new CLElementwiseFunction( ScalarFun.IDENTITY) ) + .set( ScalarAlgorithm.class, context -> new CLScalarFunction(ScalarFun.IDENTITY) ); receive.forOperation( Logarithm.class ) - .set( ElementwiseAlgorithm.class, context -> new CLElementwiseFunction( ScalarFun.LOGARITHM) ) - .set( ScalarAlgorithm.class, context -> new CLScalarFunction(ScalarFun.LOGARITHM) ); + .set( ElementwiseAlgorithm.class, context -> new CLElementwiseFunction( ScalarFun.LOGARITHM) ) + .set( ScalarAlgorithm.class, context -> new CLScalarFunction(ScalarFun.LOGARITHM) ); receive.forOperation( Quadratic.class ) - .set( ElementwiseAlgorithm.class, context -> new CLElementwiseFunction( ScalarFun.QUADRATIC) ) - .set( ScalarAlgorithm.class, context -> new CLScalarFunction(ScalarFun.QUADRATIC) ); + .set( ElementwiseAlgorithm.class, context -> new CLElementwiseFunction( ScalarFun.QUADRATIC) ) + .set( ScalarAlgorithm.class, context -> new CLScalarFunction(ScalarFun.QUADRATIC) ); receive.forOperation( ReLU.class ) - .set( ElementwiseAlgorithm.class, context -> new CLElementwiseFunction( ScalarFun.RELU) ) - .set( ScalarAlgorithm.class, context -> new CLScalarFunction(ScalarFun.RELU) ); + .set( ElementwiseAlgorithm.class, context -> new CLElementwiseFunction( ScalarFun.RELU) ) + .set( ScalarAlgorithm.class, context -> new CLScalarFunction(ScalarFun.RELU) ); receive.forOperation( SeLU.class ) - .set( ElementwiseAlgorithm.class, context -> new CLElementwiseFunction( ScalarFun.SELU) ) - .set( ScalarAlgorithm.class, context -> new CLScalarFunction(ScalarFun.SELU) ); + .set( ElementwiseAlgorithm.class, context -> new CLElementwiseFunction( ScalarFun.SELU) ) + .set( ScalarAlgorithm.class, context -> new CLScalarFunction(ScalarFun.SELU) ); receive.forOperation( Sigmoid.class ) - .set( ElementwiseAlgorithm.class, context -> new CLElementwiseFunction( ScalarFun.SIGMOID) ) - .set( ScalarAlgorithm.class, context -> new CLScalarFunction(ScalarFun.SIGMOID) ); + .set( ElementwiseAlgorithm.class, context -> new CLElementwiseFunction( ScalarFun.SIGMOID) ) + .set( ScalarAlgorithm.class, context -> new CLScalarFunction(ScalarFun.SIGMOID) ); receive.forOperation( SiLU.class ) - .set( ElementwiseAlgorithm.class, context -> new CLElementwiseFunction( ScalarFun.SILU) ) - .set( ScalarAlgorithm.class, context -> new CLScalarFunction(ScalarFun.SILU) ); + .set( ElementwiseAlgorithm.class, context -> new CLElementwiseFunction( ScalarFun.SILU) ) + .set( ScalarAlgorithm.class, context -> new CLScalarFunction(ScalarFun.SILU) ); receive.forOperation( Sinus.class ) - .set( ElementwiseAlgorithm.class, context -> new CLElementwiseFunction( ScalarFun.SINUS) ) - .set( ScalarAlgorithm.class, context -> new CLScalarFunction(ScalarFun.SINUS) ); + .set( ElementwiseAlgorithm.class, context -> new CLElementwiseFunction( ScalarFun.SINUS) ) + .set( ScalarAlgorithm.class, context -> new CLScalarFunction(ScalarFun.SINUS) ); receive.forOperation( Softplus.class ) - .set( ElementwiseAlgorithm.class, context -> new CLElementwiseFunction( ScalarFun.SOFTPLUS) ) - .set( ScalarAlgorithm.class, context -> new CLScalarFunction(ScalarFun.SOFTPLUS) ); + .set( ElementwiseAlgorithm.class, context -> new CLElementwiseFunction( ScalarFun.SOFTPLUS) ) + .set( 
ScalarAlgorithm.class, context -> new CLScalarFunction(ScalarFun.SOFTPLUS) ); receive.forOperation( Softsign.class ) - .set( ElementwiseAlgorithm.class, context -> new CLElementwiseFunction( ScalarFun.SOFTSIGN) ) - .set( ScalarAlgorithm.class, context -> new CLScalarFunction(ScalarFun.SOFTSIGN) ); + .set( ElementwiseAlgorithm.class, context -> new CLElementwiseFunction( ScalarFun.SOFTSIGN) ) + .set( ScalarAlgorithm.class, context -> new CLScalarFunction(ScalarFun.SOFTSIGN) ); receive.forOperation( Tanh.class ) - .set( ElementwiseAlgorithm.class, context -> new CLElementwiseFunction( ScalarFun.TANH) ) - .set( ScalarAlgorithm.class, context -> new CLScalarFunction(ScalarFun.TANH) ); + .set( ElementwiseAlgorithm.class, context -> new CLElementwiseFunction( ScalarFun.TANH) ) + .set( ScalarAlgorithm.class, context -> new CLScalarFunction(ScalarFun.TANH) ); receive.forOperation( TanhFast.class ) - .set( ElementwiseAlgorithm.class, context -> new CLElementwiseFunction( ScalarFun.TANH_FAST) ) - .set( ScalarAlgorithm.class, context -> new CLScalarFunction(ScalarFun.TANH_FAST) ); + .set( ElementwiseAlgorithm.class, context -> new CLElementwiseFunction( ScalarFun.TANH_FAST) ) + .set( ScalarAlgorithm.class, context -> new CLScalarFunction(ScalarFun.TANH_FAST) ); receive.forOperation( Exp.class ) - .set( ElementwiseAlgorithm.class, context -> new CLElementwiseFunction( ScalarFun.EXP) ) - .set( ScalarAlgorithm.class, context -> new CLScalarFunction(ScalarFun.EXP) ); + .set( ElementwiseAlgorithm.class, context -> new CLElementwiseFunction( ScalarFun.EXP) ) + .set( ScalarAlgorithm.class, context -> new CLScalarFunction(ScalarFun.EXP) ); receive.forOperation( Cbrt.class ) - .set( ElementwiseAlgorithm.class, context -> new CLElementwiseFunction( ScalarFun.CBRT) ) - .set( ScalarAlgorithm.class, context -> new CLScalarFunction(ScalarFun.CBRT) ); + .set( ElementwiseAlgorithm.class, context -> new CLElementwiseFunction( ScalarFun.CBRT) ) + .set( ScalarAlgorithm.class, context -> new CLScalarFunction(ScalarFun.CBRT) ); receive.forOperation( Log10.class ) - .set( ElementwiseAlgorithm.class, context -> new CLElementwiseFunction( ScalarFun.LOG10) ) - .set( ScalarAlgorithm.class, context -> new CLScalarFunction(ScalarFun.LOG10) ); + .set( ElementwiseAlgorithm.class, context -> new CLElementwiseFunction( ScalarFun.LOG10) ) + .set( ScalarAlgorithm.class, context -> new CLScalarFunction(ScalarFun.LOG10) ); receive.forOperation( Sqrt.class ) - .set( ElementwiseAlgorithm.class, context -> new CLElementwiseFunction( ScalarFun.SQRT) ) - .set( ScalarAlgorithm.class, context -> new CLScalarFunction(ScalarFun.SQRT) ); + .set( ElementwiseAlgorithm.class, context -> new CLElementwiseFunction( ScalarFun.SQRT) ) + .set( ScalarAlgorithm.class, context -> new CLScalarFunction(ScalarFun.SQRT) ); } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.ocl/CLSettings.html b/docs/coverage/test/html/neureka.backend.ocl/CLSettings.html index bb82333d8..0497bcb7e 100644 --- a/docs/coverage/test/html/neureka.backend.ocl/CLSettings.html +++ b/docs/coverage/test/html/neureka.backend.ocl/CLSettings.html @@ -1 +1 @@ -CLSettings

CLSettings

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 18100%0 of 0n/a040704
CLSettings()6100%n/a010201
setAutoConvertToFloat(boolean)5100%n/a010201
reset()4100%n/a010201
isAutoConvertToFloat()3100%n/a010101
\ No newline at end of file
+CLSettings

CLSettings

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total12 of 1833%0 of 0n/a345734
setAutoConvertToFloat(boolean)50%n/a112211
reset()40%n/a112211
isAutoConvertToFloat()30%n/a111111
CLSettings()6100%n/a010201
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.backend.ocl/CLSettings.java.html b/docs/coverage/test/html/neureka.backend.ocl/CLSettings.java.html
index 329966407..2add3c07e 100644
--- a/docs/coverage/test/html/neureka.backend.ocl/CLSettings.java.html
+++ b/docs/coverage/test/html/neureka.backend.ocl/CLSettings.java.html
@@ -7,16 +7,16 @@
     private boolean _autoConvertToFloat = false;
-    public boolean isAutoConvertToFloat() { return _autoConvertToFloat; }
+    public boolean isAutoConvertToFloat() { return _autoConvertToFloat; }
     public CLSettings setAutoConvertToFloat(boolean autoConvertToFloat) {
-        _autoConvertToFloat = autoConvertToFloat;
-        return this;
+        _autoConvertToFloat = autoConvertToFloat;
+        return this;
     }
     public void reset() {
-        _autoConvertToFloat = false;
-    }
+        _autoConvertToFloat = false;
+    }
 }
- \ No newline at end of file
+ \ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.backend.ocl/index.html b/docs/coverage/test/html/neureka.backend.ocl/index.html
index 8121df71f..d67097132 100644
--- a/docs/coverage/test/html/neureka.backend.ocl/index.html
+++ b/docs/coverage/test/html/neureka.backend.ocl/index.html
@@ -1 +1 @@
-neureka.backend.ocl

neureka.backend.ocl

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total67 of 1,13094%8 of 2669%71081018409502
CLBackend671,04593%81869%71041017709101
CLSettings18100%n/a04070401
\ No newline at end of file
+neureka.backend.ocl

neureka.backend.ocl

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total730 of 1,13035%26 of 260%10210866184899502
CLBackend71839435%260%9910461177869101
CLSettings1233%n/a34573401
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.backend.ocl/index.source.html b/docs/coverage/test/html/neureka.backend.ocl/index.source.html index 356aa7184..63da83af7 100644 --- a/docs/coverage/test/html/neureka.backend.ocl/index.source.html +++ b/docs/coverage/test/html/neureka.backend.ocl/index.source.html @@ -1 +1 @@ -neureka.backend.ocl

neureka.backend.ocl

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total67 of 1,13094%8 of 2669%71081018409502
CLBackend.java671,04593%81869%71041017709101
CLSettings.java18100%n/a04070401
\ No newline at end of file
+neureka.backend.ocl

neureka.backend.ocl

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total730 of 1,13035%26 of 260%10210866184899502
CLBackend.java71839435%260%9910461177869101
CLSettings.java1233%n/a34573401
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.common.composition/AbstractComponentOwner$1.html b/docs/coverage/test/html/neureka.common.composition/AbstractComponentOwner$1.html index e425d0aec..1b01d54bf 100644 --- a/docs/coverage/test/html/neureka.common.composition/AbstractComponentOwner$1.html +++ b/docs/coverage/test/html/neureka.common.composition/AbstractComponentOwner$1.html @@ -1 +1 @@ -AbstractComponentOwner.new Component.OwnerChangeRequest() {...}

AbstractComponentOwner.new Component.OwnerChangeRequest() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 30100%0 of 0n/a040604
{...}12100%n/a010101
executeChange()12100%n/a010301
getOldOwner()4100%n/a010101
getNewOwner()2100%n/a010101
\ No newline at end of file
+AbstractComponentOwner.new Component.OwnerChangeRequest() {...}

AbstractComponentOwner.new Component.OwnerChangeRequest() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 30100%0 of 0n/a040604
{...}12100%n/a010101
executeChange()12100%n/a010301
getOldOwner()4100%n/a010101
getNewOwner()2100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.common.composition/AbstractComponentOwner$2.html b/docs/coverage/test/html/neureka.common.composition/AbstractComponentOwner$2.html index 68fdc07f9..3ca95d8fd 100644 --- a/docs/coverage/test/html/neureka.common.composition/AbstractComponentOwner$2.html +++ b/docs/coverage/test/html/neureka.common.composition/AbstractComponentOwner$2.html @@ -1 +1 @@ -AbstractComponentOwner.new Component.OwnerChangeRequest() {...}

AbstractComponentOwner.new Component.OwnerChangeRequest() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 33100%0 of 0n/a040604
executeChange()15100%n/a010301
{...}12100%n/a010101
getNewOwner()4100%n/a010101
getOldOwner()2100%n/a010101
\ No newline at end of file
+AbstractComponentOwner.new Component.OwnerChangeRequest() {...}

AbstractComponentOwner.new Component.OwnerChangeRequest() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 33100%0 of 0n/a040604
executeChange()15100%n/a010301
{...}12100%n/a010101
getNewOwner()4100%n/a010101
getOldOwner()2100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.common.composition/AbstractComponentOwner$3.html b/docs/coverage/test/html/neureka.common.composition/AbstractComponentOwner$3.html index 33ea409d7..292bd65a9 100644 --- a/docs/coverage/test/html/neureka.common.composition/AbstractComponentOwner$3.html +++ b/docs/coverage/test/html/neureka.common.composition/AbstractComponentOwner$3.html @@ -1 +1 @@ -AbstractComponentOwner.new Component.OwnerChangeRequest() {...}

AbstractComponentOwner.new Component.OwnerChangeRequest() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total19 of 190%0 of 0n/a444444
{...}90%n/a111111
getOldOwner()40%n/a111111
getNewOwner()40%n/a111111
executeChange()20%n/a111111
\ No newline at end of file
+AbstractComponentOwner.new Component.OwnerChangeRequest() {...}

AbstractComponentOwner.new Component.OwnerChangeRequest() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total19 of 190%0 of 0n/a444444
{...}90%n/a111111
getOldOwner()40%n/a111111
getNewOwner()40%n/a111111
executeChange()20%n/a111111
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.common.composition/AbstractComponentOwner.html b/docs/coverage/test/html/neureka.common.composition/AbstractComponentOwner.html index 98b3a2675..60e9b2049 100644 --- a/docs/coverage/test/html/neureka.common.composition/AbstractComponentOwner.html +++ b/docs/coverage/test/html/neureka.common.composition/AbstractComponentOwner.html @@ -1 +1 @@ -AbstractComponentOwner

AbstractComponentOwner

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total36 of 42391%15 of 6476%1448678116
_transferFrom(AbstractComponentOwner)350%40%336611
_add(Component)16598%1583%140801
get(Class)58100%10100%0601001
_addOrRemoveComp(Component, boolean)51100%21083%2701101
getAll(Class)43100%3562%350801
_remove(Component)36100%3562%350701
_newArrayWithout(int, Component[])36100%2250%230601
remove(Class)31100%6100%040501
set(Component)27100%4100%030701
has(Class)14100%2100%020201
AbstractComponentOwner()6100%n/a010201
_set(Component)5100%n/a010201
find(Class)5100%n/a010101
_setComps(Component[])4100%n/a010101
_deleteComponents()4100%n/a010101
_this()2100%n/a010101
\ No newline at end of file
+AbstractComponentOwner

AbstractComponentOwner

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total36 of 42391%15 of 6476%1448678116
_transferFrom(AbstractComponentOwner)350%40%336611
_add(Component)16598%1583%140801
get(Class)58100%10100%0601001
_addOrRemoveComp(Component, boolean)51100%21083%2701101
getAll(Class)43100%3562%350801
_remove(Component)36100%3562%350701
_newArrayWithout(int, Component[])36100%2250%230601
remove(Class)31100%6100%040501
set(Component)27100%4100%030701
has(Class)14100%2100%020201
AbstractComponentOwner()6100%n/a010201
_set(Component)5100%n/a010201
find(Class)5100%n/a010101
_setComps(Component[])4100%n/a010101
_deleteComponents()4100%n/a010101
_this()2100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.common.composition/AbstractComponentOwner.java.html b/docs/coverage/test/html/neureka.common.composition/AbstractComponentOwner.java.html index 29e4b221b..105b9bd1f 100644 --- a/docs/coverage/test/html/neureka.common.composition/AbstractComponentOwner.java.html +++ b/docs/coverage/test/html/neureka.common.composition/AbstractComponentOwner.java.html @@ -328,4 +328,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.common.composition/Component$IsBeing.html b/docs/coverage/test/html/neureka.common.composition/Component$IsBeing.html index 29b774eaf..374596206 100644 --- a/docs/coverage/test/html/neureka.common.composition/Component$IsBeing.html +++ b/docs/coverage/test/html/neureka.common.composition/Component$IsBeing.html @@ -1 +1 @@ -Component.IsBeing

Component.IsBeing

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 27100%0 of 0n/a010101
static {...}27100%n/a010101
\ No newline at end of file
+Component.IsBeing

Component.IsBeing

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 44100%0 of 0n/a010101
static {...}44100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.common.composition/Component$OwnerChangeRequest.html b/docs/coverage/test/html/neureka.common.composition/Component$OwnerChangeRequest.html index 6a28a345a..ced73c14b 100644 --- a/docs/coverage/test/html/neureka.common.composition/Component$OwnerChangeRequest.html +++ b/docs/coverage/test/html/neureka.common.composition/Component$OwnerChangeRequest.html @@ -1 +1 @@ -Component.OwnerChangeRequest

Component.OwnerChangeRequest

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total4 of 2684%4 of 1266%471401
type()42284%4866%471401
\ No newline at end of file
+Component.OwnerChangeRequest

Component.OwnerChangeRequest

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total4 of 2684%4 of 1266%471401
type()42284%4866%471401
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.common.composition/Component.html b/docs/coverage/test/html/neureka.common.composition/Component.html index 9fa30706d..e66547427 100644 --- a/docs/coverage/test/html/neureka.common.composition/Component.html +++ b/docs/coverage/test/html/neureka.common.composition/Component.html @@ -1 +1 @@ -Component

Component

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 5100%0 of 0n/a010201
update(Component.OwnerChangeRequest)5100%n/a010201
\ No newline at end of file
+Component

Component

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 5100%0 of 0n/a010201
update(Component.OwnerChangeRequest)5100%n/a010201
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.common.composition/Component.java.html b/docs/coverage/test/html/neureka.common.composition/Component.java.html index 01d01fa32..231cdf623 100644 --- a/docs/coverage/test/html/neureka.common.composition/Component.java.html +++ b/docs/coverage/test/html/neureka.common.composition/Component.java.html @@ -92,4 +92,4 @@ return true; } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.common.composition/index.html b/docs/coverage/test/html/neureka.common.composition/index.html index 3835a967f..4fba8f322 100644 --- a/docs/coverage/test/html/neureka.common.composition/index.html +++ b/docs/coverage/test/html/neureka.common.composition/index.html @@ -1 +1 @@ -neureka.common.composition

neureka.common.composition

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total59 of 56389%19 of 7675%22691110153117
AbstractComponentOwner3638791%154976%144867811601
AbstractComponentOwner.new Component.OwnerChangeRequest() {...}190%n/a44444411
Component.OwnerChangeRequest42284%4866%47140101
AbstractComponentOwner.new Component.OwnerChangeRequest() {...}33100%n/a04060401
AbstractComponentOwner.new Component.OwnerChangeRequest() {...}30100%n/a04060401
Component.IsBeing27100%n/a01010101
Component5100%n/a01020101
\ No newline at end of file
+neureka.common.composition

neureka.common.composition

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total59 of 58089%19 of 7675%22691110153117
AbstractComponentOwner3638791%154976%144867811601
AbstractComponentOwner.new Component.OwnerChangeRequest() {...}190%n/a44444411
Component.OwnerChangeRequest42284%4866%47140101
Component.IsBeing44100%n/a01010101
AbstractComponentOwner.new Component.OwnerChangeRequest() {...}33100%n/a04060401
AbstractComponentOwner.new Component.OwnerChangeRequest() {...}30100%n/a04060401
Component5100%n/a01020101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.common.composition/index.source.html b/docs/coverage/test/html/neureka.common.composition/index.source.html index bb18830d3..a9a7830ba 100644 --- a/docs/coverage/test/html/neureka.common.composition/index.source.html +++ b/docs/coverage/test/html/neureka.common.composition/index.source.html @@ -1 +1 @@ -neureka.common.composition

neureka.common.composition

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total59 of 56389%19 of 7675%22691110153117
AbstractComponentOwner.java5545089%154976%1860109452814
Component.java5493%4866%49170303
\ No newline at end of file
+neureka.common.composition

neureka.common.composition

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total59 of 58089%19 of 7675%22691110153117
AbstractComponentOwner.java5545089%154976%1860109452814
Component.java7194%4866%49170303
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.common.utility/Cache$LazyEntry.html b/docs/coverage/test/html/neureka.common.utility/Cache$LazyEntry.html index 900c70759..d26610d97 100644 --- a/docs/coverage/test/html/neureka.common.utility/Cache$LazyEntry.html +++ b/docs/coverage/test/html/neureka.common.utility/Cache$LazyEntry.html @@ -1 +1 @@ -Cache.LazyEntry

Cache.LazyEntry

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total4 of 5792%3 of 862%3801204
equals(Object)41982%3350%340401
getValue()13100%2100%020201
Cache.LazyEntry(Object, Function)12100%n/a010501
hashCode()9100%n/a010101
\ No newline at end of file
+Cache.LazyEntry

Cache.LazyEntry

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total4 of 5792%3 of 862%3801204
equals(Object)41982%3350%340401
getValue()13100%2100%020201
Cache.LazyEntry(Object, Function)12100%n/a010501
hashCode()9100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.common.utility/Cache.html b/docs/coverage/test/html/neureka.common.utility/Cache.html index 4810bb767..89dcb03f7 100644 --- a/docs/coverage/test/html/neureka.common.utility/Cache.html +++ b/docs/coverage/test/html/neureka.common.utility/Cache.html @@ -1 +1 @@ -Cache

Cache

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total59 of 24075%18 of 4257%1331841210
_index(int[])398468%111150%61251801
has(Object)170%40%332211
size()30%n/a111111
_equalsFor(Object, Object)22100%2675%250501
process(Object)21100%2100%020501
_setAt(int, Object)19100%1375%130301
_indexFor(Object)13100%2100%020101
Cache(int)10100%n/a010401
_index(int)7100%n/a010101
_getAt(int)5100%n/a010101
\ No newline at end of file
+Cache

Cache

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total59 of 24075%18 of 4257%1331841210
_index(int[])398468%111150%61251801
has(Object)170%40%332211
size()30%n/a111111
_equalsFor(Object, Object)22100%2675%250501
process(Object)21100%2100%020501
_setAt(int, Object)19100%1375%130301
_indexFor(Object)13100%2100%020101
Cache(int)10100%n/a010401
_index(int)7100%n/a010101
_getAt(int)5100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.common.utility/Cache.java.html b/docs/coverage/test/html/neureka.common.utility/Cache.java.html index a0e18708a..6c434be2c 100644 --- a/docs/coverage/test/html/neureka.common.utility/Cache.java.html +++ b/docs/coverage/test/html/neureka.common.utility/Cache.java.html @@ -134,4 +134,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.common.utility/DataConverter$ForTensor.html b/docs/coverage/test/html/neureka.common.utility/DataConverter$ForTensor.html index be021cb0c..f3e9d48e8 100644 --- a/docs/coverage/test/html/neureka.common.utility/DataConverter$ForTensor.html +++ b/docs/coverage/test/html/neureka.common.utility/DataConverter$ForTensor.html @@ -1 +1 @@ -DataConverter.ForTensor

DataConverter.ForTensor

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 206100%0 of 0n/a015032015
toFloatArray(Function)15100%n/a010401
toByteArray(Function)15100%n/a010401
toLongArray(Function)15100%n/a010401
toIntArray(Function)15100%n/a010401
toDoubleArray(Function)15100%n/a010401
toShortArray(Function)15100%n/a010401
toObjectArray(Function)15100%n/a010401
lambda$toShortArray$5(short[], Function, int)13100%n/a010101
lambda$toDoubleArray$4(double[], Function, int)13100%n/a010101
lambda$toIntArray$3(int[], Function, int)13100%n/a010101
lambda$toLongArray$2(long[], Function, int)13100%n/a010101
lambda$toByteArray$1(byte[], Function, int)13100%n/a010101
lambda$toFloatArray$0(float[], Function, int)13100%n/a010101
DataConverter.ForTensor(Tensor)12100%n/a010401
lambda$toObjectArray$6(Object[], Function, int)11100%n/a010101
\ No newline at end of file
+DataConverter.ForTensor

DataConverter.ForTensor

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 206100%0 of 0n/a015032015
toFloatArray(Function)15100%n/a010401
toByteArray(Function)15100%n/a010401
toLongArray(Function)15100%n/a010401
toIntArray(Function)15100%n/a010401
toDoubleArray(Function)15100%n/a010401
toShortArray(Function)15100%n/a010401
toObjectArray(Function)15100%n/a010401
lambda$toShortArray$5(short[], Function, int)13100%n/a010101
lambda$toDoubleArray$4(double[], Function, int)13100%n/a010101
lambda$toIntArray$3(int[], Function, int)13100%n/a010101
lambda$toLongArray$2(long[], Function, int)13100%n/a010101
lambda$toByteArray$1(byte[], Function, int)13100%n/a010101
lambda$toFloatArray$0(float[], Function, int)13100%n/a010101
DataConverter.ForTensor(Tensor)12100%n/a010401
lambda$toObjectArray$6(Object[], Function, int)11100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.common.utility/DataConverter$Utility.html b/docs/coverage/test/html/neureka.common.utility/DataConverter$Utility.html index e96d12ef3..3fe0af142 100644 --- a/docs/coverage/test/html/neureka.common.utility/DataConverter$Utility.html +++ b/docs/coverage/test/html/neureka.common.utility/DataConverter$Utility.html @@ -1 +1 @@ -DataConverter.Utility

DataConverter.Utility

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total170 of 1,43788%57 of 21072%5616215232557
doubleToBool(double[])310%60%444411
boolToFloat(boolean[])280%60%444411
doubleToFloat(double[])112871%2466%243801
floatToDouble(float[])112871%2466%243801
lambda$floatToDouble$1(double[], float[], int)80%n/a111111
lambda$doubleToFloat$0(float[], double[], int)80%n/a111111
DataConverter.Utility()30%n/a111111
boolToDouble(boolean[])22692%1583%140401
floatToBigInteger(float[])22592%1375%130401
byteToBigInteger(byte[])22492%1375%130401
doubleToByte(double[])22492%1375%130401
doubleToShort(double[])22492%1375%130401
floatToByte(float[])22492%1375%130401
floatToShort(float[])22492%1375%130401
shortToBigInteger(short[])22492%1375%130401
doubleToBigInteger(double[])22492%1375%130401
intToBigInteger(int[])22492%1375%130401
longToByte(long[])22492%1375%130401
longToShort(long[])22492%1375%130401
byteToShort(byte[])22392%1375%130401
doubleToLong(double[])22392%1375%130401
floatToLong(float[])22392%1375%130401
shortToDouble(short[])22392%1375%130401
byteToDouble(byte[])22392%1375%130401
byteToFloat(byte[])22392%1375%130401
shortToFloat(short[])22392%1375%130401
shortToByte(short[])22392%1375%130401
byteToLong(byte[])22392%1375%130401
shortToLong(short[])22392%1375%130401
intToFloat(int[])22392%1375%130401
floatToInt(float[])22392%1375%130401
doubleToInt(double[])22392%1375%130401
intToDouble(int[])22392%1375%130401
intToLong(int[])22392%1375%130401
intToShort(int[])22392%1375%130401
intToByte(int[])22392%1375%130401
longToInt(long[])22392%1375%130401
longToFloat(long[])22392%1375%130401
longToDouble(long[])22392%1375%130401
longToBigInteger(long[])22392%1375%130401
byteToInt(byte[])22291%1375%130401
shortToInt(short[])22291%1375%130401
objectsToDoubles(Object[], int)53100%1583%140701
objectsToFloats(Object[], int)32100%1375%130501
objectsToShorts(Object[], int)32100%1375%130501
objectsToBytes(Object[], int)32100%1375%130501
objectsToLongs(Object[], int)32100%1375%130501
objectsToInts(Object[], int)32100%1375%130501
objFloatsToPrimFloats(Float[])21100%2100%020401
objDoublesToPrimDoubles(Double[])21100%2100%020401
objIntsToPrimInts(Integer[])21100%2100%020401
objLongsToPrimLongs(Long[])21100%2100%020401
objShortsToPrimShorts(Short[])21100%2100%020401
objBytesToPrimBytes(Byte[])21100%2100%020401
objBooleansToPrimBooleans(Boolean[])21100%2100%020401
objCharsToPrimChars(Character[])21100%2100%020401
intStream(int, int)12100%2100%020301
\ No newline at end of file
+DataConverter.Utility

DataConverter.Utility

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total170 of 1,43788%57 of 21072%5616215232557
doubleToBool(double[])310%60%444411
boolToFloat(boolean[])280%60%444411
doubleToFloat(double[])112871%2466%243801
floatToDouble(float[])112871%2466%243801
lambda$floatToDouble$1(double[], float[], int)80%n/a111111
lambda$doubleToFloat$0(float[], double[], int)80%n/a111111
DataConverter.Utility()30%n/a111111
boolToDouble(boolean[])22692%1583%140401
floatToBigInteger(float[])22592%1375%130401
byteToBigInteger(byte[])22492%1375%130401
doubleToByte(double[])22492%1375%130401
doubleToShort(double[])22492%1375%130401
floatToByte(float[])22492%1375%130401
floatToShort(float[])22492%1375%130401
shortToBigInteger(short[])22492%1375%130401
doubleToBigInteger(double[])22492%1375%130401
intToBigInteger(int[])22492%1375%130401
longToByte(long[])22492%1375%130401
longToShort(long[])22492%1375%130401
byteToShort(byte[])22392%1375%130401
doubleToLong(double[])22392%1375%130401
floatToLong(float[])22392%1375%130401
shortToDouble(short[])22392%1375%130401
byteToDouble(byte[])22392%1375%130401
byteToFloat(byte[])22392%1375%130401
shortToFloat(short[])22392%1375%130401
shortToByte(short[])22392%1375%130401
byteToLong(byte[])22392%1375%130401
shortToLong(short[])22392%1375%130401
intToFloat(int[])22392%1375%130401
floatToInt(float[])22392%1375%130401
doubleToInt(double[])22392%1375%130401
intToDouble(int[])22392%1375%130401
intToLong(int[])22392%1375%130401
intToShort(int[])22392%1375%130401
intToByte(int[])22392%1375%130401
longToInt(long[])22392%1375%130401
longToFloat(long[])22392%1375%130401
longToDouble(long[])22392%1375%130401
longToBigInteger(long[])22392%1375%130401
byteToInt(byte[])22291%1375%130401
shortToInt(short[])22291%1375%130401
objectsToDoubles(Object[], int)53100%1583%140701
objectsToFloats(Object[], int)32100%1375%130501
objectsToShorts(Object[], int)32100%1375%130501
objectsToBytes(Object[], int)32100%1375%130501
objectsToLongs(Object[], int)32100%1375%130501
objectsToInts(Object[], int)32100%1375%130501
objFloatsToPrimFloats(Float[])21100%2100%020401
objDoublesToPrimDoubles(Double[])21100%2100%020401
objIntsToPrimInts(Integer[])21100%2100%020401
objLongsToPrimLongs(Long[])21100%2100%020401
objShortsToPrimShorts(Short[])21100%2100%020401
objBytesToPrimBytes(Byte[])21100%2100%020401
objBooleansToPrimBooleans(Boolean[])21100%2100%020401
objCharsToPrimChars(Character[])21100%2100%020401
intStream(int, int)12100%2100%020301
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.common.utility/DataConverter.html b/docs/coverage/test/html/neureka.common.utility/DataConverter.html index 306affc8a..1942bb17b 100644 --- a/docs/coverage/test/html/neureka.common.utility/DataConverter.html +++ b/docs/coverage/test/html/neureka.common.utility/DataConverter.html @@ -1 +1 @@ -DataConverter

DataConverter

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total320 of 1,31075%38 of 6238%5588131943357
convert(Object, Class)607254%31785%31182501
lambda$new$50(Object[], int)301126%3125%234601
_set(Class, Class, DataConverter.Conversion)204267%1375%1311101
lambda$new$32(Float)100%20%221111
lambda$new$24(Long)100%20%221111
lambda$new$14(BigDecimal)90%20%221111
lambda$new$12(BigInteger)90%20%221111
lambda$new$46(Boolean)80%20%221111
lambda$new$45(Boolean)80%20%221111
lambda$new$44(Boolean)80%20%221111
lambda$new$43(Boolean)80%20%221111
lambda$new$42(Boolean)80%20%221111
lambda$new$41(Boolean)80%20%221111
lambda$new$36(Byte)80%20%221111
lambda$new$20(Short)80%20%221111
lambda$new$16(Integer)80%20%221111
lambda$new$48(Boolean)70%20%221111
lambda$new$47(Boolean)70%20%221111
lambda$new$33(Float)60%n/a111111
lambda$new$15(BigDecimal)60%n/a111111
lambda$new$37(Byte)0%n/a111111
lambda$new$25(Long)0%n/a111111
lambda$new$23(Short)0%n/a111111
lambda$new$22(Short)0%n/a111111
lambda$new$21(Short)0%n/a111111
lambda$new$18(Integer)0%n/a111111
lambda$new$17(Integer)0%n/a111111
lambda$new$13(BigInteger)0%n/a111111
lambda$new$39(Byte)0%n/a111111
lambda$new$38(Byte)0%n/a111111
lambda$new$35(Float)0%n/a111111
lambda$new$34(Float)0%n/a111111
lambda$new$31(Double)0%n/a111111
lambda$new$30(Double)0%n/a111111
lambda$new$27(Long)0%n/a111111
lambda$new$26(Long)0%n/a111111
lambda$new$28(Double)880%1150%120101
DataConverter()679100%n/a01013601
lambda$new$11(List)16100%n/a010301
lambda$new$9(List)16100%n/a010301
lambda$new$7(List)16100%n/a010301
static {...}13100%n/a010301
lambda$new$10(float[], List, int)12100%n/a010101
lambda$new$8(byte[], List, int)12100%n/a010101
lambda$new$6(short[], List, int)12100%n/a010101
lambda$new$51(Object[])9100%n/a010301
lambda$new$49(String)9100%n/a010101
lambda$new$40(Boolean)8100%2100%020101
lambda$new$5(List)7100%n/a010101
lambda$new$4(Object)7100%n/a010101
lambda$new$3(List)7100%n/a010101
lambda$new$2(Object)7100%n/a010101
lambda$new$1(List)7100%n/a010101
lambda$new$0(Object)7100%n/a010101
lambda$new$29(Double)6100%n/a010101
lambda$new$19(Integer)100%n/a010101
get()100%n/a010101
\ No newline at end of file
+DataConverter

DataConverter

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total320 of 1,31075%38 of 6238%5588131943357
convert(Object, Class)607254%31785%31182501
lambda$new$50(Object[], int)301126%3125%234601
_set(Class, Class, DataConverter.Conversion)204267%1375%1311101
lambda$new$32(Float)100%20%221111
lambda$new$24(Long)100%20%221111
lambda$new$14(BigDecimal)90%20%221111
lambda$new$12(BigInteger)90%20%221111
lambda$new$46(Boolean)80%20%221111
lambda$new$45(Boolean)80%20%221111
lambda$new$44(Boolean)80%20%221111
lambda$new$43(Boolean)80%20%221111
lambda$new$42(Boolean)80%20%221111
lambda$new$41(Boolean)80%20%221111
lambda$new$36(Byte)80%20%221111
lambda$new$20(Short)80%20%221111
lambda$new$16(Integer)80%20%221111
lambda$new$48(Boolean)70%20%221111
lambda$new$47(Boolean)70%20%221111
lambda$new$33(Float)60%n/a111111
lambda$new$15(BigDecimal)60%n/a111111
lambda$new$37(Byte)0%n/a111111
lambda$new$25(Long)0%n/a111111
lambda$new$23(Short)0%n/a111111
lambda$new$22(Short)0%n/a111111
lambda$new$21(Short)0%n/a111111
lambda$new$18(Integer)0%n/a111111
lambda$new$17(Integer)0%n/a111111
lambda$new$13(BigInteger)0%n/a111111
lambda$new$39(Byte)0%n/a111111
lambda$new$38(Byte)0%n/a111111
lambda$new$35(Float)0%n/a111111
lambda$new$34(Float)0%n/a111111
lambda$new$31(Double)0%n/a111111
lambda$new$30(Double)0%n/a111111
lambda$new$27(Long)0%n/a111111
lambda$new$26(Long)0%n/a111111
lambda$new$28(Double)880%1150%120101
DataConverter()679100%n/a01013601
lambda$new$11(List)16100%n/a010301
lambda$new$9(List)16100%n/a010301
lambda$new$7(List)16100%n/a010301
static {...}13100%n/a010301
lambda$new$10(float[], List, int)12100%n/a010101
lambda$new$8(byte[], List, int)12100%n/a010101
lambda$new$6(short[], List, int)12100%n/a010101
lambda$new$51(Object[])9100%n/a010301
lambda$new$49(String)9100%n/a010101
lambda$new$40(Boolean)8100%2100%020101
lambda$new$5(List)7100%n/a010101
lambda$new$4(Object)7100%n/a010101
lambda$new$3(List)7100%n/a010101
lambda$new$2(Object)7100%n/a010101
lambda$new$1(List)7100%n/a010101
lambda$new$0(Object)7100%n/a010101
lambda$new$29(Double)6100%n/a010101
lambda$new$19(Integer)100%n/a010101
get()100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.common.utility/DataConverter.java.html b/docs/coverage/test/html/neureka.common.utility/DataConverter.java.html index dd03b8181..acecf08e3 100644 --- a/docs/coverage/test/html/neureka.common.utility/DataConverter.java.html +++ b/docs/coverage/test/html/neureka.common.utility/DataConverter.java.html @@ -911,4 +911,4 @@ } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.common.utility/ListReader$Result.html b/docs/coverage/test/html/neureka.common.utility/ListReader$Result.html index c3eb8401d..1ad90b2d3 100644 --- a/docs/coverage/test/html/neureka.common.utility/ListReader$Result.html +++ b/docs/coverage/test/html/neureka.common.utility/ListReader$Result.html @@ -1 +1 @@ -ListReader.Result

ListReader.Result

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 38100%0 of 0n/a040904
ListReader.Result(Object, Function)29100%n/a010601
getType()3100%n/a010101
getShape()3100%n/a010101
getData()3100%n/a010101
\ No newline at end of file
+ListReader.Result

ListReader.Result

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 38100%0 of 0n/a040904
ListReader.Result(Object, Function)29100%n/a010601
getType()3100%n/a010101
getShape()3100%n/a010101
getData()3100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.common.utility/ListReader.html b/docs/coverage/test/html/neureka.common.utility/ListReader.html index 962da2d92..6602b80da 100644 --- a/docs/coverage/test/html/neureka.common.utility/ListReader.html +++ b/docs/coverage/test/html/neureka.common.utility/ListReader.html @@ -1 +1 @@ -ListReader

ListReader

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total22 of 31292%5 of 2882%529241015
ListReader(Object, int, List, List, Function)187981%3770%3622001
_isLeave(Object)41376%2466%240301
_findSize(List, int)71100%2100%020801
_findType(List)58100%2100%020901
lambda$new$0(int, List, List, Function, Object)11100%n/a010101
lambda$_findSize$8(int, Integer)8100%2100%020101
lambda$_findSize$7(int, Integer)8100%2100%020101
read(List, Function)7100%n/a010101
lambda$_findType$4(Class, Class)7100%2100%020101
lambda$_findType$3(Class, Class)7100%2100%020101
lambda$_findSize$6(List)5100%n/a010101
lambda$_findType$2(List)5100%n/a010101
lambda$_findSize$9(ListReader)4100%n/a010101
lambda$_findSize$5(ListReader)4100%n/a010101
lambda$_findType$1(ListReader)3100%n/a010101
\ No newline at end of file
+ListReader

ListReader

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total22 of 31292%5 of 2882%529241015
ListReader(Object, int, List, List, Function)187981%3770%3622001
_isLeave(Object)41376%2466%240301
_findSize(List, int)71100%2100%020801
_findType(List)58100%2100%020901
lambda$new$0(int, List, List, Function, Object)11100%n/a010101
lambda$_findSize$8(int, Integer)8100%2100%020101
lambda$_findSize$7(int, Integer)8100%2100%020101
read(List, Function)7100%n/a010101
lambda$_findType$4(Class, Class)7100%2100%020101
lambda$_findType$3(Class, Class)7100%2100%020101
lambda$_findSize$6(List)5100%n/a010101
lambda$_findType$2(List)5100%n/a010101
lambda$_findSize$9(ListReader)4100%n/a010101
lambda$_findSize$5(ListReader)4100%n/a010101
lambda$_findType$1(ListReader)3100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.common.utility/ListReader.java.html b/docs/coverage/test/html/neureka.common.utility/ListReader.java.html index 917eb2653..a59fa895b 100644 --- a/docs/coverage/test/html/neureka.common.utility/ListReader.java.html +++ b/docs/coverage/test/html/neureka.common.utility/ListReader.java.html @@ -127,4 +127,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.common.utility/LogUtil.html b/docs/coverage/test/html/neureka.common.utility/LogUtil.html index 68d08f027..769314645 100644 --- a/docs/coverage/test/html/neureka.common.utility/LogUtil.html +++ b/docs/coverage/test/html/neureka.common.utility/LogUtil.html @@ -1 +1 @@ -LogUtil

LogUtil

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total5 of 5290%1 of 475%251913
LogUtil()30%n/a111111
nullArgCheck(Object, String, Class, String[])24295%1375%130701
format(String, Object[])5100%n/a010101
\ No newline at end of file
+LogUtil

LogUtil

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total5 of 5290%1 of 475%251913
LogUtil()30%n/a111111
nullArgCheck(Object, String, Class, String[])24295%1375%130701
format(String, Object[])5100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.common.utility/LogUtil.java.html b/docs/coverage/test/html/neureka.common.utility/LogUtil.java.html index e4a0a8b25..b24eba43b 100644 --- a/docs/coverage/test/html/neureka.common.utility/LogUtil.java.html +++ b/docs/coverage/test/html/neureka.common.utility/LogUtil.java.html @@ -33,4 +33,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.common.utility/SettingsLoader$TypeChecker.html b/docs/coverage/test/html/neureka.common.utility/SettingsLoader$TypeChecker.html index 8786576f6..774cc7f2e 100644 --- a/docs/coverage/test/html/neureka.common.utility/SettingsLoader$TypeChecker.html +++ b/docs/coverage/test/html/neureka.common.utility/SettingsLoader$TypeChecker.html @@ -1 +1 @@ -SettingsLoader.TypeChecker

SettingsLoader.TypeChecker

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total76 of 16453%3 of 1478%39113202
checkAndAssign(String, Class, Consumer)768251%31178%38113101
SettingsLoader.TypeChecker(Properties)6100%n/a010101
\ No newline at end of file
+SettingsLoader.TypeChecker

SettingsLoader.TypeChecker

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total76 of 16453%3 of 1478%39113202
checkAndAssign(String, Class, Consumer)768251%31178%38113101
SettingsLoader.TypeChecker(Properties)6100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.common.utility/SettingsLoader.html b/docs/coverage/test/html/neureka.common.utility/SettingsLoader.html index 40d861547..d18d5089d 100644 --- a/docs/coverage/test/html/neureka.common.utility/SettingsLoader.html +++ b/docs/coverage/test/html/neureka.common.utility/SettingsLoader.html @@ -1 +1 @@ -SettingsLoader

SettingsLoader

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total17 of 39895%1 of 475%130752028
tryGroovyClosureOn(Object, Object)74285%n/a013701
loadProperties(Neureka)514396%n/a0123501
tryGroovyScriptsOn(Neureka, Consumer)52281%1375%132901
lambda$loadProperties$20(Neureka.Settings, Boolean)8100%n/a010101
lambda$loadProperties$16(Neureka.Settings, Boolean)8100%n/a010101
lambda$loadProperties$15(Neureka.Settings, Boolean)8100%n/a010101
lambda$loadProperties$14(Neureka.Settings, Boolean)8100%n/a010101
lambda$loadProperties$13(Neureka.Settings, Boolean)8100%n/a010101
lambda$loadProperties$12(Neureka.Settings, Boolean)8100%n/a010101
lambda$loadProperties$11(Neureka.Settings, Integer)8100%n/a010101
lambda$loadProperties$10(Neureka.Settings, Boolean)8100%n/a010101
lambda$loadProperties$9(Neureka.Settings, Boolean)8100%n/a010101
lambda$loadProperties$8(Neureka.Settings, Boolean)8100%n/a010101
lambda$loadProperties$7(Neureka.Settings, Boolean)8100%n/a010101
lambda$loadProperties$6(Neureka.Settings, Integer)8100%n/a010101
lambda$loadProperties$19(Neureka.Settings, String)7100%n/a010101
lambda$loadProperties$18(Neureka.Settings, String)7100%n/a010101
lambda$loadProperties$17(Neureka.Settings, String)7100%n/a010101
lambda$loadProperties$23(Neureka.Settings, Boolean)6100%n/a010101
lambda$loadProperties$21(Neureka.Settings, Boolean)6100%n/a010101
lambda$loadProperties$5(Neureka.Settings, Boolean)6100%n/a010101
lambda$loadProperties$4(Neureka.Settings, Boolean)6100%n/a010101
lambda$loadProperties$3(Neureka.Settings, Boolean)6100%n/a010101
lambda$loadProperties$2(Neureka.Settings, Boolean)6100%n/a010101
lambda$loadProperties$1(Neureka.Settings, Boolean)6100%n/a010101
lambda$loadProperties$0(Neureka.Settings, Boolean)6100%n/a010101
lambda$loadProperties$22(Neureka.Settings, Class)5100%n/a010101
static {...}4100%n/a010101
\ No newline at end of file
+SettingsLoader

SettingsLoader

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total17 of 40095%1 of 475%130753028
tryGroovyClosureOn(Object, Object)74285%n/a013701
loadProperties(Neureka)514596%n/a0123601
tryGroovyScriptsOn(Neureka, Consumer)52281%1375%132901
lambda$loadProperties$20(Neureka.Settings, Boolean)8100%n/a010101
lambda$loadProperties$16(Neureka.Settings, Boolean)8100%n/a010101
lambda$loadProperties$15(Neureka.Settings, Boolean)8100%n/a010101
lambda$loadProperties$14(Neureka.Settings, Boolean)8100%n/a010101
lambda$loadProperties$13(Neureka.Settings, Boolean)8100%n/a010101
lambda$loadProperties$12(Neureka.Settings, Boolean)8100%n/a010101
lambda$loadProperties$11(Neureka.Settings, Integer)8100%n/a010101
lambda$loadProperties$10(Neureka.Settings, Boolean)8100%n/a010101
lambda$loadProperties$9(Neureka.Settings, Boolean)8100%n/a010101
lambda$loadProperties$8(Neureka.Settings, Boolean)8100%n/a010101
lambda$loadProperties$7(Neureka.Settings, Boolean)8100%n/a010101
lambda$loadProperties$6(Neureka.Settings, Integer)8100%n/a010101
lambda$loadProperties$19(Neureka.Settings, String)7100%n/a010101
lambda$loadProperties$18(Neureka.Settings, String)7100%n/a010101
lambda$loadProperties$17(Neureka.Settings, String)7100%n/a010101
lambda$loadProperties$23(Neureka.Settings, Boolean)6100%n/a010101
lambda$loadProperties$21(Neureka.Settings, Boolean)6100%n/a010101
lambda$loadProperties$5(Neureka.Settings, Boolean)6100%n/a010101
lambda$loadProperties$4(Neureka.Settings, Boolean)6100%n/a010101
lambda$loadProperties$3(Neureka.Settings, Boolean)6100%n/a010101
lambda$loadProperties$2(Neureka.Settings, Boolean)6100%n/a010101
lambda$loadProperties$1(Neureka.Settings, Boolean)6100%n/a010101
lambda$loadProperties$0(Neureka.Settings, Boolean)6100%n/a010101
lambda$loadProperties$22(Neureka.Settings, Class)5100%n/a010101
static {...}4100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.common.utility/SettingsLoader.java.html b/docs/coverage/test/html/neureka.common.utility/SettingsLoader.java.html index b803816e0..bc8851195 100644 --- a/docs/coverage/test/html/neureka.common.utility/SettingsLoader.java.html +++ b/docs/coverage/test/html/neureka.common.utility/SettingsLoader.java.html @@ -61,7 +61,7 @@ private SettingsLoader() {/* This is a utility class! */} public static void loadProperties( Neureka instance ) { - try ( + try ( final InputStream stream = instance.getClass() .getClassLoader() .getResourceAsStream( "library_settings.properties" ) @@ -183,4 +183,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.common.utility/index.html b/docs/coverage/test/html/neureka.common.utility/index.html index 00f6287a0..5ebadd9d3 100644 --- a/docs/coverage/test/html/neureka.common.utility/index.html +++ b/docs/coverage/test/html/neureka.common.utility/index.html @@ -1 +1 @@ -neureka.common.utility

neureka.common.utility

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total673 of 4,21484%126 of 37266%1383815765441195010
DataConverter32099075%382438%558813194335701
DataConverter.Utility1701,26788%5715372%561621523255701
SettingsLoader.TypeChecker768853%31178%3911320201
Cache5918175%182457%133184121001
ListReader2229092%52382%52924101501
SettingsLoader1738195%375%13075202801
LogUtil4790%375%25191301
Cache.LazyEntry5392%3562%380120401
DataConverter.ForTensor206100%n/a01503201501
ListReader.Result38100%n/a04090401
\ No newline at end of file
+neureka.common.utility

neureka.common.utility

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total673 of 4,21684%126 of 37266%1383815765541195010
DataConverter32099075%382438%558813194335701
DataConverter.Utility1701,26788%5715372%561621523255701
SettingsLoader.TypeChecker768853%31178%3911320201
Cache5918175%182457%133184121001
ListReader2229092%52382%52924101501
SettingsLoader1738395%375%13075302801
LogUtil4790%375%25191301
Cache.LazyEntry5392%3562%380120401
DataConverter.ForTensor206100%n/a01503201501
ListReader.Result38100%n/a04090401
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.common.utility/index.source.html b/docs/coverage/test/html/neureka.common.utility/index.source.html index 777d9457c..d1bcccd1f 100644 --- a/docs/coverage/test/html/neureka.common.utility/index.source.html +++ b/docs/coverage/test/html/neureka.common.utility/index.source.html @@ -1 +1 @@ -neureka.common.utility

neureka.common.utility

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total673 of 4,21484%126 of 37266%1383815765441195010
DataConverter.java4902,46383%9517765%111265284583812903
SettingsLoader.java9346983%41477%439188403002
Cache.java6323478%212958%163985321402
ListReader.java32893%52382%53325001902
LogUtil.java4790%375%25191301
\ No newline at end of file
+neureka.common.utility

neureka.common.utility

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total673 of 4,21684%126 of 37266%1383815765541195010
DataConverter.java4902,46383%9517765%111265284583812903
SettingsLoader.java9347183%41477%439188503002
Cache.java6323478%212958%163985321402
ListReader.java32893%52382%53325001902
LogUtil.java4790%375%25191301
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices.file/AbstractFileHandle.html b/docs/coverage/test/html/neureka.devices.file/AbstractFileHandle.html index 4982b2995..a07167dc0 100644 --- a/docs/coverage/test/html/neureka.devices.file/AbstractFileHandle.html +++ b/docs/coverage/test/html/neureka.devices.file/AbstractFileHandle.html @@ -1 +1 @@ -AbstractFileHandle

AbstractFileHandle

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total227 of 37439%20 of 3237%16273569311
restore(Tensor)704338%13735%711122001
AbstractFileHandle(String, FileType)704237%3350%34102001
free()251740%1150%123701
_loadFile()251130%1150%123601
_loadFileInputStream()221337%n/a014901
isEmpty()70%20%221111
contains(Tensor)50%n/a111111
numberOfStored()30%n/a111111
getFileName()15100%n/a010201
getLocation()3100%n/a010101
extension()3100%n/a010101
\ No newline at end of file +AbstractFileHandle

AbstractFileHandle

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total227 of 37439%20 of 3237%16273569311
restore(Tensor)704338%13735%711122001
AbstractFileHandle(String, FileType)704237%3350%34102001
free()251740%1150%123701
_loadFile()251130%1150%123601
_loadFileInputStream()221337%n/a014901
isEmpty()70%20%221111
contains(Tensor)50%n/a111111
numberOfStored()30%n/a111111
getFileName()15100%n/a010201
getLocation()3100%n/a010101
extension()3100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices.file/AbstractFileHandle.java.html b/docs/coverage/test/html/neureka.devices.file/AbstractFileHandle.java.html index c5684f190..034775cca 100644 --- a/docs/coverage/test/html/neureka.devices.file/AbstractFileHandle.java.html +++ b/docs/coverage/test/html/neureka.devices.file/AbstractFileHandle.java.html @@ -140,4 +140,4 @@ @Override public final String extension() { return _extension; } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices.file/AbstractImageFileHandle.html b/docs/coverage/test/html/neureka.devices.file/AbstractImageFileHandle.html index 46e09e5ea..4690ef062 100644 --- a/docs/coverage/test/html/neureka.devices.file/AbstractImageFileHandle.html +++ b/docs/coverage/test/html/neureka.devices.file/AbstractImageFileHandle.html @@ -1 +1 @@ -AbstractImageFileHandle

AbstractImageFileHandle

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total206 of 46855%9 of 2259%10222772111
_loadHead()652729%2250%2381601
store(Tensor)643736%2250%2381501
AbstractImageFileHandle(Tensor, String, ImageFileType)405658%3562%3551601
_loadData()274864%2250%2351401
getDataSize()100%n/a111111
load()33100%n/a010501
getShape()20100%n/a010101
lambda$_loadData$0(short[], UI8, byte[], int, int)18100%2100%020101
getValueSize()10100%n/a010101
getTotalSize()10100%n/a010101
getDataType()3100%n/a010101
\ No newline at end of file +AbstractImageFileHandle

AbstractImageFileHandle

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total206 of 46855%9 of 2259%10222772111
_loadHead()652729%2250%2381601
store(Tensor)643736%2250%2381501
AbstractImageFileHandle(Tensor, String, ImageFileType)405658%3562%3551601
_loadData()274864%2250%2351401
getDataSize()100%n/a111111
load()33100%n/a010501
getShape()20100%n/a010101
lambda$_loadData$0(short[], UI8, byte[], int, int)18100%2100%020101
getValueSize()10100%n/a010101
getTotalSize()10100%n/a010101
getDataType()3100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices.file/AbstractImageFileHandle.java.html b/docs/coverage/test/html/neureka.devices.file/AbstractImageFileHandle.java.html index 554b70857..24302373d 100644 --- a/docs/coverage/test/html/neureka.devices.file/AbstractImageFileHandle.java.html +++ b/docs/coverage/test/html/neureka.devices.file/AbstractImageFileHandle.java.html @@ -163,4 +163,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices.file/CSVHandle$CSVType.html b/docs/coverage/test/html/neureka.devices.file/CSVHandle$CSVType.html index 2d12221e0..eafd2718a 100644 --- a/docs/coverage/test/html/neureka.devices.file/CSVHandle$CSVType.html +++ b/docs/coverage/test/html/neureka.devices.file/CSVHandle$CSVType.html @@ -1 +1 @@ -CSVHandle.CSVType

CSVHandle.CSVType

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 2100%0 of 0n/a010101
defaultExtension()2100%n/a010101
\ No newline at end of file +CSVHandle.CSVType

CSVHandle.CSVType

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 2100%0 of 0n/a010101
defaultExtension()2100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices.file/CSVHandle.html b/docs/coverage/test/html/neureka.devices.file/CSVHandle.html index 8c57d6b69..4c7e73c96 100644 --- a/docs/coverage/test/html/neureka.devices.file/CSVHandle.html +++ b/docs/coverage/test/html/neureka.devices.file/CSVHandle.html @@ -1 +1 @@ -CSVHandle

CSVHandle

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total94 of 78688%14 of 8483%226222146920
_lazyLoad()2424991%32790%21695701
CSVHandle(Tensor, String)1720092%52784%51723701
load()911092%1990%1601801
getDataSize()7750%1150%122301
getValueSize()61368%2250%230401
store(Tensor)50%n/a111111
_loadData()30%n/a111111
getDelimiter()30%n/a111111
isFirstRowIsLabels()30%n/a111111
getColLabels()30%n/a111111
isFirstColIsIndex()30%n/a111111
getRowLabels()30%n/a111111
getNumberOfRows()30%n/a111111
getNumberOfColumns()30%n/a111111
_parseTensorNameFromFileName()3093%2250%230401
CSVHandle(String, Map)57100%2100%0201301
getShape()16100%n/a010101
static {...}4100%n/a010201
getTotalSize()3100%n/a010101
getDataType()3100%n/a010101
\ No newline at end of file +CSVHandle

CSVHandle

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total94 of 78888%14 of 8483%226222147920
_lazyLoad()2425191%32790%21695801
CSVHandle(Tensor, String)1720092%52784%51723701
load()911092%1990%1601801
getDataSize()7750%1150%122301
getValueSize()61368%2250%230401
store(Tensor)50%n/a111111
_loadData()30%n/a111111
getDelimiter()30%n/a111111
isFirstRowIsLabels()30%n/a111111
getColLabels()30%n/a111111
isFirstColIsIndex()30%n/a111111
getRowLabels()30%n/a111111
getNumberOfRows()30%n/a111111
getNumberOfColumns()30%n/a111111
_parseTensorNameFromFileName()3093%2250%230401
CSVHandle(String, Map)57100%2100%0201301
getShape()16100%n/a010101
static {...}4100%n/a010201
getTotalSize()3100%n/a010101
getDataType()3100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices.file/CSVHandle.java.html b/docs/coverage/test/html/neureka.devices.file/CSVHandle.java.html index d342c60f3..8208620dc 100644 --- a/docs/coverage/test/html/neureka.devices.file/CSVHandle.java.html +++ b/docs/coverage/test/html/neureka.devices.file/CSVHandle.java.html @@ -113,7 +113,7 @@ } List<String[]> table = new ArrayList<>(); List<String> rowLabels = ( _firstColIsIndex ) ? new ArrayList<>() : null; - try ( + try ( BufferedReader br = new BufferedReader( new InputStreamReader( fis, StandardCharsets.UTF_8 ) ) ) { String line; @@ -279,4 +279,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices.file/FileDevice$1.html b/docs/coverage/test/html/neureka.devices.file/FileDevice$1.html index d6551be5a..ddf63a74a 100644 --- a/docs/coverage/test/html/neureka.devices.file/FileDevice$1.html +++ b/docs/coverage/test/html/neureka.devices.file/FileDevice$1.html @@ -1 +1 @@ -FileDevice.new AbstractDeviceData() {...}

FileDevice.new AbstractDeviceData() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 10100%0 of 0n/a010101
{...}10100%n/a010101
\ No newline at end of file +FileDevice.new AbstractDeviceData() {...}

FileDevice.new AbstractDeviceData() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 10100%0 of 0n/a010101
{...}10100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices.file/FileDevice.html b/docs/coverage/test/html/neureka.devices.file/FileDevice.html index 362942a4f..98532fef3 100644 --- a/docs/coverage/test/html/neureka.devices.file/FileDevice.html +++ b/docs/coverage/test/html/neureka.devices.file/FileDevice.html @@ -1 +1 @@ -FileDevice

FileDevice

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total206 of 75072%14 of 3458%214441141827
load(String, Map)657653%5550%5672301
update(Component.OwnerChangeRequest)350%20%228811
store(Tensor)168283%1150%1261501
access(Tensor)140%n/a112211
approve(ExecutionCall)140%n/a112211
optimizedOperationOf(Function, String)140%n/a112211
dispose()130%n/a115511
restore(Tensor)83480%1150%1231101
free(Tensor)83280%1150%1231101
allocate(DataType, NDConfiguration)50%n/a111111
allocateFromOne(DataType, NDConfiguration, Object)50%n/a111111
allocateFromAll(DataType, NDConfiguration, Object)50%n/a111111
_updateFolderView()47995%2880%2601501
store(Tensor, String, Map)82100%1375%1301601
toString()34100%n/a010401
lambda$_updateFolderView$0(String)25100%1150%120401
FileDevice(String)23100%n/a010701
at(String)17100%n/a010201
fileHandleOf(Tensor)12100%n/a010201
has(Tensor)11100%n/a010201
static {...}9100%n/a010201
getLoaded()7100%n/a010101
store(Tensor, String)6100%n/a010101
getLoadable()6100%n/a010101
load(String)5100%n/a010101
getDirectory()3100%n/a010101
lambda$store$1()100%n/a010101
\ No newline at end of file +FileDevice

FileDevice

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total202 of 75073%13 of 3461%204441141827
load(String, Map)657653%5550%5672301
update(Component.OwnerChangeRequest)350%20%228811
store(Tensor)168283%1150%1261501
access(Tensor)140%n/a112211
approve(ExecutionCall)140%n/a112211
optimizedOperationOf(Function, String)140%n/a112211
dispose()130%n/a115511
restore(Tensor)83480%1150%1231101
free(Tensor)83280%1150%1231101
allocate(DataType, NDConfiguration)50%n/a111111
allocateFromOne(DataType, NDConfiguration, Object)50%n/a111111
allocateFromAll(DataType, NDConfiguration, Object)50%n/a111111
_updateFolderView()83100%1990%1601501
store(Tensor, String, Map)82100%1375%1301601
toString()34100%n/a010401
lambda$_updateFolderView$0(String)25100%1150%120401
FileDevice(String)23100%n/a010701
at(String)17100%n/a010201
fileHandleOf(Tensor)12100%n/a010201
has(Tensor)11100%n/a010201
static {...}9100%n/a010201
getLoaded()7100%n/a010101
store(Tensor, String)6100%n/a010101
getLoadable()6100%n/a010101
load(String)5100%n/a010101
getDirectory()3100%n/a010101
lambda$store$1()100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices.file/FileDevice.java.html b/docs/coverage/test/html/neureka.devices.file/FileDevice.java.html index f2ab77359..61be2ab04 100644 --- a/docs/coverage/test/html/neureka.devices.file/FileDevice.java.html +++ b/docs/coverage/test/html/neureka.devices.file/FileDevice.java.html @@ -81,7 +81,7 @@ private void _updateFolderView() { _loadable.clear(); File dir = new File( _directory ); - if ( ! dir.exists() ) dir.mkdirs(); + if ( ! dir.exists() ) dir.mkdirs(); else { List<String> found = new ArrayList<>(); File[] files = dir.listFiles(); @@ -333,4 +333,4 @@ public List<String> getLoaded() { return new ArrayList<>(_loaded.keySet()); } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices.file/FileHandle.html b/docs/coverage/test/html/neureka.devices.file/FileHandle.html index 794855d28..cbf8327f9 100644 --- a/docs/coverage/test/html/neureka.devices.file/FileHandle.html +++ b/docs/coverage/test/html/neureka.devices.file/FileHandle.html @@ -1 +1 @@ -FileHandle

FileHandle

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 5100%0 of 0n/a010101
static {...}5100%n/a010101
\ No newline at end of file +FileHandle

FileHandle

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 5100%0 of 0n/a010101
static {...}5100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices.file/FileHandle.java.html b/docs/coverage/test/html/neureka.devices.file/FileHandle.java.html index e3a3df4ac..ec768d7ad 100644 --- a/docs/coverage/test/html/neureka.devices.file/FileHandle.java.html +++ b/docs/coverage/test/html/neureka.devices.file/FileHandle.java.html @@ -94,4 +94,4 @@ */ String extension(); } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices.file/HandleFactory.html b/docs/coverage/test/html/neureka.devices.file/HandleFactory.html index d958b13c4..da1704149 100644 --- a/docs/coverage/test/html/neureka.devices.file/HandleFactory.html +++ b/docs/coverage/test/html/neureka.devices.file/HandleFactory.html @@ -1 +1 @@ -HandleFactory

HandleFactory

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total5 of 12896%0 of 0n/a113016113
lambda$new$0(String, Map)50%n/a111111
HandleFactory()61100%n/a0101201
getLoader(String)6100%n/a010101
getSaver(String)6100%n/a010101
lambda$new$7(String, Tensor, Map)6100%n/a010101
lambda$new$6(String, Tensor, Map)6100%n/a010101
lambda$new$5(String, Tensor, Map)6100%n/a010101
lambda$new$4(String, Tensor, Map)6100%n/a010101
lambda$new$3(String, Map)6100%n/a010101
hasLoader(String)5100%n/a010101
hasSaver(String)5100%n/a010101
lambda$new$2(String, Map)5100%n/a010101
lambda$new$1(String, Map)5100%n/a010101
\ No newline at end of file +HandleFactory

HandleFactory

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total5 of 12896%0 of 0n/a113016113
lambda$new$0(String, Map)50%n/a111111
HandleFactory()61100%n/a0101201
getLoader(String)6100%n/a010101
getSaver(String)6100%n/a010101
lambda$new$7(String, Tensor, Map)6100%n/a010101
lambda$new$6(String, Tensor, Map)6100%n/a010101
lambda$new$5(String, Tensor, Map)6100%n/a010101
lambda$new$4(String, Tensor, Map)6100%n/a010101
lambda$new$3(String, Map)6100%n/a010101
hasLoader(String)5100%n/a010101
hasSaver(String)5100%n/a010101
lambda$new$2(String, Map)5100%n/a010101
lambda$new$1(String, Map)5100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices.file/HandleFactory.java.html b/docs/coverage/test/html/neureka.devices.file/HandleFactory.java.html index 5821d01a6..bb55d61cb 100644 --- a/docs/coverage/test/html/neureka.devices.file/HandleFactory.java.html +++ b/docs/coverage/test/html/neureka.devices.file/HandleFactory.java.html @@ -53,4 +53,4 @@ return _SAVERS.get(name); } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices.file/IDXHandle$IDXType.html b/docs/coverage/test/html/neureka.devices.file/IDXHandle$IDXType.html index 2a7640cff..b9e189e29 100644 --- a/docs/coverage/test/html/neureka.devices.file/IDXHandle$IDXType.html +++ b/docs/coverage/test/html/neureka.devices.file/IDXHandle$IDXType.html @@ -1 +1 @@ -IDXHandle.IDXType

IDXHandle.IDXType

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 2100%0 of 0n/a010101
defaultExtension()2100%n/a010101
\ No newline at end of file +IDXHandle.IDXType

IDXHandle.IDXType

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 2100%0 of 0n/a010101
defaultExtension()2100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices.file/IDXHandle.html b/docs/coverage/test/html/neureka.devices.file/IDXHandle.html index 2482f7fee..7d9a0e8f4 100644 --- a/docs/coverage/test/html/neureka.devices.file/IDXHandle.html +++ b/docs/coverage/test/html/neureka.devices.file/IDXHandle.html @@ -1 +1 @@ -IDXHandle

IDXHandle

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total64 of 46786%7 of 1861%82116107112
store(Tensor)4011273%2466%24103901
_loadData()85186%2250%2321101
IDXHandle(String)61164%n/a013701
_loadHead()48595%1375%1301701
getShape()30%n/a111111
load()22592%1150%120501
getDataSize()1794%1150%120401
static {...}65100%n/a0101401
IDXHandle(Tensor, String)25100%n/a010601
getTotalSize()6100%n/a010101
getDataType()3100%n/a010101
getValueSize()3100%n/a010101
\ No newline at end of file +IDXHandle

IDXHandle

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total64 of 46786%7 of 1861%82116107112
store(Tensor)4011273%2466%24103901
_loadData()85186%2250%2321101
IDXHandle(String)61164%n/a013701
_loadHead()48595%1375%1301701
getShape()30%n/a111111
load()22592%1150%120501
getDataSize()1794%1150%120401
static {...}65100%n/a0101401
IDXHandle(Tensor, String)25100%n/a010601
getTotalSize()6100%n/a010101
getDataType()3100%n/a010101
getValueSize()3100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices.file/IDXHandle.java.html b/docs/coverage/test/html/neureka.devices.file/IDXHandle.java.html index 9bce47558..dc0462cf8 100644 --- a/docs/coverage/test/html/neureka.devices.file/IDXHandle.java.html +++ b/docs/coverage/test/html/neureka.devices.file/IDXHandle.java.html @@ -220,4 +220,4 @@ @Override public String defaultExtension() { return "idx"; } } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices.file/ImageFileType.html b/docs/coverage/test/html/neureka.devices.file/ImageFileType.html index 321451687..2d25b4921 100644 --- a/docs/coverage/test/html/neureka.devices.file/ImageFileType.html +++ b/docs/coverage/test/html/neureka.devices.file/ImageFileType.html @@ -1 +1 @@ -ImageFileType

ImageFileType

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 16100%0 of 0n/a030303
numericTypeRepresentation()8100%n/a010101
numberOfChannels()4100%n/a010101
targetedValueType()4100%n/a010101
\ No newline at end of file +ImageFileType

ImageFileType

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 16100%0 of 0n/a030303
numericTypeRepresentation()8100%n/a010101
numberOfChannels()4100%n/a010101
targetedValueType()4100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices.file/ImageFileType.java.html b/docs/coverage/test/html/neureka.devices.file/ImageFileType.java.html index 2735b3095..463a2bf1b 100644 --- a/docs/coverage/test/html/neureka.devices.file/ImageFileType.java.html +++ b/docs/coverage/test/html/neureka.devices.file/ImageFileType.java.html @@ -21,4 +21,4 @@ String imageTypeName(); } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices.file/JPEGHandle$1.html b/docs/coverage/test/html/neureka.devices.file/JPEGHandle$1.html index 9354207cd..17341ed86 100644 --- a/docs/coverage/test/html/neureka.devices.file/JPEGHandle$1.html +++ b/docs/coverage/test/html/neureka.devices.file/JPEGHandle$1.html @@ -1 +1 @@ -JPEGHandle.new ImageFileType() {...}

JPEGHandle.new ImageFileType() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total2 of 977%0 of 0n/a141414
imageTypeName()20%n/a111111
{...}3100%n/a010101
imageType()2100%n/a010101
defaultExtension()2100%n/a010101
\ No newline at end of file +JPEGHandle.new ImageFileType() {...}

JPEGHandle.new ImageFileType() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total2 of 977%0 of 0n/a141414
imageTypeName()20%n/a111111
{...}3100%n/a010101
imageType()2100%n/a010101
defaultExtension()2100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices.file/JPEGHandle.html b/docs/coverage/test/html/neureka.devices.file/JPEGHandle.html index bd88ae645..0a0c90afa 100644 --- a/docs/coverage/test/html/neureka.devices.file/JPEGHandle.html +++ b/docs/coverage/test/html/neureka.devices.file/JPEGHandle.html @@ -1 +1 @@ -JPEGHandle

JPEGHandle

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 17100%0 of 0n/a030503
JPEGHandle(Tensor, String)8100%n/a010201
JPEGHandle(String)5100%n/a010101
static {...}4100%n/a010201
\ No newline at end of file +JPEGHandle

JPEGHandle

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 17100%0 of 0n/a030503
JPEGHandle(Tensor, String)8100%n/a010201
JPEGHandle(String)5100%n/a010101
static {...}4100%n/a010201
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices.file/JPEGHandle.java.html b/docs/coverage/test/html/neureka.devices.file/JPEGHandle.java.html index bebad11e1..85b43f7c6 100644 --- a/docs/coverage/test/html/neureka.devices.file/JPEGHandle.java.html +++ b/docs/coverage/test/html/neureka.devices.file/JPEGHandle.java.html @@ -32,4 +32,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices.file/NumberReader.html b/docs/coverage/test/html/neureka.devices.file/NumberReader.html index 743691417..6637a959c 100644 --- a/docs/coverage/test/html/neureka.devices.file/NumberReader.html +++ b/docs/coverage/test/html/neureka.devices.file/NumberReader.html @@ -1 +1 @@ -NumberReader

NumberReader

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total7 of 8892%1 of 250%2611515
read(NumericType)43188%1150%120301
getStream()30%n/a111111
NumberReader(FileInputStream)46100%n/a010901
bytesRead()3100%n/a010101
static {...}1100%n/a010101
\ No newline at end of file +NumberReader

NumberReader

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total7 of 8892%1 of 250%2611515
read(NumericType)43188%1150%120301
getStream()30%n/a111111
NumberReader(FileInputStream)46100%n/a010901
bytesRead()3100%n/a010101
static {...}1100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices.file/NumberReader.java.html b/docs/coverage/test/html/neureka.devices.file/NumberReader.java.html index e59d51142..cec2549e2 100644 --- a/docs/coverage/test/html/neureka.devices.file/NumberReader.java.html +++ b/docs/coverage/test/html/neureka.devices.file/NumberReader.java.html @@ -40,4 +40,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices.file/PNGHandle$1.html b/docs/coverage/test/html/neureka.devices.file/PNGHandle$1.html index 06bb66339..fb50aae3d 100644 --- a/docs/coverage/test/html/neureka.devices.file/PNGHandle$1.html +++ b/docs/coverage/test/html/neureka.devices.file/PNGHandle$1.html @@ -1 +1 @@ -PNGHandle.new ImageFileType() {...}

PNGHandle.new ImageFileType() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total2 of 977%0 of 0n/a141414
imageTypeName()20%n/a111111
{...}3100%n/a010101
imageType()2100%n/a010101
defaultExtension()2100%n/a010101
\ No newline at end of file +PNGHandle.new ImageFileType() {...}

PNGHandle.new ImageFileType() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total2 of 977%0 of 0n/a141414
imageTypeName()20%n/a111111
{...}3100%n/a010101
imageType()2100%n/a010101
defaultExtension()2100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices.file/PNGHandle.html b/docs/coverage/test/html/neureka.devices.file/PNGHandle.html index 8e98dde52..a1e684af0 100644 --- a/docs/coverage/test/html/neureka.devices.file/PNGHandle.html +++ b/docs/coverage/test/html/neureka.devices.file/PNGHandle.html @@ -1 +1 @@ -PNGHandle

PNGHandle

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 17100%0 of 0n/a030503
PNGHandle(Tensor, String)8100%n/a010201
PNGHandle(String)5100%n/a010101
static {...}4100%n/a010201
\ No newline at end of file +PNGHandle

PNGHandle

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 17100%0 of 0n/a030503
PNGHandle(Tensor, String)8100%n/a010201
PNGHandle(String)5100%n/a010101
static {...}4100%n/a010201
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices.file/PNGHandle.java.html b/docs/coverage/test/html/neureka.devices.file/PNGHandle.java.html index 5a5405298..574dd88bd 100644 --- a/docs/coverage/test/html/neureka.devices.file/PNGHandle.java.html +++ b/docs/coverage/test/html/neureka.devices.file/PNGHandle.java.html @@ -23,4 +23,4 @@ ); } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices.file/index.html b/docs/coverage/test/html/neureka.devices.file/index.html index e85a210c8..aad84d319 100644 --- a/docs/coverage/test/html/neureka.devices.file/index.html +++ b/docs/coverage/test/html/neureka.devices.file/index.html @@ -1 +1 @@ -neureka.devices.file

neureka.devices.file

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total813 of 3,14874%65 of 19266%8221614459026120016
AbstractFileHandle22714739%201237%1627356931101
FileDevice20654472%142058%21444114182701
AbstractImageFileHandle20626255%91359%1022277211101
CSVHandle9469288%147083%22622214692001
IDXHandle6440386%71161%8211610711201
NumberReader78192%1150%261151501
HandleFactory12396%n/a11301611301
PNGHandle.new ImageFileType() {...}777%n/a14141401
JPEGHandle.new ImageFileType() {...}777%n/a14141401
JPEGHandle17100%n/a03050301
PNGHandle17100%n/a03050301
ImageFileType16100%n/a03030301
FileDevice.new AbstractDeviceData() {...}10100%n/a01010101
FileHandle100%n/a01010101
CSVHandle.CSVType100%n/a01010101
IDXHandle.IDXType100%n/a01010101
\ No newline at end of file +neureka.devices.file

neureka.devices.file

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total809 of 3,15074%64 of 19266%8121614459126120016
AbstractFileHandle22714739%201237%1627356931101
AbstractImageFileHandle20626255%91359%1022277211101
FileDevice20254873%132161%20444114182701
CSVHandle9469488%147083%22622214792001
IDXHandle6440386%71161%8211610711201
NumberReader78192%1150%261151501
HandleFactory12396%n/a11301611301
PNGHandle.new ImageFileType() {...}777%n/a14141401
JPEGHandle.new ImageFileType() {...}777%n/a14141401
JPEGHandle17100%n/a03050301
PNGHandle17100%n/a03050301
ImageFileType16100%n/a03030301
FileDevice.new AbstractDeviceData() {...}10100%n/a01010101
FileHandle100%n/a01010101
CSVHandle.CSVType100%n/a01010101
IDXHandle.IDXType100%n/a01010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices.file/index.source.html b/docs/coverage/test/html/neureka.devices.file/index.source.html index 9b4a61971..1a6bebd15 100644 --- a/docs/coverage/test/html/neureka.devices.file/index.source.html +++ b/docs/coverage/test/html/neureka.devices.file/index.source.html @@ -1 +1 @@ -neureka.devices.file

neureka.devices.file

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total813 of 3,14874%65 of 19266%8221614459026120016
AbstractFileHandle.java22714739%201237%1627356931101
FileDevice.java20655472%142058%21454114182802
AbstractImageFileHandle.java20626255%91359%1022277211101
CSVHandle.java9469488%147083%22632214792102
IDXHandle.java6440586%71161%8221610811302
NumberReader.java78192%1150%261151501
HandleFactory.java12396%n/a11301611301
PNGHandle.java2492%n/a17191702
JPEGHandle.java2492%n/a17191702
ImageFileType.java16100%n/a03030301
FileHandle.java100%n/a01010101
\ No newline at end of file +neureka.devices.file

neureka.devices.file

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total809 of 3,15074%64 of 19266%8121614459126120016
AbstractFileHandle.java22714739%201237%1627356931101
AbstractImageFileHandle.java20626255%91359%1022277211101
FileDevice.java20255873%132161%20454114182802
CSVHandle.java9469688%147083%22632214892102
IDXHandle.java6440586%71161%8221610811302
NumberReader.java78192%1150%261151501
HandleFactory.java12396%n/a11301611301
PNGHandle.java2492%n/a17191702
JPEGHandle.java2492%n/a17191702
ImageFileType.java16100%n/a03030301
FileHandle.java100%n/a01010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices.host.concurrent/Parallelism.html b/docs/coverage/test/html/neureka.devices.host.concurrent/Parallelism.html index 0b6e58b11..23604921b 100644 --- a/docs/coverage/test/html/neureka.devices.host.concurrent/Parallelism.html +++ b/docs/coverage/test/html/neureka.devices.host.concurrent/Parallelism.html @@ -1 +1 @@ -Parallelism

Parallelism

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total14 of 8182%0 of 0n/a610010610
lambda$static$2()30%n/a111111
lambda$static$1()30%n/a111111
lambda$static$6()20%n/a111111
lambda$static$5()20%n/a111111
lambda$static$4()20%n/a111111
lambda$static$3()20%n/a111111
static {...}52100%n/a010801
Parallelism(String, int, IntSupplier)8100%n/a010101
getAsInt()4100%n/a010101
lambda$static$0()3100%n/a010101
\ No newline at end of file +Parallelism

Parallelism

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total14 of 11087%0 of 0n/a610010610
lambda$static$2()30%n/a111111
lambda$static$1()30%n/a111111
lambda$static$6()20%n/a111111
lambda$static$5()20%n/a111111
lambda$static$4()20%n/a111111
lambda$static$3()20%n/a111111
static {...}81100%n/a010801
Parallelism(String, int, IntSupplier)8100%n/a010101
getAsInt()4100%n/a010101
lambda$static$0()3100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices.host.concurrent/Parallelism.java.html b/docs/coverage/test/html/neureka.devices.host.concurrent/Parallelism.java.html index b61cd46ee..14e69c333 100644 --- a/docs/coverage/test/html/neureka.devices.host.concurrent/Parallelism.java.html +++ b/docs/coverage/test/html/neureka.devices.host.concurrent/Parallelism.java.html @@ -72,4 +72,4 @@ public int getAsInt() { return _supplier.getAsInt(); } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices.host.concurrent/WorkScheduler$Divider.html b/docs/coverage/test/html/neureka.devices.host.concurrent/WorkScheduler$Divider.html index f7a74c190..9d0ef5b03 100644 --- a/docs/coverage/test/html/neureka.devices.host.concurrent/WorkScheduler$Divider.html +++ b/docs/coverage/test/html/neureka.devices.host.concurrent/WorkScheduler$Divider.html @@ -1 +1 @@ -WorkScheduler.Divider

WorkScheduler.Divider

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total18 of 16989%1 of 1090%215638110
_divide(ExecutorService, int, int, int, int, CPU.RangeWorkload)65389%4100%0321401
submit(int, CPU.IndexedWorkload)64287%4100%0321001
divide(int, CPU.RangeWorkload)60%n/a112211
WorkScheduler.Divider(ExecutorService)12100%n/a010501
divide(int, int, CPU.RangeWorkload)12100%n/a010301
lambda$_divide$2(ExecutorService, int, int, int, int, CPU.RangeWorkload)8100%n/a010101
lambda$_divide$1(ExecutorService, int, int, int, int, CPU.RangeWorkload)8100%n/a010101
parallelism(IntSupplier)7100%1150%120201
threshold(int)5100%n/a010201
lambda$submit$0(CPU.IndexedWorkload, int)4100%n/a010101
\ No newline at end of file +WorkScheduler.Divider

WorkScheduler.Divider

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total18 of 16989%1 of 1090%215638110
_divide(ExecutorService, int, int, int, int, CPU.RangeWorkload)65389%4100%0321401
submit(int, CPU.IndexedWorkload)64287%4100%0321001
divide(int, CPU.RangeWorkload)60%n/a112211
WorkScheduler.Divider(ExecutorService)12100%n/a010501
divide(int, int, CPU.RangeWorkload)12100%n/a010301
lambda$_divide$2(ExecutorService, int, int, int, int, CPU.RangeWorkload)8100%n/a010101
lambda$_divide$1(ExecutorService, int, int, int, int, CPU.RangeWorkload)8100%n/a010101
parallelism(IntSupplier)7100%1150%120201
threshold(int)5100%n/a010201
lambda$submit$0(CPU.IndexedWorkload, int)4100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices.host.concurrent/WorkScheduler.html b/docs/coverage/test/html/neureka.devices.host.concurrent/WorkScheduler.html index 8caad1d6e..cbab361f0 100644 --- a/docs/coverage/test/html/neureka.devices.host.concurrent/WorkScheduler.html +++ b/docs/coverage/test/html/neureka.devices.host.concurrent/WorkScheduler.html @@ -1 +1 @@ -WorkScheduler

WorkScheduler

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total15 of 150%0 of 0n/a224422
invoke(ExecutorService, int, int, int)120%n/a113311
WorkScheduler()30%n/a111111
\ No newline at end of file +WorkScheduler

WorkScheduler

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total15 of 150%0 of 0n/a224422
invoke(ExecutorService, int, int, int)120%n/a113311
WorkScheduler()30%n/a111111
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices.host.concurrent/WorkScheduler.java.html b/docs/coverage/test/html/neureka.devices.host.concurrent/WorkScheduler.java.html index 45be193b6..bca2f6a54 100644 --- a/docs/coverage/test/html/neureka.devices.host.concurrent/WorkScheduler.java.html +++ b/docs/coverage/test/html/neureka.devices.host.concurrent/WorkScheduler.java.html @@ -161,4 +161,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices.host.concurrent/index.html b/docs/coverage/test/html/neureka.devices.host.concurrent/index.html index dfa2995d4..91aa53fd0 100644 --- a/docs/coverage/test/html/neureka.devices.host.concurrent/index.html +++ b/docs/coverage/test/html/neureka.devices.host.concurrent/index.html @@ -1 +1 @@ -neureka.devices.host.concurrent

neureka.devices.host.concurrent

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total47 of 26582%1 of 1090%1027105292213
WorkScheduler.Divider1815189%1990%21563811001
WorkScheduler150%n/a22442211
Parallelism146782%n/a61001061001
\ No newline at end of file +neureka.devices.host.concurrent

neureka.devices.host.concurrent

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total47 of 29484%1 of 1090%1027105292213
WorkScheduler.Divider1815189%1990%21563811001
WorkScheduler150%n/a22442211
Parallelism149687%n/a61001061001
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices.host.concurrent/index.source.html b/docs/coverage/test/html/neureka.devices.host.concurrent/index.source.html index 4df56fc49..1fbf7d003 100644 --- a/docs/coverage/test/html/neureka.devices.host.concurrent/index.source.html +++ b/docs/coverage/test/html/neureka.devices.host.concurrent/index.source.html @@ -1 +1 @@ -neureka.devices.host.concurrent

neureka.devices.host.concurrent

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total47 of 26582%1 of 1090%1027105292213
WorkScheduler.java3315182%1990%417104231212
Parallelism.java146782%n/a61001061001
\ No newline at end of file +neureka.devices.host.concurrent

neureka.devices.host.concurrent

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total47 of 29484%1 of 1090%1027105292213
WorkScheduler.java3315182%1990%417104231212
Parallelism.java149687%n/a61001061001
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices.host.machine/BasicMachine.html b/docs/coverage/test/html/neureka.devices.host.machine/BasicMachine.html index 0efe2a30e..3d6e0b44b 100644 --- a/docs/coverage/test/html/neureka.devices.host.machine/BasicMachine.html +++ b/docs/coverage/test/html/neureka.devices.host.machine/BasicMachine.html @@ -1 +1 @@ -BasicMachine

BasicMachine

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total167 of 1765%24 of 240%1718232745
toString()1020%140%99101011
equals(Object)340%100%667711
hashCode()260%n/a115511
BasicMachine()50%n/a111111
BasicMachine(long, int)9100%n/a010401
\ No newline at end of file +BasicMachine

BasicMachine

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total167 of 1765%24 of 240%1718232745
toString()1020%140%99101011
equals(Object)340%100%667711
hashCode()260%n/a115511
BasicMachine()50%n/a111111
BasicMachine(long, int)9100%n/a010401
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices.host.machine/BasicMachine.java.html b/docs/coverage/test/html/neureka.devices.host.machine/BasicMachine.java.html index 35de9a519..72bf7fe99 100644 --- a/docs/coverage/test/html/neureka.devices.host.machine/BasicMachine.java.html +++ b/docs/coverage/test/html/neureka.devices.host.machine/BasicMachine.java.html @@ -72,4 +72,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices.host.machine/CommonMachine.html b/docs/coverage/test/html/neureka.devices.host.machine/CommonMachine.html index 0f8334b3c..bcac7cef0 100644 --- a/docs/coverage/test/html/neureka.devices.host.machine/CommonMachine.html +++ b/docs/coverage/test/html/neureka.devices.host.machine/CommonMachine.html @@ -1 +1 @@ -CommonMachine

CommonMachine

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total106 of 17639%20 of 200%1214193124
equals(Object)590%180%1010121211
hashCode()470%20%227711
CommonMachine(String, BasicMachine[])41100%n/a010601
CommonMachine(Hardware, Runtime)29100%n/a010601
\ No newline at end of file +CommonMachine

CommonMachine

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total106 of 17639%20 of 200%1214193124
equals(Object)590%180%1010121211
hashCode()470%20%227711
CommonMachine(String, BasicMachine[])41100%n/a010601
CommonMachine(Hardware, Runtime)29100%n/a010601
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices.host.machine/CommonMachine.java.html b/docs/coverage/test/html/neureka.devices.host.machine/CommonMachine.java.html index 2ed0e95e6..50cdd45c6 100644 --- a/docs/coverage/test/html/neureka.devices.host.machine/CommonMachine.java.html +++ b/docs/coverage/test/html/neureka.devices.host.machine/CommonMachine.java.html @@ -111,4 +111,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices.host.machine/ConcreteMachine.html b/docs/coverage/test/html/neureka.devices.host.machine/ConcreteMachine.html index 8134ea905..afe23da85 100644 --- a/docs/coverage/test/html/neureka.devices.host.machine/ConcreteMachine.html +++ b/docs/coverage/test/html/neureka.devices.host.machine/ConcreteMachine.html @@ -1 +1 @@ -ConcreteMachine

ConcreteMachine

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total81 of 17152%18 of 2630%1421153838
equals(Object)350%100%667711
hashCode()200%20%224411
toString()140%n/a111111
static {...}85988%4660%3611401
getArchitecture()41477%2250%232601
ConcreteMachine(Hardware, Runtime)11100%n/a010401
getMemory()3100%n/a010101
getThreads()3100%n/a010101
\ No newline at end of file +ConcreteMachine

ConcreteMachine

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total98 of 17142%16 of 2638%1321173838
equals(Object)350%100%667711
static {...}254262%2880%2631401
hashCode()200%20%224411
toString()140%n/a111111
getArchitecture()41477%2250%232601
ConcreteMachine(Hardware, Runtime)11100%n/a010401
getMemory()3100%n/a010101
getThreads()3100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices.host.machine/ConcreteMachine.java.html b/docs/coverage/test/html/neureka.devices.host.machine/ConcreteMachine.java.html index 3e32ca7c9..642c30ffa 100644 --- a/docs/coverage/test/html/neureka.devices.host.machine/ConcreteMachine.java.html +++ b/docs/coverage/test/html/neureka.devices.host.machine/ConcreteMachine.java.html @@ -38,17 +38,17 @@ int threads = ConcreteMachine.getThreads(); for (Hardware hw : Hardware.PREDEFINED) { - if (hw.architecture.equals(architecture) && (hw.threads == threads) && (hw.memory >= memory)) - ENVIRONMENT = hw.virtualize(); + if (hw.architecture.equals(architecture) && (hw.threads == threads) && (hw.memory >= memory)) + ENVIRONMENT = hw.virtualize(); } if (ENVIRONMENT == null) { - _LOG.debug( + _LOG.debug( "No matching hardware profile found for this system. " + "Instantiating a default hardware profile with the following main properties: " + - "Architecture={} Threads={} Memory={}", architecture, threads, memory + "Architecture={} Threads={} Memory={}", architecture, threads, memory ); - ENVIRONMENT = Hardware.makeSimple(architecture, memory, threads).virtualize(); + ENVIRONMENT = Hardware.makeSimple(architecture, memory, threads).virtualize(); } } @@ -117,4 +117,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices.host.machine/Hardware.html b/docs/coverage/test/html/neureka.devices.host.machine/Hardware.html index fd36247a6..cfe036d92 100644 --- a/docs/coverage/test/html/neureka.devices.host.machine/Hardware.html +++ b/docs/coverage/test/html/neureka.devices.host.machine/Hardware.html @@ -1 +1 @@ -Hardware

Hardware

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total243 of 89972%21 of 3234%18273786611
makeSimple(String, long, int)895739%3125%23101601
toString()770%40%338811
equals(Object)290%80%55101011
hashCode()150%n/a114411
isL2Specified()90%20%221111
isL3Specified()90%20%221111
compareTo(Hardware)65590%1990%1611101
makeSimple()50%n/a111111
Hardware(String, BasicMachine[])1477%1150%121501
static {...}524100%n/a0102801
virtualize()6100%n/a010101
\ No newline at end of file +Hardware

Hardware

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total300 of 89966%22 of 3231%19274386711
makeSimple(String, long, int)1460%40%33161611
toString()770%40%338811
equals(Object)290%80%55101011
hashCode()150%n/a114411
isL2Specified()90%20%221111
isL3Specified()90%20%221111
compareTo(Hardware)65590%1990%1611101
makeSimple()50%n/a111111
Hardware(String, BasicMachine[])1477%1150%121501
static {...}524100%n/a0102801
virtualize()6100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices.host.machine/Hardware.java.html b/docs/coverage/test/html/neureka.devices.host.machine/Hardware.java.html index a40eaa8df..0381c4c7b 100644 --- a/docs/coverage/test/html/neureka.devices.host.machine/Hardware.java.html +++ b/docs/coverage/test/html/neureka.devices.host.machine/Hardware.java.html @@ -513,14 +513,14 @@ public static Hardware makeSimple(final String systemArchitecture, final long systemMemory, final int systemThreads) { - if (systemThreads > 8) { + if (systemThreads > 8) { // Assume hyperthreading, L3 cache and more than 1 CPU - final BasicMachine tmpL1Machine = new BasicMachine(32L * K, 2); //Hyperthreading - final BasicMachine tmpL2Machine = new BasicMachine(256L * K, tmpL1Machine.threads); - final BasicMachine tmpL3Machine = new BasicMachine(4L * K * K, systemThreads / ((systemThreads + 7) / 8)); //More than 1 CPU - final BasicMachine tmpSystemMachine = new BasicMachine(systemMemory, systemThreads); - return new Hardware(systemArchitecture, new BasicMachine[] { tmpSystemMachine, tmpL3Machine, tmpL2Machine, tmpL1Machine }); + final BasicMachine tmpL1Machine = new BasicMachine(32L * K, 2); //Hyperthreading + final BasicMachine tmpL2Machine = new BasicMachine(256L * K, tmpL1Machine.threads); + final BasicMachine tmpL3Machine = new BasicMachine(4L * K * K, systemThreads / ((systemThreads + 7) / 8)); //More than 1 CPU + final BasicMachine tmpSystemMachine = new BasicMachine(systemMemory, systemThreads); + return new Hardware(systemArchitecture, new BasicMachine[] { tmpSystemMachine, tmpL3Machine, tmpL2Machine, tmpL1Machine }); } else if (systemThreads >= 4) { // Assume hyperthreading, L3 cache but only 1 CPU @@ -636,4 +636,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices.host.machine/index.html b/docs/coverage/test/html/neureka.devices.host.machine/index.html index 2562892c6..68e7741b8 100644 --- a/docs/coverage/test/html/neureka.devices.host.machine/index.html +++ b/docs/coverage/test/html/neureka.devices.host.machine/index.html @@ -1 +1 @@ -neureka.devices.host.machine

neureka.devices.host.machine

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total597 of 1,42258%83 of 10218%618094182152804
Hardware24365672%211134%1827378661101
BasicMachine16795%240%171823274501
CommonMachine1067039%200%121419312401
ConcreteMachine819052%18830%142115383801
\ No newline at end of file +neureka.devices.host.machine

neureka.devices.host.machine

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total671 of 1,42252%82 of 10219%6180102182162804
Hardware30059966%221031%1927438671101
BasicMachine16795%240%171823274501
CommonMachine1067039%200%121419312401
ConcreteMachine987342%161038%132117383801
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices.host.machine/index.source.html b/docs/coverage/test/html/neureka.devices.host.machine/index.source.html index d5c1ab878..73d15cb41 100644 --- a/docs/coverage/test/html/neureka.devices.host.machine/index.source.html +++ b/docs/coverage/test/html/neureka.devices.host.machine/index.source.html @@ -1 +1 @@ -neureka.devices.host.machine

neureka.devices.host.machine

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total597 of 1,42258%83 of 10218%618094182152804
Hardware.java24365672%211134%1827378661101
BasicMachine.java16795%240%171823274501
CommonMachine.java1067039%200%121419312401
ConcreteMachine.java819052%18830%142115383801
\ No newline at end of file +neureka.devices.host.machine

neureka.devices.host.machine

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total671 of 1,42252%82 of 10219%6180102182162804
Hardware.java30059966%221031%1927438671101
BasicMachine.java16795%240%171823274501
CommonMachine.java1067039%200%121419312401
ConcreteMachine.java987342%161038%132117383801
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices.host/CPU$JVMExecutor.html b/docs/coverage/test/html/neureka.devices.host/CPU$JVMExecutor.html index 7dc86eac4..715deda3b 100644 --- a/docs/coverage/test/html/neureka.devices.host/CPU$JVMExecutor.html +++ b/docs/coverage/test/html/neureka.devices.host/CPU$JVMExecutor.html @@ -1 +1 @@ -CPU.JVMExecutor

CPU.JVMExecutor

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total10 of 16493%2 of 875%217035013
_newThreadFactory(ThreadGroup, String)81157%1150%120201
threaded(int, CPU.RangeWorkload)23294%1583%140701
lambda$_newThreadFactory$0(ThreadGroup, String, Runnable)20100%n/a010401
CPU.JVMExecutor()18100%n/a010301
threaded(int, CPU.IndexedWorkload)16100%n/a010501
threaded(int, int, CPU.RangeWorkload)16100%n/a010501
sequential(int, CPU.RangeWorkload)11100%n/a010301
static {...}10100%n/a010201
_newThreadFactory(String)4100%n/a010101
getActiveThreadCount()4100%n/a010101
getCorePoolSize()4100%n/a010101
getCompletedTaskCount()4100%n/a010101
lambda$threaded$1(CPU.IndexedWorkload, int)4100%n/a010101
\ No newline at end of file +CPU.JVMExecutor

CPU.JVMExecutor

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total10 of 16493%2 of 875%217035013
_newThreadFactory(ThreadGroup, String)81157%1150%120201
threaded(int, CPU.RangeWorkload)23294%1583%140701
lambda$_newThreadFactory$0(ThreadGroup, String, Runnable)20100%n/a010401
CPU.JVMExecutor()18100%n/a010301
threaded(int, CPU.IndexedWorkload)16100%n/a010501
threaded(int, int, CPU.RangeWorkload)16100%n/a010501
sequential(int, CPU.RangeWorkload)11100%n/a010301
static {...}10100%n/a010201
_newThreadFactory(String)4100%n/a010101
getActiveThreadCount()4100%n/a010101
getCorePoolSize()4100%n/a010101
getCompletedTaskCount()4100%n/a010101
lambda$threaded$1(CPU.IndexedWorkload, int)4100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices.host/CPU.html b/docs/coverage/test/html/neureka.devices.host/CPU.html index 5fc5fb5c2..e2b8e9e49 100644 --- a/docs/coverage/test/html/neureka.devices.host/CPU.html +++ b/docs/coverage/test/html/neureka.devices.host/CPU.html @@ -1 +1 @@ -CPU

CPU

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total439 of 2,59983%60 of 39684%5423647413338
allocateFromAll(DataType, NDConfiguration, Object)16919453%203664%1429135701
_writeArray(Tensor, Object, int, int, int)7817168%61875%513114501
_readArray(Tensor, Class, int, int)3517583%31583%21043801
_virtualize(Tensor)3114982%92170%81611801
dispose()210%n/a115511
allocate(Class, int, Object)202757%2250%233901
allocate(Class, Object)201644%1150%123701
_allocate(Object)189483%21890%21121401
_actualize(Tensor)1423894%74386%72604101
_dataTypeOf(Object)136082%11794%11011101
_sizeOccupiedBy(Tensor)85787%21487%2911001
store(Tensor)7646%1150%121301
optimizedOperationOf(Function, String)40%n/a111111
_swap(Tensor, Tensor)0%n/a111111
_writeItem(Tensor, Object, int, int)244100%13597%11903801
_compactAndSizeObjectArray(DataType, Object[], int)220100%32100%01703501
allocateFromOne(DataType, NDConfiguration, Object)105100%18100%01001401
allocate(DataType, NDConfiguration)104100%42284%41402101
_readItem(Tensor, int)82100%16100%0901001
_readAll(Tensor, boolean)72100%11995%11101201
_autoConvertAndOptimizeObjectArray(Object[], DataType, int)29100%4100%030401
static {...}17100%n/a010501
toString()16100%n/a010101
free(Tensor)13100%n/a010301
lambda$_autoConvertAndOptimizeObjectArray$0(DataType, Object)11100%4100%030101
CPU()8100%n/a010201
_createDataFor(Object, DataType)7100%n/a010101
update(Component.OwnerChangeRequest)6100%n/a010201
lambda$_compactAndSizeObjectArray$5(Object[], int)6100%n/a010101
lambda$_compactAndSizeObjectArray$4(Object[], int)6100%n/a010101
lambda$_compactAndSizeObjectArray$3(Object[], int)6100%n/a010101
lambda$_compactAndSizeObjectArray$2(Object[], int)5100%n/a010101
lambda$_compactAndSizeObjectArray$1(int)4100%n/a010101
getExecutor()100%n/a010101
getCoreCount()100%n/a010101
get()100%n/a010101
_approveExecutionOf(Tensor[], int, Operation)100%n/a010101
restore(Tensor)100%n/a010101
\ No newline at end of file +CPU

CPU

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total463 of 2,59982%63 of 39684%5723652413438
allocateFromAll(DataType, NDConfiguration, Object)16919453%203664%1429135701
_writeArray(Tensor, Object, int, int, int)9615361%91562%713144501
_readArray(Tensor, Class, int, int)3517583%31583%21043801
_virtualize(Tensor)3114982%92170%81611801
dispose()210%n/a115511
allocate(Class, int, Object)202757%2250%233901
allocate(Class, Object)201644%1150%123701
_allocate(Object)189483%21890%21121401
_actualize(Tensor)1423894%74386%72604101
_dataTypeOf(Object)136082%11794%11011101
_sizeOccupiedBy(Tensor)85787%21487%2911001
store(Tensor)7646%1150%121301
update(Component.OwnerChangeRequest)60%n/a112211
optimizedOperationOf(Function, String)40%n/a111111
_swap(Tensor, Tensor)0%n/a111111
_writeItem(Tensor, Object, int, int)244100%13597%11903801
_compactAndSizeObjectArray(DataType, Object[], int)220100%32100%01703501
allocateFromOne(DataType, NDConfiguration, Object)105100%18100%01001401
allocate(DataType, NDConfiguration)104100%42284%41402101
_readItem(Tensor, int)82100%16100%0901001
_readAll(Tensor, boolean)72100%11995%11101201
_autoConvertAndOptimizeObjectArray(Object[], DataType, int)29100%4100%030401
static {...}17100%n/a010501
toString()16100%n/a010101
free(Tensor)13100%n/a010301
lambda$_autoConvertAndOptimizeObjectArray$0(DataType, Object)11100%4100%030101
CPU()8100%n/a010201
_createDataFor(Object, DataType)7100%n/a010101
lambda$_compactAndSizeObjectArray$5(Object[], int)6100%n/a010101
lambda$_compactAndSizeObjectArray$4(Object[], int)6100%n/a010101
lambda$_compactAndSizeObjectArray$3(Object[], int)6100%n/a010101
lambda$_compactAndSizeObjectArray$2(Object[], int)5100%n/a010101
lambda$_compactAndSizeObjectArray$1(int)4100%n/a010101
getExecutor()100%n/a010101
getCoreCount()100%n/a010101
get()100%n/a010101
_approveExecutionOf(Tensor[], int, Operation)100%n/a010101
restore(Tensor)100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices.host/CPU.java.html b/docs/coverage/test/html/neureka.devices.host/CPU.java.html index 7d2b6c012..fc852e285 100644 --- a/docs/coverage/test/html/neureka.devices.host/CPU.java.html +++ b/docs/coverage/test/html/neureka.devices.host/CPU.java.html @@ -246,11 +246,11 @@ protected final <T> void _writeArray( Tensor<T> tensor, Object array, int offset, int start, int size ) { - Object data = tensor.getMut().getData() == null ? null : tensor.getMut().getData().getOrNull(); - if ( data == null ) { - DataType<?> dataType = tensor.getDataType() != null ? tensor.getDataType() : _dataTypeOf(array); - tensor.getMut().setData( _createDataFor( array, (DataType<T>) dataType) ); - return; + Object data = tensor.getMut().getData() == null ? null : tensor.getMut().getData().getOrNull(); + if ( data == null ) { + DataType<?> dataType = tensor.getDataType() != null ? tensor.getDataType() : _dataTypeOf(array); + tensor.getMut().setData( _createDataFor( array, (DataType<T>) dataType) ); + return; } Class<?> arrayType = data.getClass(); if ( arrayType == float[].class ) { @@ -628,8 +628,8 @@ */ @Override public boolean update( OwnerChangeRequest<Tensor<Object>> changeRequest ) { - super.update( changeRequest ); - return false; // This type of device can not be a component simply because it is the default device + super.update( changeRequest ); + return false; // This type of device can not be a component simply because it is the default device } /** @@ -798,4 +798,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices.host/CPUData.html b/docs/coverage/test/html/neureka.devices.host/CPUData.html index 7b170d861..c9d9d52ba 100644 --- a/docs/coverage/test/html/neureka.devices.host/CPUData.html +++ b/docs/coverage/test/html/neureka.devices.host/CPUData.html @@ -1 +1 @@ -CPUData

CPUData

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 8100%0 of 0n/a020302
CPUData(AbstractBaseDevice, Object, DataType)7100%n/a010201
lambda$new$0()1100%n/a010101
\ No newline at end of file +CPUData

CPUData

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 8100%0 of 0n/a020302
CPUData(AbstractBaseDevice, Object, DataType)7100%n/a010201
lambda$new$0()1100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices.host/CPUData.java.html b/docs/coverage/test/html/neureka.devices.host/CPUData.java.html index 4bdeedadc..3a65ddfd8 100644 --- a/docs/coverage/test/html/neureka.devices.host/CPUData.java.html +++ b/docs/coverage/test/html/neureka.devices.host/CPUData.java.html @@ -18,4 +18,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices.host/index.html b/docs/coverage/test/html/neureka.devices.host/index.html index 4c965edeb..e2990e7a9 100644 --- a/docs/coverage/test/html/neureka.devices.host/index.html +++ b/docs/coverage/test/html/neureka.devices.host/index.html @@ -1 +1 @@ -neureka.devices.host

neureka.devices.host

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total449 of 2,77183%62 of 40484%562554745135303
CPU4392,16083%6033684%542364741333801
CPU.JVMExecutor15493%675%21703501301
CPUData100%n/a02030201
\ No newline at end of file +neureka.devices.host

neureka.devices.host

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total473 of 2,77182%65 of 40483%592555245145303
CPU4632,13682%6333384%572365241343801
CPU.JVMExecutor15493%675%21703501301
CPUData100%n/a02030201
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices.host/index.source.html b/docs/coverage/test/html/neureka.devices.host/index.source.html index 0ae77679d..b00220fd6 100644 --- a/docs/coverage/test/html/neureka.devices.host/index.source.html +++ b/docs/coverage/test/html/neureka.devices.host/index.source.html @@ -1 +1 @@ -neureka.devices.host

neureka.devices.host

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total449 of 2,77183%62 of 40484%562554745135303
CPU.java4492,31483%6234284%562534744835102
CPUData.java100%n/a02030201
\ No newline at end of file +neureka.devices.host

neureka.devices.host

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total473 of 2,77182%65 of 40483%592555245145303
CPU.java4732,29082%6533983%592535244845102
CPUData.java100%n/a02030201
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices.opencl.utility/CLFunctionCompiler.html b/docs/coverage/test/html/neureka.devices.opencl.utility/CLFunctionCompiler.html index 8b5e5f6f6..a152eb31a 100644 --- a/docs/coverage/test/html/neureka.devices.opencl.utility/CLFunctionCompiler.html +++ b/docs/coverage/test/html/neureka.devices.opencl.utility/CLFunctionCompiler.html @@ -1 +1 @@ -CLFunctionCompiler

CLFunctionCompiler

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total96 of 65385%12 of 2245%13368122225
_adHocKernelFor(ExecutionCall)3318985%3350%3444401
_clTypeOf(Tensor)192455%2250%232801
lambda$optimize$3(String[])182356%3125%231401
lambda$optimize$7(Function, ADTarget)140%n/a111111
optimize()46193%2250%2312201
lambda$_adHocKernelFor$12(Tensor)42887%1150%120601
lambda$optimize$8(Function, ExecutionCall)40%n/a111111
_readAndGetIndexMapper()34100%n/a010801
lambda$optimize$10(ExecutionCall)30100%1150%120601
lambda$_adHocKernelFor$18(List, int, int)28100%n/a010101
lambda$_adHocKernelFor$16(String, String)28100%n/a010301
CLFunctionCompiler(OpenCLDevice, Function, String)23100%n/a0101101
lambda$_adHocKernelFor$17(List, int)20100%n/a010101
lambda$_adHocKernelFor$15(List, int)16100%n/a010101
lambda$_adHocKernelFor$11(ExecutionCall, int)9100%n/a010101
lambda$_adHocKernelFor$14(int[])8100%n/a010301
lambda$optimize$9(Function, ExecutionCall)8100%n/a010201
lambda$_clTypeOf$19(Class)7100%n/a010301
lambda$_adHocKernelFor$13(Tensor)4100%n/a010101
lambda$new$1(Function)4100%n/a010101
lambda$optimize$6(ExecutionCall)3100%n/a010101
lambda$optimize$2(Function)3100%n/a010101
lambda$new$0(Function)3100%n/a010101
lambda$optimize$5(ExecutionCall)2100%n/a010101
lambda$optimize$4(ExecutionCall)2100%n/a010101
\ No newline at end of file +CLFunctionCompiler

CLFunctionCompiler

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total96 of 65385%12 of 2245%13368122225
_adHocKernelFor(ExecutionCall)3318985%3350%3444401
_clTypeOf(Tensor)192455%2250%232801
lambda$optimize$3(String[])182356%3125%231401
lambda$optimize$7(Function, ADTarget)140%n/a111111
optimize()46193%2250%2312201
lambda$_adHocKernelFor$12(Tensor)42887%1150%120601
lambda$optimize$8(Function, ExecutionCall)40%n/a111111
_readAndGetIndexMapper()34100%n/a010801
lambda$optimize$10(ExecutionCall)30100%1150%120601
lambda$_adHocKernelFor$18(List, int, int)28100%n/a010101
lambda$_adHocKernelFor$16(String, String)28100%n/a010301
CLFunctionCompiler(OpenCLDevice, Function, String)23100%n/a0101101
lambda$_adHocKernelFor$17(List, int)20100%n/a010101
lambda$_adHocKernelFor$15(List, int)16100%n/a010101
lambda$_adHocKernelFor$11(ExecutionCall, int)9100%n/a010101
lambda$_adHocKernelFor$14(int[])8100%n/a010301
lambda$optimize$9(Function, ExecutionCall)8100%n/a010201
lambda$_clTypeOf$19(Class)7100%n/a010301
lambda$_adHocKernelFor$13(Tensor)4100%n/a010101
lambda$new$1(Function)4100%n/a010101
lambda$optimize$6(ExecutionCall)3100%n/a010101
lambda$optimize$2(Function)3100%n/a010101
lambda$new$0(Function)3100%n/a010101
lambda$optimize$5(ExecutionCall)2100%n/a010101
lambda$optimize$4(ExecutionCall)2100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices.opencl.utility/CLFunctionCompiler.java.html b/docs/coverage/test/html/neureka.devices.opencl.utility/CLFunctionCompiler.java.html index c6364ebe8..b1fd9d81a 100644 --- a/docs/coverage/test/html/neureka.devices.opencl.utility/CLFunctionCompiler.java.html +++ b/docs/coverage/test/html/neureka.devices.opencl.utility/CLFunctionCompiler.java.html @@ -236,4 +236,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices.opencl.utility/DeviceQuery.html b/docs/coverage/test/html/neureka.devices.opencl.utility/DeviceQuery.html index 18347dbbc..7c1cf4231 100644 --- a/docs/coverage/test/html/neureka.devices.opencl.utility/DeviceQuery.html +++ b/docs/coverage/test/html/neureka.devices.opencl.utility/DeviceQuery.html @@ -1 +1 @@ -DeviceQuery

DeviceQuery

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total56 of 93594%11 of 2657%1022311609
query()3971394%81260%81118601
getSizes(cl_device_id, int, int)174170%3350%2421101
getString(cl_device_id, int)36100%n/a010501
getString(cl_platform_id, int)36100%n/a010501
getInts(cl_device_id, int, int)16100%n/a010301
getLongs(cl_device_id, int, int)16100%n/a010301
getInt(cl_device_id, int)7100%n/a010101
getLong(cl_device_id, int)7100%n/a010101
getSize(cl_device_id, int)7100%n/a010101
\ No newline at end of file +DeviceQuery

DeviceQuery

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total935 of 9350%26 of 260%222211611699
query()7520%200%1111868611
getSizes(cl_device_id, int, int)580%60%44111111
getString(cl_device_id, int)360%n/a115511
getString(cl_platform_id, int)360%n/a115511
getInts(cl_device_id, int, int)160%n/a113311
getLongs(cl_device_id, int, int)160%n/a113311
getInt(cl_device_id, int)70%n/a111111
getLong(cl_device_id, int)70%n/a111111
getSize(cl_device_id, int)70%n/a111111
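The DeviceQuery.java hunks that follow use the usual JOCL two-call idiom throughout: first ask OpenCL for a count (or byte size), then allocate a buffer of exactly that size and call again to fill it. A minimal standalone sketch of that idiom (it assumes the org.jocl library is on the classpath and at least one OpenCL platform is installed):

    import org.jocl.cl_platform_id;
    import static org.jocl.CL.*;

    // Count-then-fill: the same two-step clGetPlatformIDs idiom used throughout DeviceQuery.
    public class PlatformCountSketch {
        public static void main(String[] args) {
            setExceptionsEnabled(true);                      // let JOCL throw instead of returning error codes
            int[] numPlatforms = new int[1];
            clGetPlatformIDs(0, null, numPlatforms);         // 1st call: only query the count
            cl_platform_id[] platforms = new cl_platform_id[numPlatforms[0]];
            clGetPlatformIDs(platforms.length, platforms, null); // 2nd call: fill the array
            System.out.println("OpenCL platforms found: " + platforms.length);
        }
    }

getString, getInts, getLongs and getSizes in the hunk below follow the same shape, only with clGetDeviceInfo / clGetPlatformInfo instead of clGetPlatformIDs.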
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices.opencl.utility/DeviceQuery.java.html b/docs/coverage/test/html/neureka.devices.opencl.utility/DeviceQuery.java.html index 991fa0d12..ce6fba033 100644 --- a/docs/coverage/test/html/neureka.devices.opencl.utility/DeviceQuery.java.html +++ b/docs/coverage/test/html/neureka.devices.opencl.utility/DeviceQuery.java.html @@ -28,166 +28,166 @@ */ public static String query() { - String result = "[DEVICE QUERY]:\n========================================================\n"; + String result = "[DEVICE QUERY]:\n========================================================\n"; // Obtain the number of platforms - int[] numPlatforms = new int[ 1 ]; - clGetPlatformIDs(0, null, numPlatforms); + int[] numPlatforms = new int[ 1 ]; + clGetPlatformIDs(0, null, numPlatforms); - result+=("Number of platforms: "+numPlatforms[ 0 ]+"\n"); + result+=("Number of platforms: "+numPlatforms[ 0 ]+"\n"); // Obtain the platform IDs - cl_platform_id[] platforms = new cl_platform_id[numPlatforms[ 0 ]]; - clGetPlatformIDs(platforms.length, platforms, null); + cl_platform_id[] platforms = new cl_platform_id[numPlatforms[ 0 ]]; + clGetPlatformIDs(platforms.length, platforms, null); // Collect all devices of all platforms - List<cl_device_id> devices = new ArrayList<cl_device_id>(); - for (cl_platform_id platform : platforms) + List<cl_device_id> devices = new ArrayList<cl_device_id>(); + for (cl_platform_id platform : platforms) { - String platformName = getString(platform, CL_PLATFORM_NAME); + String platformName = getString(platform, CL_PLATFORM_NAME); // Obtain the number of devices for the current platform - int[] numDevices = new int[ 1 ]; - clGetDeviceIDs(platform, CL_DEVICE_TYPE_ALL, 0, null, numDevices); + int[] numDevices = new int[ 1 ]; + clGetDeviceIDs(platform, CL_DEVICE_TYPE_ALL, 0, null, numDevices); - result += ("Number of devices in platform " + platformName + ": " + numDevices[ 0 ] + "\n"); + result += ("Number of devices in platform " + platformName + ": " + numDevices[ 0 ] + "\n"); - cl_device_id[] devicesArray = new cl_device_id[numDevices[ 0 ]]; - clGetDeviceIDs(platform, CL_DEVICE_TYPE_ALL, numDevices[ 0 ], devicesArray, null); + cl_device_id[] devicesArray = new cl_device_id[numDevices[ 0 ]]; + clGetDeviceIDs(platform, CL_DEVICE_TYPE_ALL, numDevices[ 0 ], devicesArray, null); - devices.addAll(Arrays.asList(devicesArray)); + devices.addAll(Arrays.asList(devicesArray)); } - result += "========================================================\n"; + result += "========================================================\n"; // Print the infos about all devices - for (cl_device_id device : devices) + for (cl_device_id device : devices) { // CL_DEVICE_NAME - String deviceName = getString(device, CL_DEVICE_NAME); - result+=("\n[Info for device "+deviceName+"]: \n--------------------------------------------------------\n"); - result+=("CL_DEVICE_NAME: "+deviceName+"\n"); + String deviceName = getString(device, CL_DEVICE_NAME); + result+=("\n[Info for device "+deviceName+"]: \n--------------------------------------------------------\n"); + result+=("CL_DEVICE_NAME: "+deviceName+"\n"); // CL_DEVICE_VENDOR - String deviceVendor = getString(device, CL_DEVICE_VENDOR); - result+=("CL_DEVICE_VENDOR: "+deviceVendor+"\n"); + String deviceVendor = getString(device, CL_DEVICE_VENDOR); + result+=("CL_DEVICE_VENDOR: "+deviceVendor+"\n"); // CL_DRIVER_VERSION - String driverVersion = getString(device, CL_DRIVER_VERSION); - result+=("CL_DRIVER_VERSION: 
"+driverVersion+"\n"); + String driverVersion = getString(device, CL_DRIVER_VERSION); + result+=("CL_DRIVER_VERSION: "+driverVersion+"\n"); // CL_DEVICE_TYPE - long deviceType = getLong(device, CL_DEVICE_TYPE); - if ( (deviceType & CL_DEVICE_TYPE_CPU) != 0) result+=("CL_DEVICE_TYPE: CL_DEVICE_TYPE_CPU\n"); - if ( (deviceType & CL_DEVICE_TYPE_GPU) != 0) result+=("CL_DEVICE_TYPE: CL_DEVICE_TYPE_GPU\n"); - if ( (deviceType & CL_DEVICE_TYPE_ACCELERATOR) != 0) result+=("CL_DEVICE_TYPE: CL_DEVICE_TYPE_ACCELERATOR\n"); - if ( (deviceType & CL_DEVICE_TYPE_DEFAULT) != 0) result+=("CL_DEVICE_TYPE: CL_DEVICE_TYPE_DEFAULT\n"); + long deviceType = getLong(device, CL_DEVICE_TYPE); + if ( (deviceType & CL_DEVICE_TYPE_CPU) != 0) result+=("CL_DEVICE_TYPE: CL_DEVICE_TYPE_CPU\n"); + if ( (deviceType & CL_DEVICE_TYPE_GPU) != 0) result+=("CL_DEVICE_TYPE: CL_DEVICE_TYPE_GPU\n"); + if ( (deviceType & CL_DEVICE_TYPE_ACCELERATOR) != 0) result+=("CL_DEVICE_TYPE: CL_DEVICE_TYPE_ACCELERATOR\n"); + if ( (deviceType & CL_DEVICE_TYPE_DEFAULT) != 0) result+=("CL_DEVICE_TYPE: CL_DEVICE_TYPE_DEFAULT\n"); // CL_DEVICE_MAX_COMPUTE_UNITS - int maxComputeUnits = getInt(device, CL_DEVICE_MAX_COMPUTE_UNITS); - result += ("CL_DEVICE_MAX_COMPUTE_UNITS: "+ maxComputeUnits+"\n"); + int maxComputeUnits = getInt(device, CL_DEVICE_MAX_COMPUTE_UNITS); + result += ("CL_DEVICE_MAX_COMPUTE_UNITS: "+ maxComputeUnits+"\n"); // CL_DEVICE_MAX_WORK_ITEM_DIMENSIONS - long maxWorkItemDimensions = getLong(device, CL_DEVICE_MAX_WORK_ITEM_DIMENSIONS); - result += ("CL_DEVICE_MAX_WORK_ITEM_DIMENSIONS: "+ maxWorkItemDimensions+"\n"); + long maxWorkItemDimensions = getLong(device, CL_DEVICE_MAX_WORK_ITEM_DIMENSIONS); + result += ("CL_DEVICE_MAX_WORK_ITEM_DIMENSIONS: "+ maxWorkItemDimensions+"\n"); // CL_DEVICE_MAX_WORK_ITEM_SIZES - long[] maxWorkItemSizes = getSizes(device, CL_DEVICE_MAX_WORK_ITEM_SIZES, 3); - result += ("CL_DEVICE_MAX_WORK_ITEM_SIZES: "+maxWorkItemSizes[ 0 ]+", "+ maxWorkItemSizes[ 1 ]+", "+maxWorkItemSizes[ 2 ]+"\n"); + long[] maxWorkItemSizes = getSizes(device, CL_DEVICE_MAX_WORK_ITEM_SIZES, 3); + result += ("CL_DEVICE_MAX_WORK_ITEM_SIZES: "+maxWorkItemSizes[ 0 ]+", "+ maxWorkItemSizes[ 1 ]+", "+maxWorkItemSizes[ 2 ]+"\n"); // CL_DEVICE_MAX_WORK_GROUP_SIZE - long maxWorkGroupSize = getSize(device, CL_DEVICE_MAX_WORK_GROUP_SIZE); - result += ("CL_DEVICE_MAX_WORK_GROUP_SIZE: "+ maxWorkGroupSize+"\n"); + long maxWorkGroupSize = getSize(device, CL_DEVICE_MAX_WORK_GROUP_SIZE); + result += ("CL_DEVICE_MAX_WORK_GROUP_SIZE: "+ maxWorkGroupSize+"\n"); // CL_DEVICE_MAX_CLOCK_FREQUENCY - long maxClockFrequency = getLong(device, CL_DEVICE_MAX_CLOCK_FREQUENCY); - result += ("CL_DEVICE_MAX_CLOCK_FREQUENCY: "+ maxClockFrequency+" MHz\n"); + long maxClockFrequency = getLong(device, CL_DEVICE_MAX_CLOCK_FREQUENCY); + result += ("CL_DEVICE_MAX_CLOCK_FREQUENCY: "+ maxClockFrequency+" MHz\n"); // CL_DEVICE_ADDRESS_BITS - int addressBits = getInt(device, CL_DEVICE_ADDRESS_BITS); - result += ("CL_DEVICE_ADDRESS_BITS: "+ addressBits+"\n"); + int addressBits = getInt(device, CL_DEVICE_ADDRESS_BITS); + result += ("CL_DEVICE_ADDRESS_BITS: "+ addressBits+"\n"); // CL_DEVICE_MAX_MEM_ALLOC_SIZE - long maxMemAllocSize = getLong(device, CL_DEVICE_MAX_MEM_ALLOC_SIZE); - result+=("CL_DEVICE_MAX_MEM_ALLOC_SIZE: "+ (int)(maxMemAllocSize / (1024 * 1024))+" MByte\n"); + long maxMemAllocSize = getLong(device, CL_DEVICE_MAX_MEM_ALLOC_SIZE); + result+=("CL_DEVICE_MAX_MEM_ALLOC_SIZE: "+ (int)(maxMemAllocSize / (1024 * 1024))+" MByte\n"); // CL_DEVICE_GLOBAL_MEM_SIZE - long 
globalMemSize = getLong(device, CL_DEVICE_GLOBAL_MEM_SIZE); - result += ("CL_DEVICE_GLOBAL_MEM_SIZE: "+(int)(globalMemSize / (1024 * 1024))+" MByte\n"); + long globalMemSize = getLong(device, CL_DEVICE_GLOBAL_MEM_SIZE); + result += ("CL_DEVICE_GLOBAL_MEM_SIZE: "+(int)(globalMemSize / (1024 * 1024))+" MByte\n"); // CL_DEVICE_ERROR_CORRECTION_SUPPORT - int errorCorrectionSupport = getInt(device, CL_DEVICE_ERROR_CORRECTION_SUPPORT); - result += ("CL_DEVICE_ERROR_CORRECTION_SUPPORT: "+(errorCorrectionSupport != 0 ? "yes" : "no")+"\n"); + int errorCorrectionSupport = getInt(device, CL_DEVICE_ERROR_CORRECTION_SUPPORT); + result += ("CL_DEVICE_ERROR_CORRECTION_SUPPORT: "+(errorCorrectionSupport != 0 ? "yes" : "no")+"\n"); // CL_DEVICE_LOCAL_MEM_TYPE - int localMemType = getInt(device, CL_DEVICE_LOCAL_MEM_TYPE); - result += ("CL_DEVICE_LOCAL_MEM_TYPE: "+(localMemType == 1 ? "local" : "global")+"\n"); + int localMemType = getInt(device, CL_DEVICE_LOCAL_MEM_TYPE); + result += ("CL_DEVICE_LOCAL_MEM_TYPE: "+(localMemType == 1 ? "local" : "global")+"\n"); // CL_DEVICE_LOCAL_MEM_SIZE - long localMemSize = getLong(device, CL_DEVICE_LOCAL_MEM_SIZE); - result += ("CL_DEVICE_LOCAL_MEM_SIZE: "+(int)(localMemSize / 1024)+" KByte\n"); + long localMemSize = getLong(device, CL_DEVICE_LOCAL_MEM_SIZE); + result += ("CL_DEVICE_LOCAL_MEM_SIZE: "+(int)(localMemSize / 1024)+" KByte\n"); // CL_DEVICE_MAX_CONSTANT_BUFFER_SIZE - long maxConstantBufferSize = getLong(device, CL_DEVICE_MAX_CONSTANT_BUFFER_SIZE); - result += ("CL_DEVICE_MAX_CONSTANT_BUFFER_SIZE: "+(int)(maxConstantBufferSize / 1024)+" KByte\n"); + long maxConstantBufferSize = getLong(device, CL_DEVICE_MAX_CONSTANT_BUFFER_SIZE); + result += ("CL_DEVICE_MAX_CONSTANT_BUFFER_SIZE: "+(int)(maxConstantBufferSize / 1024)+" KByte\n"); // CL_DEVICE_QUEUE_PROPERTIES - long queueProperties = getLong(device, CL_DEVICE_QUEUE_PROPERTIES); - if ( ( queueProperties & CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE ) != 0 ) + long queueProperties = getLong(device, CL_DEVICE_QUEUE_PROPERTIES); + if ( ( queueProperties & CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE ) != 0 ) result += ("CL_DEVICE_QUEUE_PROPERTIES: CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE\n"); - if(( queueProperties & CL_QUEUE_PROFILING_ENABLE ) != 0) - result += ("CL_DEVICE_QUEUE_PROPERTIES: CL_QUEUE_PROFILING_ENABLE\n"); + if(( queueProperties & CL_QUEUE_PROFILING_ENABLE ) != 0) + result += ("CL_DEVICE_QUEUE_PROPERTIES: CL_QUEUE_PROFILING_ENABLE\n"); // CL_DEVICE_IMAGE_SUPPORT - int imageSupport = getInt(device, CL_DEVICE_IMAGE_SUPPORT); - result += ("CL_DEVICE_IMAGE_SUPPORT: "+imageSupport+"\n"); + int imageSupport = getInt(device, CL_DEVICE_IMAGE_SUPPORT); + result += ("CL_DEVICE_IMAGE_SUPPORT: "+imageSupport+"\n"); // CL_DEVICE_MAX_READ_IMAGE_ARGS - int maxReadImageArgs = getInt(device, CL_DEVICE_MAX_READ_IMAGE_ARGS); - result += ("CL_DEVICE_MAX_READ_IMAGE_ARGS: "+maxReadImageArgs+"\n"); + int maxReadImageArgs = getInt(device, CL_DEVICE_MAX_READ_IMAGE_ARGS); + result += ("CL_DEVICE_MAX_READ_IMAGE_ARGS: "+maxReadImageArgs+"\n"); // CL_DEVICE_MAX_WRITE_IMAGE_ARGS - int maxWriteImageArgs = getInt(device, CL_DEVICE_MAX_WRITE_IMAGE_ARGS); - result += ("CL_DEVICE_MAX_WRITE_IMAGE_ARGS: "+maxWriteImageArgs+"\n"); + int maxWriteImageArgs = getInt(device, CL_DEVICE_MAX_WRITE_IMAGE_ARGS); + result += ("CL_DEVICE_MAX_WRITE_IMAGE_ARGS: "+maxWriteImageArgs+"\n"); // CL_DEVICE_SINGLE_FP_CONFIG - long singleFpConfig = getLong(device, CL_DEVICE_SINGLE_FP_CONFIG); - result+=("CL_DEVICE_SINGLE_FP_CONFIG: 
"+stringFor_cl_device_fp_config(singleFpConfig)+"\n"); + long singleFpConfig = getLong(device, CL_DEVICE_SINGLE_FP_CONFIG); + result+=("CL_DEVICE_SINGLE_FP_CONFIG: "+stringFor_cl_device_fp_config(singleFpConfig)+"\n"); // CL_DEVICE_IMAGE2D_MAX_WIDTH - long image2dMaxWidth = getSize(device, CL_DEVICE_IMAGE2D_MAX_WIDTH); - result += ("CL_DEVICE_2D_MAX_WIDTH "+image2dMaxWidth+"\n"); + long image2dMaxWidth = getSize(device, CL_DEVICE_IMAGE2D_MAX_WIDTH); + result += ("CL_DEVICE_2D_MAX_WIDTH "+image2dMaxWidth+"\n"); // CL_DEVICE_IMAGE2D_MAX_HEIGHT - long image2dMaxHeight = getSize(device, CL_DEVICE_IMAGE2D_MAX_HEIGHT); - result += ("CL_DEVICE_2D_MAX_HEIGHT "+image2dMaxHeight+"\n"); + long image2dMaxHeight = getSize(device, CL_DEVICE_IMAGE2D_MAX_HEIGHT); + result += ("CL_DEVICE_2D_MAX_HEIGHT "+image2dMaxHeight+"\n"); // CL_DEVICE_IMAGE3D_MAX_WIDTH - long image3dMaxWidth = getSize(device, CL_DEVICE_IMAGE3D_MAX_WIDTH); - result += ("CL_DEVICE_3D_MAX_WIDTH "+image3dMaxWidth+"\n"); + long image3dMaxWidth = getSize(device, CL_DEVICE_IMAGE3D_MAX_WIDTH); + result += ("CL_DEVICE_3D_MAX_WIDTH "+image3dMaxWidth+"\n"); // CL_DEVICE_IMAGE3D_MAX_HEIGHT - long image3dMaxHeight = getSize(device, CL_DEVICE_IMAGE3D_MAX_HEIGHT); - result+=("CL_DEVICE_3D_MAX_HEIGHT "+image3dMaxHeight+"\n"); + long image3dMaxHeight = getSize(device, CL_DEVICE_IMAGE3D_MAX_HEIGHT); + result+=("CL_DEVICE_3D_MAX_HEIGHT "+image3dMaxHeight+"\n"); // CL_DEVICE_IMAGE3D_MAX_DEPTH - long image3dMaxDepth = getSize(device, CL_DEVICE_IMAGE3D_MAX_DEPTH); - result += ("CL_DEVICE_3D_MAX_DEPTH "+image3dMaxDepth+"\n"); + long image3dMaxDepth = getSize(device, CL_DEVICE_IMAGE3D_MAX_DEPTH); + result += ("CL_DEVICE_3D_MAX_DEPTH "+image3dMaxDepth+"\n"); // CL_DEVICE_PREFERRED_VECTOR_WIDTH_<type> - result += ("CL_DEVICE_PREFERRED_VECTOR_WIDTH_<t>\n"); - int preferredVectorWidthChar = getInt(device, CL_DEVICE_PREFERRED_VECTOR_WIDTH_CHAR); - int preferredVectorWidthShort = getInt(device, CL_DEVICE_PREFERRED_VECTOR_WIDTH_SHORT); - int preferredVectorWidthInt = getInt(device, CL_DEVICE_PREFERRED_VECTOR_WIDTH_INT); - int preferredVectorWidthLong = getInt(device, CL_DEVICE_PREFERRED_VECTOR_WIDTH_LONG); - int preferredVectorWidthFloat = getInt(device, CL_DEVICE_PREFERRED_VECTOR_WIDTH_FLOAT); - int preferredVectorWidthDouble = getInt(device, CL_DEVICE_PREFERRED_VECTOR_WIDTH_DOUBLE); - result += ("CHAR "+preferredVectorWidthChar+ + result += ("CL_DEVICE_PREFERRED_VECTOR_WIDTH_<t>\n"); + int preferredVectorWidthChar = getInt(device, CL_DEVICE_PREFERRED_VECTOR_WIDTH_CHAR); + int preferredVectorWidthShort = getInt(device, CL_DEVICE_PREFERRED_VECTOR_WIDTH_SHORT); + int preferredVectorWidthInt = getInt(device, CL_DEVICE_PREFERRED_VECTOR_WIDTH_INT); + int preferredVectorWidthLong = getInt(device, CL_DEVICE_PREFERRED_VECTOR_WIDTH_LONG); + int preferredVectorWidthFloat = getInt(device, CL_DEVICE_PREFERRED_VECTOR_WIDTH_FLOAT); + int preferredVectorWidthDouble = getInt(device, CL_DEVICE_PREFERRED_VECTOR_WIDTH_DOUBLE); + result += ("CHAR "+preferredVectorWidthChar+ ", SHORT "+preferredVectorWidthShort+ ", INT "+preferredVectorWidthInt + ", LONG "+preferredVectorWidthLong+ ", FLOAT "+preferredVectorWidthFloat+ ", DOUBLE "+ preferredVectorWidthDouble+"\n"); - } - return result; + } + return result; } /** @@ -199,7 +199,7 @@ */ private static int getInt(cl_device_id device, int paramName) { - return getInts(device, paramName, 1)[ 0 ]; + return getInts(device, paramName, 1)[ 0 ]; } /** @@ -212,9 +212,9 @@ */ private static int[] getInts(cl_device_id device, int paramName, int 
numValues) { - int values[] = new int[numValues]; - clGetDeviceInfo(device, paramName, Sizeof.cl_int * numValues, Pointer.to(values), null); - return values; + int values[] = new int[numValues]; + clGetDeviceInfo(device, paramName, Sizeof.cl_int * numValues, Pointer.to(values), null); + return values; } /** @@ -226,7 +226,7 @@ */ private static long getLong(cl_device_id device, int paramName) { - return getLongs(device, paramName, 1)[ 0 ]; + return getLongs(device, paramName, 1)[ 0 ]; } /** @@ -239,9 +239,9 @@ */ private static long[] getLongs(cl_device_id device, int paramName, int numValues) { - long values[] = new long[numValues]; - clGetDeviceInfo(device, paramName, Sizeof.cl_long * numValues, Pointer.to(values), null); - return values; + long values[] = new long[numValues]; + clGetDeviceInfo(device, paramName, Sizeof.cl_long * numValues, Pointer.to(values), null); + return values; } /** @@ -254,15 +254,15 @@ private static String getString(cl_device_id device, int paramName) { // Obtain the length of the string that will be queried - long size[] = new long[ 1 ]; - clGetDeviceInfo(device, paramName, 0, null, size); + long size[] = new long[ 1 ]; + clGetDeviceInfo(device, paramName, 0, null, size); // Create a buffer of the appropriate size and fill it with the info - byte buffer[] = new byte[(int)size[ 0 ]]; - clGetDeviceInfo(device, paramName, buffer.length, Pointer.to(buffer), null); + byte buffer[] = new byte[(int)size[ 0 ]]; + clGetDeviceInfo(device, paramName, buffer.length, Pointer.to(buffer), null); // Create a string from the buffer (excluding the trailing \0 byte) - return new String(buffer, 0, buffer.length-1); + return new String(buffer, 0, buffer.length-1); } /** @@ -275,15 +275,15 @@ private static String getString(cl_platform_id platform, int paramName) { // Obtain the length of the string that will be queried - long size[] = new long[ 1 ]; - clGetPlatformInfo(platform, paramName, 0, null, size); + long size[] = new long[ 1 ]; + clGetPlatformInfo(platform, paramName, 0, null, size); // Create a buffer of the appropriate size and fill it with the info - byte buffer[] = new byte[(int)size[ 0 ]]; - clGetPlatformInfo(platform, paramName, buffer.length, Pointer.to(buffer), null); + byte buffer[] = new byte[(int)size[ 0 ]]; + clGetPlatformInfo(platform, paramName, buffer.length, Pointer.to(buffer), null); // Create a string from the buffer (excluding the trailing \0 byte) - return new String(buffer, 0, buffer.length-1); + return new String(buffer, 0, buffer.length-1); } /** @@ -295,7 +295,7 @@ */ private static long getSize(cl_device_id device, int paramName) { - return getSizes(device, paramName, 1)[ 0 ]; + return getSizes(device, paramName, 1)[ 0 ]; } /** @@ -310,20 +310,20 @@ { // The size of the returned data has to depend on // the size of a size_t, which is handled here - ByteBuffer buffer = ByteBuffer.allocate( - numValues * Sizeof.size_t).order(ByteOrder.nativeOrder()); - clGetDeviceInfo(device, paramName, Sizeof.size_t * numValues, - Pointer.to(buffer), null); - long values[] = new long[numValues]; - if (Sizeof.size_t == 4) + ByteBuffer buffer = ByteBuffer.allocate( + numValues * Sizeof.size_t).order(ByteOrder.nativeOrder()); + clGetDeviceInfo(device, paramName, Sizeof.size_t * numValues, + Pointer.to(buffer), null); + long values[] = new long[numValues]; + if (Sizeof.size_t == 4) for ( int i = 0; i < numValues; i++ ) values[ i ] = buffer.getInt(i * Sizeof.size_t); else - for ( int i = 0; i < numValues; i++ ) - values[ i ] = buffer.getLong(i * Sizeof.size_t); + for ( 
int i = 0; i < numValues; i++ ) + values[ i ] = buffer.getLong(i * Sizeof.size_t); - return values; + return values; } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices.opencl.utility/Messages$Tips.html b/docs/coverage/test/html/neureka.devices.opencl.utility/Messages$Tips.html index b9f304c34..dfd7d39bc 100644 --- a/docs/coverage/test/html/neureka.devices.opencl.utility/Messages$Tips.html +++ b/docs/coverage/test/html/neureka.devices.opencl.utility/Messages$Tips.html @@ -1 +1 @@ -Messages.Tips

Messages.Tips

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total1 of 6598%1 of 250%1411203
bootstrapTip()11894%1150%121301
static {...}35100%n/a010501
Messages.Tips(String, int, String, String)11100%n/a010401
\ No newline at end of file +Messages.Tips

Messages.Tips

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total1 of 8298%1 of 250%1411203
bootstrapTip()11894%1150%121301
static {...}52100%n/a010501
Messages.Tips(String, int, String, String)11100%n/a010401
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices.opencl.utility/Messages.html b/docs/coverage/test/html/neureka.devices.opencl.utility/Messages.html index 228b1f878..3b2d5d1ea 100644 --- a/docs/coverage/test/html/neureka.devices.opencl.utility/Messages.html +++ b/docs/coverage/test/html/neureka.devices.opencl.utility/Messages.html @@ -1 +1 @@ -Messages

Messages

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total59 of 15461%6 of 1250%810173623
clContextCreationFailed()230%n/a114411
findTip()219581%6650%68102901
clContextCouldNotFindAnyDevices()150%n/a113311
\ No newline at end of file +Messages

Messages

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total36 of 15476%6 of 1250%710133613
findTip()219581%6650%68102901
clContextCouldNotFindAnyDevices()150%n/a113311
clContextCreationFailed()23100%n/a010401
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices.opencl.utility/Messages.java.html b/docs/coverage/test/html/neureka.devices.opencl.utility/Messages.java.html index c910a6227..aa29883c9 100644 --- a/docs/coverage/test/html/neureka.devices.opencl.utility/Messages.java.html +++ b/docs/coverage/test/html/neureka.devices.opencl.utility/Messages.java.html @@ -15,11 +15,11 @@ private Messages() {/* This is a utility class! */} public static String clContextCreationFailed() { - return LogUtil.format( + return LogUtil.format( "OpenCL not available!\n" + - "Skipped creating and adding a new '"+ CLBackend.class.getSimpleName()+"' " + - "to the current '"+ BackendContext.class.getSimpleName()+"'...\n" + - findTip().bootstrapTip() + "Skipped creating and adding a new '"+ CLBackend.class.getSimpleName()+"' " + + "to the current '"+ BackendContext.class.getSimpleName()+"'...\n" + + findTip().bootstrapTip() ); } @@ -108,4 +108,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices.opencl.utility/index.html b/docs/coverage/test/html/neureka.devices.opencl.utility/index.html index 058322a1b..91a3a7f22 100644 --- a/docs/coverage/test/html/neureka.devices.opencl.utility/index.html +++ b/docs/coverage/test/html/neureka.devices.opencl.utility/index.html @@ -1 +1 @@ -neureka.devices.opencl.utility

neureka.devices.opencl.utility

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total212 of 1,80788%30 of 6251%32722928644004
CLFunctionCompiler9655785%121045%1336812222501
Messages599561%6650%81017362301
DeviceQuery5687994%111557%102231160901
Messages.Tips6498%1150%141120301
\ No newline at end of file +neureka.devices.opencl.utility

neureka.devices.opencl.utility

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total1,068 of 1,82441%45 of 6227%4372138286124014
DeviceQuery9350%260%22221161169911
CLFunctionCompiler9655785%121045%1336812222501
Messages3611876%6650%71013361301
Messages.Tips8198%1150%141120301
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices.opencl.utility/index.source.html b/docs/coverage/test/html/neureka.devices.opencl.utility/index.source.html index f9fc5deea..b84fabfca 100644 --- a/docs/coverage/test/html/neureka.devices.opencl.utility/index.source.html +++ b/docs/coverage/test/html/neureka.devices.opencl.utility/index.source.html @@ -1 +1 @@ -neureka.devices.opencl.utility

neureka.devices.opencl.utility

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total212 of 1,80788%30 of 6251%32722928644004
CLFunctionCompiler.java9655785%121045%1336812222501
Messages.java6015972%7750%91418482602
DeviceQuery.java5687994%111557%102231160901
\ No newline at end of file +neureka.devices.opencl.utility

neureka.devices.opencl.utility

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total1,068 of 1,82441%45 of 6227%4372138286124014
DeviceQuery.java9350%260%22221161169911
CLFunctionCompiler.java9655785%121045%1336812222501
Messages.java3719984%7750%81414481602
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices.opencl/JVMData.html b/docs/coverage/test/html/neureka.devices.opencl/JVMData.html index 5fa866801..99ce70d4e 100644 --- a/docs/coverage/test/html/neureka.devices.opencl/JVMData.html +++ b/docs/coverage/test/html/neureka.devices.opencl/JVMData.html @@ -1 +1 @@ -JVMData

JVMData

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total74 of 74790%11 of 14492%109011135018
of(Class, int)234767%31178%2831101
_fillArray(Object, int, int)1415291%13597%11913901
_allocArrayFromNumber(Number, int)137885%11191%1712501
getElementAt(int)46694%11191%171701
getLength()46093%11191%171701
getPointer()45493%11191%171701
lengthOf(Object)44792%11392%181801
getItemSize()43690%11191%171701
getType()43690%11191%171701
_preprocess(Object, int, int, boolean, boolean)27100%6100%040601
JVMData(Object, int, int, boolean, boolean)16100%n/a010401
of(Object)10100%n/a010101
of(Object, boolean)10100%n/a010101
isVirtual()10100%2100%020101
of(Object, int, boolean, boolean)9100%n/a010101
of(Object, int, int)9100%n/a010101
getArray()3100%n/a010101
getTargetLength()3100%n/a010101
\ No newline at end of file +JVMData

JVMData

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total114 of 74784%15 of 14489%179016135418
of(Class, int)234767%31178%2831101
_fillArray(Object, int, int)1415291%13597%11913901
_allocArrayFromNumber(Number, int)137885%11191%1712501
of(Object, boolean)100%n/a111111
isVirtual()100%20%221111
of(Object, int, boolean, boolean)90%n/a111111
_preprocess(Object, int, int, boolean, boolean)81970%2466%241601
getElementAt(int)46694%11191%171701
getLength()46093%11191%171701
getPointer()45493%11191%171701
lengthOf(Object)44792%11392%181801
getItemSize()43690%11191%171701
getType()43690%11191%171701
getTargetLength()30%n/a111111
JVMData(Object, int, int, boolean, boolean)16100%n/a010401
of(Object)10100%n/a010101
of(Object, int, int)9100%n/a010101
getArray()3100%n/a010101
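In the JVMData.java hunks of the next file diff, isVirtual() and getTargetLength() show up as uncovered; isVirtual() simply reports whether the logical element count (_size) differs from the length of the backing array. A toy illustration of that distinction (the class and the broadcast-style get() are assumptions made for the example, not Neureka's implementation):

    // Toy illustration of the size-vs-length check behind JVMData.isVirtual():
    // the logical element count and the backing array length may differ.
    final class VirtualDataSketch {
        private final float[] data; // backing array (may be shorter than 'size')
        private final int size;     // logical number of elements requested

        VirtualDataSketch(float[] data, int size) { this.data = data; this.size = size; }

        boolean isVirtual() { return size != data.length; } // same comparison as JVMData.isVirtual()

        // Hypothetical broadcast-style read, only to show why a shorter array can still serve 'size' items.
        float get(int i) { return isVirtual() ? data[0] : data[i]; }

        public static void main(String[] args) {
            VirtualDataSketch dense   = new VirtualDataSketch(new float[]{1f, 2f, 3f}, 3);
            VirtualDataSketch virtual = new VirtualDataSketch(new float[]{42f}, 3);
            System.out.println(dense.isVirtual() + " / " + virtual.isVirtual()); // false / true
            System.out.println(virtual.get(2));                                  // 42.0
        }
    }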
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices.opencl/JVMData.java.html b/docs/coverage/test/html/neureka.devices.opencl/JVMData.java.html index 7f45b2811..a5c32ca76 100644 --- a/docs/coverage/test/html/neureka.devices.opencl/JVMData.java.html +++ b/docs/coverage/test/html/neureka.devices.opencl/JVMData.java.html @@ -40,11 +40,11 @@ } public static JVMData of( Object data, boolean convertToFloat ) { - return new JVMData( data, 0, lengthOf(data), convertToFloat, false ); + return new JVMData( data, 0, lengthOf(data), convertToFloat, false ); } public static JVMData of( Object data, int size, boolean convertToFloat, boolean virtual ) { - return new JVMData( data, 0, size, convertToFloat, virtual ); + return new JVMData( data, 0, size, convertToFloat, virtual ); } public static JVMData of( Object data, int size, int start ) { @@ -60,13 +60,13 @@ private Object _preprocess( Object data, int start, int targetSize, boolean convertToFloat, boolean allowVirtual ) { - int size = allowVirtual ? lengthOf(data) : targetSize; + int size = allowVirtual ? lengthOf(data) : targetSize; if ( data instanceof Number ) data = _allocArrayFromNumber( (Number) data, size ); - if ( convertToFloat ) - data = DataConverter.get().convert( data, float[].class ); + if ( convertToFloat ) + data = DataConverter.get().convert( data, float[].class ); return _fillArray( data, start, size ); // Make sure the array is of the correct size! } @@ -169,7 +169,7 @@ throw new IllegalStateException(); } - public long getTargetLength() { return _size; } + public long getTargetLength() { return _size; } int getItemSize() { if ( _data instanceof float[] ) return Sizeof.cl_float; @@ -182,7 +182,7 @@ } boolean isVirtual() { - return _size != getLength(); + return _size != getLength(); } OpenCLDevice.cl_dtype getType() { @@ -217,4 +217,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices.opencl/KernelCache$1.html b/docs/coverage/test/html/neureka.devices.opencl/KernelCache$1.html index fbee84120..ad63afd4a 100644 --- a/docs/coverage/test/html/neureka.devices.opencl/KernelCache$1.html +++ b/docs/coverage/test/html/neureka.devices.opencl/KernelCache$1.html @@ -1 +1 @@ -KernelCache.new LinkedHashMap() {...}

KernelCache.new LinkedHashMap() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total2 of 1586%1 of 250%130202
removeEldestEntry(Map.Entry)2675%1150%120101
{...}7100%n/a010101
\ No newline at end of file +KernelCache.new LinkedHashMap() {...}

KernelCache.new LinkedHashMap() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total15 of 150%2 of 20%332222
removeEldestEntry(Map.Entry)80%20%221111
{...}70%n/a111111
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices.opencl/KernelCache.html b/docs/coverage/test/html/neureka.devices.opencl/KernelCache.html index 92d3faacd..8f10fc87f 100644 --- a/docs/coverage/test/html/neureka.devices.opencl/KernelCache.html +++ b/docs/coverage/test/html/neureka.devices.opencl/KernelCache.html @@ -1 +1 @@ -KernelCache

KernelCache

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 28100%0 of 0n/a040604
KernelCache()10100%n/a010201
put(String, OpenCLDevice.cl_ad_hoc)7100%n/a010201
get(String)6100%n/a010101
has(String)5100%n/a010101
\ No newline at end of file +KernelCache

KernelCache

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total28 of 280%0 of 0n/a446644
KernelCache()100%n/a112211
put(String, OpenCLDevice.cl_ad_hoc)70%n/a112211
get(String)60%n/a111111
has(String)50%n/a111111
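The KernelCache.java hunk that follows caps its ad-hoc kernel map at 256 entries by overriding LinkedHashMap.removeEldestEntry. The same bounded-map idiom in isolation (capacity and key/value types here are illustrative):

    import java.util.LinkedHashMap;
    import java.util.Map;

    // Bounded map idiom used by KernelCache: a LinkedHashMap that silently evicts
    // its eldest entry once a fixed capacity is exceeded.
    public class BoundedCacheSketch {
        private static final int CAPACITY = 3; // KernelCache uses 256

        private final Map<String, String> cache =
            new LinkedHashMap<String, String>(CAPACITY) {
                @Override
                protected boolean removeEldestEntry(Map.Entry<String, String> eldest) {
                    return size() > CAPACITY; // evict once the map grows past the cap
                }
            };

        public static void main(String[] args) {
            BoundedCacheSketch sketch = new BoundedCacheSketch();
            for (int i = 1; i <= 5; i++) sketch.cache.put("kernel" + i, "src" + i);
            System.out.println(sketch.cache.keySet()); // [kernel3, kernel4, kernel5]
        }
    }

Because the map is built with the plain LinkedHashMap(int) constructor it evicts in insertion order rather than access order; a true LRU would need the (capacity, loadFactor, accessOrder = true) constructor.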
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices.opencl/KernelCache.java.html b/docs/coverage/test/html/neureka.devices.opencl/KernelCache.java.html index be3443c86..410c9caf7 100644 --- a/docs/coverage/test/html/neureka.devices.opencl/KernelCache.java.html +++ b/docs/coverage/test/html/neureka.devices.opencl/KernelCache.java.html @@ -12,30 +12,30 @@ * anywhere but within this library. <br> * This class or its public methods might change or get removed in future versions!</b> */ -public final class KernelCache { +public final class KernelCache { private final static int CAPACITY = 256; - private final Map<String, OpenCLDevice.cl_ad_hoc> _adhocKernels = - new LinkedHashMap<String, OpenCLDevice.cl_ad_hoc>(CAPACITY) { + private final Map<String, OpenCLDevice.cl_ad_hoc> _adhocKernels = + new LinkedHashMap<String, OpenCLDevice.cl_ad_hoc>(CAPACITY) { @Override protected boolean removeEldestEntry(final Map.Entry eldest) { - return size() > CAPACITY; + return size() > CAPACITY; } }; public void put( String name, OpenCLDevice.cl_ad_hoc kernel ) { // Storing the ad hoc object in a fixed size map for fast access by operations: - _adhocKernels.put( name, kernel ); - } + _adhocKernels.put( name, kernel ); + } public boolean has( String name ) { - return _adhocKernels.containsKey( name ); + return _adhocKernels.containsKey( name ); } public OpenCLDevice.cl_ad_hoc get( String name ) { - return _adhocKernels.get( name ); + return _adhocKernels.get( name ); } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices.opencl/KernelCaller.html b/docs/coverage/test/html/neureka.devices.opencl/KernelCaller.html index 15dd5b5d1..cc00c2332 100644 --- a/docs/coverage/test/html/neureka.devices.opencl/KernelCaller.html +++ b/docs/coverage/test/html/neureka.devices.opencl/KernelCaller.html @@ -1 +1 @@ -KernelCaller

KernelCaller

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total97 of 55582%11 of 3063%9381177123
_releaseEvents(Tensor[])450%40%335511
call(long[], long[])173567%2466%242701
pass(Number)144877%11191%171701
call(int)133271%1150%122601
_getWaitList(Tensor[])83782%3350%241601
pass(Tensor)33100%n/a010401
passAllOf(Tensor)32100%n/a010401
passConfOf(Tensor)25100%n/a010401
pass(int[])21100%n/a010301
pass(float[])21100%n/a010301
pass(double[])21100%n/a010301
pass(short[])21100%n/a010301
pass(long[])21100%n/a010301
pass(byte[])21100%n/a010301
passLocalFloats(long)18100%n/a010301
KernelCaller(cl_kernel, cl_command_queue)17100%n/a010601
pass(int)9100%n/a010101
pass(float)9100%n/a010101
pass(double)9100%n/a010101
pass(short)9100%n/a010101
pass(long)9100%n/a010101
pass(byte)9100%n/a010101
static {...}1100%n/a010101
\ No newline at end of file +KernelCaller

KernelCaller

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total554 of 5550%30 of 300%373876772223
pass(Number)620%120%777711
call(long[], long[])520%60%447711
call(int)450%20%226611
_releaseEvents(Tensor[])450%40%335511
_getWaitList(Tensor[])450%60%446611
pass(Tensor)330%n/a114411
passAllOf(Tensor)320%n/a114411
passConfOf(Tensor)250%n/a114411
pass(int[])210%n/a113311
pass(float[])210%n/a113311
pass(double[])210%n/a113311
pass(short[])210%n/a113311
pass(long[])210%n/a113311
pass(byte[])210%n/a113311
passLocalFloats(long)180%n/a113311
KernelCaller(cl_kernel, cl_command_queue)170%n/a116611
pass(int)90%n/a111111
pass(float)90%n/a111111
pass(double)90%n/a111111
pass(short)90%n/a111111
pass(long)90%n/a111111
pass(byte)90%n/a111111
static {...}1100%n/a010101
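The KernelCaller.java hunks below follow a fluent builder shape: each pass(...) binds the next kernel argument slot via clSetKernelArg and increments an internal _argId, and call(...) finally enqueues the kernel with clEnqueueNDRangeKernel. A plain-Java sketch of that shape without any real OpenCL calls (all names here are illustrative):

    import java.util.ArrayList;
    import java.util.List;

    // Fluent argument-binding sketch in the shape of KernelCaller:
    // every pass(...) fills the next argument slot, call(...) dispatches with all bound arguments.
    final class CallerSketch {
        private final List<Object> args = new ArrayList<>();
        private int argId = 0; // mirrors KernelCaller._argId

        CallerSketch pass(float value)   { return bind(value); }  // clSetKernelArg(kernel, argId, Sizeof.cl_float, ...)
        CallerSketch pass(int... values) { return bind(values); } // clSetKernelArg(kernel, argId, Sizeof.cl_int * length, ...)

        private CallerSketch bind(Object value) {
            args.add(value); // slot number is argId; the real class hands it to clSetKernelArg
            argId++;
            return this;     // fluent/factory style, as in the original
        }

        void call(int globalWorkSize) { // clEnqueueNDRangeKernel(queue, kernel, 1, ...) in the real class
            System.out.println("dispatch over " + globalWorkSize + " work items with " + argId + " bound args");
        }

        public static void main(String[] a) {
            new CallerSketch().pass(0.5f).pass(64, 64).call(4096);
        }
    }

The real class additionally remembers every passed tensor so that call(...) can wait on and release their pending cl_events before enqueueing, as shown by _getWaitList and _releaseEvents in the hunk below.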
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices.opencl/KernelCaller.java.html b/docs/coverage/test/html/neureka.devices.opencl/KernelCaller.java.html index 3b1273d10..f9c2da5c1 100644 --- a/docs/coverage/test/html/neureka.devices.opencl/KernelCaller.java.html +++ b/docs/coverage/test/html/neureka.devices.opencl/KernelCaller.java.html @@ -19,18 +19,18 @@ private final cl_kernel _kernel; private final List<Tensor<Number>> _inputs; - private int _argId = 0; + private int _argId = 0; /** * * @param kernel The kernel which ought to be called. * @param queue The queue on which calls ought to be dispatched. */ - public KernelCaller( cl_kernel kernel, cl_command_queue queue ) { - _queue = queue; - _kernel = kernel; - _inputs = new ArrayList<>(); - } + public KernelCaller( cl_kernel kernel, cl_command_queue queue ) { + _queue = queue; + _kernel = kernel; + _inputs = new ArrayList<>(); + } /** * This method passes 2 arguments to the kernel. @@ -39,10 +39,10 @@ * @return This very KernelCaller instance (factory pattern). */ public KernelCaller passAllOf( Tensor<Number> tensor ) { - _inputs.add( tensor ); - clSetKernelArg( _kernel, _argId, Sizeof.cl_mem, Pointer.to( tensor.getMut().getData().as( OpenCLDevice.cl_tsr.class ).value.data ) ); - _argId++; - return passConfOf( tensor ); + _inputs.add( tensor ); + clSetKernelArg( _kernel, _argId, Sizeof.cl_mem, Pointer.to( tensor.getMut().getData().as( OpenCLDevice.cl_tsr.class ).value.data ) ); + _argId++; + return passConfOf( tensor ); } /** @@ -55,10 +55,10 @@ * @return This very KernelCaller instance (factory pattern). */ public KernelCaller passConfOf( Tensor<Number> tensor ) { - OpenCLDevice device = (OpenCLDevice) tensor.getDevice(); - clSetKernelArg( _kernel, _argId, Sizeof.cl_mem, Pointer.to( device.clConfigOf(tensor ).data ) ); - _argId++; - return this; + OpenCLDevice device = (OpenCLDevice) tensor.getDevice(); + clSetKernelArg( _kernel, _argId, Sizeof.cl_mem, Pointer.to( device.clConfigOf(tensor ).data ) ); + _argId++; + return this; } /** @@ -68,10 +68,10 @@ * @return This very KernelCaller instance (factory pattern). */ public <T extends Number> KernelCaller pass( Tensor<T> tensor ) { - _inputs.add( tensor.getMut().upcast(Number.class) ); - clSetKernelArg( _kernel, _argId, Sizeof.cl_mem, Pointer.to( tensor.getMut().getData().as( OpenCLDevice.cl_tsr.class ).value.data ) ); - _argId++; - return this; + _inputs.add( tensor.getMut().upcast(Number.class) ); + clSetKernelArg( _kernel, _argId, Sizeof.cl_mem, Pointer.to( tensor.getMut().getData().as( OpenCLDevice.cl_tsr.class ).value.data ) ); + _argId++; + return this; } /** @@ -80,7 +80,7 @@ * @return This very KernelCaller instance (factory pattern). */ public KernelCaller pass( int value ) { - return this.pass( new int[]{ value } ); + return this.pass( new int[]{ value } ); } /** @@ -90,9 +90,9 @@ * @return This very KernelCaller instance (factory pattern). */ public KernelCaller pass( int... values ) { - clSetKernelArg( _kernel, _argId, Sizeof.cl_int * (long) values.length, Pointer.to( values ) ); - _argId++; - return this; + clSetKernelArg( _kernel, _argId, Sizeof.cl_int * (long) values.length, Pointer.to( values ) ); + _argId++; + return this; } /** @@ -102,33 +102,33 @@ * @return This very KernelCaller instance (factory pattern). */ public KernelCaller pass( float... 
values ) { - clSetKernelArg( _kernel, _argId, Sizeof.cl_float * (long) values.length, Pointer.to( values ) ); - _argId++; - return this; + clSetKernelArg( _kernel, _argId, Sizeof.cl_float * (long) values.length, Pointer.to( values ) ); + _argId++; + return this; } public KernelCaller pass( double... values ) { - clSetKernelArg( _kernel, _argId, Sizeof.cl_double * (long) values.length, Pointer.to( values ) ); - _argId++; - return this; + clSetKernelArg( _kernel, _argId, Sizeof.cl_double * (long) values.length, Pointer.to( values ) ); + _argId++; + return this; } public KernelCaller pass( short... values ) { - clSetKernelArg( _kernel, _argId, Sizeof.cl_short * (long) values.length, Pointer.to( values ) ); - _argId++; - return this; + clSetKernelArg( _kernel, _argId, Sizeof.cl_short * (long) values.length, Pointer.to( values ) ); + _argId++; + return this; } public KernelCaller pass( long... values ) { - clSetKernelArg( _kernel, _argId, Sizeof.cl_long * (long) values.length, Pointer.to( values ) ); - _argId++; - return this; + clSetKernelArg( _kernel, _argId, Sizeof.cl_long * (long) values.length, Pointer.to( values ) ); + _argId++; + return this; } public KernelCaller pass( byte... values ) { - clSetKernelArg( _kernel, _argId, Sizeof.cl_char * (long) values.length, Pointer.to( values ) ); - _argId++; - return this; + clSetKernelArg( _kernel, _argId, Sizeof.cl_char * (long) values.length, Pointer.to( values ) ); + _argId++; + return this; } /** @@ -136,39 +136,39 @@ * @return This very KernelCaller instance (factory pattern). */ public KernelCaller pass( float value ) { - return this.pass( new float[]{ value } ); + return this.pass( new float[]{ value } ); } public KernelCaller pass( double value ) { - return this.pass( new double[]{ value } ); + return this.pass( new double[]{ value } ); } public KernelCaller pass( short value ) { - return this.pass( new short[]{ value } ); + return this.pass( new short[]{ value } ); } public KernelCaller pass( long value ) { - return this.pass( new long[]{ value } ); + return this.pass( new long[]{ value } ); } public KernelCaller pass( byte value ) { - return this.pass( new byte[]{ value } ); + return this.pass( new byte[]{ value } ); } public KernelCaller pass( Number value ) { - if ( value instanceof Float ) return this.pass( value.floatValue() ); - else if ( value instanceof Double ) return this.pass( value.doubleValue() ); - else if ( value instanceof Integer ) return this.pass( value.intValue() ); - else if ( value instanceof Long ) return this.pass( value.longValue() ); - else if ( value instanceof Short ) return this.pass( value.shortValue() ); - else if ( value instanceof Byte ) return this.pass( value.byteValue() ); + if ( value instanceof Float ) return this.pass( value.floatValue() ); + else if ( value instanceof Double ) return this.pass( value.doubleValue() ); + else if ( value instanceof Integer ) return this.pass( value.intValue() ); + else if ( value instanceof Long ) return this.pass( value.longValue() ); + else if ( value instanceof Short ) return this.pass( value.shortValue() ); + else if ( value instanceof Byte ) return this.pass( value.byteValue() ); else throw new IllegalArgumentException( "Unsupported number type: " + value.getClass().getName() ); } public KernelCaller passLocalFloats( long size ) { - clSetKernelArg( _kernel, _argId, Sizeof.cl_float * (long) size, null ); - _argId++; - return this; + clSetKernelArg( _kernel, _argId, Sizeof.cl_float * (long) size, null ); + _argId++; + return this; } /** @@ -177,12 +177,12 @@ */ 
public void call( int globalWorkSize ) { - cl_event[] events = _getWaitList( _inputs.toArray( new Tensor[ 0 ] ) ); - if ( events.length > 0 ) { + cl_event[] events = _getWaitList( _inputs.toArray( new Tensor[ 0 ] ) ); + if ( events.length > 0 ) { clWaitForEvents( events.length, events ); _releaseEvents( _inputs.toArray( new Tensor[ 0 ] ) ); } - clEnqueueNDRangeKernel( + clEnqueueNDRangeKernel( _queue, _kernel, 1, null, @@ -192,7 +192,7 @@ null, null ); - } + } /** * Use this to call the kernel with 2 long arrays defining how the kernel should be indexed and parallelized. @@ -215,13 +215,13 @@ */ public void call( long[] globalWorkSizes, long[] localWorkSizes ) { - cl_event[] events = _getWaitList( _inputs.toArray( new Tensor[ 0 ] ) ); - if ( events.length > 0 ) { + cl_event[] events = _getWaitList( _inputs.toArray( new Tensor[ 0 ] ) ); + if ( events.length > 0 ) { clWaitForEvents( events.length, events ); _releaseEvents( _inputs.toArray( new Tensor[ 0 ] ) ); } - assert localWorkSizes == null || globalWorkSizes.length == localWorkSizes.length; - clEnqueueNDRangeKernel( + assert localWorkSizes == null || globalWorkSizes.length == localWorkSizes.length; + clEnqueueNDRangeKernel( _queue, _kernel, globalWorkSizes.length, null, @@ -231,7 +231,7 @@ null, null ); - } + } private void _releaseEvents( Tensor<Number>[] tensors ) { @@ -245,15 +245,15 @@ private cl_event[] _getWaitList( Tensor<Number>[] tensors ) { - List<cl_event> list = new ArrayList<>(); - for ( Tensor<Number> t : tensors ) { - cl_event event = t.getMut().getData().as( OpenCLDevice.cl_tsr.class ).value.event; - if ( event != null && !list.contains(event) ) { + List<cl_event> list = new ArrayList<>(); + for ( Tensor<Number> t : tensors ) { + cl_event event = t.getMut().getData().as( OpenCLDevice.cl_tsr.class ).value.event; + if ( event != null && !list.contains(event) ) { list.add( event ); } } - return list.toArray( new cl_event[ 0 ] ); + return list.toArray( new cl_event[ 0 ] ); } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices.opencl/KernelCode.html b/docs/coverage/test/html/neureka.devices.opencl/KernelCode.html index 3777fbbae..28f756e56 100644 --- a/docs/coverage/test/html/neureka.devices.opencl/KernelCode.html +++ b/docs/coverage/test/html/neureka.devices.opencl/KernelCode.html @@ -1 +1 @@ -KernelCode

KernelCode

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total32 of 6046%6 of 60%51051527
equals(Object)230%60%444411
hashCode()90%n/a111111
KernelCode(String, String, DataType)12100%n/a010501
KernelCode(String, String)7100%n/a010201
getName()3100%n/a010101
getCode()3100%n/a010101
getDataType()3100%n/a010101
\ No newline at end of file +KernelCode

KernelCode

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total60 of 600%6 of 60%1010151577
equals(Object)230%60%444411
KernelCode(String, String, DataType)120%n/a115511
hashCode()90%n/a111111
KernelCode(String, String)70%n/a112211
getName()30%n/a111111
getCode()30%n/a111111
getDataType()30%n/a111111
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices.opencl/KernelCode.java.html b/docs/coverage/test/html/neureka.devices.opencl/KernelCode.java.html index 800391bb1..250a6e9a8 100644 --- a/docs/coverage/test/html/neureka.devices.opencl/KernelCode.java.html +++ b/docs/coverage/test/html/neureka.devices.opencl/KernelCode.java.html @@ -11,20 +11,20 @@ private final DataType<?> _dataType; public KernelCode( String name, String code ) { - this( name, code, DataType.of(Float.class) ); - } + this( name, code, DataType.of(Float.class) ); + } - public KernelCode( String name, String code, DataType<?> dataType ) { - _name = name; - _code = code; - _dataType = dataType; - } + public KernelCode( String name, String code, DataType<?> dataType ) { + _name = name; + _code = code; + _dataType = dataType; + } - public String getName() { return _name; } + public String getName() { return _name; } - public String getCode() { return _code; } + public String getCode() { return _code; } - public DataType<?> getDataType() { return _dataType; } + public DataType<?> getDataType() { return _dataType; } @Override public boolean equals( Object o ) { @@ -40,4 +40,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices.opencl/OpenCLDevice$CLData.html b/docs/coverage/test/html/neureka.devices.opencl/OpenCLDevice$CLData.html index e273bf8d8..ff1eae96e 100644 --- a/docs/coverage/test/html/neureka.devices.opencl/OpenCLDevice$CLData.html +++ b/docs/coverage/test/html/neureka.devices.opencl/OpenCLDevice$CLData.html @@ -1 +1 @@ -OpenCLDevice.CLData

OpenCLDevice.CLData

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total4 of 4190%1 of 475%150803
OpenCLDevice.CLData(AbstractBaseDevice, Object, DataType)41275%1150%120301
lambda$new$0(Object)24100%2100%020401
static {...}1100%n/a010101
\ No newline at end of file +OpenCLDevice.CLData

OpenCLDevice.CLData

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total41 of 410%4 of 40%558833
lambda$new$0(Object)240%20%224411
OpenCLDevice.CLData(AbstractBaseDevice, Object, DataType)160%20%223311
static {...}10%n/a111111
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices.opencl/OpenCLDevice$Query.html b/docs/coverage/test/html/neureka.devices.opencl/OpenCLDevice$Query.html index 73c901a24..3becbc449 100644 --- a/docs/coverage/test/html/neureka.devices.opencl/OpenCLDevice$Query.html +++ b/docs/coverage/test/html/neureka.devices.opencl/OpenCLDevice$Query.html @@ -1 +1 @@ -OpenCLDevice.Query

OpenCLDevice.Query

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total56 of 19270%3 of 650%413831210
getString(cl_platform_id, int)360%n/a115511
getLongs(int, ByteBuffer, long[])172054%3350%242601
OpenCLDevice.Query()30%n/a111111
getString(cl_device_id, int)36100%n/a010501
getSizes(cl_device_id, int, int)27100%n/a010501
getInts(cl_device_id, int, int)16100%n/a010301
getLongs(cl_device_id, int, int)16100%n/a010301
getInt(cl_device_id, int)7100%n/a010101
getLong(cl_device_id, int)7100%n/a010101
getSize(cl_device_id, int)7100%n/a010101
\ No newline at end of file +OpenCLDevice.Query

OpenCLDevice.Query

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total192 of 1920%6 of 60%131331311010
getLongs(int, ByteBuffer, long[])370%60%446611
getString(cl_device_id, int)360%n/a115511
getString(cl_platform_id, int)360%n/a115511
getSizes(cl_device_id, int, int)270%n/a115511
getInts(cl_device_id, int, int)160%n/a113311
getLongs(cl_device_id, int, int)160%n/a113311
getInt(cl_device_id, int)70%n/a111111
getLong(cl_device_id, int)70%n/a111111
getSize(cl_device_id, int)70%n/a111111
OpenCLDevice.Query()30%n/a111111
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices.opencl/OpenCLDevice$Type.html b/docs/coverage/test/html/neureka.devices.opencl/OpenCLDevice$Type.html index 339dc0b6d..05cee36f6 100644 --- a/docs/coverage/test/html/neureka.devices.opencl/OpenCLDevice$Type.html +++ b/docs/coverage/test/html/neureka.devices.opencl/OpenCLDevice$Type.html @@ -1 +1 @@ -OpenCLDevice.Type

OpenCLDevice.Type

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 45100%0 of 0n/a010201
static {...}45100%n/a010201
\ No newline at end of file +OpenCLDevice.Type

OpenCLDevice.Type

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total74 of 740%0 of 0n/a112211
static {...}740%n/a112211
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices.opencl/OpenCLDevice$cl_ad_hoc.html b/docs/coverage/test/html/neureka.devices.opencl/OpenCLDevice$cl_ad_hoc.html index 0e2303183..18b74aa13 100644 --- a/docs/coverage/test/html/neureka.devices.opencl/OpenCLDevice$cl_ad_hoc.html +++ b/docs/coverage/test/html/neureka.devices.opencl/OpenCLDevice$cl_ad_hoc.html @@ -1 +1 @@ -OpenCLDevice.cl_ad_hoc

OpenCLDevice.cl_ad_hoc

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 12100%0 of 0n/a010501
OpenCLDevice.cl_ad_hoc(String, cl_kernel, cl_program)12100%n/a010501
\ No newline at end of file +OpenCLDevice.cl_ad_hoc

OpenCLDevice.cl_ad_hoc

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total12 of 120%0 of 0n/a115511
OpenCLDevice.cl_ad_hoc(String, cl_kernel, cl_program)120%n/a115511
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices.opencl/OpenCLDevice$cl_config.html b/docs/coverage/test/html/neureka.devices.opencl/OpenCLDevice$cl_config.html index 8ce569629..d09c7f7aa 100644 --- a/docs/coverage/test/html/neureka.devices.opencl/OpenCLDevice$cl_config.html +++ b/docs/coverage/test/html/neureka.devices.opencl/OpenCLDevice$cl_config.html @@ -1 +1 @@ -OpenCLDevice.cl_config

OpenCLDevice.cl_config

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 3100%0 of 0n/a010101
OpenCLDevice.cl_config()3100%n/a010101
\ No newline at end of file +OpenCLDevice.cl_config

OpenCLDevice.cl_config

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total3 of 30%0 of 0n/a111111
OpenCLDevice.cl_config()30%n/a111111
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices.opencl/OpenCLDevice$cl_dtype.html b/docs/coverage/test/html/neureka.devices.opencl/OpenCLDevice$cl_dtype.html index d1281ad5b..0c920479f 100644 --- a/docs/coverage/test/html/neureka.devices.opencl/OpenCLDevice$cl_dtype.html +++ b/docs/coverage/test/html/neureka.devices.opencl/OpenCLDevice$cl_dtype.html @@ -1 +1 @@ -OpenCLDevice.cl_dtype

OpenCLDevice.cl_dtype

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 57100%0 of 0n/a010101
static {...}57100%n/a010101
\ No newline at end of file +OpenCLDevice.cl_dtype

OpenCLDevice.cl_dtype

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 94100%0 of 0n/a010101
static {...}94100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices.opencl/OpenCLDevice$cl_tsr$cl_value.html b/docs/coverage/test/html/neureka.devices.opencl/OpenCLDevice$cl_tsr$cl_value.html index 73d7f2e76..1d7e80e09 100644 --- a/docs/coverage/test/html/neureka.devices.opencl/OpenCLDevice$cl_tsr$cl_value.html +++ b/docs/coverage/test/html/neureka.devices.opencl/OpenCLDevice$cl_tsr$cl_value.html @@ -1 +1 @@ -OpenCLDevice.cl_tsr.cl_value

OpenCLDevice.cl_tsr.cl_value

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 6100%0 of 0n/a010101
OpenCLDevice.cl_tsr.cl_value(int)6100%n/a010101
\ No newline at end of file +OpenCLDevice.cl_tsr.cl_value

OpenCLDevice.cl_tsr.cl_value

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total6 of 60%0 of 0n/a111111
OpenCLDevice.cl_tsr.cl_value(int)60%n/a111111
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.devices.opencl/OpenCLDevice$cl_tsr.html b/docs/coverage/test/html/neureka.devices.opencl/OpenCLDevice$cl_tsr.html
index 1c2695d2f..9ad0f1a5c 100644
[Flattened JaCoCo coverage page for OpenCLDevice.cl_tsr — old: 87% instruction coverage (3 of 24 missed) and 50% branch coverage, with equals(Object) partially covered and the constructor fully covered; new: 0% instruction coverage (28 of 28 missed) and 0% branch coverage, and the method list now also includes a hashCode() method, all of them uncovered.]
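Note on the new hashCode() entry above: the accompanying source change (see the OpenCLDevice.java.html diff further below) adds a hashCode() override to cl_tsr that is derived from the same value reference its equals() compares, restoring the equals/hashCode contract. A minimal sketch of that pattern, using a hypothetical RefWrapper class since cl_tsr itself is package-private:

import java.util.Objects;

// Hypothetical wrapper mirroring the equals()/hashCode() pattern that the diff
// introduces for OpenCLDevice.cl_tsr: equals() compares the wrapped reference,
// so hashCode() must be derived from that same reference, otherwise instances
// that are equal could land in different buckets of a hash-based collection.
final class RefWrapper {

    private final Object ref;

    RefWrapper( Object ref ) { this.ref = Objects.requireNonNull(ref); }

    @Override
    public boolean equals( Object obj ) {
        if ( !(obj instanceof RefWrapper) ) return false;
        return ((RefWrapper) obj).ref == this.ref; // identity of the wrapped object, like cl_tsr
    }

    @Override
    public int hashCode() { return ref.hashCode(); } // consistent with equals(), like the added method
}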
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka.devices.opencl/OpenCLDevice.html b/docs/coverage/test/html/neureka.devices.opencl/OpenCLDevice.html
index 0e501858d..3d0d3cbc8 100644
[Flattened JaCoCo coverage page for OpenCLDevice with a per-method breakdown of all 88 methods — old: 78% instruction coverage (344 of 1,585 missed) and 66% branch coverage (32 of 95 missed), with the largest gaps in getKernel(ExecutionCall), type(), compileAdHocKernel(String, String) and a number of device-property query methods; new: 0% instruction coverage (1,582 of 1,586 missed) and 0% branch coverage (95 of 95 missed), every method uncovered except the static initializer (4 instructions, 100%).]
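The OpenCLDevice.java.html diff that follows includes the ad hoc kernel cache (hasAdHocKernel, compileAdHocKernel, findOrCompileAdHocKernel). A minimal usage sketch, assuming an OpenCLDevice instance has already been obtained from the backend and that KernelCaller is accessible from application code; the class name, the kernel name scale_by_two and its OpenCL C source are illustrative only:

import java.util.function.Supplier;

import neureka.devices.opencl.KernelCaller;
import neureka.devices.opencl.OpenCLDevice;

public class AdHocKernelSketch {

    // Compiles (or reuses from the kernel cache) a trivial element-wise kernel.
    // The kernel name must match the '__kernel void <name>' declaration in the
    // source string, otherwise kernel creation fails with CL_INVALID_KERNEL_NAME.
    static KernelCaller scaleKernel( OpenCLDevice device ) {
        String name = "scale_by_two"; // hypothetical kernel name
        Supplier<String> source = () ->
            "__kernel void scale_by_two(__global float* data) {\n" +
            "    unsigned int i = get_global_id(0);\n" +
            "    data[i] = 2.0f * data[i];\n" +
            "}\n";
        // Repeated calls with the same name return the cached kernel instead of recompiling.
        return device.findOrCompileAdHocKernel( name, source );
    }
}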
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices.opencl/OpenCLDevice.java.html b/docs/coverage/test/html/neureka.devices.opencl/OpenCLDevice.java.html index 004ec5df9..0fb05a798 100644 --- a/docs/coverage/test/html/neureka.devices.opencl/OpenCLDevice.java.html +++ b/docs/coverage/test/html/neureka.devices.opencl/OpenCLDevice.java.html @@ -88,12 +88,12 @@ private static final Logger _LOG = LoggerFactory.getLogger(OpenCLDevice.class); static OpenCLDevice of( OpenCLPlatform platform, cl_device_id did ) { - if (!platform.has(did)) platform.put(did, new OpenCLDevice(platform, did)); - return platform.get(did); + if (!platform.has(did)) platform.put(did, new OpenCLDevice(platform, did)); + return platform.get(did); } - public enum Type { - CPU, GPU, ACCELERATOR, DEFAULT, CUSTOM, ALL, UNKNOWN + public enum Type { + CPU, GPU, ACCELERATOR, DEFAULT, CUSTOM, ALL, UNKNOWN } enum cl_dtype { F32, F64, I64, I32, I16, I8, U32, U16, U8 } @@ -104,7 +104,7 @@ | --------------------------- */ - private final KernelCache _kernelCache = new KernelCache(); + private final KernelCache _kernelCache = new KernelCache(); private final cl_device_id _deviceId; @@ -121,7 +121,7 @@ private final cl_command_queue _queue; - private final Map<NDConfiguration, cl_config> _configs = new WeakHashMap<>(); + private final Map<NDConfiguration, cl_config> _configs = new WeakHashMap<>(); /*================================================================================================================== | @@ -134,38 +134,38 @@ * @param deviceId The underlying OpenCL id of this device. */ private OpenCLDevice( OpenCLPlatform platform, cl_device_id deviceId ) { - super(); - _deviceId = deviceId; - _platform = platform; - _queue = clCreateCommandQueueWithProperties(// Create a command-queue for the selected device - platform.getContext(), deviceId, + super(); + _deviceId = deviceId; + _platform = platform; + _queue = clCreateCommandQueueWithProperties(// Create a command-queue for the selected device + platform.getContext(), deviceId, null, null ); - _cleaning(this, () -> clReleaseCommandQueue(_queue)); - } + _cleaning(this, () -> clReleaseCommandQueue(_queue)); + } public final String toString() { - return "OpenCLDevice[id=0x" + Long.toHexString(_deviceId.getNativePointer()) + ",platform=0x" + Long.toHexString(_platform.getId()) + "]"; + return "OpenCLDevice[id=0x" + Long.toHexString(_deviceId.getNativePointer()) + ",platform=0x" + Long.toHexString(_platform.getId()) + "]"; } public final cl_device_id getId() { return _deviceId; } - public final OpenCLPlatform getPlatform() { return _platform; } + public final OpenCLPlatform getPlatform() { return _platform; } /** * @param name The name of the kernel whose presents should be checked. * @return True if the kernel is present in the cache, false otherwise. */ - public boolean hasAdHocKernel( String name ) { return _kernelCache.has(name); } + public boolean hasAdHocKernel( String name ) { return _kernelCache.has(name); } /** * @param name The name of the kernel which should be retrieved. * @return The kernel with the given name if it is present in the cache, throws an exception otherwise. 
*/ public KernelCaller getAdHocKernel( String name ) { - cl_ad_hoc adHoc = _kernelCache.get(name); - if (adHoc != null) return new KernelCaller(adHoc.kernel, _queue); + cl_ad_hoc adHoc = _kernelCache.get(name); + if (adHoc != null) return new KernelCaller(adHoc.kernel, _queue); else throw new IllegalArgumentException("No ad hoc kernel with name '" + name + "' found!"); } @@ -174,8 +174,8 @@ * @return An {@link Optional} containing the kernel with the given name if it is present in the cache, an empty optional otherwise. */ public Optional<KernelCaller> findAdHocKernel( String name ) { - cl_ad_hoc adHoc = _kernelCache.get(name); - if (adHoc != null) return Optional.of(new KernelCaller(adHoc.kernel, _queue)); + cl_ad_hoc adHoc = _kernelCache.get(name); + if (adHoc != null) return Optional.of(new KernelCaller(adHoc.kernel, _queue)); else return Optional.empty(); } @@ -186,9 +186,9 @@ * or compiled from the given source code if it was not present in the cache. */ public KernelCaller findOrCompileAdHocKernel( String name, Supplier<String> source ) { - cl_ad_hoc adHoc = _kernelCache.get(name); - if ( adHoc != null ) return new KernelCaller(adHoc.kernel, _queue); - else return compileAndGetAdHocKernel(name, source.get()); + cl_ad_hoc adHoc = _kernelCache.get(name); + if ( adHoc != null ) return new KernelCaller(adHoc.kernel, _queue); + else return compileAndGetAdHocKernel(name, source.get()); } /** @@ -206,9 +206,9 @@ * @return The {@link KernelCaller} for the compiled kernel. */ public synchronized KernelCaller compileAndGetAdHocKernel( String name, String source ) { - return compileAdHocKernel( name, source ) - .findAdHocKernel( name ) - .orElseThrow(() -> new RuntimeException("Failed to compile kernel: " + name)); + return compileAdHocKernel( name, source ) + .findAdHocKernel( name ) + .orElseThrow(() -> new RuntimeException("Failed to compile kernel: " + name)); } /** @@ -226,22 +226,22 @@ * @return This very instance in order to enable the factory pattern. */ public synchronized OpenCLDevice compileAdHocKernel( String name, String source ) { - if (this.hasAdHocKernel(name)) { - cl_ad_hoc adHoc = _kernelCache.get(name); - String message = + if (this.hasAdHocKernel(name)) { + cl_ad_hoc adHoc = _kernelCache.get(name); + String message = "Cannot compile kernel source for name '" + name + "' because the name is already taken.\n" + "Use another name or find out why this kernel already exists.\n" + ( - adHoc.source.equals(source) - ? "Besides the name, the source code of the existing kernel is also identical.\n" : "" + adHoc.source.equals(source) + ? "Besides the name, the source code of the existing kernel is also identical.\n" : "" ); - _log.error(message); - throw new IllegalArgumentException(message); + _log.error(message); + throw new IllegalArgumentException(message); } // Create the program for the kernel - cl_program cpProgram = clCreateProgramWithSource( - getPlatform().getContext(), + cl_program cpProgram = clCreateProgramWithSource( + getPlatform().getContext(), 1, new String[]{source}, null, @@ -249,7 +249,7 @@ ); // Build the program - int err = clBuildProgram( + int err = clBuildProgram( cpProgram, 1, new cl_device_id[]{_deviceId}, @@ -258,39 +258,39 @@ null ); - if ( err != CL_SUCCESS ) + if ( err != CL_SUCCESS ) _log.error("Error when trying to compile 'ad hoc kernel' named '"+name+"'! Error code: "+err); //TODO: check compilation errors! 
cl_kernel kernel; try { // Create the kernel - kernel = clCreateKernel(cpProgram, name, null); - } catch (Exception e) { - if (e.getMessage().equals("CL_INVALID_KERNEL_NAME") && !source.contains("__kernel void " + name)) { - String message = "Method 'clCreateKernel' failed! The name of the '__kernel' method declared inside \n" + + kernel = clCreateKernel(cpProgram, name, null); + } catch (Exception e) { + if (e.getMessage().equals("CL_INVALID_KERNEL_NAME") && !source.contains("__kernel void " + name)) { + String message = "Method 'clCreateKernel' failed! The name of the '__kernel' method declared inside \n" + "the source String does not match the provided name needed for kernel creation."; - _log.error(message, e); - throw new IllegalArgumentException(message); + _log.error(message, e); + throw new IllegalArgumentException(message); } _log.error("Method call 'clCreateKernel(.., name=\"" + name + "\", ..)' failed!", e); throw e; - } - cl_ad_hoc adHoc = new cl_ad_hoc(source, kernel, cpProgram); + } + cl_ad_hoc adHoc = new cl_ad_hoc(source, kernel, cpProgram); // Storing the ad hoc object in a weak hash map for fast access by operations : - _kernelCache.put( name, adHoc ); + _kernelCache.put( name, adHoc ); - _cleaning(adHoc, () -> { + _cleaning(adHoc, () -> { clReleaseKernel(kernel); clReleaseProgram(cpProgram); }); - return this; + return this; } @Override public Operation optimizedOperationOf( Function function, String name ) { - return new CLFunctionCompiler( this, function, name ).optimize(); + return new CLFunctionCompiler( this, function, name ).optimize(); } /** @@ -298,10 +298,10 @@ */ @Override public void dispose() { - _numberOfTensors = 0; - clFinish( _queue ); - clReleaseCommandQueue( _queue ); - } + _numberOfTensors = 0; + clFinish( _queue ); + clReleaseCommandQueue( _queue ); + } /** * This method assumes that the passed tensor is stored on this device instance. @@ -314,23 +314,23 @@ */ @Override public Device<Number> restore( Tensor<Number> tensor ) { - if ( !this.has( tensor ) ) { - String message = "The passed tensor cannot be restored from this OpenCL device " + + if ( !this.has( tensor ) ) { + String message = "The passed tensor cannot be restored from this OpenCL device " + "because the tensor is not stored on the device.\n"; - _log.error(message); - throw new IllegalArgumentException(message); + _log.error(message); + throw new IllegalArgumentException(message); } - Object value = _read(JVMData.of(tensor.itemType(), tensor.isVirtual() ? 1 : tensor.size()), tensor, 0).getArray(); + Object value = _read(JVMData.of(tensor.itemType(), tensor.isVirtual() ? 1 : tensor.size()), tensor, 0).getArray(); - Class<?> arrayType = Objects.requireNonNull(tensor.getDataType().getTypeClassInstance(NumericType.class)).holderArrayType(); + Class<?> arrayType = Objects.requireNonNull(tensor.getDataType().getTypeClassInstance(NumericType.class)).holderArrayType(); - value = DataConverter.get().convert( value, arrayType ); + value = DataConverter.get().convert( value, arrayType ); - this.free( tensor ); - tensor.find( Tensor.class ).ifPresent( this::restore ); - tensor.getMut().setItems( value ); - return this; + this.free( tensor ); + tensor.find( Tensor.class ).ifPresent( this::restore ); + tensor.getMut().setItems( value ); + return this; } @@ -344,45 +344,45 @@ * @param tensor The tensor whose data ought to be stored. 
*/ private <T extends Number> void _store(Tensor<T> tensor, Tensor<T> parent ) { - if (!parent.isOutsourced()) throw new IllegalStateException("Data parent is not outsourced!"); - _add( - tensor.getMut().upcast(Number.class), - parent.getMut().getData(), + if (!parent.isOutsourced()) throw new IllegalStateException("Data parent is not outsourced!"); + _add( + tensor.getMut().upcast(Number.class), + parent.getMut().getData(), () -> tensor.set((Component) this) ); - } + } private <T extends Number> void _add( Tensor<Number> tensor, Data<T> parentData, Runnable migration // Causes the device to be a component of the tensor! ) { - if ( this.has( tensor ) ) { - _LOG.debug("Trying to add a tensor to a device which already reports hosting it."); - return; + if ( this.has( tensor ) ) { + _LOG.debug("Trying to add a tensor to a device which already reports hosting it."); + return; } - boolean convertToFloat = Neureka.get() - .backend() - .find(CLBackend.class) - .map( it -> it.getSettings().isAutoConvertToFloat() ) - .orElse(false); + boolean convertToFloat = Neureka.get() + .backend() + .find(CLBackend.class) + .map( it -> it.getSettings().isAutoConvertToFloat() ) + .orElse(false); Data<Number> data; - if ( parentData == null ) { - if ( tensor.getMut().getData().owner() == this ) { + if ( parentData == null ) { + if ( tensor.getMut().getData().owner() == this ) { migration.run(); return; } - JVMData jvmData = null; - jvmData = JVMData.of( tensor.getMut().getData().getOrNull(), convertToFloat ); + JVMData jvmData = null; + jvmData = JVMData.of( tensor.getMut().getData().getOrNull(), convertToFloat ); cl_tsr<Number, Number> newClt; - newClt = _storeNew( jvmData ); - if ( tensor.rqsGradient() && tensor.hasGradient() ) + newClt = _storeNew( jvmData ); + if ( tensor.rqsGradient() && tensor.hasGradient() ) this.store(tensor.gradient().orElseThrow(()->new IllegalStateException("Gradient missing!"))); - cl_mem[] memos = new cl_mem[]{newClt.value.data}; - clEnqueueMigrateMemObjects( + cl_mem[] memos = new cl_mem[]{newClt.value.data}; + clEnqueueMigrateMemObjects( _queue, memos.length, memos, CL_MIGRATE_MEM_OBJECT_HOST, 0, @@ -390,69 +390,69 @@ null ); - data = _dataArrayOf(newClt, (DataType<Number>) _dataTypeOf(newClt)); - } + data = _dataArrayOf(newClt, (DataType<Number>) _dataTypeOf(newClt)); + } else data = (Data<Number>) parentData; - tensor.getMut().setData( data ); - migration.run(); + tensor.getMut().setData( data ); + migration.run(); // When tensors get stored on this device, // they can be implicitly converted to a float tensor: - if ( convertToFloat ) - tensor.getMut().toType(F32.class); - } + if ( convertToFloat ) + tensor.getMut().toType(F32.class); + } private cl_tsr<Number, Number> _storeNew( JVMData jvmData ) { - return _storeNew( jvmData, false ); + return _storeNew( jvmData, false ); } private cl_tsr<Number, Number> _storeNew( JVMData jvmData, boolean allocateTargetSize ) { - cl_tsr.cl_value newVal = new cl_tsr.cl_value((int) (allocateTargetSize ? jvmData.getTargetLength() : jvmData.getLength())); - cl_tsr<Number, Number> newClt = new cl_tsr<>(newVal, jvmData.getType()); - _store( jvmData, newClt, allocateTargetSize ); - return newClt; + cl_tsr.cl_value newVal = new cl_tsr.cl_value((int) (allocateTargetSize ? 
jvmData.getTargetLength() : jvmData.getLength())); + cl_tsr<Number, Number> newClt = new cl_tsr<>(newVal, jvmData.getType()); + _store( jvmData, newClt, allocateTargetSize ); + return newClt; } public cl_config clConfigOf(Tensor<?> t ) { - return clConfigOf( t.getNDConf() ); + return clConfigOf( t.getNDConf() ); } public cl_config clConfigOf(NDConfiguration ndc ) { - cl_config config = _configs.get(ndc); - if ( config == null ) { - config = _writeNewNDConfig( ndc ); - _configs.put(ndc, config); + cl_config config = _configs.get(ndc); + if ( config == null ) { + config = _writeNewNDConfig( ndc ); + _configs.put(ndc, config); } - return config; + return config; } private cl_config _writeNewNDConfig(NDConfiguration ndc ) { - cl_config clf = new cl_config(); + cl_config clf = new cl_config(); //Config format: <[ shape | strides | indicesMap | indices | scale ]> - int[] config = ndc.asInlineArray(); + int[] config = ndc.asInlineArray(); //shape/strides/map/offset/spread - clf.data = clCreateBuffer( - _platform.getContext(), + clf.data = clCreateBuffer( + _platform.getContext(), CL_MEM_READ_WRITE, (long) config.length * Sizeof.cl_int, null, null ); - clEnqueueWriteBuffer( + clEnqueueWriteBuffer( _queue, clf.data, CL_TRUE, 0, (long) config.length * Sizeof.cl_int, - Pointer.to(config), + Pointer.to(config), 0, null, null ); - final cl_mem clConfMem = clf.data; - _cleaning( clf, () -> clReleaseMemObject(clConfMem) ); - return clf; + final cl_mem clConfMem = clf.data; + _cleaning( clf, () -> clReleaseMemObject(clConfMem) ); + return clf; } private void _store( @@ -460,53 +460,53 @@ cl_tsr<?, ?> newClTensor, boolean allocateTarget ) { - long bufferLength = allocateTarget ? jvmData.getTargetLength() : jvmData.getLength(); + long bufferLength = allocateTarget ? jvmData.getTargetLength() : jvmData.getLength(); - cl_mem mem = clCreateBuffer( - _platform.getContext(), + cl_mem mem = clCreateBuffer( + _platform.getContext(), CL_MEM_READ_WRITE, - (long) jvmData.getItemSize() * bufferLength, + (long) jvmData.getItemSize() * bufferLength, null, null ); - newClTensor.value.data = mem; + newClTensor.value.data = mem; // Virtual means that there is only a single value in the JVM array. // So we don't have to write the whole array to the device! // Instead, we can just fill the device memory with the single value. 
- boolean isASingleValue = jvmData.isVirtual(); + boolean isASingleValue = jvmData.isVirtual(); - if ( isASingleValue ) - clEnqueueFillBuffer( - _queue, mem, jvmData.getPointer(), // pattern - jvmData.getItemSize(), 0, - (long) jvmData.getItemSize() * bufferLength, + if ( isASingleValue ) + clEnqueueFillBuffer( + _queue, mem, jvmData.getPointer(), // pattern + jvmData.getItemSize(), 0, + (long) jvmData.getItemSize() * bufferLength, 0, null, null ); else - clEnqueueWriteBuffer( + clEnqueueWriteBuffer( _queue, mem, CL_TRUE, 0, - (long) jvmData.getItemSize() * bufferLength, - jvmData.getPointer(), 0, null, null + (long) jvmData.getItemSize() * bufferLength, + jvmData.getPointer(), 0, null, null ); - } + } @Override public final <T extends Number> Device<Number> free( Tensor<T> tensor ) { - cl_tsr<?, ?> clt = tensor.getMut().getData().as( cl_tsr.class); - if ( clt == null ) return this; - tensor.getMut().setData(null); - tensor.find(Device.class).ifPresent( + cl_tsr<?, ?> clt = tensor.getMut().getData().as( cl_tsr.class); + if ( clt == null ) return this; + tensor.getMut().setData(null); + tensor.find(Device.class).ifPresent( device -> { - tensor.remove( Device.class ); - tensor.find(Tensor.class).ifPresent( + tensor.remove( Device.class ); + tensor.find(Tensor.class).ifPresent( gradient -> - ( (Tensor<Number>) gradient ).find(Device.class).ifPresent( + ( (Tensor<Number>) gradient ).find(Device.class).ifPresent( gradDevice -> { try { - if ( this.has( gradient ) ) gradDevice.restore( gradient ); + if ( this.has( gradient ) ) gradDevice.restore( gradient ); } catch ( Exception exception ) { _LOG.error( @@ -514,29 +514,29 @@ exception ); throw exception; - } - gradient.remove( Device.class ); - }) + } + gradient.remove( Device.class ); + }) ); - } + } ); - return this; + return this; } @Override protected final <T extends Number> T _readItem( Tensor<T> tensor, int index ) { - return (T) _read(JVMData.of(tensor.itemType(), 1), tensor.getMut().upcast(Number.class), index).getElementAt(0); + return (T) _read(JVMData.of(tensor.itemType(), 1), tensor.getMut().upcast(Number.class), index).getElementAt(0); } @Override protected final <T extends Number, A> A _readArray( Tensor<T> tensor, Class<A> arrayType, int start, int size ) { - return (A) _read(JVMData.of(tensor.itemType(), size), tensor.getMut().upcast(Number.class), start).getArray(); + return (A) _read(JVMData.of(tensor.itemType(), size), tensor.getMut().upcast(Number.class), start).getArray(); } @Override protected final <T extends Number> void _writeItem( Tensor<T> tensor, T item, int start, int size ) { - _overwrite( tensor, start, JVMData.of(item, size, 0) ); - } + _overwrite( tensor, start, JVMData.of(item, size, 0) ); + } @Override protected final <T extends Number> void _writeArray( @@ -546,8 +546,8 @@ int start, int size ) { - _overwrite( tensor, start, JVMData.of(array, size, offset) ); - } + _overwrite( tensor, start, JVMData.of(array, size, offset) ); + } @Override public <T extends Number> Data<T> allocate( DataType<T> dataType, NDConfiguration ndc ) { @@ -558,9 +558,9 @@ @Override public <T extends Number> Data<T> allocateFromOne( DataType<T> dataType, NDConfiguration ndc, T initialValue ) { - JVMData jvmData = JVMData.of( initialValue, ndc.size(), false, true ); - cl_tsr<Number, Number> clt = _storeNew(jvmData ); - return (Data<T>) _dataArrayOf(clt, (DataType<Number>) _dataTypeOf(clt)); + JVMData jvmData = JVMData.of( initialValue, ndc.size(), false, true ); + cl_tsr<Number, Number> clt = _storeNew(jvmData ); + return (Data<T>) 
_dataArrayOf(clt, (DataType<Number>) _dataTypeOf(clt)); } @Override @@ -572,41 +572,41 @@ @Override protected Data<Number> _actualize( Tensor<?> tensor ) { - NDConfiguration ndc = tensor.getNDConf(); - Object initialValue = tensor.item(); - cl_tsr<?, ?> clt = tensor.getMut().getData().as( cl_tsr.class); - if ( clt == null ) throw new IllegalStateException("The tensor has no device component!"); - JVMData jvmData = JVMData.of( initialValue, ndc.size(), false, true ); - clt = _storeNew( jvmData, true ); - return _dataArrayOf(clt, (DataType<Number>) _dataTypeOf(clt)); + NDConfiguration ndc = tensor.getNDConf(); + Object initialValue = tensor.item(); + cl_tsr<?, ?> clt = tensor.getMut().getData().as( cl_tsr.class); + if ( clt == null ) throw new IllegalStateException("The tensor has no device component!"); + JVMData jvmData = JVMData.of( initialValue, ndc.size(), false, true ); + clt = _storeNew( jvmData, true ); + return _dataArrayOf(clt, (DataType<Number>) _dataTypeOf(clt)); } @Override protected Data<Number> _virtualize( Tensor<?> tensor ) { - NDConfiguration ndc = tensor.getNDConf(); - Object initialValue = tensor.item(); - cl_tsr<?, ?> clt = tensor.getMut().getData().as( cl_tsr.class); - if ( clt == null ) throw new IllegalStateException("The tensor has no device component!"); - JVMData jvmData = JVMData.of( initialValue, ndc.size(), false, true ); - clt = _storeNew( jvmData, false ); - return _dataArrayOf(clt, (DataType<Number>) _dataTypeOf(clt)); + NDConfiguration ndc = tensor.getNDConf(); + Object initialValue = tensor.item(); + cl_tsr<?, ?> clt = tensor.getMut().getData().as( cl_tsr.class); + if ( clt == null ) throw new IllegalStateException("The tensor has no device component!"); + JVMData jvmData = JVMData.of( initialValue, ndc.size(), false, true ); + clt = _storeNew( jvmData, false ); + return _dataArrayOf(clt, (DataType<Number>) _dataTypeOf(clt)); } @Override protected final DataType<?> _dataTypeOf( Object rawData ) { - LogUtil.nullArgCheck( rawData, "rawData", Object.class ); - if ( rawData instanceof cl_tsr ) { - cl_dtype type = ((cl_tsr) rawData).dtype; - switch ( type ) { - case F32: return DataType.of( Float.class ); - case F64: return DataType.of( Double.class ); + LogUtil.nullArgCheck( rawData, "rawData", Object.class ); + if ( rawData instanceof cl_tsr ) { + cl_dtype type = ((cl_tsr) rawData).dtype; + switch ( type ) { + case F32: return DataType.of( Float.class ); + case F64: return DataType.of( Double.class ); case I32: case U32: - return DataType.of( Integer.class ); - case I64: return DataType.of( Long.class ); + return DataType.of( Integer.class ); + case I64: return DataType.of( Long.class ); case I16: case U16: - return DataType.of( Short.class ); + return DataType.of( Short.class ); case I8: case U8: - return DataType.of( Byte.class ); + return DataType.of( Byte.class ); default: throw new IllegalStateException("Unknown OpenCL data type!"); } } @@ -616,20 +616,20 @@ private void _overwrite( Tensor<?> tensor, long offset, JVMData jvmData ) { - if ( jvmData.getLength() == 0 ) return; - cl_tsr<?, ?> clt = tensor.getMut().getData().as( cl_tsr.class); - - if ( clt.value.event != null ) clWaitForEvents(1, new cl_event[]{clt.value.event}); - clt.value.event = new cl_event(); - long start = offset * jvmData.getItemSize(); - long size = jvmData.getItemSize() * jvmData.getLength(); - clEnqueueWriteBuffer( + if ( jvmData.getLength() == 0 ) return; + cl_tsr<?, ?> clt = tensor.getMut().getData().as( cl_tsr.class); + + if ( clt.value.event != null ) clWaitForEvents(1, new 
cl_event[]{clt.value.event}); + clt.value.event = new cl_event(); + long start = offset * jvmData.getItemSize(); + long size = jvmData.getItemSize() * jvmData.getLength(); + clEnqueueWriteBuffer( _queue, clt.value.data, CL_TRUE, start, size, - jvmData.getPointer(), 0, null, + jvmData.getPointer(), 0, null, clt.value.event ); - } + } @Override protected final <T extends Number> void _swap( Tensor<T> former, Tensor<T> replacement) { @@ -640,53 +640,53 @@ @Override public boolean update( OwnerChangeRequest<Tensor<Number>> changeRequest ) { - super.update(changeRequest); - if ( changeRequest.type() == IsBeing.ADDED ) { - Tensor<Number> newOwner = changeRequest.getNewOwner(); - _updateInternal(newOwner, changeRequest::executeChange); - } else - changeRequest.executeChange(); // This can be an 'add', 'remove' or 'transfer' of this component! - return true; + super.update(changeRequest); + if ( changeRequest.type() == IsBeing.ADDED ) { + Tensor<Number> newOwner = changeRequest.getNewOwner(); + _updateInternal(newOwner, changeRequest::executeChange); + } else + changeRequest.executeChange(); // This can be an 'add', 'remove' or 'transfer' of this component! + return true; } @Override - protected <T extends Number> int _sizeOccupiedBy( Tensor<T> tensor ) { return tensor.getMut().getData().as( cl_tsr.class).value.size; } + protected <T extends Number> int _sizeOccupiedBy( Tensor<T> tensor ) { return tensor.getMut().getData().as( cl_tsr.class).value.size; } @Override protected <T extends Number> Object _readAll( Tensor<T> tensor, boolean clone ) { - cl_tsr<?, ?> clt = tensor.getMut().getData().as( cl_tsr.class); - return _readArray( tensor, tensor.getDataType().dataArrayType(), 0, clt.value.size ); + cl_tsr<?, ?> clt = tensor.getMut().getData().as( cl_tsr.class); + return _readArray( tensor, tensor.getDataType().dataArrayType(), 0, clt.value.size ); } private void _updateInternal( Tensor<Number> newOwner, Runnable migration) { - Tensor<Number> root = _findRoot( newOwner ); - if (root != null) _store(newOwner, root); - else _add( newOwner, null, migration ); - } + Tensor<Number> root = _findRoot( newOwner ); + if (root != null) _store(newOwner, root); + else _add( newOwner, null, migration ); + } private Tensor<Number> _findRoot( Tensor<Number> newOwner ) { - Tensor<Number> root = null; - Relation<Number> relation = newOwner.get(Relation.class); - if ( relation != null ) - root = ((Relation<Number>) newOwner.get(Relation.class)).findRootTensor().orElse(null); + Tensor<Number> root = null; + Relation<Number> relation = newOwner.get(Relation.class); + if ( relation != null ) + root = ((Relation<Number>) newOwner.get(Relation.class)).findRootTensor().orElse(null); - return root; + return root; } private JVMData _read( JVMData jvmData, Tensor<Number> tensor, int offset ) { - cl_tsr<?, ?> clt = tensor.getMut().getData().as( cl_tsr.class); - clEnqueueReadBuffer( + cl_tsr<?, ?> clt = tensor.getMut().getData().as( cl_tsr.class); + clEnqueueReadBuffer( _queue, clt.value.data, CL_TRUE, - (long) offset * jvmData.getItemSize(), // one double == eight byte - (long) jvmData.getItemSize() * jvmData.getLength(), - jvmData.getPointer(), + (long) offset * jvmData.getItemSize(), // one double == eight byte + (long) jvmData.getItemSize() * jvmData.getLength(), + jvmData.getPointer(), 0, null, null ); - return jvmData; + return jvmData; } /** @@ -695,23 +695,23 @@ */ public KernelCaller getKernel( ExecutionCall<OpenCLDevice> call ) { String chosen; - Algorithm algorithm = call.getAlgorithm(); - DeviceAlgorithm<?> 
deviceAlgorithm = ( algorithm instanceof DeviceAlgorithm ? ((DeviceAlgorithm<?>) algorithm) : null ); + Algorithm algorithm = call.getAlgorithm(); + DeviceAlgorithm<?> deviceAlgorithm = ( algorithm instanceof DeviceAlgorithm ? ((DeviceAlgorithm<?>) algorithm) : null ); // We create the kernel name from the chosen algorithm: - ImplementationFor<OpenCLDevice> impl = ( deviceAlgorithm == null ? null : deviceAlgorithm.getImplementationFor(OpenCLDevice.class) ); - if ( impl instanceof CLImplementation && _platform.hasKernel(((CLImplementation) impl).getKernelFor(call).getName()) ) { - chosen = ((CLImplementation) impl).getKernelFor( call ).getName(); + ImplementationFor<OpenCLDevice> impl = ( deviceAlgorithm == null ? null : deviceAlgorithm.getImplementationFor(OpenCLDevice.class) ); + if ( impl instanceof CLImplementation && _platform.hasKernel(((CLImplementation) impl).getKernelFor(call).getName()) ) { + chosen = ((CLImplementation) impl).getKernelFor( call ).getName(); } else chosen = call.getAlgorithm().getName() + "_" + call.getOperation().getIdentifier(); - cl_kernel kernel = _platform.getKernel( chosen ); - if ( kernel == null ) + cl_kernel kernel = _platform.getKernel( chosen ); + if ( kernel == null ) throw new IllegalStateException( "No kernel found for signature '" + chosen + "' for operation '" + call.getOperation().getIdentifier() + "'." ); - return new KernelCaller(kernel, _queue); + return new KernelCaller(kernel, _queue); } /** @@ -719,14 +719,14 @@ * @return A {@link KernelCaller} for calling the requested kernel. */ public KernelCaller getKernel( String name ) { - cl_kernel kernel = _platform.getKernel( name ); - if ( kernel == null ) + cl_kernel kernel = _platform.getKernel( name ); + if ( kernel == null ) throw new IllegalStateException("No kernel found with name '" + name + "'."); - return new KernelCaller(kernel, _queue); + return new KernelCaller(kernel, _queue); } @Override - protected boolean _approveExecutionOf(Tensor<?>[] tensors, int d, Operation type ) { return true; } + protected boolean _approveExecutionOf(Tensor<?>[] tensors, int d, Operation type ) { return true; } /*================================================================================================================== @@ -735,16 +735,16 @@ | --------------------------- */ - public String name() { return Query.getString( _deviceId, CL_DEVICE_NAME ); } + public String name() { return Query.getString( _deviceId, CL_DEVICE_NAME ); } - public String vendor() { return Query.getString(_deviceId, CL_DEVICE_VENDOR); } + public String vendor() { return Query.getString(_deviceId, CL_DEVICE_VENDOR); } - public String version() { return Query.getString(_deviceId, CL_DRIVER_VERSION); } + public String version() { return Query.getString(_deviceId, CL_DRIVER_VERSION); } public Type type() { - long deviceType = Query.getLong(_deviceId, CL_DEVICE_TYPE); - if ( (deviceType & CL_DEVICE_TYPE_CPU ) != 0 ) return Type.CPU; - if ( (deviceType & CL_DEVICE_TYPE_GPU ) != 0 ) return Type.GPU; + long deviceType = Query.getLong(_deviceId, CL_DEVICE_TYPE); + if ( (deviceType & CL_DEVICE_TYPE_CPU ) != 0 ) return Type.CPU; + if ( (deviceType & CL_DEVICE_TYPE_GPU ) != 0 ) return Type.GPU; if ( (deviceType & CL_DEVICE_TYPE_ACCELERATOR ) != 0 ) return Type.ACCELERATOR; if ( (deviceType & CL_DEVICE_TYPE_DEFAULT ) != 0 ) return Type.DEFAULT; if ( (deviceType & CL_DEVICE_TYPE_CUSTOM ) != 0 ) return Type.CUSTOM; @@ -758,15 +758,15 @@ public long[] maxWorkItemSizes() { return Query.getSizes(_deviceId, CL_DEVICE_MAX_WORK_ITEM_SIZES, 3); } - 
public long maxWorkGroupSize() { return Query.getSize(_deviceId, CL_DEVICE_MAX_WORK_GROUP_SIZE); } + public long maxWorkGroupSize() { return Query.getSize(_deviceId, CL_DEVICE_MAX_WORK_GROUP_SIZE); } - public long maxClockFrequenzy() { return Query.getLong(_deviceId, CL_DEVICE_MAX_CLOCK_FREQUENCY); } + public long maxClockFrequenzy() { return Query.getLong(_deviceId, CL_DEVICE_MAX_CLOCK_FREQUENCY); } public int maxAddressBits() { return Query.getInt(_deviceId, CL_DEVICE_ADDRESS_BITS); } public long maxMemAllocSize() { return Query.getLong(_deviceId, CL_DEVICE_MAX_MEM_ALLOC_SIZE); } - public long globalMemSize() { return Query.getLong(_deviceId, CL_DEVICE_GLOBAL_MEM_SIZE); } + public long globalMemSize() { return Query.getLong(_deviceId, CL_DEVICE_GLOBAL_MEM_SIZE); } public int errorCorrectionSupport() { return Query.getInt(_deviceId, CL_DEVICE_ERROR_CORRECTION_SUPPORT); } @@ -774,7 +774,7 @@ public long localMemSize() { return Query.getLong(_deviceId, CL_DEVICE_LOCAL_MEM_SIZE); } - public long maxConstantBufferSize() { return Query.getLong(_deviceId, CL_DEVICE_MAX_CONSTANT_BUFFER_SIZE); } + public long maxConstantBufferSize() { return Query.getLong(_deviceId, CL_DEVICE_MAX_CONSTANT_BUFFER_SIZE); } public long maxConstantBufferSizeKB() { return (int) (Query.getLong(_deviceId, CL_DEVICE_MAX_CONSTANT_BUFFER_SIZE) / 1024); } @@ -782,31 +782,31 @@ public int maxReadImageArgs() { return Query.getInt(_deviceId, CL_DEVICE_MAX_READ_IMAGE_ARGS); } - public int maxWriteImageArgs() { return Query.getInt(_deviceId, CL_DEVICE_MAX_WRITE_IMAGE_ARGS); } + public int maxWriteImageArgs() { return Query.getInt(_deviceId, CL_DEVICE_MAX_WRITE_IMAGE_ARGS); } public long singleFPConfig() { return Query.getLong(_deviceId, CL_DEVICE_SINGLE_FP_CONFIG); } - public long image2DMaxWidth() { return Query.getSize(_deviceId, CL_DEVICE_IMAGE2D_MAX_WIDTH); } + public long image2DMaxWidth() { return Query.getSize(_deviceId, CL_DEVICE_IMAGE2D_MAX_WIDTH); } - public long image2DMaxHeight() { return Query.getSize(_deviceId, CL_DEVICE_IMAGE2D_MAX_HEIGHT); } + public long image2DMaxHeight() { return Query.getSize(_deviceId, CL_DEVICE_IMAGE2D_MAX_HEIGHT); } - public long image3DMaxWidth() { return Query.getSize(_deviceId, CL_DEVICE_IMAGE3D_MAX_WIDTH); } + public long image3DMaxWidth() { return Query.getSize(_deviceId, CL_DEVICE_IMAGE3D_MAX_WIDTH); } - public long image3DMaxHeight() { return Query.getSize(_deviceId, CL_DEVICE_IMAGE3D_MAX_HEIGHT); } + public long image3DMaxHeight() { return Query.getSize(_deviceId, CL_DEVICE_IMAGE3D_MAX_HEIGHT); } - public long image3DMaxDepth() { return Query.getSize(_deviceId, CL_DEVICE_IMAGE3D_MAX_DEPTH); } + public long image3DMaxDepth() { return Query.getSize(_deviceId, CL_DEVICE_IMAGE3D_MAX_DEPTH); } - public int prefVecWidthChar() { return Query.getInt(_deviceId, CL_DEVICE_PREFERRED_VECTOR_WIDTH_CHAR); } + public int prefVecWidthChar() { return Query.getInt(_deviceId, CL_DEVICE_PREFERRED_VECTOR_WIDTH_CHAR); } - public int prefVecWidthShort() { return Query.getInt(_deviceId, CL_DEVICE_PREFERRED_VECTOR_WIDTH_SHORT); } + public int prefVecWidthShort() { return Query.getInt(_deviceId, CL_DEVICE_PREFERRED_VECTOR_WIDTH_SHORT); } - public int prefVecWidthInt() { return Query.getInt(_deviceId, CL_DEVICE_PREFERRED_VECTOR_WIDTH_INT); } + public int prefVecWidthInt() { return Query.getInt(_deviceId, CL_DEVICE_PREFERRED_VECTOR_WIDTH_INT); } - public int prefVecWidthLong() { return Query.getInt(_deviceId, CL_DEVICE_PREFERRED_VECTOR_WIDTH_LONG); } + public int prefVecWidthLong() { return 
Query.getInt(_deviceId, CL_DEVICE_PREFERRED_VECTOR_WIDTH_LONG); } - public int prefVecWidthFloat() { return Query.getInt(_deviceId, CL_DEVICE_PREFERRED_VECTOR_WIDTH_FLOAT); } + public int prefVecWidthFloat() { return Query.getInt(_deviceId, CL_DEVICE_PREFERRED_VECTOR_WIDTH_FLOAT); } - public int prefVecWidthDouble() { return Query.getInt(_deviceId, CL_DEVICE_PREFERRED_VECTOR_WIDTH_DOUBLE); } + public int prefVecWidthDouble() { return Query.getInt(_deviceId, CL_DEVICE_PREFERRED_VECTOR_WIDTH_DOUBLE); } public static class Query { /** @@ -817,7 +817,7 @@ * @return The value */ public static int getInt(cl_device_id device, int paramName) { - return getInts(device, paramName, 1)[0]; + return getInts(device, paramName, 1)[0]; } /** @@ -829,9 +829,9 @@ * @return The value */ public static int[] getInts(cl_device_id device, int paramName, int numValues) { - int[] values = new int[numValues]; - clGetDeviceInfo(device, paramName, (long) Sizeof.cl_int * numValues, Pointer.to(values), null); - return values; + int[] values = new int[numValues]; + clGetDeviceInfo(device, paramName, (long) Sizeof.cl_int * numValues, Pointer.to(values), null); + return values; } /** @@ -842,7 +842,7 @@ * @return The value */ public static long getLong(cl_device_id device, int paramName) { - return getLongs(device, paramName, 1)[0]; + return getLongs(device, paramName, 1)[0]; } /** @@ -854,9 +854,9 @@ * @return The value */ public static long[] getLongs(cl_device_id device, int paramName, int numValues) { - long[] values = new long[numValues]; - clGetDeviceInfo(device, paramName, (long) Sizeof.cl_long * numValues, Pointer.to(values), null); - return values; + long[] values = new long[numValues]; + clGetDeviceInfo(device, paramName, (long) Sizeof.cl_long * numValues, Pointer.to(values), null); + return values; } /** @@ -868,15 +868,15 @@ */ public static String getString(cl_device_id device, int paramName) { // Obtain the length of the string that will be queried - long[] size = new long[1]; - clGetDeviceInfo(device, paramName, 0, null, size); + long[] size = new long[1]; + clGetDeviceInfo(device, paramName, 0, null, size); // Create a buffer of the appropriate size and fill it with the info - byte[] buffer = new byte[(int) size[0]]; - clGetDeviceInfo(device, paramName, buffer.length, Pointer.to(buffer), null); + byte[] buffer = new byte[(int) size[0]]; + clGetDeviceInfo(device, paramName, buffer.length, Pointer.to(buffer), null); // Create a string from the buffer (excluding the trailing \0 byte) - return new String(buffer, 0, buffer.length - 1); + return new String(buffer, 0, buffer.length - 1); } /** @@ -907,7 +907,7 @@ * @return The value64 */ public static long getSize(cl_device_id device, int paramName) { - return getSizes(device, paramName, 1)[0]; + return getSizes(device, paramName, 1)[0]; } /** @@ -921,47 +921,47 @@ public static long[] getSizes(cl_device_id device, int paramName, int numValues) { // The size of the returned data has to depend on // the size of a size_t, which is handled here - ByteBuffer buffer = ByteBuffer.allocate(numValues * Sizeof.size_t).order(ByteOrder.nativeOrder()); - clGetDeviceInfo( + ByteBuffer buffer = ByteBuffer.allocate(numValues * Sizeof.size_t).order(ByteOrder.nativeOrder()); + clGetDeviceInfo( device, paramName, (long) Sizeof.size_t * numValues, - Pointer.to(buffer), + Pointer.to(buffer), null ); - long[] values = new long[numValues]; - return getLongs(numValues, buffer, values); + long[] values = new long[numValues]; + return getLongs(numValues, buffer, values); } public static 
long[] getLongs(int numValues, ByteBuffer buffer, long[] values) { - if (Sizeof.size_t == 4) + if (Sizeof.size_t == 4) for (int i = 0; i < numValues; i++) values[i] = buffer.getInt(i * Sizeof.size_t); else - for ( int i = 0; i < numValues; i++ ) - values[i] = buffer.getLong(i * Sizeof.size_t); + for ( int i = 0; i < numValues; i++ ) + values[i] = buffer.getLong(i * Sizeof.size_t); - return values; + return values; } } private <T extends Number> Data<T> _dataArrayOf( Object data, DataType<T> dataType ) { - return (Data<T>) new CLData(this, data, (DataType<Number>) dataType); + return (Data<T>) new CLData(this, data, (DataType<Number>) dataType); } - private static class CLData extends AbstractDeviceData<Number> { + private static class CLData extends AbstractDeviceData<Number> { public CLData( AbstractBaseDevice<Number> owner, Object dataRef, DataType<Number> dataType ) { - super(owner, dataRef, dataType, ()->{ + super(owner, dataRef, dataType, ()->{ // In this lambda we free the memory, because the data is no longer needed! - cl_tsr<?,?> clTsr = (cl_tsr<?,?>) dataRef; - if ( clTsr.value.event != null ) clWaitForEvents(1, new cl_event[]{clTsr.value.event}); - clReleaseMemObject(clTsr.value.data); // Removing data from the device! - }); - assert !(dataRef instanceof Data); - } + cl_tsr<?,?> clTsr = (cl_tsr<?,?>) dataRef; + if ( clTsr.value.event != null ) clWaitForEvents(1, new cl_event[]{clTsr.value.event}); + clReleaseMemObject(clTsr.value.data); // Removing data from the device! + }); + assert !(dataRef instanceof Data); + } } @@ -981,10 +981,10 @@ */ static class cl_tsr<V, T extends V> { - cl_tsr(cl_tsr.cl_value value, cl_dtype dtype) { - this.value = value; - this.dtype = dtype; - } + cl_tsr(cl_tsr.cl_value value, cl_dtype dtype) { + this.value = value; + this.dtype = dtype; + } /** * This class is responsible for representing the @@ -996,7 +996,7 @@ */ static class cl_value { - cl_value( int size ) { this.size = size; } + cl_value( int size ) { this.size = size; } public final int size; public cl_mem data; @@ -1008,8 +1008,12 @@ @Override public boolean equals(Object obj) { - if ( !(obj instanceof cl_tsr) ) return false; - return ((cl_tsr) obj).value == this.value; + if ( !(obj instanceof cl_tsr) ) return false; + return ((cl_tsr) obj).value == this.value; + } + + @Override public int hashCode() { + return value.hashCode(); } } @@ -1030,11 +1034,11 @@ public cl_ad_hoc( String source, cl_kernel kernel, cl_program program - ) { - this.source = source; - this.kernel = kernel; - this.program = program; - } + ) { + this.source = source; + this.kernel = kernel; + this.program = program; + } } /** @@ -1044,8 +1048,8 @@ * Meaning this inner memory object "cl_mem" will * be freed via a call hook stored inside a Cleaner instance... */ - static final class cl_config { + static final class cl_config { public cl_mem data; } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices.opencl/OpenCLPlatform.html b/docs/coverage/test/html/neureka.devices.opencl/OpenCLPlatform.html index e7912e1c9..611191524 100644 --- a/docs/coverage/test/html/neureka.devices.opencl/OpenCLPlatform.html +++ b/docs/coverage/test/html/neureka.devices.opencl/OpenCLPlatform.html @@ -1 +1 @@ -OpenCLPlatform
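The Query helper shown at the end of the OpenCLDevice diff above uses the usual two-call clGetDeviceInfo pattern: query the size of the value first, then read it into a buffer of exactly that size. A condensed, self-contained sketch of the same pattern with plain JOCL calls; the class name is illustrative only:

import static org.jocl.CL.CL_DEVICE_NAME;
import static org.jocl.CL.clGetDeviceInfo;

import org.jocl.Pointer;
import org.jocl.cl_device_id;

public class DeviceInfoSketch {

    // Same two-call pattern as OpenCLDevice.Query.getString(..):
    // 1) ask only for the size of the value, 2) read it into a buffer of that size.
    static String deviceName( cl_device_id device ) {
        long[] size = new long[ 1 ];
        clGetDeviceInfo( device, CL_DEVICE_NAME, 0, null, size );          // query length only
        byte[] buffer = new byte[ (int) size[0] ];
        clGetDeviceInfo( device, CL_DEVICE_NAME, buffer.length, Pointer.to(buffer), null );
        return new String( buffer, 0, buffer.length - 1 );                 // drop the trailing '\0'
    }
}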

[Flattened JaCoCo coverage page for OpenCLPlatform with a per-method breakdown of all 16 methods — old: 81% instruction coverage (148 of 779 missed) and 83% branch coverage (12 of 72 missed), with the remaining gaps concentrated in the OpenCLPlatform(cl_platform_id) constructor and recompile(); new: 0% instruction coverage (816 of 816 missed) and 0% branch coverage (74 of 74 missed), every method uncovered.]
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices.opencl/OpenCLPlatform.java.html b/docs/coverage/test/html/neureka.devices.opencl/OpenCLPlatform.java.html index 6aa401dd7..73226c54b 100644 --- a/docs/coverage/test/html/neureka.devices.opencl/OpenCLPlatform.java.html +++ b/docs/coverage/test/html/neureka.devices.opencl/OpenCLPlatform.java.html @@ -38,75 +38,87 @@ */ public class OpenCLPlatform { - private final static Logger _LOG = LoggerFactory.getLogger( OpenCLPlatform.class ); + private final static Logger _LOG = LoggerFactory.getLogger( OpenCLPlatform.class ); private final cl_platform_id _pid; private final cl_context _context; private final Map<cl_device_id, OpenCLDevice> _id_device; - private final Map<String, cl_kernel> _kernels = new HashMap<>(); + private final Map<String, cl_kernel> _kernels = new HashMap<>(); public OpenCLPlatform(cl_platform_id pid) - { - _id_device = new TreeMap<>(Comparator.comparingInt(NativePointerObject::hashCode)); - _pid = pid; + { + _id_device = new TreeMap<>(Comparator.comparingInt(NativePointerObject::hashCode)); + _pid = pid; // Obtain the number of devices for the current platform - int[] numDevices = new int[ 1 ]; - clGetDeviceIDs(pid, CL_DEVICE_TYPE_ALL, 0, null, numDevices); - cl_device_id[] devicesArray = new cl_device_id[numDevices[ 0 ]]; - clGetDeviceIDs(pid, CL_DEVICE_TYPE_ALL, numDevices[ 0 ], devicesArray, null); + int[] numDevices = new int[ 1 ]; + clGetDeviceIDs(pid, CL_DEVICE_TYPE_ALL, 0, null, numDevices); + cl_device_id[] devicesArray = new cl_device_id[numDevices[ 0 ]]; + clGetDeviceIDs(pid, CL_DEVICE_TYPE_ALL, numDevices[ 0 ], devicesArray, null); + if ( numDevices[0] == 0 ) { + String vendor = OpenCLDevice.Query.getString(pid, CL_PLATFORM_VENDOR); + String platformName = OpenCLDevice.Query.getString(pid, CL_PLATFORM_NAME); + _LOG.warn( + "Could not find any OpenCL devices for platform '{}' with id '0x{}' from vendor '{}'. \n" + + "Although an OpenCL platform is present, it does not seem to find any devices. \n" + + "Does your hardware support OpenCL? 
\n", + platformName, Long.toHexString(pid.getNativePointer()), vendor, + new Throwable() + ); + } + // Enable exceptions and subsequently omit error checks in this sample - setExceptionsEnabled( true ); + setExceptionsEnabled( true ); // Initialize the context properties - cl_context_properties contextProperties = new cl_context_properties(); - contextProperties.addProperty(CL_CONTEXT_PLATFORM, pid); + cl_context_properties contextProperties = new cl_context_properties(); + contextProperties.addProperty(CL_CONTEXT_PLATFORM, pid); // Create a context for the selected device - _context = clCreateContext( + _context = clCreateContext( contextProperties, devicesArray.length, devicesArray, null, null, null ); - List<cl_device_id> successfullyLoaded = new ArrayList<>(); + List<cl_device_id> successfullyLoaded = new ArrayList<>(); - List<String> failures = new ArrayList<>(); + List<String> failures = new ArrayList<>(); // Collect all devices of this platform - for (cl_device_id did : devicesArray) { + for (cl_device_id did : devicesArray) { try { - OpenCLDevice clDevice = OpenCLDevice.of(this, did); - _id_device.put(did, clDevice); - successfullyLoaded.add(did); - } catch ( Exception e ) { - String message = - "Failed to create '"+OpenCLDevice.class.getSimpleName()+"' instance for " + - "OpenCL device id '0x" + Long.toHexString(did.getNativePointer()) + "' under platform id '0x"+Long.toHexString(pid.getNativePointer())+"'!"; - _LOG.error(message, e); - failures.add(message + " Reason: " + e.getMessage()); - } + OpenCLDevice clDevice = OpenCLDevice.of(this, did); + _id_device.put(did, clDevice); + successfullyLoaded.add(did); + } catch ( Exception e ) { + String message = + "Failed to create '"+OpenCLDevice.class.getSimpleName()+"' instance for " + + "OpenCL device id '0x" + Long.toHexString(did.getNativePointer()) + "' under platform id '0x"+Long.toHexString(pid.getNativePointer())+"'!"; + _LOG.error(message, e); + failures.add(message + " Reason: " + e.getMessage()); + } } - if ( !successfullyLoaded.isEmpty() ) - _compile(successfullyLoaded.toArray(new cl_device_id[0])); + if ( !successfullyLoaded.isEmpty() ) + _compile(successfullyLoaded.toArray(new cl_device_id[0])); else - _LOG.warn( - "'"+this.getClass().getSimpleName()+"' with id '"+Long.toHexString(pid.getNativePointer())+"' does not have a valid device attached to it!" + _LOG.warn( + "'"+this.getClass().getSimpleName()+"' with id '"+Long.toHexString(pid.getNativePointer())+"' does not have a valid device attached to it!" ); - if ( successfullyLoaded.isEmpty() && devicesArray.length > 0 ) - throw new RuntimeException( - "Failed to create '"+OpenCLDevice.class.getSimpleName()+"' instances for all devices of platform id '0x"+Long.toHexString(pid.getNativePointer())+"'! \n" + - "Reasons: \n " + failures.stream().collect(Collectors.joining("\n ")) + if ( successfullyLoaded.isEmpty() && devicesArray.length > 0 ) + throw new RuntimeException( + "Failed to create '"+OpenCLDevice.class.getSimpleName()+"' instances for all devices of platform id '0x"+Long.toHexString(pid.getNativePointer())+"'! 
\n" + + "Reasons: \n " + failures.stream().collect(Collectors.joining("\n ")) ); - } + } public void recompile() { - List<OpenCLDevice> devices = getDevices(); - cl_device_id[] devicesArray = new cl_device_id[devices.size()]; - for ( int i = 0; i < devicesArray.length; i++) devicesArray[ i ] = devices.get( i ).getId(); - _compile(devicesArray); - } + List<OpenCLDevice> devices = getDevices(); + cl_device_id[] devicesArray = new cl_device_id[devices.size()]; + for ( int i = 0; i < devicesArray.length; i++) devicesArray[ i ] = devices.get( i ).getId(); + _compile(devicesArray); + } /** * This is where all the kernels defined by all the {@link CLImplementation} @@ -120,9 +132,9 @@ private void _compile( cl_device_id[] devicesArray ) { //Reading all kernels! - List<String> templateSources = new ArrayList<>(); + List<String> templateSources = new ArrayList<>(); - String[] fileNames = { + String[] fileNames = { "activation_template.cl", "broadcast_template.cl", "convolution_template.cl", @@ -131,90 +143,90 @@ "scalar_broadcast.cl", "utility.cl" }; - for ( String name : fileNames ) - templateSources.add(Neureka.get().utility().readResource("kernels/"+name)); + for ( String name : fileNames ) + templateSources.add(Neureka.get().utility().readResource("kernels/"+name)); - ArrayList<String> names = new ArrayList<>(); - ArrayList<String> sources = new ArrayList<>(); - for ( int i = 0; i < fileNames.length; i++ ) + ArrayList<String> names = new ArrayList<>(); + ArrayList<String> sources = new ArrayList<>(); + for ( int i = 0; i < fileNames.length; i++ ) { - String kernelSource = templateSources.get( i ); - boolean templateFound = false; - if ( kernelSource.contains( "__kernel" ) ) + String kernelSource = templateSources.get( i ); + boolean templateFound = false; + if ( kernelSource.contains( "__kernel" ) ) { - String[] parts = kernelSource.split("__kernel")[ 1 ].split("\\(")[ 0 ].split(" "); + String[] parts = kernelSource.split("__kernel")[ 1 ].split("\\(")[ 0 ].split(" "); - templateFound = parts[parts.length - 1].contains("template"); - if ( !templateFound ) names.add(parts[parts.length - 1]); + templateFound = parts[parts.length - 1].contains("template"); + if ( !templateFound ) names.add(parts[parts.length - 1]); else { - String preName = parts[ parts.length - 1 ].replace("template", ""); + String preName = parts[ parts.length - 1 ].replace("template", ""); // Tensor t0_origin, Tensor t1_handle, Tsr t2_drain ... 
when d>=0 // Tsr t0_drain, Tsr t1_src1, Tsr t2_src2 // drn[di], src1[_i_of_idx_on_tln(prv_src1_cfg, rank)], src2[_i_of_idx_on_tln(prv_src2_cfg, rank)] // default: src1 o src2 -> drain // inverse: src1/fdrn <- src2 <- drain //=========================================================================== - Map<String, String> code = new HashMap<>(); - ImplementationFor<OpenCLDevice> impl = null; - for ( Operation type : Neureka.get().backend().getOperations() ) { - if ( preName.contains("activation") && type.supportsAlgorithm(ElementwiseAlgorithm.class) ) - impl = type.getAlgorithm(ElementwiseAlgorithm.class).getImplementationFor( OpenCLDevice.class ); - else if ( preName.contains("elementwise") && type.supportsAlgorithm(BiElementwise.class) ) - impl = type.getAlgorithm(BiElementwise.class).getImplementationFor( OpenCLDevice.class ); - else if ( preName.contains("scalarization") && type.supportsAlgorithm(BiScalarBroadcast.class) ) - impl = type.getAlgorithm(BiScalarBroadcast.class).getImplementationFor( OpenCLDevice.class ); - else if ( preName.contains("broadcast") && type.supportsAlgorithm(Broadcast.class) ) - impl = type.getAlgorithm(Broadcast.class).getImplementationFor( OpenCLDevice.class ); - else if ( preName.contains("convolution") && type.supportsAlgorithm(NDConvolution.class) ) - impl = type.getAlgorithm(NDConvolution.class).getImplementationFor( OpenCLDevice.class ); - else if ( - type.supportsAlgorithm(DeviceAlgorithm.class) + Map<String, String> code = new HashMap<>(); + ImplementationFor<OpenCLDevice> impl = null; + for ( Operation type : Neureka.get().backend().getOperations() ) { + if ( preName.contains("activation") && type.supportsAlgorithm(ElementwiseAlgorithm.class) ) + impl = type.getAlgorithm(ElementwiseAlgorithm.class).getImplementationFor( OpenCLDevice.class ); + else if ( preName.contains("elementwise") && type.supportsAlgorithm(BiElementwise.class) ) + impl = type.getAlgorithm(BiElementwise.class).getImplementationFor( OpenCLDevice.class ); + else if ( preName.contains("scalarization") && type.supportsAlgorithm(BiScalarBroadcast.class) ) + impl = type.getAlgorithm(BiScalarBroadcast.class).getImplementationFor( OpenCLDevice.class ); + else if ( preName.contains("broadcast") && type.supportsAlgorithm(Broadcast.class) ) + impl = type.getAlgorithm(Broadcast.class).getImplementationFor( OpenCLDevice.class ); + else if ( preName.contains("convolution") && type.supportsAlgorithm(NDConvolution.class) ) + impl = type.getAlgorithm(NDConvolution.class).getImplementationFor( OpenCLDevice.class ); + else if ( + type.supportsAlgorithm(DeviceAlgorithm.class) && - preName.contains(type.getAlgorithm(DeviceAlgorithm.class).getName()) + preName.contains(type.getAlgorithm(DeviceAlgorithm.class).getName()) ) { // TODO: cover! 
- impl = type.getAlgorithm(DeviceAlgorithm.class).getImplementationFor( OpenCLDevice.class ); + impl = type.getAlgorithm(DeviceAlgorithm.class).getImplementationFor( OpenCLDevice.class ); } - if ( impl instanceof CLImplementation ) { - for ( KernelCode kernelCode : ((CLImplementation) impl).getKernelCode() ) { - if (kernelCode.getCode() != null) - code.put(kernelCode.getName(), kernelCode.getCode()); + if ( impl instanceof CLImplementation ) { + for ( KernelCode kernelCode : ((CLImplementation) impl).getKernelCode() ) { + if (kernelCode.getCode() != null) + code.put(kernelCode.getName(), kernelCode.getCode()); } } - } - code.forEach( ( n, s ) -> { names.add( n ); sources.add( s ); } ); + } + code.forEach( ( n, s ) -> { names.add( n ); sources.add( s ); } ); } } - if ( !templateFound ) sources.add( kernelSource ); + if ( !templateFound ) sources.add( kernelSource ); } - for ( Operation type : Neureka.get().backend().getOperations() ) { - for ( Algorithm algorithm : type.getAllAlgorithms()) { - DeviceAlgorithm<?> deviceAlgorithm = ( algorithm instanceof DeviceAlgorithm ? ((DeviceAlgorithm<?>) algorithm) : null ); - ImplementationFor<OpenCLDevice> impl = ( deviceAlgorithm == null ? null : deviceAlgorithm.getImplementationFor(OpenCLDevice.class) ); - if ( impl instanceof CLImplementation ) { - CLImplementation cli = ((CLImplementation) impl); - if ( cli instanceof SimpleCLImplementation ) { - for ( KernelCode kernelCode : cli.getKernelCode() ) { - names.add( kernelCode.getName() ); - sources.add( kernelCode.getCode() ); + for ( Operation type : Neureka.get().backend().getOperations() ) { + for ( Algorithm algorithm : type.getAllAlgorithms()) { + DeviceAlgorithm<?> deviceAlgorithm = ( algorithm instanceof DeviceAlgorithm ? ((DeviceAlgorithm<?>) algorithm) : null ); + ImplementationFor<OpenCLDevice> impl = ( deviceAlgorithm == null ? null : deviceAlgorithm.getImplementationFor(OpenCLDevice.class) ); + if ( impl instanceof CLImplementation ) { + CLImplementation cli = ((CLImplementation) impl); + if ( cli instanceof SimpleCLImplementation ) { + for ( KernelCode kernelCode : cli.getKernelCode() ) { + names.add( kernelCode.getName() ); + sources.add( kernelCode.getCode() ); } } } } - } + } // Create the program - cl_program cpProgram = clCreateProgramWithSource( + cl_program cpProgram = clCreateProgramWithSource( _context, - sources.size(), - sources.toArray( new String[ 0 ] ), + sources.size(), + sources.toArray( new String[ 0 ] ), null, null ); // Build the program - int err = clBuildProgram( + int err = clBuildProgram( cpProgram, devicesArray.length, devicesArray, @@ -222,62 +234,62 @@ null, null ); - if ( err != CL_SUCCESS ) - _LOG.error("Failed to compile the OpenCL code of the current context. Error code: '"+err+"'."); + if ( err != CL_SUCCESS ) + _LOG.error("Failed to compile the OpenCL code of the current context. Error code: '"+err+"'."); //TODO: check compilation errors! 
// Create the kernels - for ( String name : names ) - if ( name != null ) _kernels.put( name, clCreateKernel( cpProgram, name, null ) ); - } + for ( String name : names ) + if ( name != null ) _kernels.put( name, clCreateKernel( cpProgram, name, null ) ); + } public List<OpenCLDevice> getDevices() { - List<OpenCLDevice> devices = new ArrayList<>(); - _id_device.forEach( ( k, v ) -> devices.add( v ) ); - return devices; + List<OpenCLDevice> devices = new ArrayList<>(); + _id_device.forEach( ( k, v ) -> devices.add( v ) ); + return devices; } /** * @param did The {@link cl_device_id} representing an OpenCL supporting device. * @return The truth value determining if this platform hosts the device represented by the provided id. */ - public boolean has( cl_device_id did ) { return _id_device.containsKey( did ); } + public boolean has( cl_device_id did ) { return _id_device.containsKey( did ); } public OpenCLDevice get( cl_device_id did ) { - return _id_device.get( did ); + return _id_device.get( did ); } void put( cl_device_id did, OpenCLDevice device ) { - _id_device.put( did, device ); - } + _id_device.put( did, device ); + } public cl_kernel getKernel( String kernelName ) { - return _kernels.get( kernelName ); + return _kernels.get( kernelName ); } public boolean hasKernel( String kernelName ) { - return _kernels.containsKey( kernelName ); + return _kernels.containsKey( kernelName ); } - public final long getId() { return _pid.getNativePointer(); } + public final long getId() { return _pid.getNativePointer(); } public cl_context getContext() { - return _context; + return _context; } public void dispose() { - clReleaseContext( _context ); - } + clReleaseContext( _context ); + } @Override public String toString() { - return this.getClass().getSimpleName()+"@"+Integer.toHexString(hashCode())+"[" + - "id=0x" + Long.toHexString(_pid.getNativePointer()) + "," + - "context=0x"+Long.toHexString(_context.getNativePointer()) + "," + - "kernels=[.."+_kernels.size()+"..]" + + return this.getClass().getSimpleName()+"@"+Integer.toHexString(hashCode())+"[" + + "id=0x" + Long.toHexString(_pid.getNativePointer()) + "," + + "context=0x"+Long.toHexString(_context.getNativePointer()) + "," + + "kernels=[.."+_kernels.size()+"..]" + "]"; } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices.opencl/index.html b/docs/coverage/test/html/neureka.devices.opencl/index.html index ba8a5a59f..318473f50 100644 --- a/docs/coverage/test/html/neureka.devices.opencl/index.html +++ b/docs/coverage/test/html/neureka.devices.opencl/index.html @@ -1 +1 @@ -neureka.devices.opencl
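Note on the OpenCLPlatform.java diff above: apart from the re-indentation, the kernel-compilation flow is unchanged, so compiled kernels still land in the platform's name-keyed kernel cache. A minimal sketch of how that cache is typically consulted, using only the methods visible in the diff (hasKernel, getKernel, recompile); the variables platform and kernelName are assumed to be provided by the surrounding backend code:

    // Sketch (assumed context): resolve a compiled cl_kernel by name from the platform cache.
    cl_kernel kernel = platform.hasKernel( kernelName )
                            ? platform.getKernel( kernelName )
                            : null;
    if ( kernel == null ) {
        platform.recompile();                      // re-reads the kernel sources and rebuilds the cl_program
        kernel = platform.getKernel( kernelName ); // still null if no kernel of that name was ever compiled
    }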

[JaCoCo coverage table for neureka.devices.opencl (index.html), old report vs. new report: instruction coverage 81% (760 of 4,149 missed) vs. 17% (3,525 of 4,257 missed), branch coverage 78% vs. 35%. In the new report OpenCLDevice, OpenCLPlatform, KernelCaller and OpenCLDevice.Query drop to 0%; only JVMData (84%) and OpenCLDevice.cl_dtype (100%) remain covered.]
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices.opencl/index.source.html b/docs/coverage/test/html/neureka.devices.opencl/index.source.html index 7a6293514..a2c3de96f 100644 --- a/docs/coverage/test/html/neureka.devices.opencl/index.source.html +++ b/docs/coverage/test/html/neureka.devices.opencl/index.source.html @@ -1 +1 @@ -neureka.devices.opencl

[Same data broken down by source file (index.source.html): totals as above, 81% vs. 17% instruction coverage; OpenCLPlatform.java, KernelCaller.java, KernelCode.java and KernelCache.java show 0% in the new report, while JVMData.java stays at 84%.]
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices/AbstractBaseDevice.html b/docs/coverage/test/html/neureka.devices/AbstractBaseDevice.html index de31670f9..a896c8fb8 100644 --- a/docs/coverage/test/html/neureka.devices/AbstractBaseDevice.html +++ b/docs/coverage/test/html/neureka.devices/AbstractBaseDevice.html @@ -1 +1 @@ -AbstractBaseDevice

[JaCoCo coverage table for AbstractBaseDevice: old 100% instruction coverage (0 of 42 missed), new 95% (2 of 42 missed); the newly missed instructions are in contains(Tensor).]
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices/AbstractBaseDevice.java.html b/docs/coverage/test/html/neureka.devices/AbstractBaseDevice.java.html index c72adc0c6..3b0f8e52d 100644 --- a/docs/coverage/test/html/neureka.devices/AbstractBaseDevice.java.html +++ b/docs/coverage/test/html/neureka.devices/AbstractBaseDevice.java.html @@ -66,7 +66,7 @@ @Override public final boolean contains( Tensor<V> o ) { Data<V> data = o.mut().getData(); - if ( data == null ) return false; + if ( data == null ) return false; return data.owner() == this; } @@ -85,4 +85,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices/AbstractDevice$1$1.html b/docs/coverage/test/html/neureka.devices/AbstractDevice$1$1.html index 3a8a1d25f..981f05d23 100644 --- a/docs/coverage/test/html/neureka.devices/AbstractDevice$1$1.html +++ b/docs/coverage/test/html/neureka.devices/AbstractDevice$1$1.html @@ -1 +1 @@ -AbstractDevice.1.new Device.Writer() {...}

[JaCoCo coverage table for AbstractDevice.1.new Device.Writer() {...}: identical totals in both reports, 60% instruction coverage (15 of 38 missed); fully() is uncovered in both.]
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices/AbstractDevice$1$2.html b/docs/coverage/test/html/neureka.devices/AbstractDevice$1$2.html index dd485f2ac..d8d9354ad 100644 --- a/docs/coverage/test/html/neureka.devices/AbstractDevice$1$2.html +++ b/docs/coverage/test/html/neureka.devices/AbstractDevice$1$2.html @@ -1 +1 @@ -AbstractDevice.1.new Device.Writer() {...}

[JaCoCo coverage table for the second AbstractDevice.1.new Device.Writer() {...} anonymous class: identical totals, 100% instruction coverage (0 of 45 missed).]
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices/AbstractDevice$1.html b/docs/coverage/test/html/neureka.devices/AbstractDevice$1.html index f0b628536..9783da77a 100644 --- a/docs/coverage/test/html/neureka.devices/AbstractDevice$1.html +++ b/docs/coverage/test/html/neureka.devices/AbstractDevice$1.html @@ -1 +1 @@ -AbstractDevice.new Device.Access() {...}

[JaCoCo coverage table for AbstractDevice.new Device.Access() {...}: identical totals, 90% instruction coverage (7 of 70 missed); cleanup(Runnable) is uncovered in both reports.]
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices/AbstractDevice.html b/docs/coverage/test/html/neureka.devices/AbstractDevice.html index 4ddd357dd..c19c9c813 100644 --- a/docs/coverage/test/html/neureka.devices/AbstractDevice.html +++ b/docs/coverage/test/html/neureka.devices/AbstractDevice.html @@ -1 +1 @@ -AbstractDevice

[JaCoCo coverage table for AbstractDevice: old 93% instruction coverage (10 of 164 missed, 87% branches), new 46% (87 of 164 missed, 25% branches); update(OwnerChangeRequest), store(Tensor) and _cleaning(Object, Runnable) are no longer covered.]
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices/AbstractDevice.java.html b/docs/coverage/test/html/neureka.devices/AbstractDevice.java.html index 1241501e8..156b2083c 100644 --- a/docs/coverage/test/html/neureka.devices/AbstractDevice.java.html +++ b/docs/coverage/test/html/neureka.devices/AbstractDevice.java.html @@ -85,28 +85,28 @@ */ @Override public boolean update( OwnerChangeRequest<Tensor<V>> changeRequest ) { - Tensor<V> oldOwner = changeRequest.getOldOwner(); - Tensor<V> newOwner = changeRequest.getNewOwner(); - if ( changeRequest.type() == IsBeing.REPLACED ) _swap( oldOwner, newOwner ); - else if ( changeRequest.type() == IsBeing.ADDED ) { - if ( newOwner.has( Relation.class ) ) { - Relation<V> relation = newOwner.get(Relation.class); - if ( relation.hasParent() ) { // Root needs to be found ! : - Tensor<V> root = relation.findRootTensor().orElseThrow(IllegalStateException::new); - if ( !this.has(root) || !root.isOutsourced() ) - throw new IllegalStateException("Data parent is not outsourced!"); + Tensor<V> oldOwner = changeRequest.getOldOwner(); + Tensor<V> newOwner = changeRequest.getNewOwner(); + if ( changeRequest.type() == IsBeing.REPLACED ) _swap( oldOwner, newOwner ); + else if ( changeRequest.type() == IsBeing.ADDED ) { + if ( newOwner.has( Relation.class ) ) { + Relation<V> relation = newOwner.get(Relation.class); + if ( relation.hasParent() ) { // Root needs to be found ! : + Tensor<V> root = relation.findRootTensor().orElseThrow(IllegalStateException::new); + if ( !this.has(root) || !root.isOutsourced() ) + throw new IllegalStateException("Data parent is not outsourced!"); } } - Device<V> found = newOwner.getMut().getData().owner(); + Device<V> found = newOwner.getMut().getData().owner(); - if ( found != null && found != this ) - found.restore( newOwner ); + if ( found != null && found != this ) + found.restore( newOwner ); } - return true; + return true; } - protected void _cleaning( Object o, Runnable action ) { _CLEANER.register( o, action ); } + protected void _cleaning( Object o, Runnable action ) { _CLEANER.register( o, action ); } /** * <b>This method plays an important role in approving a provided {@link ExecutionCall}.</b> @@ -128,8 +128,8 @@ /** {@inheritDoc} */ @Override public <T extends V> Storage<V> store( Tensor<T> tensor ) { - tensor.set( (Component) this ); // This way we move the storing procedure to the update function! - return this; + tensor.set( (Component) this ); // This way we move the storing procedure to the update function! + return this; } /** {@inheritDoc} */ @@ -170,8 +170,8 @@ int offset, int start, int size ) { DataType<?> dataType = tensor.getDataType(); - if ( dataType == null ) - dataType = _dataTypeOf( array ); + if ( dataType == null ) + dataType = _dataTypeOf( array ); Class<?> arrayType = dataType.dataArrayType(); if ( !arrayType.isAssignableFrom( array.getClass() ) ) array = DataConverter.get().convert( array, arrayType ); @@ -206,4 +206,4 @@ protected abstract DataType<?> _dataTypeOf( Object rawData ); } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices/AbstractDeviceData.html b/docs/coverage/test/html/neureka.devices/AbstractDeviceData.html index 9bc2599f9..d4f9dca9e 100644 --- a/docs/coverage/test/html/neureka.devices/AbstractDeviceData.html +++ b/docs/coverage/test/html/neureka.devices/AbstractDeviceData.html @@ -1 +1 @@ -AbstractDeviceData
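Note on the AbstractDevice.java diff above: the (re-indented) store(Tensor) no longer moves data itself; it only attaches the device as a component, and the actual outsourcing is performed in update(OwnerChangeRequest) when the ADDED event is processed. A hedged sketch of that flow; device and tensor are assumed to exist in the calling code:

    // Sketch: store(...) just registers the device as a component of the tensor...
    device.store( tensor );               // internally: tensor.set( (Component) this )
    // ...update(OwnerChangeRequest) then performs the transfer, after which the
    // tensor's data owner is this device:
    boolean nowOnDevice = device.contains( tensor );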

[JaCoCo coverage table for AbstractDeviceData: identical totals, 96% instruction coverage (4 of 115 missed); usages() is uncovered in both reports.]
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices/AbstractDeviceData.java.html b/docs/coverage/test/html/neureka.devices/AbstractDeviceData.java.html index f784dcb52..cf16d8781 100644 --- a/docs/coverage/test/html/neureka.devices/AbstractDeviceData.java.html +++ b/docs/coverage/test/html/neureka.devices/AbstractDeviceData.java.html @@ -57,4 +57,4 @@ @Override public final int usages() { return _refCounter.count(); } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices/CustomDeviceCleaner$ReferenceWithCleanup.html b/docs/coverage/test/html/neureka.devices/CustomDeviceCleaner$ReferenceWithCleanup.html index e052b0476..181d9527b 100644 --- a/docs/coverage/test/html/neureka.devices/CustomDeviceCleaner$ReferenceWithCleanup.html +++ b/docs/coverage/test/html/neureka.devices/CustomDeviceCleaner$ReferenceWithCleanup.html @@ -1 +1 @@ -CustomDeviceCleaner.ReferenceWithCleanup

[JaCoCo coverage table for CustomDeviceCleaner.ReferenceWithCleanup: old 100% (0 of 12 missed), new 54% (15 of 33 missed); the enlarged cleanup() method is only partially covered.]
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices/CustomDeviceCleaner.html b/docs/coverage/test/html/neureka.devices/CustomDeviceCleaner.html index 6114787c3..96930494f 100644 --- a/docs/coverage/test/html/neureka.devices/CustomDeviceCleaner.html +++ b/docs/coverage/test/html/neureka.devices/CustomDeviceCleaner.html @@ -1 +1 @@ -CustomDeviceCleaner

[JaCoCo coverage table for CustomDeviceCleaner: old 93% instruction coverage (7 of 106 missed), new 76% (43 of 185 missed); register, run and checkCleanup are partially covered and getInstance() is not hit.]
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices/CustomDeviceCleaner.java.html b/docs/coverage/test/html/neureka.devices/CustomDeviceCleaner.java.html index d29602395..ad1c2ffae 100644 --- a/docs/coverage/test/html/neureka.devices/CustomDeviceCleaner.java.html +++ b/docs/coverage/test/html/neureka.devices/CustomDeviceCleaner.java.html @@ -1,5 +1,8 @@ CustomDeviceCleaner.java

CustomDeviceCleaner.java

package neureka.devices;
 
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 import java.lang.ref.PhantomReference;
 import java.lang.ref.ReferenceQueue;
 import java.util.ArrayList;
@@ -15,63 +18,114 @@
  *  anywhere but within this library. <br>
  *  This class or its public methods might change or get removed in future versions!</b>
  */
-final class CustomDeviceCleaner implements DeviceCleaner, Runnable
+final class CustomDeviceCleaner implements DeviceCleaner
 {
-    private final ReferenceQueue<Object> _referenceQueue = new ReferenceQueue<>();
-    private final long _timeout = 60 * 1000;
-    private int _registered = 0;
+    private static final Logger log = LoggerFactory.getLogger(CustomDeviceCleaner.class);
+    private static final CustomDeviceCleaner _INSTANCE = new CustomDeviceCleaner();
+    private static final long _QUEUE_TIMEOUT = 60 * 1000;
+
+    private final ReferenceQueue<Object> _referenceQueue = new ReferenceQueue<>();
+    private final List<ReferenceWithCleanup<Object>> _toBeCleaned = new ArrayList<>();
+    private final Thread _thread;
+
+
+    public static CustomDeviceCleaner getInstance() {
+        return _INSTANCE;
+    }
+
+    CustomDeviceCleaner() {
+        _thread = new Thread(this::run, "Neureka-Cleaner");
+    }
 
-    List<Object> list = new ArrayList<>();
 
     static class ReferenceWithCleanup<T> extends PhantomReference<T>
     {
-        private final Runnable _action;
+        private Runnable _action;
 
-        ReferenceWithCleanup(T o, Runnable action, ReferenceQueue<T> queue) {
-            super( o, queue );
-            _action = action;
-        }
+        ReferenceWithCleanup( T o, Runnable action, ReferenceQueue<T> queue ) {
+            super( o, queue );
+            _action = action;
+        }
         public void cleanup() {
-            _action.run();
-        }
+            if ( _action != null ) {
+                try {
+                    _action.run();
+                } catch (Exception e) {
+                    log.error("Failed to execute cleanup action '"+_action+"'.", e);
+                } finally {
+                    _action = null;
+                }
+            }
+        }
     }
 
-    @Override
-    public void register(Object o, Runnable action) {
-        synchronized ( _referenceQueue ) {
-            list.add(new ReferenceWithCleanup<Object>(o, action, _referenceQueue));
-            _registered++;
-            if ( _registered == 1 ) new Thread( this::run ).start();
-        }
-    }
-
-    @Override
-    public void run() {
-        while ( _registered > 0 ) {
+    public void register( Object o, Runnable action ) {
+        if ( o == null ) {
+            log.warn("Attempt to register a null object for cleanup. This is not allowed!");
             try {
-                ReferenceWithCleanup ref = (ReferenceWithCleanup) _referenceQueue.remove(_timeout);
-                if ( ref != null ) {
-                    try {
-                        ref.cleanup();
-                    } catch ( Throwable e ) {
-                        e.printStackTrace();
-                        // ignore exceptions from the cleanup action
-                        // (including interruption of cleanup thread)
-                    }
-                    _registered--;
+                action.run();
+            } catch (Exception e) {
+                log.error("Failed to execute cleanup action '"+action+"'.", e);
+            }
+            return;
+        }
+        synchronized ( _referenceQueue ) {
+            _toBeCleaned.add(new ReferenceWithCleanup<>(o, action, _referenceQueue));
+            if ( _toBeCleaned.size() == 1 ) {
+                if ( !_thread.isAlive() ) {
+                    _thread.start();
                 }
-            } catch ( Throwable e ) {
-                e.printStackTrace(); // The queue failed
-            }
+                else {
+                    // We notify the cleaner thread that there are new items to be cleaned
+                    synchronized ( _thread ) {
+                        _thread.notify();
+                    }
+                }
+            }
+        }
+    }
+
+    private void run() {
+        if ( !_thread.isAlive() ) {
+            _thread.start();
         }
-    }
+        while ( _thread.isAlive() ) {
+            while ( !_toBeCleaned.isEmpty() ) {
+                checkCleanup();
+            }
+            try {
+                synchronized ( _thread ) {
+                    _thread.wait();
+                }
+            } catch (Exception e) {
+                log.error("Failed to make cleaner thread wait for cleaning notification!", e);
+            }
+        }
+    }
+
+    private void checkCleanup() {
+        try {
+            ReferenceWithCleanup<Object> ref = (ReferenceWithCleanup<Object>) _referenceQueue.remove(_QUEUE_TIMEOUT);
+            if ( ref != null ) {
+                try {
+                    ref.cleanup();
+                } catch ( Throwable e ) {
+                    log.error("Failed to perform cleanup!", e);
+                } finally {
+                    _toBeCleaned.remove(ref);
+                }
+            }
+        } catch ( Throwable e ) {
+            log.error("Failed to call 'remove()' on cleaner internal queue.", e);
+        }
+    }
 
     @Override
     public String toString() {
-        return this.getClass().getSimpleName()+"@"+Integer.toHexString(this.hashCode())+"[" +
-                    "registered=" + _registered +
+        return this.getClass().getSimpleName()+"@"+Integer.toHexString(this.hashCode())+"[" +
+                    "registered=" + _toBeCleaned.size() +
                 "]";
     }
 
 }
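A short usage sketch for the rewritten cleaner above: cleanup actions are registered together with the object whose unreachability should trigger them; the phantom reference is enqueued by the GC and the dedicated "Neureka-Cleaner" thread runs the action once (the action field is nulled afterwards). The nativeHandle object and freeNativeHandle() call are hypothetical stand-ins for whatever native resource needs to be released:

    // Sketch (the class is package-private, so this would live inside neureka.devices):
    Object nativeHandle = allocateSomething();   // hypothetical resource wrapper
    CustomDeviceCleaner.getInstance().register( nativeHandle, () -> freeNativeHandle() );
    // Passing null as the first argument runs the action immediately instead of deferring it.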
-
\ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices/Device$1.html b/docs/coverage/test/html/neureka.devices/Device$1.html index 1d1300cf3..067235365 100644 --- a/docs/coverage/test/html/neureka.devices/Device$1.html +++ b/docs/coverage/test/html/neureka.devices/Device$1.html @@ -1 +1 @@ -Device.new Device.In() {...}

[JaCoCo coverage table for Device.new Device.In() {...}: identical totals, 100% instruction coverage (0 of 67 missed).]
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices/Device$Access.html b/docs/coverage/test/html/neureka.devices/Device$Access.html index c46ed8aee..ecbe14582 100644 --- a/docs/coverage/test/html/neureka.devices/Device$Access.html +++ b/docs/coverage/test/html/neureka.devices/Device$Access.html @@ -1 +1 @@ -Device.Access

[JaCoCo coverage table for Device.Access: identical totals, 100% instruction coverage (0 of 6 missed).]
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices/Device$Writer.html b/docs/coverage/test/html/neureka.devices/Device$Writer.html index 2bda1ca1a..f7cb7a8ec 100644 --- a/docs/coverage/test/html/neureka.devices/Device$Writer.html +++ b/docs/coverage/test/html/neureka.devices/Device$Writer.html @@ -1 +1 @@ -Device.Writer

[JaCoCo coverage table for Device.Writer: identical totals, 100% instruction coverage (0 of 7 missed).]
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices/Device.html b/docs/coverage/test/html/neureka.devices/Device.html index 201712175..b85ed1a61 100644 --- a/docs/coverage/test/html/neureka.devices/Device.html +++ b/docs/coverage/test/html/neureka.devices/Device.html @@ -1 +1 @@ -Device

[JaCoCo coverage table for Device: old 95% instruction coverage (7 of 152 missed), new 63% (56 of 152 missed); optimizedFunctionOf(Function, String) drops to 0% and borrow(Tensor, Tensor[]) to 77%.]
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices/Device.java.html b/docs/coverage/test/html/neureka.devices/Device.java.html index 62918c248..b0d76653f 100644 --- a/docs/coverage/test/html/neureka.devices/Device.java.html +++ b/docs/coverage/test/html/neureka.devices/Device.java.html @@ -226,19 +226,19 @@ * @return An instance of the optimized function. */ default Function optimizedFunctionOf( Function function, String name ) { - LogUtil.nullArgCheck( function, "function", Function.class ); - LogUtil.nullArgCheck( name, "name", String.class ); + LogUtil.nullArgCheck( function, "function", Function.class ); + LogUtil.nullArgCheck( name, "name", String.class ); - Operation optimizedOperation = optimizedOperationOf( function, name ); - BackendContext currentContext = Neureka.get().backend(); - if ( !currentContext.hasOperation( optimizedOperation ) ) - currentContext.addOperation( optimizedOperation ); + Operation optimizedOperation = optimizedOperationOf( function, name ); + BackendContext currentContext = Neureka.get().backend(); + if ( !currentContext.hasOperation( optimizedOperation ) ) + currentContext.addOperation( optimizedOperation ); - return new FunctionParser( currentContext ) - .parse( + return new FunctionParser( currentContext ) + .parse( optimizedOperation, - function.numberOfArgs(), - function.isDoingAD() + function.numberOfArgs(), + function.isDoingAD() ); } @@ -261,8 +261,8 @@ LogUtil.nullArgCheck( rest, "rest", Tensor[].class ); List<Tensor<V>> tensors = new ArrayList<>(); if ( first != null ) tensors.add( first ); - if ( rest.length > 0 ) - tensors.addAll( Arrays.stream( rest ).filter(Objects::nonNull).collect(Collectors.toList()) ); + if ( rest.length > 0 ) + tensors.addAll( Arrays.stream( rest ).filter(Objects::nonNull).collect(Collectors.toList()) ); Device<?> thisDevice = this; return new In() { @Override @@ -408,4 +408,4 @@ } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices/DeviceCleaner.html b/docs/coverage/test/html/neureka.devices/DeviceCleaner.html index 27810451c..670a3c4bd 100644 --- a/docs/coverage/test/html/neureka.devices/DeviceCleaner.html +++ b/docs/coverage/test/html/neureka.devices/DeviceCleaner.html @@ -1 +1 @@ -DeviceCleaner
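Note on the Device.java diff above: optimizedFunctionOf(Function, String) registers a device-specific operation with the backend context (if not already present) and parses a new Function for it. A hedged usage sketch; the function string and the name "my_fused_op" are made-up examples, and Function.of(String) is assumed to be the usual Neureka entry point for parsing a function:

    // Sketch: derive a device-optimized version of an existing function.
    Function f = Function.of( "i0 * i1 + i2" );
    Function optimized = device.optimizedFunctionOf( f, "my_fused_op" );
    // 'optimized' is backed by an Operation that the device can execute directly.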

[JaCoCo coverage table for DeviceCleaner: identical totals, 100% instruction coverage (0 of 9 missed).]
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices/DeviceCleaner.java.html b/docs/coverage/test/html/neureka.devices/DeviceCleaner.java.html index 339001372..fa7b33249 100644 --- a/docs/coverage/test/html/neureka.devices/DeviceCleaner.java.html +++ b/docs/coverage/test/html/neureka.devices/DeviceCleaner.java.html @@ -8,4 +8,4 @@ void register( Object o, Runnable action ); } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices/Query.html b/docs/coverage/test/html/neureka.devices/Query.html index 1aebcd4f2..0ee3db040 100644 --- a/docs/coverage/test/html/neureka.devices/Query.html +++ b/docs/coverage/test/html/neureka.devices/Query.html @@ -1 +1 @@ -Query

[JaCoCo coverage table for Query: old 97% instruction coverage (8 of 379 missed), new 84% (57 of 379 missed); _queryInternal(Class, String[]) falls from 95% to 78% and two query lambdas lose coverage.]
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices/Query.java.html b/docs/coverage/test/html/neureka.devices/Query.java.html index 95d5a6cbd..c9618796e 100644 --- a/docs/coverage/test/html/neureka.devices/Query.java.html +++ b/docs/coverage/test/html/neureka.devices/Query.java.html @@ -33,8 +33,8 @@ .flatMap( key -> Arrays.stream(key.split("\\|\\|")) ) .map(String::trim) .filter( key -> !key.isEmpty() ) - .flatMap( key -> key.equals("amd") ? Stream.of("amd", "advanced micro devices") : Stream.of(key) ) - .flatMap( key -> key.equals("nvidia") ? Stream.of("nvidia", "nvidia corporation") : Stream.of(key) ) + .flatMap( key -> key.equals("amd") ? Stream.of("amd", "advanced micro devices") : Stream.of(key) ) + .flatMap( key -> key.equals("nvidia") ? Stream.of("nvidia", "nvidia corporation") : Stream.of(key) ) .toArray(String[]::new); return _queryInternal( deviceType, flattened ); @@ -68,11 +68,11 @@ BackendExtension.DeviceOption found = extension.find( currentKey ); if ( found == null ) continue; if ( found.device() == null ) continue; - if ( found.confidence() <= 0 ) continue; - if ( !deviceType.isAssignableFrom( found.device().getClass() ) ) continue; - if ( found.confidence() > ACCEPTABILITY && found.confidence() > desireForCPU || (justTakeFirstOne && probablyWantsGPU) ) - return (D) found.device(); - } + if ( found.confidence() <= 0 ) continue; + if ( !deviceType.isAssignableFrom( found.device().getClass() ) ) continue; + if ( found.confidence() > ACCEPTABILITY && found.confidence() > desireForCPU || (justTakeFirstOne && probablyWantsGPU) ) + return (D) found.device(); + } if ( probablyWantsGPU ) return null; // User wants OpenCL but cannot have it :/ @@ -83,4 +83,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices/ReferenceCounter$ChangeEvent.html b/docs/coverage/test/html/neureka.devices/ReferenceCounter$ChangeEvent.html index 196dab16b..be89b7077 100644 --- a/docs/coverage/test/html/neureka.devices/ReferenceCounter$ChangeEvent.html +++ b/docs/coverage/test/html/neureka.devices/ReferenceCounter$ChangeEvent.html @@ -1 +1 @@ -ReferenceCounter.ChangeEvent
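Note on the Query.java diff above: search keys are split on "||", trimmed, and shorthand vendor keys are expanded ("amd" also matches "advanced micro devices", "nvidia" also matches "nvidia corporation") before _queryInternal scores the backend's device options. A hedged sketch of how such a query is typically issued through the public Device entry points listed in the coverage tables; the key strings are illustrative only:

    // Sketch: look up a device by vendor shorthand; either expanded vendor name will match.
    Device<?> gpu = Device.get( "amd || nvidia" );
    // Or restrict the result to a specific device type:
    OpenCLDevice clDevice = Device.get( OpenCLDevice.class, "nvidia" );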

[JaCoCo coverage table for ReferenceCounter.ChangeEvent: identical totals, 85% instruction coverage (3 of 21 missed); type() is uncovered in both reports.]
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices/ReferenceCounter$ChangeType.html b/docs/coverage/test/html/neureka.devices/ReferenceCounter$ChangeType.html index 32cbcbb0b..ae1b73857 100644 --- a/docs/coverage/test/html/neureka.devices/ReferenceCounter$ChangeType.html +++ b/docs/coverage/test/html/neureka.devices/ReferenceCounter$ChangeType.html @@ -1 +1 @@ -ReferenceCounter.ChangeType

[JaCoCo coverage table for ReferenceCounter.ChangeType: 100% instruction coverage in both reports; the static initializer grows from 21 to 34 instructions.]
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices/ReferenceCounter.html b/docs/coverage/test/html/neureka.devices/ReferenceCounter.html index 36294f21e..b8ba4b0f6 100644 --- a/docs/coverage/test/html/neureka.devices/ReferenceCounter.html +++ b/docs/coverage/test/html/neureka.devices/ReferenceCounter.html @@ -1 +1 @@ -ReferenceCounter

[JaCoCo coverage table for ReferenceCounter: identical totals, 83% instruction coverage (15 of 91 missed), 62% branch coverage.]
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices/ReferenceCounter.java.html b/docs/coverage/test/html/neureka.devices/ReferenceCounter.java.html index de28e8224..200485cb9 100644 --- a/docs/coverage/test/html/neureka.devices/ReferenceCounter.java.html +++ b/docs/coverage/test/html/neureka.devices/ReferenceCounter.java.html @@ -51,4 +51,4 @@ public int count() { return _count; } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices/index.html b/docs/coverage/test/html/neureka.devices/index.html index f61c0a358..16e3cdaa1 100644 --- a/docs/coverage/test/html/neureka.devices/index.html +++ b/docs/coverage/test/html/neureka.devices/index.html @@ -1 +1 @@ -neureka.devices

[JaCoCo package summary for neureka.devices (index.html), old vs. new: instruction coverage 94% (76 of 1,345 missed) vs. 79% (304 of 1,458 missed), branch coverage 84% vs. 57%; the largest drops are in AbstractDevice, Query, Device and CustomDeviceCleaner.]
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.devices/index.source.html b/docs/coverage/test/html/neureka.devices/index.source.html index 9ee012585..0e6ff1102 100644 --- a/docs/coverage/test/html/neureka.devices/index.source.html +++ b/docs/coverage/test/html/neureka.devices/index.source.html @@ -1 +1 @@ -neureka.devices

[Same package summary grouped by source file (index.source.html): totals as above; AbstractDevice.java, CustomDeviceCleaner.java, Query.java and Device.java account for most of the newly missed instructions.]
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.dtype.custom/AbstractNumericType.html b/docs/coverage/test/html/neureka.dtype.custom/AbstractNumericType.html index 96cffedd6..04c549b81 100644 --- a/docs/coverage/test/html/neureka.dtype.custom/AbstractNumericType.html +++ b/docs/coverage/test/html/neureka.dtype.custom/AbstractNumericType.html @@ -1 +1 @@ -AbstractNumericType

[JaCoCo coverage table for AbstractNumericType: identical totals in both reports, 100% instruction coverage (0 of 72 missed).]
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.dtype.custom/AbstractNumericType.java.html b/docs/coverage/test/html/neureka.dtype.custom/AbstractNumericType.java.html index 9d5d43803..60b43244c 100644 --- a/docs/coverage/test/html/neureka.dtype.custom/AbstractNumericType.java.html +++ b/docs/coverage/test/html/neureka.dtype.custom/AbstractNumericType.java.html @@ -56,4 +56,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.dtype.custom/F32.html b/docs/coverage/test/html/neureka.dtype.custom/F32.html index ab77ffa58..1fcf4f81a 100644 --- a/docs/coverage/test/html/neureka.dtype.custom/F32.html +++ b/docs/coverage/test/html/neureka.dtype.custom/F32.html @@ -1 +1 @@ -F32

[JaCoCo coverage table for F32: identical totals in both reports, 93% instruction coverage (11 of 177 missed).]
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.dtype.custom/F32.java.html b/docs/coverage/test/html/neureka.dtype.custom/F32.java.html index c8009560c..63e9a15ba 100644 --- a/docs/coverage/test/html/neureka.dtype.custom/F32.java.html +++ b/docs/coverage/test/html/neureka.dtype.custom/F32.java.html @@ -107,4 +107,4 @@ } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.dtype.custom/F64.html b/docs/coverage/test/html/neureka.dtype.custom/F64.html index 93e466a80..de6fb640a 100644 --- a/docs/coverage/test/html/neureka.dtype.custom/F64.html +++ b/docs/coverage/test/html/neureka.dtype.custom/F64.html @@ -1 +1 @@ -F64

[JaCoCo coverage table for F64: identical totals in both reports, 85% instruction coverage (36 of 251 missed).]
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.dtype.custom/F64.java.html b/docs/coverage/test/html/neureka.dtype.custom/F64.java.html index 7f94b6d4e..4146e5972 100644 --- a/docs/coverage/test/html/neureka.dtype.custom/F64.java.html +++ b/docs/coverage/test/html/neureka.dtype.custom/F64.java.html @@ -113,4 +113,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.dtype.custom/I16.html b/docs/coverage/test/html/neureka.dtype.custom/I16.html index d3b7de7d3..67085c9f0 100644 --- a/docs/coverage/test/html/neureka.dtype.custom/I16.html +++ b/docs/coverage/test/html/neureka.dtype.custom/I16.html @@ -1 +1 @@ -I16

[JaCoCo coverage table for I16: identical totals in both reports, 90% instruction coverage (16 of 176 missed).]
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.dtype.custom/I16.java.html b/docs/coverage/test/html/neureka.dtype.custom/I16.java.html index e77b729a9..b17e1dfcb 100644 --- a/docs/coverage/test/html/neureka.dtype.custom/I16.java.html +++ b/docs/coverage/test/html/neureka.dtype.custom/I16.java.html @@ -109,4 +109,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.dtype.custom/I32.html b/docs/coverage/test/html/neureka.dtype.custom/I32.html index 9ba55d566..55c7c0a01 100644 --- a/docs/coverage/test/html/neureka.dtype.custom/I32.html +++ b/docs/coverage/test/html/neureka.dtype.custom/I32.html @@ -1 +1 @@ -I32

[JaCoCo coverage table for I32: identical totals in both reports, 92% instruction coverage (16 of 203 missed).]
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.dtype.custom/I32.java.html b/docs/coverage/test/html/neureka.dtype.custom/I32.java.html index 46adb5bf8..4e98de524 100644 --- a/docs/coverage/test/html/neureka.dtype.custom/I32.java.html +++ b/docs/coverage/test/html/neureka.dtype.custom/I32.java.html @@ -113,4 +113,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.dtype.custom/I64.html b/docs/coverage/test/html/neureka.dtype.custom/I64.html index 29179c857..7c7fa9544 100644 --- a/docs/coverage/test/html/neureka.dtype.custom/I64.html +++ b/docs/coverage/test/html/neureka.dtype.custom/I64.html @@ -1 +1 @@ -I64

[JaCoCo coverage table for I64: identical totals in both reports, 92% instruction coverage (9 of 117 missed).]
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.dtype.custom/I64.java.html b/docs/coverage/test/html/neureka.dtype.custom/I64.java.html index 4a399e364..7b4361ee7 100644 --- a/docs/coverage/test/html/neureka.dtype.custom/I64.java.html +++ b/docs/coverage/test/html/neureka.dtype.custom/I64.java.html @@ -90,4 +90,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.dtype.custom/I8.html b/docs/coverage/test/html/neureka.dtype.custom/I8.html index 32a4936ae..2dddfbc97 100644 --- a/docs/coverage/test/html/neureka.dtype.custom/I8.html +++ b/docs/coverage/test/html/neureka.dtype.custom/I8.html @@ -1 +1 @@ -I8

I8

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total21 of 15786%1 of 1693%426636318
readForeignDataFrom(DataInput, int)100%n/a113311
convertToHolderArray(Object)50%n/a111111
convertToTarget(Object)40%n/a111111
convertToHolder(Object)25896%11191%1711301
readAndConvertForeignDataFrom(Iterator, int)20100%2100%020301
readAndConvertForeignDataFrom(DataInput, int)17100%2100%020301
targetToForeignHolderBytes(Byte)8100%n/a010101
convertToTargetArray(Object)6100%n/a010101
foreignHolderBytesToTarget(byte[])5100%n/a010101
readForeignDataFrom(Iterator, int)5100%n/a010101
I8()3100%n/a010101
signed()2100%n/a010101
numberOfBytes()2100%n/a010101
targetType()2100%n/a010101
targetArrayType()2100%n/a010101
holderType()2100%n/a010101
holderArrayType()2100%n/a010101
toTarget(Byte)2100%n/a010101
\ No newline at end of file +I8

I8

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total21 of 15786%1 of 1693%426636318
readForeignDataFrom(DataInput, int)100%n/a113311
convertToHolderArray(Object)50%n/a111111
convertToTarget(Object)40%n/a111111
convertToHolder(Object)25896%11191%1711301
readAndConvertForeignDataFrom(Iterator, int)20100%2100%020301
readAndConvertForeignDataFrom(DataInput, int)17100%2100%020301
targetToForeignHolderBytes(Byte)8100%n/a010101
convertToTargetArray(Object)6100%n/a010101
foreignHolderBytesToTarget(byte[])5100%n/a010101
readForeignDataFrom(Iterator, int)5100%n/a010101
I8()3100%n/a010101
signed()2100%n/a010101
numberOfBytes()2100%n/a010101
targetType()2100%n/a010101
targetArrayType()2100%n/a010101
holderType()2100%n/a010101
holderArrayType()2100%n/a010101
toTarget(Byte)2100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.dtype.custom/I8.java.html b/docs/coverage/test/html/neureka.dtype.custom/I8.java.html index 295b142d1..5c9a996a8 100644 --- a/docs/coverage/test/html/neureka.dtype.custom/I8.java.html +++ b/docs/coverage/test/html/neureka.dtype.custom/I8.java.html @@ -101,4 +101,4 @@ } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.dtype.custom/UI16.html b/docs/coverage/test/html/neureka.dtype.custom/UI16.html index 58e7c20fd..39794e313 100644 --- a/docs/coverage/test/html/neureka.dtype.custom/UI16.html +++ b/docs/coverage/test/html/neureka.dtype.custom/UI16.html @@ -1 +1 @@ -UI16

UI16

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total44 of 10156%2 of 20%9191326818
readAndConvertForeignDataFrom(DataInput, int)260%20%226611
readAndConvertForeignDataFrom(Iterator, int)30%n/a111111
readForeignDataFrom(DataInput, int)30%n/a111111
readForeignDataFrom(Iterator, int)30%n/a111111
convertToHolderArray(Object)30%n/a111111
holderType()20%n/a111111
holderArrayType()20%n/a111111
convertToTarget(Object)20%n/a111111
foreignHolderBytesToTarget(byte[])17100%n/a010201
targetToForeignHolderBytes(Integer)12100%n/a010301
convertToHolder(Object)6100%n/a010101
convertToTargetArray(Object)6100%n/a010101
toTarget(Short)5100%n/a010101
UI16()3100%n/a010101
signed()2100%n/a010101
numberOfBytes()2100%n/a010101
targetType()2100%n/a010101
targetArrayType()2100%n/a010101
\ No newline at end of file +UI16

UI16

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total44 of 10156%2 of 20%9191326818
readAndConvertForeignDataFrom(DataInput, int)260%20%226611
readAndConvertForeignDataFrom(Iterator, int)30%n/a111111
readForeignDataFrom(DataInput, int)30%n/a111111
readForeignDataFrom(Iterator, int)30%n/a111111
convertToHolderArray(Object)30%n/a111111
holderType()20%n/a111111
holderArrayType()20%n/a111111
convertToTarget(Object)20%n/a111111
foreignHolderBytesToTarget(byte[])17100%n/a010201
targetToForeignHolderBytes(Integer)12100%n/a010301
convertToHolder(Object)6100%n/a010101
convertToTargetArray(Object)6100%n/a010101
toTarget(Short)5100%n/a010101
UI16()3100%n/a010101
signed()2100%n/a010101
numberOfBytes()2100%n/a010101
targetType()2100%n/a010101
targetArrayType()2100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.dtype.custom/UI16.java.html b/docs/coverage/test/html/neureka.dtype.custom/UI16.java.html index a481aba4e..f0038aef7 100644 --- a/docs/coverage/test/html/neureka.dtype.custom/UI16.java.html +++ b/docs/coverage/test/html/neureka.dtype.custom/UI16.java.html @@ -91,4 +91,4 @@ } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.dtype.custom/UI32.html b/docs/coverage/test/html/neureka.dtype.custom/UI32.html index 2d212a911..2965ea176 100644 --- a/docs/coverage/test/html/neureka.dtype.custom/UI32.html +++ b/docs/coverage/test/html/neureka.dtype.custom/UI32.html @@ -1 +1 @@ -UI32

UI32

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total21 of 12282%0 of 0n/a818822818
readAndConvertForeignDataFrom(DataInput, int)30%n/a111111
readAndConvertForeignDataFrom(Iterator, int)30%n/a111111
readForeignDataFrom(DataInput, int)30%n/a111111
readForeignDataFrom(Iterator, int)30%n/a111111
convertToHolderArray(Object)30%n/a111111
holderType()20%n/a111111
holderArrayType()20%n/a111111
convertToTarget(Object)20%n/a111111
targetToForeignHolderBytes(Long)38100%n/a010401
foreignHolderBytesToTarget(byte[])35100%n/a010201
convertToHolder(Object)6100%n/a010101
convertToTargetArray(Object)6100%n/a010101
toTarget(Integer)5100%n/a010101
UI32()3100%n/a010101
signed()2100%n/a010101
numberOfBytes()2100%n/a010101
targetType()2100%n/a010101
targetArrayType()2100%n/a010101
\ No newline at end of file +UI32

UI32

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total21 of 12282%0 of 0n/a818822818
readAndConvertForeignDataFrom(DataInput, int)30%n/a111111
readAndConvertForeignDataFrom(Iterator, int)30%n/a111111
readForeignDataFrom(DataInput, int)30%n/a111111
readForeignDataFrom(Iterator, int)30%n/a111111
convertToHolderArray(Object)30%n/a111111
holderType()20%n/a111111
holderArrayType()20%n/a111111
convertToTarget(Object)20%n/a111111
targetToForeignHolderBytes(Long)38100%n/a010401
foreignHolderBytesToTarget(byte[])35100%n/a010201
convertToHolder(Object)6100%n/a010101
convertToTargetArray(Object)6100%n/a010101
toTarget(Integer)5100%n/a010101
UI32()3100%n/a010101
signed()2100%n/a010101
numberOfBytes()2100%n/a010101
targetType()2100%n/a010101
targetArrayType()2100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.dtype.custom/UI32.java.html b/docs/coverage/test/html/neureka.dtype.custom/UI32.java.html index 73ab3148d..0060e9bbf 100644 --- a/docs/coverage/test/html/neureka.dtype.custom/UI32.java.html +++ b/docs/coverage/test/html/neureka.dtype.custom/UI32.java.html @@ -83,4 +83,4 @@ } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.dtype.custom/UI64.html b/docs/coverage/test/html/neureka.dtype.custom/UI64.html index 9aba47951..cdec65843 100644 --- a/docs/coverage/test/html/neureka.dtype.custom/UI64.html +++ b/docs/coverage/test/html/neureka.dtype.custom/UI64.html @@ -1 +1 @@ -UI64

UI64

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total21 of 9277%0 of 0n/a818826818
readAndConvertForeignDataFrom(DataInput, int)30%n/a111111
readAndConvertForeignDataFrom(Iterator, int)30%n/a111111
readForeignDataFrom(DataInput, int)30%n/a111111
readForeignDataFrom(Iterator, int)30%n/a111111
convertToHolderArray(Object)30%n/a111111
holderType()20%n/a111111
holderArrayType()20%n/a111111
convertToTarget(Object)20%n/a111111
targetToForeignHolderBytes(BigInteger)22100%n/a010601
foreignHolderBytesToTarget(byte[])19100%n/a010401
toTarget(Long)7100%n/a010101
convertToHolder(Object)6100%n/a010101
convertToTargetArray(Object)6100%n/a010101
UI64()3100%n/a010101
signed()2100%n/a010101
numberOfBytes()2100%n/a010101
targetType()2100%n/a010101
targetArrayType()2100%n/a010101
\ No newline at end of file +UI64

UI64

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total21 of 9277%0 of 0n/a818826818
readAndConvertForeignDataFrom(DataInput, int)30%n/a111111
readAndConvertForeignDataFrom(Iterator, int)30%n/a111111
readForeignDataFrom(DataInput, int)30%n/a111111
readForeignDataFrom(Iterator, int)30%n/a111111
convertToHolderArray(Object)30%n/a111111
holderType()20%n/a111111
holderArrayType()20%n/a111111
convertToTarget(Object)20%n/a111111
targetToForeignHolderBytes(BigInteger)22100%n/a010601
foreignHolderBytesToTarget(byte[])19100%n/a010401
toTarget(Long)7100%n/a010101
convertToHolder(Object)6100%n/a010101
convertToTargetArray(Object)6100%n/a010101
UI64()3100%n/a010101
signed()2100%n/a010101
numberOfBytes()2100%n/a010101
targetType()2100%n/a010101
targetArrayType()2100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.dtype.custom/UI64.java.html b/docs/coverage/test/html/neureka.dtype.custom/UI64.java.html index 3402fe7b1..023623b8c 100644 --- a/docs/coverage/test/html/neureka.dtype.custom/UI64.java.html +++ b/docs/coverage/test/html/neureka.dtype.custom/UI64.java.html @@ -92,4 +92,4 @@ } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.dtype.custom/UI8.html b/docs/coverage/test/html/neureka.dtype.custom/UI8.html index 9cc166c8c..5559e3e24 100644 --- a/docs/coverage/test/html/neureka.dtype.custom/UI8.html +++ b/docs/coverage/test/html/neureka.dtype.custom/UI8.html @@ -1 +1 @@ -UI8

UI8

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total98 of 17243%14 of 1612%14262440718
convertToTarget(Object)600%120%77131311
readForeignDataFrom(DataInput, int)250%20%226611
readAndConvertForeignDataFrom(Iterator, int)30%n/a111111
readForeignDataFrom(Iterator, int)30%n/a111111
convertToHolderArray(Object)30%n/a111111
holderType()20%n/a111111
holderArrayType()20%n/a111111
readAndConvertForeignDataFrom(DataInput, int)26100%2100%020601
targetToForeignHolderBytes(Short)11100%n/a010101
foreignHolderBytesToTarget(byte[])8100%n/a010101
toTarget(Byte)6100%n/a010101
convertToHolder(Object)6100%n/a010101
convertToTargetArray(Object)6100%n/a010101
UI8()3100%n/a010101
signed()2100%n/a010101
numberOfBytes()2100%n/a010101
targetType()2100%n/a010101
targetArrayType()2100%n/a010101
\ No newline at end of file +UI8

UI8

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total98 of 17243%14 of 1612%14262440718
convertToTarget(Object)600%120%77131311
readForeignDataFrom(DataInput, int)250%20%226611
readAndConvertForeignDataFrom(Iterator, int)30%n/a111111
readForeignDataFrom(Iterator, int)30%n/a111111
convertToHolderArray(Object)30%n/a111111
holderType()20%n/a111111
holderArrayType()20%n/a111111
readAndConvertForeignDataFrom(DataInput, int)26100%2100%020601
targetToForeignHolderBytes(Short)11100%n/a010101
foreignHolderBytesToTarget(byte[])8100%n/a010101
toTarget(Byte)6100%n/a010101
convertToHolder(Object)6100%n/a010101
convertToTargetArray(Object)6100%n/a010101
UI8()3100%n/a010101
signed()2100%n/a010101
numberOfBytes()2100%n/a010101
targetType()2100%n/a010101
targetArrayType()2100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.dtype.custom/UI8.java.html b/docs/coverage/test/html/neureka.dtype.custom/UI8.java.html index 055ba6e71..c7011afb9 100644 --- a/docs/coverage/test/html/neureka.dtype.custom/UI8.java.html +++ b/docs/coverage/test/html/neureka.dtype.custom/UI8.java.html @@ -96,4 +96,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.dtype.custom/index.html b/docs/coverage/test/html/neureka.dtype.custom/index.html index 6e8425d87..347fd9b2f 100644 --- a/docs/coverage/test/html/neureka.dtype.custom/index.html +++ b/docs/coverage/test/html/neureka.dtype.custom/index.html @@ -1 +1 @@ -neureka.dtype.custom

neureka.dtype.custom

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total293 of 1,64082%23 of 10678%622407934848187011
UI8987443%14212%1426244071801
UI16445756%20%919132681801
F643621585%31583%62873841901
I82113686%11593%42663631801
UI322110182%n/a81882281801
UI64217177%n/a81882681801
I321618792%11593%42744231901
I161616090%11593%42743931901
F321116693%11593%32633621801
I64910892%4100%22022621801
AbstractNumericType72100%2100%050170401
\ No newline at end of file +neureka.dtype.custom

neureka.dtype.custom

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total293 of 1,64082%23 of 10678%622407934848187011
UI8987443%14212%1426244071801
UI16445756%20%919132681801
F643621585%31583%62873841901
I82113686%11593%42663631801
UI322110182%n/a81882281801
UI64217177%n/a81882681801
I321618792%11593%42744231901
I161616090%11593%42743931901
F321116693%11593%32633621801
I64910892%4100%22022621801
AbstractNumericType72100%2100%050170401
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.dtype.custom/index.source.html b/docs/coverage/test/html/neureka.dtype.custom/index.source.html index 81b260d32..665e22618 100644 --- a/docs/coverage/test/html/neureka.dtype.custom/index.source.html +++ b/docs/coverage/test/html/neureka.dtype.custom/index.source.html @@ -1 +1 @@ -neureka.dtype.custom

neureka.dtype.custom

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total293 of 1,64082%23 of 10678%622407934848187011
UI8.java987443%14212%1426244071801
UI16.java445756%20%919132681801
F64.java3621585%31583%62873841901
I8.java2113686%11593%42663631801
UI32.java2110182%n/a81882281801
UI64.java217177%n/a81882681801
I32.java1618792%11593%42744231901
I16.java1616090%11593%42743931901
F32.java1116693%11593%32633621801
I64.java910892%4100%22022621801
AbstractNumericType.java72100%2100%050170401
\ No newline at end of file +neureka.dtype.custom

neureka.dtype.custom

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total293 of 1,64082%23 of 10678%622407934848187011
UI8.java987443%14212%1426244071801
UI16.java445756%20%919132681801
F64.java3621585%31583%62873841901
I8.java2113686%11593%42663631801
UI32.java2110182%n/a81882281801
UI64.java217177%n/a81882681801
I32.java1618792%11593%42744231901
I16.java1616090%11593%42743931901
F32.java1116693%11593%32633621801
I64.java910892%4100%22022621801
AbstractNumericType.java72100%2100%050170401
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.dtype/DataType$1.html b/docs/coverage/test/html/neureka.dtype/DataType$1.html index 787c3e2ad..dab3e46ff 100644 --- a/docs/coverage/test/html/neureka.dtype/DataType$1.html +++ b/docs/coverage/test/html/neureka.dtype/DataType$1.html @@ -1 +1 @@ -DataType.new LinkedHashMap() {...}

DataType.new LinkedHashMap() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total2 of 1181%1 of 250%130202
removeEldestEntry(Map.Entry)2675%1150%120101
{...}3100%n/a010101
\ No newline at end of file +DataType.new LinkedHashMap() {...}

DataType.new LinkedHashMap() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total2 of 1181%1 of 250%130202
removeEldestEntry(Map.Entry)2675%1150%120101
{...}3100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.dtype/DataType.html b/docs/coverage/test/html/neureka.dtype/DataType.html index 9018222be..1bb3f5cbc 100644 --- a/docs/coverage/test/html/neureka.dtype/DataType.html +++ b/docs/coverage/test/html/neureka.dtype/DataType.html @@ -1 +1 @@ -DataType

DataType

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total38 of 31687%7 of 4684%835656112
getTypeClassInstance(Class)304761%3350%3451501
hashCode()60%n/a111111
equals(Object)21789%1375%130401
_numericTypeRepresentationOf(Class)69100%32187%3130801
toString()35100%2100%020201
of(Class)30100%2100%020701
dataArrayType()25100%6100%040701
DataType(Class)24100%n/a010501
getItemTypeClass()12100%2100%020301
typeClassImplements(Class)11100%n/a010201
static {...}5100%n/a010101
getRepresentativeType()3100%n/a010101
\ No newline at end of file +DataType

DataType

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total38 of 31687%7 of 4684%835656112
getTypeClassInstance(Class)304761%3350%3451501
hashCode()60%n/a111111
equals(Object)21789%1375%130401
_numericTypeRepresentationOf(Class)69100%32187%3130801
toString()35100%2100%020201
of(Class)30100%2100%020701
dataArrayType()25100%6100%040701
DataType(Class)24100%n/a010501
getItemTypeClass()12100%2100%020301
typeClassImplements(Class)11100%n/a010201
static {...}5100%n/a010101
getRepresentativeType()3100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.dtype/DataType.java.html b/docs/coverage/test/html/neureka.dtype/DataType.java.html index af2ddd9b8..d7ceb685a 100644 --- a/docs/coverage/test/html/neureka.dtype/DataType.java.html +++ b/docs/coverage/test/html/neureka.dtype/DataType.java.html @@ -179,4 +179,4 @@ return _typeClass; } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.dtype/index.html b/docs/coverage/test/html/neureka.dtype/index.html index 9c280e60b..5aaa64485 100644 --- a/docs/coverage/test/html/neureka.dtype/index.html +++ b/docs/coverage/test/html/neureka.dtype/index.html @@ -1 +1 @@ -neureka.dtype

neureka.dtype

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total40 of 32787%8 of 4883%93865711402
DataType3827887%73984%83565611201
DataType.new LinkedHashMap() {...}981%1150%13020201
\ No newline at end of file +neureka.dtype

neureka.dtype

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total40 of 32787%8 of 4883%93865711402
DataType3827887%73984%83565611201
DataType.new LinkedHashMap() {...}981%1150%13020201
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.dtype/index.source.html b/docs/coverage/test/html/neureka.dtype/index.source.html index abb5d5e53..114f40286 100644 --- a/docs/coverage/test/html/neureka.dtype/index.source.html +++ b/docs/coverage/test/html/neureka.dtype/index.source.html @@ -1 +1 @@ -neureka.dtype

neureka.dtype

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total40 of 32787%8 of 4883%93865711402
DataType.java4028787%84083%93865711402
\ No newline at end of file +neureka.dtype

neureka.dtype

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total40 of 32787%8 of 4883%93865711402
DataType.java4028787%84083%93865711402
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.fluent.building.states/IterByOrIterFromOrAllTensor.html b/docs/coverage/test/html/neureka.fluent.building.states/IterByOrIterFromOrAllTensor.html index 74d4a24d3..3cdb89a11 100644 --- a/docs/coverage/test/html/neureka.fluent.building.states/IterByOrIterFromOrAllTensor.html +++ b/docs/coverage/test/html/neureka.fluent.building.states/IterByOrIterFromOrAllTensor.html @@ -1 +1 @@ -IterByOrIterFromOrAllTensor

IterByOrIterFromOrAllTensor

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 5100%0 of 0n/a010101
andFill(List)5100%n/a010101
\ No newline at end of file +IterByOrIterFromOrAllTensor

IterByOrIterFromOrAllTensor

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 5100%0 of 0n/a010101
andFill(List)5100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.fluent.building.states/IterByOrIterFromOrAllTensor.java.html b/docs/coverage/test/html/neureka.fluent.building.states/IterByOrIterFromOrAllTensor.java.html index 0cf6d5c61..f321f437e 100644 --- a/docs/coverage/test/html/neureka.fluent.building.states/IterByOrIterFromOrAllTensor.java.html +++ b/docs/coverage/test/html/neureka.fluent.building.states/IterByOrIterFromOrAllTensor.java.html @@ -33,4 +33,4 @@ Tensor<V> andSeed(Object seed ); } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.fluent.building.states/WithShapeOrScalarOrVectorTensor.html b/docs/coverage/test/html/neureka.fluent.building.states/WithShapeOrScalarOrVectorTensor.html index 7f48432d4..f40e9b6dd 100644 --- a/docs/coverage/test/html/neureka.fluent.building.states/WithShapeOrScalarOrVectorTensor.html +++ b/docs/coverage/test/html/neureka.fluent.building.states/WithShapeOrScalarOrVectorTensor.html @@ -1 +1 @@ -WithShapeOrScalarOrVectorTensor

WithShapeOrScalarOrVectorTensor

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total15 of 4163%0 of 0n/a133713
vector(Iterable)150%n/a113311
withShape(List)18100%n/a010301
vector(List)8100%n/a010101
\ No newline at end of file +WithShapeOrScalarOrVectorTensor

WithShapeOrScalarOrVectorTensor

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total15 of 4163%0 of 0n/a133713
vector(Iterable)150%n/a113311
withShape(List)18100%n/a010301
vector(List)8100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.fluent.building.states/WithShapeOrScalarOrVectorTensor.java.html b/docs/coverage/test/html/neureka.fluent.building.states/WithShapeOrScalarOrVectorTensor.java.html index f26a41ca8..bdd5a9f23 100644 --- a/docs/coverage/test/html/neureka.fluent.building.states/WithShapeOrScalarOrVectorTensor.java.html +++ b/docs/coverage/test/html/neureka.fluent.building.states/WithShapeOrScalarOrVectorTensor.java.html @@ -41,4 +41,4 @@ Tensor<V> scalar(V value ); } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.fluent.building.states/index.html b/docs/coverage/test/html/neureka.fluent.building.states/index.html index 9c01abaad..b4670666b 100644 --- a/docs/coverage/test/html/neureka.fluent.building.states/index.html +++ b/docs/coverage/test/html/neureka.fluent.building.states/index.html @@ -1 +1 @@ -neureka.fluent.building.states

neureka.fluent.building.states

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total15 of 4667%0 of 0n/a14381402
WithShapeOrScalarOrVectorTensor152663%n/a13371301
IterByOrIterFromOrAllTensor5100%n/a01010101
\ No newline at end of file +neureka.fluent.building.states

neureka.fluent.building.states

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total15 of 4667%0 of 0n/a14381402
WithShapeOrScalarOrVectorTensor152663%n/a13371301
IterByOrIterFromOrAllTensor5100%n/a01010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.fluent.building.states/index.source.html b/docs/coverage/test/html/neureka.fluent.building.states/index.source.html index 29c812522..5073892e9 100644 --- a/docs/coverage/test/html/neureka.fluent.building.states/index.source.html +++ b/docs/coverage/test/html/neureka.fluent.building.states/index.source.html @@ -1 +1 @@ -neureka.fluent.building.states

neureka.fluent.building.states

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total15 of 4667%0 of 0n/a14381402
WithShapeOrScalarOrVectorTensor.java152663%n/a13371301
IterByOrIterFromOrAllTensor.java5100%n/a01010101
\ No newline at end of file +neureka.fluent.building.states

neureka.fluent.building.states

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total15 of 4667%0 of 0n/a14381402
WithShapeOrScalarOrVectorTensor.java152663%n/a13371301
IterByOrIterFromOrAllTensor.java5100%n/a01010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.fluent.building/NdaBuilder.html b/docs/coverage/test/html/neureka.fluent.building/NdaBuilder.html index dcbcdbb32..1281ff85e 100644 --- a/docs/coverage/test/html/neureka.fluent.building/NdaBuilder.html +++ b/docs/coverage/test/html/neureka.fluent.building/NdaBuilder.html @@ -1 +1 @@ -NdaBuilder

NdaBuilder

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total47 of 80094%14 of 8683%12667132023
scalar(Object)243458%1375%1341101
step(double)1328195%83078%62025701
_checked(Object)85487%31583%31001101
_isAllOne(Object[])4195%2880%261901
andSeed(Object)101100%8100%0501201
vector(Object[])34100%2100%020401
andFill(Object[])26100%2100%020301
withShape(int[])24100%2100%020501
_size()21100%2100%020301
NdaBuilder(Class)20100%n/a010501
_get(Object)19100%n/a010201
andFillFrom(Object)19100%n/a010301
on(Device)15100%n/a010301
andWhere(Filler)10100%n/a010101
lambda$step$5(List, int)9100%n/a010101
lambda$step$3(List, int)9100%n/a010101
lambda$step$1(List, int)9100%n/a010101
to(Object)7100%n/a010101
all(Object)4100%n/a010101
lambda$step$4(int)4100%n/a010101
lambda$step$2(int)4100%n/a010101
lambda$step$0(int)4100%n/a010101
static {...}4100%n/a010101
\ No newline at end of file +NdaBuilder

NdaBuilder

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total47 of 80094%14 of 8683%12667132023
scalar(Object)243458%1375%1341101
step(double)1328195%83078%62025701
_checked(Object)85487%31583%31001101
_isAllOne(Object[])4195%2880%261901
andSeed(Object)101100%8100%0501201
vector(Object[])34100%2100%020401
andFill(Object[])26100%2100%020301
withShape(int[])24100%2100%020501
_size()21100%2100%020301
NdaBuilder(Class)20100%n/a010501
_get(Object)19100%n/a010201
andFillFrom(Object)19100%n/a010301
on(Device)15100%n/a010301
andWhere(Filler)10100%n/a010101
lambda$step$5(List, int)9100%n/a010101
lambda$step$3(List, int)9100%n/a010101
lambda$step$1(List, int)9100%n/a010101
to(Object)7100%n/a010101
all(Object)4100%n/a010101
lambda$step$4(int)4100%n/a010101
lambda$step$2(int)4100%n/a010101
lambda$step$0(int)4100%n/a010101
static {...}4100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.fluent.building/NdaBuilder.java.html b/docs/coverage/test/html/neureka.fluent.building/NdaBuilder.java.html index f9859a417..3775627ce 100644 --- a/docs/coverage/test/html/neureka.fluent.building/NdaBuilder.java.html +++ b/docs/coverage/test/html/neureka.fluent.building/NdaBuilder.java.html @@ -72,7 +72,7 @@ private Shape _shape; private V _from; private V _to; - private Device<V> _device = (Device<V>) CPU.get(); + private Device<? super V> _device = CPU.get(); /** * @param typeClass The type of the values which ought to be represented by the {@link Tensor} built by this {@link NdaBuilder}. @@ -299,10 +299,10 @@ } @Override - public WithShapeOrScalarOrVectorTensor<V> on(Device<V> device ) { + public WithShapeOrScalarOrVectorTensor<V> on(Device<? super V> device ) { LogUtil.nullArgCheck(device, "device", Device.class, "Cannot create a tensor with an undefined device!"); _device = device; return this; } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.fluent.building/index.html b/docs/coverage/test/html/neureka.fluent.building/index.html index a84283f57..0985925a8 100644 --- a/docs/coverage/test/html/neureka.fluent.building/index.html +++ b/docs/coverage/test/html/neureka.fluent.building/index.html @@ -1 +1 @@ -neureka.fluent.building
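The NdaBuilder.java hunk above widens the device field and the on(..) parameter from Device<V> to Device<? super V>. That contravariant bound is what makes the unchecked (Device<V>) cast on CPU.get() unnecessary: a device able to hold any value is, in particular, a valid target for values of type V. The following self-contained Java sketch illustrates the idea with hypothetical stand-in types (Device, CPU and Builder here are simplified placeholders, not Neureka's real API):

// Minimal sketch of the "consumer super" (PECS) idea behind Device<? super V>.
interface Device<T> {
    void store(T item);                     // a device consumes values of type T
}

final class CPU implements Device<Object> { // can store anything, hence Device<Object>
    private static final CPU INSTANCE = new CPU();
    static CPU get() { return INSTANCE; }
    @Override public void store(Object item) { System.out.println("storing " + item); }
}

final class Builder<V> {
    // The old field type would have needed an unchecked cast: (Device<V>) CPU.get().
    // With a contravariant bound the assignment compiles as-is:
    private Device<? super V> device = CPU.get();

    Builder<V> on(Device<? super V> device) { this.device = device; return this; }

    void fill(V value) { device.store(value); } // still type-safe: the device accepts V
}

public class Demo {
    public static void main(String[] args) {
        new Builder<Integer>().fill(42);              // defaults to the CPU stand-in
        new Builder<Integer>().on(new CPU()).fill(7); // any device of a super type works
    }
}

The sketch compiles without warnings, which is the practical payoff of the wildcard signature adopted in the diff.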

neureka.fluent.building

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total47 of 80094%14 of 8683%1266713202301
NdaBuilder4775394%147283%1266713202301
\ No newline at end of file +neureka.fluent.building

neureka.fluent.building

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total47 of 80094%14 of 8683%1266713202301
NdaBuilder4775394%147283%1266713202301
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.fluent.building/index.source.html b/docs/coverage/test/html/neureka.fluent.building/index.source.html index 1ad6649f8..ce55a164c 100644 --- a/docs/coverage/test/html/neureka.fluent.building/index.source.html +++ b/docs/coverage/test/html/neureka.fluent.building/index.source.html @@ -1 +1 @@ -neureka.fluent.building

neureka.fluent.building

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total47 of 80094%14 of 8683%1266713202301
NdaBuilder.java4775394%147283%1266713202301
\ No newline at end of file +neureka.fluent.building

neureka.fluent.building

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total47 of 80094%14 of 8683%1266713202301
NdaBuilder.java4775394%147283%1266713202301
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.fluent.slicing/AxisSliceBuilder.html b/docs/coverage/test/html/neureka.fluent.slicing/AxisSliceBuilder.html index 79be0ee8d..3ba8f9bfb 100644 --- a/docs/coverage/test/html/neureka.fluent.slicing/AxisSliceBuilder.html +++ b/docs/coverage/test/html/neureka.fluent.slicing/AxisSliceBuilder.html @@ -1 +1 @@ -AxisSliceBuilder

AxisSliceBuilder

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total7 of 9292%0 of 0n/a110121110
all()70%n/a111111
AxisSliceBuilder(int, AxisSliceBuilder.Resolution)17100%n/a010601
axis(int)12100%n/a010101
get()11100%n/a010101
detached()11100%n/a010101
resolve()11100%n/a010201
at(int)8100%n/a010301
from(int)5100%n/a010201
to(int)5100%n/a010201
step(int)5100%n/a010201
\ No newline at end of file +AxisSliceBuilder

AxisSliceBuilder

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total7 of 9292%0 of 0n/a110121110
all()70%n/a111111
AxisSliceBuilder(int, AxisSliceBuilder.Resolution)17100%n/a010601
axis(int)12100%n/a010101
get()11100%n/a010101
detached()11100%n/a010101
resolve()11100%n/a010201
at(int)8100%n/a010301
from(int)5100%n/a010201
to(int)5100%n/a010201
step(int)5100%n/a010201
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.fluent.slicing/AxisSliceBuilder.java.html b/docs/coverage/test/html/neureka.fluent.slicing/AxisSliceBuilder.java.html index dd7bbbb80..39988b440 100644 --- a/docs/coverage/test/html/neureka.fluent.slicing/AxisSliceBuilder.java.html +++ b/docs/coverage/test/html/neureka.fluent.slicing/AxisSliceBuilder.java.html @@ -111,4 +111,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.fluent.slicing/SliceBuilder.html b/docs/coverage/test/html/neureka.fluent.slicing/SliceBuilder.html index 6a795b2d7..77e8eb724 100644 --- a/docs/coverage/test/html/neureka.fluent.slicing/SliceBuilder.html +++ b/docs/coverage/test/html/neureka.fluent.slicing/SliceBuilder.html @@ -1 +1 @@ -SliceBuilder

SliceBuilder

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total17 of 24393%4 of 2281%41804007
lambda$new$1(int[], int, int[], int[], int[], int, int, int)126283%31178%3801301
axis(int)51066%1150%120201
SliceBuilder(Tensor)62100%2100%0201201
lambda$new$0(Tensor, int[], int[], int[], boolean)49100%n/a010801
lambda$new$2(SliceBuilder.CreationCallback, int[], int[], int[], Boolean)29100%4100%030301
get()7100%n/a010101
detached()7100%n/a010101
\ No newline at end of file +SliceBuilder

SliceBuilder

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total17 of 24393%4 of 2281%41804007
lambda$new$1(int[], int, int[], int[], int[], int, int, int)126283%31178%3801301
axis(int)51066%1150%120201
SliceBuilder(Tensor)62100%2100%0201201
lambda$new$0(Tensor, int[], int[], int[], boolean)49100%n/a010801
lambda$new$2(SliceBuilder.CreationCallback, int[], int[], int[], Boolean)29100%4100%030301
get()7100%n/a010101
detached()7100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.fluent.slicing/SliceBuilder.java.html b/docs/coverage/test/html/neureka.fluent.slicing/SliceBuilder.java.html index f58bdc3e7..b83326a57 100644 --- a/docs/coverage/test/html/neureka.fluent.slicing/SliceBuilder.java.html +++ b/docs/coverage/test/html/neureka.fluent.slicing/SliceBuilder.java.html @@ -121,4 +121,4 @@ } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.fluent.slicing/SmartSlicer.html b/docs/coverage/test/html/neureka.fluent.slicing/SmartSlicer.html index edd239050..cc9621a62 100644 --- a/docs/coverage/test/html/neureka.fluent.slicing/SmartSlicer.html +++ b/docs/coverage/test/html/neureka.fluent.slicing/SmartSlicer.html @@ -1 +1 @@ -SmartSlicer

SmartSlicer

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total53 of 47288%7 of 4484%82568113
slice(Object[], Tensor)5041589%73784%72357901
SmartSlicer()0%n/a111111
static {...}4100%n/a010101
\ No newline at end of file +SmartSlicer

SmartSlicer

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total53 of 47288%7 of 4484%82568113
slice(Object[], Tensor)5041589%73784%72357901
SmartSlicer()0%n/a111111
static {...}4100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.fluent.slicing/SmartSlicer.java.html b/docs/coverage/test/html/neureka.fluent.slicing/SmartSlicer.java.html index a46ed34c5..5855f43b7 100644 --- a/docs/coverage/test/html/neureka.fluent.slicing/SmartSlicer.java.html +++ b/docs/coverage/test/html/neureka.fluent.slicing/SmartSlicer.java.html @@ -137,4 +137,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.fluent.slicing/index.html b/docs/coverage/test/html/neureka.fluent.slicing/index.html index 9120addfe..4c85d328e 100644 --- a/docs/coverage/test/html/neureka.fluent.slicing/index.html +++ b/docs/coverage/test/html/neureka.fluent.slicing/index.html @@ -1 +1 @@ -neureka.fluent.slicing

neureka.fluent.slicing

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total77 of 80790%11 of 6683%1353714222003
SmartSlicer5341988%73784%8256811301
SliceBuilder1722693%41881%4180400701
AxisSliceBuilder78592%n/a11012111001
\ No newline at end of file +neureka.fluent.slicing

neureka.fluent.slicing

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total77 of 80790%11 of 6683%1353714222003
SmartSlicer5341988%73784%8256811301
SliceBuilder1722693%41881%4180400701
AxisSliceBuilder78592%n/a11012111001
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.fluent.slicing/index.source.html b/docs/coverage/test/html/neureka.fluent.slicing/index.source.html index dbee6361b..11aa90eda 100644 --- a/docs/coverage/test/html/neureka.fluent.slicing/index.source.html +++ b/docs/coverage/test/html/neureka.fluent.slicing/index.source.html @@ -1 +1 @@ -neureka.fluent.slicing

neureka.fluent.slicing

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total77 of 80790%11 of 6683%1353714222003
SmartSlicer.java5341988%73784%8256811301
SliceBuilder.java1722693%41881%4180400701
AxisSliceBuilder.java78592%n/a11012111001
\ No newline at end of file +neureka.fluent.slicing

neureka.fluent.slicing

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total77 of 80790%11 of 6683%1353714222003
SmartSlicer.java5341988%73784%8256811301
SliceBuilder.java1722693%41881%4180400701
AxisSliceBuilder.java78592%n/a11012111001
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.framing.fluent/AxisFrame$Builder.html b/docs/coverage/test/html/neureka.framing.fluent/AxisFrame$Builder.html index bdee9897b..d4cb36c26 100644 --- a/docs/coverage/test/html/neureka.framing.fluent/AxisFrame$Builder.html +++ b/docs/coverage/test/html/neureka.framing.fluent/AxisFrame$Builder.html @@ -1 +1 @@ -AxisFrame.Builder

AxisFrame.Builder

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 43100%0 of 0n/a0701207
build()15100%n/a010101
getter(At)5100%n/a010201
setter(At)5100%n/a010201
replacer(Replace)5100%n/a010201
allAliasGetter(Supplier)5100%n/a010201
allAliasGetterFor(Function)5100%n/a010201
AxisFrame.Builder()3100%n/a010101
\ No newline at end of file +AxisFrame.Builder

AxisFrame.Builder

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 43100%0 of 0n/a0701207
build()15100%n/a010101
getter(At)5100%n/a010201
setter(At)5100%n/a010201
replacer(Replace)5100%n/a010201
allAliasGetter(Supplier)5100%n/a010201
allAliasGetterFor(Function)5100%n/a010201
AxisFrame.Builder()3100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.framing.fluent/AxisFrame.html b/docs/coverage/test/html/neureka.framing.fluent/AxisFrame.html index f192aaa59..3fde7c00d 100644 --- a/docs/coverage/test/html/neureka.framing.fluent/AxisFrame.html +++ b/docs/coverage/test/html/neureka.framing.fluent/AxisFrame.html @@ -1 +1 @@ -AxisFrame

AxisFrame

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 52100%0 of 0n/a0701307
AxisFrame(At, At, Replace, Supplier, Function)18100%n/a010701
getIndexAtAlias(Object)7100%n/a010101
getAllAliasesForIndex(int)7100%n/a010101
atIndexAlias(Object)6100%n/a010101
replace(Object)5100%n/a010101
getAllAliases()5100%n/a010101
builder()4100%n/a010101
\ No newline at end of file +AxisFrame

AxisFrame

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 52100%0 of 0n/a0701307
AxisFrame(At, At, Replace, Supplier, Function)18100%n/a010701
getIndexAtAlias(Object)7100%n/a010101
getAllAliasesForIndex(int)7100%n/a010101
atIndexAlias(Object)6100%n/a010101
replace(Object)5100%n/a010101
getAllAliases()5100%n/a010101
builder()4100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.framing.fluent/AxisFrame.java.html b/docs/coverage/test/html/neureka.framing.fluent/AxisFrame.java.html index 14e6b6dcd..968df35aa 100644 --- a/docs/coverage/test/html/neureka.framing.fluent/AxisFrame.java.html +++ b/docs/coverage/test/html/neureka.framing.fluent/AxisFrame.java.html @@ -113,4 +113,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.framing.fluent/index.html b/docs/coverage/test/html/neureka.framing.fluent/index.html index bafd84e17..2fcfa3498 100644 --- a/docs/coverage/test/html/neureka.framing.fluent/index.html +++ b/docs/coverage/test/html/neureka.framing.fluent/index.html @@ -1 +1 @@ -neureka.framing.fluent

neureka.framing.fluent

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total0 of 95100%0 of 0n/a01402501402
AxisFrame52100%n/a070130701
AxisFrame.Builder43100%n/a070120701
\ No newline at end of file +neureka.framing.fluent

neureka.framing.fluent

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total0 of 95100%0 of 0n/a01402501402
AxisFrame52100%n/a070130701
AxisFrame.Builder43100%n/a070120701
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.framing.fluent/index.source.html b/docs/coverage/test/html/neureka.framing.fluent/index.source.html index ccc257195..e38a3d5f9 100644 --- a/docs/coverage/test/html/neureka.framing.fluent/index.source.html +++ b/docs/coverage/test/html/neureka.framing.fluent/index.source.html @@ -1 +1 @@ -neureka.framing.fluent

neureka.framing.fluent

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total0 of 95100%0 of 0n/a01402501402
AxisFrame.java95100%n/a01402501402
\ No newline at end of file +neureka.framing.fluent

neureka.framing.fluent

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total0 of 95100%0 of 0n/a01402501402
AxisFrame.java95100%n/a01402501402
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.framing/NDFrame.html b/docs/coverage/test/html/neureka.framing/NDFrame.html index 1ec923569..b6e5ee009 100644 --- a/docs/coverage/test/html/neureka.framing/NDFrame.html +++ b/docs/coverage/test/html/neureka.framing/NDFrame.html @@ -1 +1 @@ -NDFrame

NDFrame

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total78 of 1,02092%15 of 8882%147814176234
get(Object[])500%60%449911
get(List)130%n/a112211
getState()65990%1583%1401101
lambda$atAxis$11(Object, Integer)42184%1150%121501
lambda$atAxis$2(Object, Object)31785%1150%121401
_paddedCentered(String, int)25996%1375%1311001
toString()182100%14100%0803301
lambda$new$0(int[], Tensor, Object, List)68100%6100%040901
NDFrame(Map, Tensor, String)67100%1375%1301201
_label(List)63100%1990%160701
lambda$toString$15(Object[], int[], StringBuilder, Object, Object)63100%2880%2601101
_initializeIndexMap(Object, Object, int)57100%6100%0401001
lambda$atAxis$9(Object)40100%6100%040601
atAxis(Object)29100%n/a010801
NDFrame(List, Map, String)22100%n/a010601
lambda$toString$13(StringBuilder, Object, Object)22100%2100%020601
lambda$atAxis$6(Object, Object, Object)22100%1150%120401
lambda$new$1(int[], Object, List)20100%n/a010301
hasLabelsForAxis(Object)16100%2100%020201
lambda$atAxis$4(Object, Object, int)14100%n/a010301
withAxesLabels(List)12100%n/a010101
lambda$toString$12(int, int, Integer)12100%2100%020101
lambda$toString$14(int[], Object[], Object, Integer)11100%2100%020201
NDFrame(List, Tensor, String)10100%n/a010301
withLabel(String)9100%n/a010101
lambda$atAxis$10(Integer, List, Object, Integer)9100%2100%020101
NDFrame(Tensor, String)6100%n/a010201
update(Component.OwnerChangeRequest)5100%n/a010201
lambda$atAxis$8(List, Object, Integer)5100%n/a010101
lambda$atAxis$7(Object, Object)5100%n/a010101
lambda$atAxis$5(Object, Object)5100%n/a010101
lambda$atAxis$3(Object, Object)5100%n/a010101
_mapping()4100%n/a010101
getLabel()3100%n/a010101
\ No newline at end of file +NDFrame

NDFrame

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total78 of 1,02092%15 of 8882%147814176234
get(Object[])500%60%449911
get(List)130%n/a112211
getState()65990%1583%1401101
lambda$atAxis$11(Object, Integer)42184%1150%121501
lambda$atAxis$2(Object, Object)31785%1150%121401
_paddedCentered(String, int)25996%1375%1311001
toString()182100%14100%0803301
lambda$new$0(int[], Tensor, Object, List)68100%6100%040901
NDFrame(Map, Tensor, String)67100%1375%1301201
_label(List)63100%1990%160701
lambda$toString$15(Object[], int[], StringBuilder, Object, Object)63100%2880%2601101
_initializeIndexMap(Object, Object, int)57100%6100%0401001
lambda$atAxis$9(Object)40100%6100%040601
atAxis(Object)29100%n/a010801
NDFrame(List, Map, String)22100%n/a010601
lambda$toString$13(StringBuilder, Object, Object)22100%2100%020601
lambda$atAxis$6(Object, Object, Object)22100%1150%120401
lambda$new$1(int[], Object, List)20100%n/a010301
hasLabelsForAxis(Object)16100%2100%020201
lambda$atAxis$4(Object, Object, int)14100%n/a010301
withAxesLabels(List)12100%n/a010101
lambda$toString$12(int, int, Integer)12100%2100%020101
lambda$toString$14(int[], Object[], Object, Integer)11100%2100%020201
NDFrame(List, Tensor, String)10100%n/a010301
withLabel(String)9100%n/a010101
lambda$atAxis$10(Integer, List, Object, Integer)9100%2100%020101
NDFrame(Tensor, String)6100%n/a010201
update(Component.OwnerChangeRequest)5100%n/a010201
lambda$atAxis$8(List, Object, Integer)5100%n/a010101
lambda$atAxis$7(Object, Object)5100%n/a010101
lambda$atAxis$5(Object, Object)5100%n/a010101
lambda$atAxis$3(Object, Object)5100%n/a010101
_mapping()4100%n/a010101
getLabel()3100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.framing/NDFrame.java.html b/docs/coverage/test/html/neureka.framing/NDFrame.java.html index ceea29b3d..504922331 100644 --- a/docs/coverage/test/html/neureka.framing/NDFrame.java.html +++ b/docs/coverage/test/html/neureka.framing/NDFrame.java.html @@ -333,4 +333,4 @@ return _mainLabel; } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.framing/Relation.html b/docs/coverage/test/html/neureka.framing/Relation.html index 594c97ea7..e6275cfe1 100644 --- a/docs/coverage/test/html/neureka.framing/Relation.html +++ b/docs/coverage/test/html/neureka.framing/Relation.html @@ -1 +1 @@ -Relation

Relation

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total186 of 48861%34 of 6648%23493380116
removeChild(Tensor)1040%100%66191911
update(Component.OwnerChangeRequest)742122%15316%810131801
findRootTensor()22893%1583%140401
getPermuteRelationFor(Tensor)22692%2466%241501
childCount()21184%1150%120101
hasChildren()11090%2250%230101
lambda$childCount$0(WeakReference)1685%1150%120101
addChild(Tensor)74100%2100%0201201
getChildren()48100%2675%250901
addPermuteRelationFor(Tensor, int[])27100%6100%040501
toString()24100%n/a010101
hasParent()7100%2100%020101
Relation(Tensor)6100%n/a010101
newParentToChildren()5100%n/a010101
newChildToParent(Tensor)5100%n/a010101
getParent()4100%n/a010101
\ No newline at end of file +Relation

Relation

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total188 of 48861%35 of 6646%24493380116
removeChild(Tensor)1040%100%66191911
update(Component.OwnerChangeRequest)742122%15316%810131801
findRootTensor()42686%2466%240401
getPermuteRelationFor(Tensor)22692%2466%241501
childCount()21184%1150%120101
hasChildren()11090%2250%230101
lambda$childCount$0(WeakReference)1685%1150%120101
addChild(Tensor)74100%2100%0201201
getChildren()48100%2675%250901
addPermuteRelationFor(Tensor, int[])27100%6100%040501
toString()24100%n/a010101
hasParent()7100%2100%020101
Relation(Tensor)6100%n/a010101
newParentToChildren()5100%n/a010101
newChildToParent(Tensor)5100%n/a010101
getParent()4100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.framing/Relation.java.html b/docs/coverage/test/html/neureka.framing/Relation.java.html index 9f031130e..310d78644 100644 --- a/docs/coverage/test/html/neureka.framing/Relation.java.html +++ b/docs/coverage/test/html/neureka.framing/Relation.java.html @@ -228,7 +228,7 @@ */ public Optional<Tensor<V>> findRootTensor() { - if ( _parent == null ) return Optional.empty(); + if ( _parent == null ) return Optional.empty(); else if ( !_parent.has( Relation.class ) ) return Optional.empty(); else if ( !_parent.get( Relation.class ).hasParent() ) return Optional.of(_parent); else return _parent.get( Relation.class ).findRootTensor(); @@ -285,4 +285,4 @@ return Optional.ofNullable( _parent ); } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.framing/index.html b/docs/coverage/test/html/neureka.framing/index.html index db5799520..c51b157f1 100644 --- a/docs/coverage/test/html/neureka.framing/index.html +++ b/docs/coverage/test/html/neureka.framing/index.html @@ -1 +1 @@ -neureka.framing
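The visible change in the Relation.java hunk above appears confined to the coverage report's markup around findRootTensor() (the stripped source text is identical), but the method itself is worth a gloss: it climbs the parent chain recursively and reports the top-most ancestor as an Optional, returning an empty Optional when invoked on a tensor that has no parent. A minimal standalone sketch of that lookup pattern, using a hypothetical Node type in place of a tensor carrying a Relation component (the has(Relation.class) guard from the original is omitted for brevity):

import java.util.Optional;

// Walk up a parent chain and return the root ancestor, mirroring the shape of
// Relation.findRootTensor() shown in the diff above.
final class Node {
    private final Node parent;                        // null for a root node
    Node(Node parent) { this.parent = parent; }

    Optional<Node> findRoot() {
        if (parent == null) return Optional.empty();  // a root has nothing above it
        if (parent.parent == null) return Optional.of(parent);
        return parent.findRoot();                     // keep climbing
    }

    public static void main(String[] args) {
        Node root = new Node(null);
        Node child = new Node(root);
        Node grandChild = new Node(child);
        System.out.println(grandChild.findRoot().get() == root); // true
        System.out.println(root.findRoot().isPresent());         // false
    }
}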

neureka.framing

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total264 of 1,50882%49 of 15468%371274725635002
Relation18630261%343248%2349338011601
NDFrame7894292%157382%14781417623401
\ No newline at end of file +neureka.framing

neureka.framing

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total266 of 1,50882%50 of 15467%381274725635002
Relation18830061%353146%2449338011601
NDFrame7894292%157382%14781417623401
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.framing/index.source.html b/docs/coverage/test/html/neureka.framing/index.source.html index 39089f2ed..432dd698a 100644 --- a/docs/coverage/test/html/neureka.framing/index.source.html +++ b/docs/coverage/test/html/neureka.framing/index.source.html @@ -1 +1 @@ -neureka.framing

neureka.framing

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total264 of 1,50882%49 of 15468%371274725635002
Relation.java18630261%343248%2349338011601
NDFrame.java7894292%157382%14781417623401
\ No newline at end of file +neureka.framing

neureka.framing

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total266 of 1,50882%50 of 15467%381274725635002
Relation.java18830061%353146%2449338011601
NDFrame.java7894292%157382%14781417623401
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.math.args/Arg$Axis.html b/docs/coverage/test/html/neureka.math.args/Arg$Axis.html index b4dd920dc..ea3485968 100644 --- a/docs/coverage/test/html/neureka.math.args/Arg$Axis.html +++ b/docs/coverage/test/html/neureka.math.args/Arg$Axis.html @@ -1 +1 @@ -Arg.Axis

Arg.Axis

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 10100%0 of 0n/a020202
of(int)5100%n/a010101
Arg.Axis(int)5100%n/a010101
\ No newline at end of file +Arg.Axis

Arg.Axis

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 10100%0 of 0n/a020202
of(int)5100%n/a010101
Arg.Axis(int)5100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.math.args/Arg$DerivIdx.html b/docs/coverage/test/html/neureka.math.args/Arg$DerivIdx.html index 1b85cb473..0ade2e20b 100644 --- a/docs/coverage/test/html/neureka.math.args/Arg$DerivIdx.html +++ b/docs/coverage/test/html/neureka.math.args/Arg$DerivIdx.html @@ -1 +1 @@ -Arg.DerivIdx

Arg.DerivIdx

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 10100%0 of 0n/a020202
of(int)5100%n/a010101
Arg.DerivIdx(int)5100%n/a010101
\ No newline at end of file +Arg.DerivIdx

Arg.DerivIdx

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 10100%0 of 0n/a020202
of(int)5100%n/a010101
Arg.DerivIdx(int)5100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.math.args/Arg$Derivative.html b/docs/coverage/test/html/neureka.math.args/Arg$Derivative.html index 2597803ee..71dc8328f 100644 --- a/docs/coverage/test/html/neureka.math.args/Arg$Derivative.html +++ b/docs/coverage/test/html/neureka.math.args/Arg$Derivative.html @@ -1 +1 @@ -Arg.Derivative

Arg.Derivative

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 9100%0 of 0n/a020202
of(Tensor)5100%n/a010101
Arg.Derivative(Tensor)4100%n/a010101
\ No newline at end of file +Arg.Derivative

Arg.Derivative

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 9100%0 of 0n/a020202
of(Tensor)5100%n/a010101
Arg.Derivative(Tensor)4100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.math.args/Arg$Ends.html b/docs/coverage/test/html/neureka.math.args/Arg$Ends.html index 048f2264b..d4494c87e 100644 --- a/docs/coverage/test/html/neureka.math.args/Arg$Ends.html +++ b/docs/coverage/test/html/neureka.math.args/Arg$Ends.html @@ -1 +1 @@ -Arg.Ends

Arg.Ends

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total9 of 90%0 of 0n/a222222
of(int[])50%n/a111111
Arg.Ends(int[])40%n/a111111
\ No newline at end of file +Arg.Ends

Arg.Ends

  Total: 9 of 9 instructions missed (0% coverage); both of(int[]) and the Arg.Ends(int[]) constructor are untested.

docs/coverage/test/html/neureka.math.args/ : the remaining regenerated JaCoCo pages for this package (Arg$Indices.html, Arg$Layout.html, Arg$MinRank.html, Arg$Offset.html, Arg$Seed.html, Arg$Shape.html, Arg$Stride.html, Arg$TargetDevice.html, Arg$VarIdx.html, Arg.html, Arg.java.html, Args.html, Args.java.html, index.html, index.source.html) carry identical figures on the old and new side of the diff; only report metadata changed. Headline numbers:

    Package total ......... 68 of 309 instructions missed (77%), 10 of 26 branches missed (61%), 15 classes
    Arg ................... 35 of 99 instructions missed (64%), 7 of 16 branches missed (56%); get() is the only partially covered method
    Args .................. 14 of 59 instructions missed (76%), 3 of 8 branches missed (62%); valOfOr(Class, Object) and _removeOrReject(Component) are untested
    Arg.MinRank ........... 10 of 10 instructions missed (0%), fully untested
    Arg.Ends .............. 9 of 9 instructions missed (0%), fully untested
    Arg.Seed .............. fully covered (39 instructions, 2 branches), including _longStringHash(String)
    Arg.Axis, Arg.DerivIdx, Arg.Derivative, Arg.Indices, Arg.Layout, Arg.Offset, Arg.Shape, Arg.Stride, Arg.TargetDevice, Arg.VarIdx: fully covered
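A minimal sketch (not part of the report) of how the "missed of total" counts and the whole-number percentages in these tables appear to relate, assuming the usual JaCoCo convention of counting missed units and truncating the coverage ratio:

    // Sketch only: reproduces the percentages quoted above from the missed/total pairs,
    // assuming truncation to a whole percent.
    public class CoverageRatioSketch {

        static int coveredPercent(int missed, int total) {
            if (total == 0) return 100; // rows with no counters show "n/a" in the report
            return (int) Math.floor(100.0 * (total - missed) / total);
        }

        public static void main(String[] args) {
            System.out.println(coveredPercent(68, 309)); // 77, the package total above
            System.out.println(coveredPercent(35, 99));  // 64, the Arg row
            System.out.println(coveredPercent(14, 59));  // 76, the Args row
        }
    }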
docs/coverage/test/html/neureka.math.implementations/FunctionConstant.html, FunctionConstant.java.html, FunctionInput.html, FunctionInput.java.html : regenerated with unchanged figures.

    FunctionConstant ...... 11 of 124 instructions missed (91%), 3 of 16 branches missed (81%); getSubFunctions(), getDerivative(int), isFlat() and getOperation() are untested
    FunctionInput ......... 10 of 308 instructions missed (96%), 3 of 44 branches missed (93%); isFlat() is untested, call(double[], int) and getDerivative(int) are partially covered
docs/coverage/test/html/neureka.math.implementations/FunctionNode.html, FunctionNode.java.html : this is the one report in this section whose figures actually change.

    FunctionNode (before) . 25 of 382 instructions missed (93%), 3 of 62 branches missed (95%); _shareGuestDevice(Tensor[]) and _deviceFor(Tensor[]) fully covered
    FunctionNode (after) .. 34 of 382 instructions missed (91%), 10 of 62 branches missed (83%); _shareGuestDevice(Tensor[]) drops to 88% instruction and 70% branch coverage, _deviceFor(Tensor[]) to 90% branch coverage

The source view marks the lines whose coverage status changed:

    boolean doAccel = !_operation.getOperator().equals(",") && onSameDevice;                                      // context in _deviceFor
    return ( doAccel && device != null ? device : inputs[ 0 ].getDevice() );                                      // changed line in _deviceFor
    onSameGuestDevice = ( !tensor.isVirtual() && device == tensor.get(Device.class) ) && onSameGuestDevice;       // changed line, loop body in _shareGuestDevice
    if ( device != null && tensors.length == 2 && tensors[ 1 ].size() == 1 ) onSameGuestDevice = true;            // changed line, scalar special case in _shareGuestDevice
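To make the branch structure behind those figures visible outside the report, here is a standalone sketch that mirrors only the control flow shown in the hunk above. SketchDevice and SketchTensor are placeholders, and the initialisation of device and onSameGuestDevice is not shown in the hunk, so it is assumed here; this is not Neureka's actual API. A test that wants the dropped branches back would need operand sets hitting each arm (null device, virtual tensors, mismatched devices, and the two-tensor scalar case).

    // Simplified mirror of the branch structure in the FunctionNode.java hunk above.
    // Only the control flow is taken from the report; names and types are placeholders.
    final class DeviceChoiceSketch {

        interface SketchDevice {}

        interface SketchTensor {
            boolean isVirtual();
            SketchDevice device();   // stands in for tensor.get(Device.class)
            long size();
        }

        /** Mirrors _shareGuestDevice: every non-virtual operand must sit on the same device. */
        static boolean shareGuestDevice(SketchTensor[] tensors) {
            SketchDevice device = tensors[0].device();   // assumed initialisation
            boolean onSameGuestDevice = true;            // assumed initialisation
            if (device != null) {
                for (SketchTensor t : tensors)
                    onSameGuestDevice = (!t.isVirtual() && device == t.device()) && onSameGuestDevice;
            }
            else onSameGuestDevice = false;
            // Special case from the hunk: a scalar second operand counts as "same device".
            if (device != null && tensors.length == 2 && tensors[1].size() == 1) onSameGuestDevice = true;
            return onSameGuestDevice;
        }

        /** Mirrors _deviceFor: accelerate only when the operands share a device. */
        static SketchDevice deviceFor(SketchTensor[] inputs, boolean isCommaOperator, SketchDevice fallback) {
            SketchDevice device = inputs[0].device();
            boolean doAccel = !isCommaOperator && shareGuestDevice(inputs);
            return (doAccel && device != null) ? device : fallback;
        }
    }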

docs/coverage/test/html/neureka.math.implementations/FunctionVariable.html, FunctionVariable.java.html : unchanged.

    FunctionVariable ...... 42 of 174 instructions missed (75%), 6 of 22 branches missed (72%); call(double[], int) is largely untested (24% instruction coverage), and getSubFunctions(), getDerivative(int), isFlat() and getOperation() are untested

docs/coverage/test/html/neureka.math.implementations/index.html, index.source.html : the package totals move with the FunctionNode change.

    Package total (before)  88 of 988 instructions missed (91%), 15 of 144 branches missed (89%)
    Package total (after) . 97 of 988 instructions missed (90%), 22 of 144 branches missed (84%)
docs/coverage/test/html/neureka.math.parsing/ : FunctionParser.html, FunctionParser.java.html, ParseUtil.html, ParseUtil.java.html, index.html and index.source.html are regenerated with unchanged figures.

    Package total ......... 155 of 2,002 instructions missed (92%), 63 of 326 branches missed (80%)
    FunctionParser ........ 60 of 921 instructions missed (93%), 16 of 110 branches missed (85%); the gaps sit in _buildFunction, parse(String, boolean), parse(Operation, int, boolean) and _buildOperators
    ParseUtil ............. 95 of 1,081 instructions missed (91%), 47 of 216 branches missed (78%); cleanedHeadAndTail, unpackAndCorrect and groupBy carry most of the missed instructions, and isForbiddenChar(char) misses 15 of 32 branches
docs/coverage/test/html/neureka.math/ : Function$Callable.html, Function.html, Function.java.html, FunctionCache$1.html, FunctionCache.html, FunctionCache.java.html, Functions.html, Functions.java.html, index.html and index.source.html are regenerated with unchanged figures.

    Package total ......... 231 of 1,126 instructions missed (79%), 5 of 24 branches missed (79%), 5 classes
    Functions ............. 113 of 597 instructions missed (81%), no branches; the misses are 34 untested accessor methods (getReshape() through getConcat()) plus part of lambda$toString$0(Field)
    Function .............. 100 of 393 instructions missed (74%), 3 of 14 branches missed (78%); eleven call/invoke/derive convenience overloads are untested and lambda$with$3(Args, Tensor[]) is partially covered
    Function.Callable ..... fully covered (11 instructions)
    FunctionCache ......... 16 of 111 instructions missed (85%), 1 of 8 branches missed (87%); only put(Function) is partially covered
    FunctionCache.new LinkedHashMap() {...} : 2 of 14 instructions missed (85%); removeEldestEntry(Map.Entry) misses one of its two branches
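The FunctionCache rows above, an anonymous LinkedHashMap subclass overriding removeEldestEntry(Map.Entry), point at the standard bounded-cache idiom in the JDK. A generic sketch of that idiom follows; the capacity, key type and eviction order are assumptions for illustration, not Neureka's actual implementation.

    import java.util.LinkedHashMap;
    import java.util.Map;

    // Generic bounded-cache sketch suggested by the removeEldestEntry entry above.
    final class BoundedCacheSketch<K, V> {

        private final int capacity;
        private final Map<K, V> entries;

        BoundedCacheSketch(int capacity) {
            this.capacity = capacity;
            // LinkedHashMap evicts through removeEldestEntry on every put once the cap is hit.
            this.entries = new LinkedHashMap<K, V>(16, 0.75f, true) {
                @Override
                protected boolean removeEldestEntry(Map.Entry<K, V> eldest) {
                    return size() > BoundedCacheSketch.this.capacity;
                }
            };
        }

        V get(K key)         { return entries.get(key); }
        void put(K key, V v) { entries.put(key, v); }
        int size()           { return entries.size(); }
    }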
docs/coverage/test/html/neureka.ndim.config.types.permuted/ : Permuted1DConfiguration, Permuted2DConfiguration, Permuted3DConfiguration and PermutedNDConfiguration (.html and .java.html) plus index.html and index.source.html are regenerated with unchanged figures.

    Package total ............... 63 of 796 instructions missed (92%), 12 of 26 branches missed (53%)
    Permuted3DConfiguration ..... 28 of 305 instructions missed (90%), 8 of 12 branches missed (33%); indicesMap(int) and strides(int) are untested
    Permuted1DConfiguration ..... 22 of 125 instructions missed (82%), 2 of 4 branches missed (50%); indexOfIndices(int), shape(int), indicesMap(int) and strides(int) are untested, the constructor is partially covered
    Permuted2DConfiguration ..... 8 of 222 instructions missed (96%), 2 of 6 branches missed (66%); only indicesMap(int) is untested
    PermutedNDConfiguration ..... 5 of 144 instructions missed (96%); only indicesMap(int) is untested
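The methods these configuration reports revolve around, indexOfIndices and indicesOfIndex, map between multi-dimensional indices and a flat array index via strides. The sketch below shows the textbook stride arithmetic behind that kind of mapping; the shapes and strides are illustrative and this is the general formulation, not necessarily Neureka's exact code.

    // Textbook stride arithmetic behind indexOfIndices / indicesOfIndex style methods.
    final class StrideIndexSketch {

        /** Flat index = sum over i of indices[i] * strides[i]. */
        static int indexOfIndices(int[] indices, int[] strides) {
            int flat = 0;
            for (int i = 0; i < indices.length; i++) flat += indices[i] * strides[i];
            return flat;
        }

        /** Inverse for row-major strides: peel off one axis at a time. */
        static int[] indicesOfIndex(int flat, int[] strides) {
            int[] indices = new int[strides.length];
            for (int i = 0; i < strides.length; i++) {
                indices[i] = flat / strides[i];
                flat %= strides[i];
            }
            return indices;
        }

        public static void main(String[] args) {
            int[] strides = {12, 4, 1}; // row-major strides of a hypothetical shape {2, 3, 4}
            int flat = indexOfIndices(new int[]{1, 2, 3}, strides);            // 12 + 8 + 3 = 23
            System.out.println(flat);                                          // 23
            System.out.println(java.util.Arrays.toString(indicesOfIndex(flat, strides))); // [1, 2, 3]
        }
    }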
docs/coverage/test/html/neureka.ndim.config.types.simple/ : Simple0DConfiguration, Simple1DConfiguration and Simple2DConfiguration (.html and .java.html) are regenerated with unchanged figures; all three classes are fully covered (66, 98 and 211 instructions respectively, every method at 100%).
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.ndim.config.types.simple/Simple2DConfiguration.java.html b/docs/coverage/test/html/neureka.ndim.config.types.simple/Simple2DConfiguration.java.html index d7d2dc6f8..e0bbe7f5a 100644 --- a/docs/coverage/test/html/neureka.ndim.config.types.simple/Simple2DConfiguration.java.html +++ b/docs/coverage/test/html/neureka.ndim.config.types.simple/Simple2DConfiguration.java.html @@ -99,4 +99,4 @@ return i; } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.ndim.config.types.simple/Simple3DConfiguration.html b/docs/coverage/test/html/neureka.ndim.config.types.simple/Simple3DConfiguration.html index 2d8e9a08b..8be3a0f7f 100644 --- a/docs/coverage/test/html/neureka.ndim.config.types.simple/Simple3DConfiguration.html +++ b/docs/coverage/test/html/neureka.ndim.config.types.simple/Simple3DConfiguration.html @@ -1 +1 @@ -Simple3DConfiguration

Simple3DConfiguration

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 289100%0 of 12100%023034017
indexOfIndex(int)40100%n/a010601
indicesOfIndex(int)40100%n/a010601
Simple3DConfiguration(int[], int[])33100%n/a010801
indexOfIndices(int[])21100%n/a010101
shape()18100%n/a010101
indicesMap()18100%n/a010101
strides()18100%n/a010101
spread()15100%n/a010101
offset()15100%n/a010101
indexOfIndices(int, int, int)15100%n/a010101
shape(int)14100%4100%030101
indicesMap(int)14100%4100%030101
strides(int)14100%4100%030101
construct(int[], int[])8100%n/a010101
rank()2100%n/a010101
spread(int)2100%n/a010101
offset(int)2100%n/a010101
\ No newline at end of file +Simple3DConfiguration

Simple3DConfiguration

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 289100%0 of 12100%023034017
indexOfIndex(int)40100%n/a010601
indicesOfIndex(int)40100%n/a010601
Simple3DConfiguration(int[], int[])33100%n/a010801
indexOfIndices(int[])21100%n/a010101
shape()18100%n/a010101
indicesMap()18100%n/a010101
strides()18100%n/a010101
spread()15100%n/a010101
offset()15100%n/a010101
indexOfIndices(int, int, int)15100%n/a010101
shape(int)14100%4100%030101
indicesMap(int)14100%4100%030101
strides(int)14100%4100%030101
construct(int[], int[])8100%n/a010101
rank()2100%n/a010101
spread(int)2100%n/a010101
offset(int)2100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.ndim.config.types.simple/Simple3DConfiguration.java.html b/docs/coverage/test/html/neureka.ndim.config.types.simple/Simple3DConfiguration.java.html index 764e81f58..05c1be9c8 100644 --- a/docs/coverage/test/html/neureka.ndim.config.types.simple/Simple3DConfiguration.java.html +++ b/docs/coverage/test/html/neureka.ndim.config.types.simple/Simple3DConfiguration.java.html @@ -107,4 +107,4 @@ d3 * _stride3; } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.ndim.config.types.simple/SimpleNDConfiguration.html b/docs/coverage/test/html/neureka.ndim.config.types.simple/SimpleNDConfiguration.html index c6d674167..b787218a4 100644 --- a/docs/coverage/test/html/neureka.ndim.config.types.simple/SimpleNDConfiguration.html +++ b/docs/coverage/test/html/neureka.ndim.config.types.simple/SimpleNDConfiguration.html @@ -1 +1 @@ -SimpleNDConfiguration

SimpleNDConfiguration

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 136100%0 of 4100%018027016
indicesOfIndex(int)34100%2100%020501
indexOfIndices(int[])24100%2100%020301
SimpleNDConfiguration(int[], int[])11100%n/a010401
spread()10100%n/a010301
construct(int[], int[])8100%n/a010101
indexOfIndex(int)6100%n/a010101
shape()5100%n/a010101
shape(int)5100%n/a010101
indicesMap()5100%n/a010101
indicesMap(int)5100%n/a010101
strides()5100%n/a010101
strides(int)5100%n/a010101
offset()5100%n/a010101
rank()4100%n/a010101
spread(int)2100%n/a010101
offset(int)2100%n/a010101
\ No newline at end of file +SimpleNDConfiguration

SimpleNDConfiguration

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 136100%0 of 4100%018027016
indicesOfIndex(int)34100%2100%020501
indexOfIndices(int[])24100%2100%020301
SimpleNDConfiguration(int[], int[])11100%n/a010401
spread()10100%n/a010301
construct(int[], int[])8100%n/a010101
indexOfIndex(int)6100%n/a010101
shape()5100%n/a010101
shape(int)5100%n/a010101
indicesMap()5100%n/a010101
indicesMap(int)5100%n/a010101
strides()5100%n/a010101
strides(int)5100%n/a010101
offset()5100%n/a010101
rank()4100%n/a010101
spread(int)2100%n/a010101
offset(int)2100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.ndim.config.types.simple/SimpleNDConfiguration.java.html b/docs/coverage/test/html/neureka.ndim.config.types.simple/SimpleNDConfiguration.java.html index 4fc437e1c..63323eb44 100644 --- a/docs/coverage/test/html/neureka.ndim.config.types.simple/SimpleNDConfiguration.java.html +++ b/docs/coverage/test/html/neureka.ndim.config.types.simple/SimpleNDConfiguration.java.html @@ -88,4 +88,4 @@ } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.ndim.config.types.simple/index.html b/docs/coverage/test/html/neureka.ndim.config.types.simple/index.html index 3400f8944..7b4701a88 100644 --- a/docs/coverage/test/html/neureka.ndim.config.types.simple/index.html +++ b/docs/coverage/test/html/neureka.ndim.config.types.simple/index.html @@ -1 +1 @@ -neureka.ndim.config.types.simple

neureka.ndim.config.types.simple

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total0 of 800100%0 of 22100%093012808205
Simple3DConfiguration289100%12100%02303401701
Simple2DConfiguration211100%6100%02003201701
SimpleNDConfiguration136100%4100%01802701601
Simple1DConfiguration98100%n/a01702001701
Simple0DConfiguration66100%n/a01501501501
\ No newline at end of file +neureka.ndim.config.types.simple

neureka.ndim.config.types.simple

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total0 of 800100%0 of 22100%093012808205
Simple3DConfiguration289100%12100%02303401701
Simple2DConfiguration211100%6100%02003201701
SimpleNDConfiguration136100%4100%01802701601
Simple1DConfiguration98100%n/a01702001701
Simple0DConfiguration66100%n/a01501501501
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.ndim.config.types.simple/index.source.html b/docs/coverage/test/html/neureka.ndim.config.types.simple/index.source.html index 1c681fad4..f28ab1a29 100644 --- a/docs/coverage/test/html/neureka.ndim.config.types.simple/index.source.html +++ b/docs/coverage/test/html/neureka.ndim.config.types.simple/index.source.html @@ -1 +1 @@ -neureka.ndim.config.types.simple

neureka.ndim.config.types.simple

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total0 of 800100%0 of 22100%093012808205
Simple3DConfiguration.java289100%12100%02303401701
Simple2DConfiguration.java211100%6100%02003201701
SimpleNDConfiguration.java136100%4100%01802701601
Simple1DConfiguration.java98100%n/a01702001701
Simple0DConfiguration.java66100%n/a01501501501
\ No newline at end of file +neureka.ndim.config.types.simple

neureka.ndim.config.types.simple

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total0 of 800100%0 of 22100%093012808205
Simple3DConfiguration.java289100%12100%02303401701
Simple2DConfiguration.java211100%6100%02003201701
SimpleNDConfiguration.java136100%4100%01802701601
Simple1DConfiguration.java98100%n/a01702001701
Simple0DConfiguration.java66100%n/a01501501501
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.ndim.config.types.sliced/Sliced0DConfiguration.html b/docs/coverage/test/html/neureka.ndim.config.types.sliced/Sliced0DConfiguration.html index f54d103ef..ac5223053 100644 --- a/docs/coverage/test/html/neureka.ndim.config.types.sliced/Sliced0DConfiguration.html +++ b/docs/coverage/test/html/neureka.ndim.config.types.sliced/Sliced0DConfiguration.html @@ -1 +1 @@ -Sliced0DConfiguration

Sliced0DConfiguration

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 91100%0 of 0n/a016019016
construct(int[], int[])12100%n/a010101
Sliced0DConfiguration(int, int)9100%n/a010401
shape()8100%n/a010101
offset()8100%n/a010101
indicesMap()7100%n/a010101
strides()7100%n/a010101
spread()7100%n/a010101
indicesOfIndex(int)7100%n/a010101
indexOfIndices(int[])7100%n/a010101
indexOfIndex(int)5100%n/a010101
shape(int)3100%n/a010101
offset(int)3100%n/a010101
rank()2100%n/a010101
indicesMap(int)2100%n/a010101
strides(int)2100%n/a010101
spread(int)2100%n/a010101
\ No newline at end of file +Sliced0DConfiguration

Sliced0DConfiguration

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 91100%0 of 0n/a016019016
construct(int[], int[])12100%n/a010101
Sliced0DConfiguration(int, int)9100%n/a010401
shape()8100%n/a010101
offset()8100%n/a010101
indicesMap()7100%n/a010101
strides()7100%n/a010101
spread()7100%n/a010101
indicesOfIndex(int)7100%n/a010101
indexOfIndices(int[])7100%n/a010101
indexOfIndex(int)5100%n/a010101
shape(int)3100%n/a010101
offset(int)3100%n/a010101
rank()2100%n/a010101
indicesMap(int)2100%n/a010101
strides(int)2100%n/a010101
spread(int)2100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.ndim.config.types.sliced/Sliced0DConfiguration.java.html b/docs/coverage/test/html/neureka.ndim.config.types.sliced/Sliced0DConfiguration.java.html index 15805d0a4..6ee8e2dec 100644 --- a/docs/coverage/test/html/neureka.ndim.config.types.sliced/Sliced0DConfiguration.java.html +++ b/docs/coverage/test/html/neureka.ndim.config.types.sliced/Sliced0DConfiguration.java.html @@ -78,4 +78,4 @@ } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.ndim.config.types.sliced/Sliced1DConfiguration.html b/docs/coverage/test/html/neureka.ndim.config.types.sliced/Sliced1DConfiguration.html index d4fe721a5..1df2b4a17 100644 --- a/docs/coverage/test/html/neureka.ndim.config.types.sliced/Sliced1DConfiguration.html +++ b/docs/coverage/test/html/neureka.ndim.config.types.sliced/Sliced1DConfiguration.html @@ -1 +1 @@ -Sliced1DConfiguration

Sliced1DConfiguration

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total12 of 16692%3 of 650%321027018
Sliced1DConfiguration(int, int, int, int, int)122769%3350%3401001
construct(int[], int[], int[], int[], int[])21100%n/a010101
indexOfIndex(int)14100%n/a010101
indexOfIndices(int[])13100%n/a010101
indexOfIndices(int)11100%n/a010101
indicesOfIndex(int)10100%n/a010101
shape()8100%n/a010101
indicesMap()8100%n/a010101
strides()8100%n/a010101
spread()8100%n/a010101
offset()8100%n/a010101
shape(int)3100%n/a010101
indicesMap(int)3100%n/a010101
strides(int)3100%n/a010101
spread(int)3100%n/a010101
offset(int)3100%n/a010101
rank()2100%n/a010101
static {...}1100%n/a010101
\ No newline at end of file +Sliced1DConfiguration

Sliced1DConfiguration

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total12 of 16692%3 of 650%321027018
Sliced1DConfiguration(int, int, int, int, int)122769%3350%3401001
construct(int[], int[], int[], int[], int[])21100%n/a010101
indexOfIndex(int)14100%n/a010101
indexOfIndices(int[])13100%n/a010101
indexOfIndices(int)11100%n/a010101
indicesOfIndex(int)10100%n/a010101
shape()8100%n/a010101
indicesMap()8100%n/a010101
strides()8100%n/a010101
spread()8100%n/a010101
offset()8100%n/a010101
shape(int)3100%n/a010101
indicesMap(int)3100%n/a010101
strides(int)3100%n/a010101
spread(int)3100%n/a010101
offset(int)3100%n/a010101
rank()2100%n/a010101
static {...}1100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.ndim.config.types.sliced/Sliced1DConfiguration.java.html b/docs/coverage/test/html/neureka.ndim.config.types.sliced/Sliced1DConfiguration.java.html index 6f373613c..5cb998218 100644 --- a/docs/coverage/test/html/neureka.ndim.config.types.sliced/Sliced1DConfiguration.java.html +++ b/docs/coverage/test/html/neureka.ndim.config.types.sliced/Sliced1DConfiguration.java.html @@ -99,4 +99,4 @@ @Override public final int indexOfIndices( int d1 ) { return ( d1 * _spread + _offset ) * _stride; } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.ndim.config.types.sliced/Sliced2DConfiguration.html b/docs/coverage/test/html/neureka.ndim.config.types.sliced/Sliced2DConfiguration.html index f0f696d33..91c9c1f9c 100644 --- a/docs/coverage/test/html/neureka.ndim.config.types.sliced/Sliced2DConfiguration.html +++ b/docs/coverage/test/html/neureka.ndim.config.types.sliced/Sliced2DConfiguration.html @@ -1 +1 @@ -Sliced2DConfiguration

Sliced2DConfiguration

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total3 of 29698%1 of 1090%122038017
indicesMap(int)3562%1150%120101
Sliced2DConfiguration(int[], int[], int[], int[], int[])53100%n/a0101201
indexOfIndices(int[])34100%n/a010401
indexOfIndex(int)31100%n/a010101
indicesOfIndex(int)30100%n/a010501
indexOfIndices(int, int)30100%n/a010401
shape()13100%n/a010101
indicesMap()13100%n/a010101
strides()13100%n/a010101
spread()13100%n/a010101
offset()13100%n/a010101
construct(int[], int[], int[], int[], int[])11100%n/a010101
shape(int)8100%2100%020101
strides(int)8100%2100%020101
spread(int)8100%2100%020101
offset(int)8100%2100%020101
rank()2100%n/a010101
\ No newline at end of file +Sliced2DConfiguration

Sliced2DConfiguration

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total3 of 29698%1 of 1090%122038017
indicesMap(int)3562%1150%120101
Sliced2DConfiguration(int[], int[], int[], int[], int[])53100%n/a0101201
indexOfIndices(int[])34100%n/a010401
indexOfIndex(int)31100%n/a010101
indicesOfIndex(int)30100%n/a010501
indexOfIndices(int, int)30100%n/a010401
shape()13100%n/a010101
indicesMap()13100%n/a010101
strides()13100%n/a010101
spread()13100%n/a010101
offset()13100%n/a010101
construct(int[], int[], int[], int[], int[])11100%n/a010101
shape(int)8100%2100%020101
strides(int)8100%2100%020101
spread(int)8100%2100%020101
offset(int)8100%2100%020101
rank()2100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.ndim.config.types.sliced/Sliced2DConfiguration.java.html b/docs/coverage/test/html/neureka.ndim.config.types.sliced/Sliced2DConfiguration.java.html index 761fc1fa0..9923394fe 100644 --- a/docs/coverage/test/html/neureka.ndim.config.types.sliced/Sliced2DConfiguration.java.html +++ b/docs/coverage/test/html/neureka.ndim.config.types.sliced/Sliced2DConfiguration.java.html @@ -124,4 +124,4 @@ return i; } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.ndim.config.types.sliced/Sliced3DConfiguration.html b/docs/coverage/test/html/neureka.ndim.config.types.sliced/Sliced3DConfiguration.html index b2a177698..e0ea3c0a3 100644 --- a/docs/coverage/test/html/neureka.ndim.config.types.sliced/Sliced3DConfiguration.html +++ b/docs/coverage/test/html/neureka.ndim.config.types.sliced/Sliced3DConfiguration.html @@ -1 +1 @@ -Sliced3DConfiguration

Sliced3DConfiguration

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total28 of 42193%8 of 2060%627243217
indicesMap(int)140%40%331111
strides(int)140%40%331111
Sliced3DConfiguration(int[], int[], int[], int[], int[])78100%n/a0101701
indexOfIndex(int)58100%n/a010601
indicesOfIndex(int)40100%n/a010601
indexOfIndices(int[])39100%n/a010101
indexOfIndices(int, int, int)33100%n/a010101
shape()18100%n/a010101
indicesMap()18100%n/a010101
strides()18100%n/a010101
spread()18100%n/a010101
offset()18100%n/a010101
shape(int)14100%4100%030101
spread(int)14100%4100%030101
offset(int)14100%4100%030101
construct(int[], int[], int[], int[], int[])11100%n/a010101
rank()2100%n/a010101
\ No newline at end of file +Sliced3DConfiguration

Sliced3DConfiguration

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total28 of 42193%8 of 2060%627243217
indicesMap(int)140%40%331111
strides(int)140%40%331111
Sliced3DConfiguration(int[], int[], int[], int[], int[])78100%n/a0101701
indexOfIndex(int)58100%n/a010601
indicesOfIndex(int)40100%n/a010601
indexOfIndices(int[])39100%n/a010101
indexOfIndices(int, int, int)33100%n/a010101
shape()18100%n/a010101
indicesMap()18100%n/a010101
strides()18100%n/a010101
spread()18100%n/a010101
offset()18100%n/a010101
shape(int)14100%4100%030101
spread(int)14100%4100%030101
offset(int)14100%4100%030101
construct(int[], int[], int[], int[], int[])11100%n/a010101
rank()2100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.ndim.config.types.sliced/Sliced3DConfiguration.java.html b/docs/coverage/test/html/neureka.ndim.config.types.sliced/Sliced3DConfiguration.java.html index a27c5d712..013632c3d 100644 --- a/docs/coverage/test/html/neureka.ndim.config.types.sliced/Sliced3DConfiguration.java.html +++ b/docs/coverage/test/html/neureka.ndim.config.types.sliced/Sliced3DConfiguration.java.html @@ -147,4 +147,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.ndim.config.types.sliced/SlicedNDConfiguration.html b/docs/coverage/test/html/neureka.ndim.config.types.sliced/SlicedNDConfiguration.html index e2759d853..12ab61353 100644 --- a/docs/coverage/test/html/neureka.ndim.config.types.sliced/SlicedNDConfiguration.html +++ b/docs/coverage/test/html/neureka.ndim.config.types.sliced/SlicedNDConfiguration.html @@ -1 +1 @@ -SlicedNDConfiguration

SlicedNDConfiguration

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total10 of 16293%0 of 4100%218229216
indicesMap(int)50%n/a111111
strides(int)50%n/a111111
indicesOfIndex(int)34100%2100%020501
indexOfIndices(int[])34100%2100%020401
SlicedNDConfiguration(int[], int[], int[], int[], int[])23100%n/a010701
construct(int[], int[], int[], int[], int[])11100%n/a010101
indexOfIndex(int)6100%n/a010101
shape()5100%n/a010101
shape(int)5100%n/a010101
indicesMap()5100%n/a010101
strides()5100%n/a010101
spread()5100%n/a010101
spread(int)5100%n/a010101
offset()5100%n/a010101
offset(int)5100%n/a010101
rank()4100%n/a010101
\ No newline at end of file +SlicedNDConfiguration

SlicedNDConfiguration

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total10 of 16293%0 of 4100%218229216
indicesMap(int)50%n/a111111
strides(int)50%n/a111111
indicesOfIndex(int)34100%2100%020501
indexOfIndices(int[])34100%2100%020401
SlicedNDConfiguration(int[], int[], int[], int[], int[])23100%n/a010701
construct(int[], int[], int[], int[], int[])11100%n/a010101
indexOfIndex(int)6100%n/a010101
shape()5100%n/a010101
shape(int)5100%n/a010101
indicesMap()5100%n/a010101
strides()5100%n/a010101
spread()5100%n/a010101
spread(int)5100%n/a010101
offset()5100%n/a010101
offset(int)5100%n/a010101
rank()4100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.ndim.config.types.sliced/SlicedNDConfiguration.java.html b/docs/coverage/test/html/neureka.ndim.config.types.sliced/SlicedNDConfiguration.java.html index e496c6407..201a5ea5d 100644 --- a/docs/coverage/test/html/neureka.ndim.config.types.sliced/SlicedNDConfiguration.java.html +++ b/docs/coverage/test/html/neureka.ndim.config.types.sliced/SlicedNDConfiguration.java.html @@ -112,4 +112,4 @@ } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.ndim.config.types.sliced/index.html b/docs/coverage/test/html/neureka.ndim.config.types.sliced/index.html index d1f47bf7c..26d1f2381 100644 --- a/docs/coverage/test/html/neureka.ndim.config.types.sliced/index.html +++ b/docs/coverage/test/html/neureka.ndim.config.types.sliced/index.html @@ -1 +1 @@ -neureka.ndim.config.types.sliced

neureka.ndim.config.types.sliced

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total53 of 1,13695%12 of 4070%12104415648405
Sliced3DConfiguration2839393%81260%62724321701
Sliced1DConfiguration1215492%3350%32102701801
SlicedNDConfiguration1015293%4100%21822921601
Sliced2DConfiguration29398%1990%12203801701
Sliced0DConfiguration91100%n/a01601901601
\ No newline at end of file +neureka.ndim.config.types.sliced

neureka.ndim.config.types.sliced

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total53 of 1,13695%12 of 4070%12104415648405
Sliced3DConfiguration2839393%81260%62724321701
Sliced1DConfiguration1215492%3350%32102701801
SlicedNDConfiguration1015293%4100%21822921601
Sliced2DConfiguration29398%1990%12203801701
Sliced0DConfiguration91100%n/a01601901601
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.ndim.config.types.sliced/index.source.html b/docs/coverage/test/html/neureka.ndim.config.types.sliced/index.source.html index 4b98345d6..6d6fec583 100644 --- a/docs/coverage/test/html/neureka.ndim.config.types.sliced/index.source.html +++ b/docs/coverage/test/html/neureka.ndim.config.types.sliced/index.source.html @@ -1 +1 @@ -neureka.ndim.config.types.sliced

neureka.ndim.config.types.sliced

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total53 of 1,13695%12 of 4070%12104415648405
Sliced3DConfiguration.java2839393%81260%62724321701
Sliced1DConfiguration.java1215492%3350%32102701801
SlicedNDConfiguration.java1015293%4100%21822921601
Sliced2DConfiguration.java29398%1990%12203801701
Sliced0DConfiguration.java91100%n/a01601901601
\ No newline at end of file +neureka.ndim.config.types.sliced

neureka.ndim.config.types.sliced

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total53 of 1,13695%12 of 4070%12104415648405
Sliced3DConfiguration.java2839393%81260%62724321701
Sliced1DConfiguration.java1215492%3350%32102701801
SlicedNDConfiguration.java1015293%4100%21822921601
Sliced2DConfiguration.java29398%1990%12203801701
Sliced0DConfiguration.java91100%n/a01601901601
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.ndim.config.types.views.virtual/VirtualNDConfiguration.html b/docs/coverage/test/html/neureka.ndim.config.types.views.virtual/VirtualNDConfiguration.html index 15d355ea1..a67effd0d 100644 --- a/docs/coverage/test/html/neureka.ndim.config.types.views.virtual/VirtualNDConfiguration.html +++ b/docs/coverage/test/html/neureka.ndim.config.types.views.virtual/VirtualNDConfiguration.html @@ -1 +1 @@ -VirtualNDConfiguration

VirtualNDConfiguration

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total4 of 6093%0 of 0n/a217217217
indicesMap(int)20%n/a111111
strides(int)20%n/a111111
VirtualNDConfiguration(int[])7100%n/a010101
construct(int[])7100%n/a010101
shape(int)5100%n/a010101
rank()4100%n/a010101
indicesMap()4100%n/a010101
strides()4100%n/a010101
spread()4100%n/a010101
offset()4100%n/a010101
indicesOfIndex(int)4100%n/a010101
shape()3100%n/a010101
spread(int)2100%n/a010101
offset(int)2100%n/a010101
indexOfIndex(int)2100%n/a010101
indexOfIndices(int[])2100%n/a010101
isVirtual()2100%n/a010101
\ No newline at end of file +VirtualNDConfiguration

VirtualNDConfiguration

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total4 of 6093%0 of 0n/a217217217
indicesMap(int)20%n/a111111
strides(int)20%n/a111111
VirtualNDConfiguration(int[])7100%n/a010101
construct(int[])7100%n/a010101
shape(int)5100%n/a010101
rank()4100%n/a010101
indicesMap()4100%n/a010101
strides()4100%n/a010101
spread()4100%n/a010101
offset()4100%n/a010101
indicesOfIndex(int)4100%n/a010101
shape()3100%n/a010101
spread(int)2100%n/a010101
offset(int)2100%n/a010101
indexOfIndex(int)2100%n/a010101
indexOfIndices(int[])2100%n/a010101
isVirtual()2100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.ndim.config.types.views.virtual/VirtualNDConfiguration.java.html b/docs/coverage/test/html/neureka.ndim.config.types.views.virtual/VirtualNDConfiguration.java.html index 720da407b..81d34280e 100644 --- a/docs/coverage/test/html/neureka.ndim.config.types.views.virtual/VirtualNDConfiguration.java.html +++ b/docs/coverage/test/html/neureka.ndim.config.types.views.virtual/VirtualNDConfiguration.java.html @@ -68,4 +68,4 @@ } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.ndim.config.types.views.virtual/index.html b/docs/coverage/test/html/neureka.ndim.config.types.views.virtual/index.html index d7b205341..8a8238273 100644 --- a/docs/coverage/test/html/neureka.ndim.config.types.views.virtual/index.html +++ b/docs/coverage/test/html/neureka.ndim.config.types.views.virtual/index.html @@ -1 +1 @@ -neureka.ndim.config.types.views.virtual

neureka.ndim.config.types.views.virtual

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total4 of 6093%0 of 0n/a21721721701
VirtualNDConfiguration45693%n/a21721721701
\ No newline at end of file +neureka.ndim.config.types.views.virtual

neureka.ndim.config.types.views.virtual

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total4 of 6093%0 of 0n/a21721721701
VirtualNDConfiguration45693%n/a21721721701
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.ndim.config.types.views.virtual/index.source.html b/docs/coverage/test/html/neureka.ndim.config.types.views.virtual/index.source.html index 5f4d01cf2..cf0ea65c1 100644 --- a/docs/coverage/test/html/neureka.ndim.config.types.views.virtual/index.source.html +++ b/docs/coverage/test/html/neureka.ndim.config.types.views.virtual/index.source.html @@ -1 +1 @@ -neureka.ndim.config.types.views.virtual

neureka.ndim.config.types.views.virtual

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total4 of 6093%0 of 0n/a21721721701
VirtualNDConfiguration.java45693%n/a21721721701
\ No newline at end of file +neureka.ndim.config.types.views.virtual

neureka.ndim.config.types.views.virtual

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total4 of 6093%0 of 0n/a21721721701
VirtualNDConfiguration.java45693%n/a21721721701
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.ndim.config.types.views/SimpleReshapeView.html b/docs/coverage/test/html/neureka.ndim.config.types.views/SimpleReshapeView.html index e96a52a8c..92df083f4 100644 --- a/docs/coverage/test/html/neureka.ndim.config.types.views/SimpleReshapeView.html +++ b/docs/coverage/test/html/neureka.ndim.config.types.views/SimpleReshapeView.html @@ -1 +1 @@ -SimpleReshapeView

SimpleReshapeView

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total20 of 19689%1 of 1291%423439317
SimpleReshapeView(int[], NDConfiguration)57093%1583%1411701
shape(int)50%n/a111111
indicesMap(int)50%n/a111111
strides(int)50%n/a111111
indicesOfIndex(int)34100%2100%020501
_rearrange(int[], int[], int[])22100%4100%030301
indexOfIndices(int[])12100%n/a010201
indexOfIndex(int)6100%n/a010101
spread(int)5100%n/a010101
offset(int)5100%n/a010101
rank()4100%n/a010101
shape()3100%n/a010101
indicesMap()3100%n/a010101
strides()3100%n/a010101
spread()3100%n/a010101
offset()3100%n/a010101
lambda$new$0(Integer)3100%n/a010101
\ No newline at end of file +SimpleReshapeView

SimpleReshapeView

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total20 of 19689%1 of 1291%423439317
SimpleReshapeView(int[], NDConfiguration)57093%1583%1411701
shape(int)50%n/a111111
indicesMap(int)50%n/a111111
strides(int)50%n/a111111
indicesOfIndex(int)34100%2100%020501
_rearrange(int[], int[], int[])22100%4100%030301
indexOfIndices(int[])12100%n/a010201
indexOfIndex(int)6100%n/a010101
spread(int)5100%n/a010101
offset(int)5100%n/a010101
rank()4100%n/a010101
shape()3100%n/a010101
indicesMap()3100%n/a010101
strides()3100%n/a010101
spread()3100%n/a010101
offset()3100%n/a010101
lambda$new$0(Integer)3100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.ndim.config.types.views/SimpleReshapeView.java.html b/docs/coverage/test/html/neureka.ndim.config.types.views/SimpleReshapeView.java.html index 540076bbf..93ca3077b 100644 --- a/docs/coverage/test/html/neureka.ndim.config.types.views/SimpleReshapeView.java.html +++ b/docs/coverage/test/html/neureka.ndim.config.types.views/SimpleReshapeView.java.html @@ -107,4 +107,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.ndim.config.types.views/index.html b/docs/coverage/test/html/neureka.ndim.config.types.views/index.html index 65d741c1b..07e582028 100644 --- a/docs/coverage/test/html/neureka.ndim.config.types.views/index.html +++ b/docs/coverage/test/html/neureka.ndim.config.types.views/index.html @@ -1 +1 @@ -neureka.ndim.config.types.views

neureka.ndim.config.types.views

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total20 of 19689%1 of 1291%42343931701
SimpleReshapeView2017689%11191%42343931701
\ No newline at end of file +neureka.ndim.config.types.views

neureka.ndim.config.types.views

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total20 of 19689%1 of 1291%42343931701
SimpleReshapeView2017689%11191%42343931701
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.ndim.config.types.views/index.source.html b/docs/coverage/test/html/neureka.ndim.config.types.views/index.source.html index 64180b66d..03ed84705 100644 --- a/docs/coverage/test/html/neureka.ndim.config.types.views/index.source.html +++ b/docs/coverage/test/html/neureka.ndim.config.types.views/index.source.html @@ -1 +1 @@ -neureka.ndim.config.types.views

neureka.ndim.config.types.views

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total20 of 19689%1 of 1291%42343931701
SimpleReshapeView.java2017689%11191%42343931701
\ No newline at end of file +neureka.ndim.config.types.views

neureka.ndim.config.types.views

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total20 of 19689%1 of 1291%42343931701
SimpleReshapeView.java2017689%11191%42343931701
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.ndim.config.types/D1C.html b/docs/coverage/test/html/neureka.ndim.config.types/D1C.html index c5580511e..1568e0086 100644 --- a/docs/coverage/test/html/neureka.ndim.config.types/D1C.html +++ b/docs/coverage/test/html/neureka.ndim.config.types/D1C.html @@ -1 +1 @@ -D1C

D1C

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 3100%0 of 0n/a010101
D1C()3100%n/a010101
\ No newline at end of file +D1C

D1C

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 3100%0 of 0n/a010101
D1C()3100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.ndim.config.types/D1C.java.html b/docs/coverage/test/html/neureka.ndim.config.types/D1C.java.html index 172d5cd6a..0f1964d48 100644 --- a/docs/coverage/test/html/neureka.ndim.config.types/D1C.java.html +++ b/docs/coverage/test/html/neureka.ndim.config.types/D1C.java.html @@ -10,4 +10,4 @@ { public abstract int indexOfIndices( int d1 ); } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.ndim.config.types/D2C.html b/docs/coverage/test/html/neureka.ndim.config.types/D2C.html index f3423b942..4ddac8f8c 100644 --- a/docs/coverage/test/html/neureka.ndim.config.types/D2C.html +++ b/docs/coverage/test/html/neureka.ndim.config.types/D2C.html @@ -1 +1 @@ -D2C

D2C

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 3100%0 of 0n/a010101
D2C()3100%n/a010101
\ No newline at end of file +D2C

D2C

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 3100%0 of 0n/a010101
D2C()3100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.ndim.config.types/D2C.java.html b/docs/coverage/test/html/neureka.ndim.config.types/D2C.java.html index 98ec9bcde..4ae977e61 100644 --- a/docs/coverage/test/html/neureka.ndim.config.types/D2C.java.html +++ b/docs/coverage/test/html/neureka.ndim.config.types/D2C.java.html @@ -10,4 +10,4 @@ { public abstract int indexOfIndices( int d1, int d2 ); } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.ndim.config.types/D3C.html b/docs/coverage/test/html/neureka.ndim.config.types/D3C.html index 9c1f84370..5d93762c9 100644 --- a/docs/coverage/test/html/neureka.ndim.config.types/D3C.html +++ b/docs/coverage/test/html/neureka.ndim.config.types/D3C.html @@ -1 +1 @@ -D3C

D3C

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 3100%0 of 0n/a010101
D3C()3100%n/a010101
\ No newline at end of file +D3C

D3C

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 3100%0 of 0n/a010101
D3C()3100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.ndim.config.types/D3C.java.html b/docs/coverage/test/html/neureka.ndim.config.types/D3C.java.html index 19651eb6a..a581db9a2 100644 --- a/docs/coverage/test/html/neureka.ndim.config.types/D3C.java.html +++ b/docs/coverage/test/html/neureka.ndim.config.types/D3C.java.html @@ -10,4 +10,4 @@ { public abstract int indexOfIndices( int d1, int d2, int d3 ); } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.ndim.config.types/index.html b/docs/coverage/test/html/neureka.ndim.config.types/index.html index 2658dcaac..258c59485 100644 --- a/docs/coverage/test/html/neureka.ndim.config.types/index.html +++ b/docs/coverage/test/html/neureka.ndim.config.types/index.html @@ -1 +1 @@ -neureka.ndim.config.types

neureka.ndim.config.types

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total0 of 9100%0 of 0n/a03030303
D3C3100%n/a01010101
D1C3100%n/a01010101
D2C3100%n/a01010101
\ No newline at end of file +neureka.ndim.config.types

neureka.ndim.config.types

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total0 of 9100%0 of 0n/a03030303
D3C3100%n/a01010101
D1C3100%n/a01010101
D2C3100%n/a01010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.ndim.config.types/index.source.html b/docs/coverage/test/html/neureka.ndim.config.types/index.source.html index 95eb9dda6..f7c3e34f9 100644 --- a/docs/coverage/test/html/neureka.ndim.config.types/index.source.html +++ b/docs/coverage/test/html/neureka.ndim.config.types/index.source.html @@ -1 +1 @@ -neureka.ndim.config.types

neureka.ndim.config.types

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total0 of 9100%0 of 0n/a03030303
D3C.java3100%n/a01010101
D1C.java3100%n/a01010101
D2C.java3100%n/a01010101
\ No newline at end of file +neureka.ndim.config.types

neureka.ndim.config.types

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total0 of 9100%0 of 0n/a03030303
D3C.java3100%n/a01010101
D1C.java3100%n/a01010101
D2C.java3100%n/a01010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.ndim.config/AbstractNDC.html b/docs/coverage/test/html/neureka.ndim.config/AbstractNDC.html index f98690961..b0a06c775 100644 --- a/docs/coverage/test/html/neureka.ndim.config/AbstractNDC.html +++ b/docs/coverage/test/html/neureka.ndim.config/AbstractNDC.html @@ -1 +1 @@ -AbstractNDC

AbstractNDC

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total51 of 58591%11 of 8687%11565100013
construct(int[], int[], int[], int[], int[])4717378%33592%32053901
equals(Object)41780%2466%240501
_simpleReshape(int[], NDConfiguration)91100%21083%2701201
toString()56100%n/a010701
equals(NDConfiguration)50100%41275%490801
hashCode()47100%n/a010901
_isSimpleConfiguration(int[], int[], int[], int[], int[])33100%8100%050701
newReshaped(int[])22100%2100%020301
_isSimpleTransposedConfiguration(int[], int[], int[])21100%4100%030401
static {...}11100%n/a010301
_cacheArray(int[])5100%n/a010101
_cached(NDConfiguration)5100%n/a010101
AbstractNDC()3100%n/a010101
\ No newline at end of file +AbstractNDC

AbstractNDC

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total51 of 58591%11 of 8687%11565100013
construct(int[], int[], int[], int[], int[])4717378%33592%32053901
equals(Object)41780%2466%240501
_simpleReshape(int[], NDConfiguration)91100%21083%2701201
toString()56100%n/a010701
equals(NDConfiguration)50100%41275%490801
hashCode()47100%n/a010901
_isSimpleConfiguration(int[], int[], int[], int[], int[])33100%8100%050701
newReshaped(int[])22100%2100%020301
_isSimpleTransposedConfiguration(int[], int[], int[])21100%4100%030401
static {...}11100%n/a010301
_cacheArray(int[])5100%n/a010101
_cached(NDConfiguration)5100%n/a010101
AbstractNDC()3100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.ndim.config/AbstractNDC.java.html b/docs/coverage/test/html/neureka.ndim.config/AbstractNDC.java.html index 08ca03a74..2a04354dc 100644 --- a/docs/coverage/test/html/neureka.ndim.config/AbstractNDC.java.html +++ b/docs/coverage/test/html/neureka.ndim.config/AbstractNDC.java.html @@ -250,4 +250,4 @@ } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.ndim.config/NDConfiguration$Layout.html b/docs/coverage/test/html/neureka.ndim.config/NDConfiguration$Layout.html index bcf4b0f99..459d9499f 100644 --- a/docs/coverage/test/html/neureka.ndim.config/NDConfiguration$Layout.html +++ b/docs/coverage/test/html/neureka.ndim.config/NDConfiguration$Layout.html @@ -1 +1 @@ -NDConfiguration.Layout

NDConfiguration.Layout

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total8 of 18595%5 of 3284%52012904
newStridesFor(int[])58994%11593%1911501
isCompatible(NDConfiguration.Layout)32086%3770%360301
rearrange(int[], int[], int[])41100%1583%140601
static {...}27100%n/a010501
\ No newline at end of file +NDConfiguration.Layout

NDConfiguration.Layout

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total8 of 20296%5 of 3284%52012904
newStridesFor(int[])58994%11593%1911501
isCompatible(NDConfiguration.Layout)32086%3770%360301
static {...}44100%n/a010501
rearrange(int[], int[], int[])41100%1583%140601
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.ndim.config/NDConfiguration$Utility.html b/docs/coverage/test/html/neureka.ndim.config/NDConfiguration$Utility.html index 1ba899e05..c7bf97c30 100644 --- a/docs/coverage/test/html/neureka.ndim.config/NDConfiguration$Utility.html +++ b/docs/coverage/test/html/neureka.ndim.config/NDConfiguration$Utility.html @@ -1 +1 @@ -NDConfiguration.Utility

NDConfiguration.Utility

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total5 of 16696%5 of 2479%61933117
NDConfiguration.Utility()30%n/a111111
_incrementAt(int, int[], int[])13296%1375%131801
_decrementAt(int, int[], int[])13196%1375%131801
rearrange(int[], int[])38100%1583%140501
sizeOfShape(int[])24100%2100%020301
increment(int[], int[])18100%1375%130301
decrement(int[], int[])18100%1375%130301
\ No newline at end of file +NDConfiguration.Utility

NDConfiguration.Utility

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total5 of 16696%5 of 2479%61933117
NDConfiguration.Utility()30%n/a111111
_incrementAt(int, int[], int[])13296%1375%131801
_decrementAt(int, int[], int[])13196%1375%131801
rearrange(int[], int[])38100%1583%140501
sizeOfShape(int[])24100%2100%020301
increment(int[], int[])18100%1375%130301
decrement(int[], int[])18100%1375%130301
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.ndim.config/NDConfiguration.html b/docs/coverage/test/html/neureka.ndim.config/NDConfiguration.html index 3cf09d653..d566466f9 100644 --- a/docs/coverage/test/html/neureka.ndim.config/NDConfiguration.html +++ b/docs/coverage/test/html/neureka.ndim.config/NDConfiguration.html @@ -1 +1 @@ -NDConfiguration

NDConfiguration

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total5 of 20897%0 of 34100%130132113
has(NDTrait)50%n/a111111
getLayout()58100%18100%01001101
asInlineArray()55100%n/a010801
isSimple()23100%6100%040401
isCompact()20100%4100%030301
lambda$isCompact$1(int)13100%4100%030101
lambda$isCompact$2(int)8100%2100%020101
of(int[], int[], int[], int[], int[])7100%n/a010101
size()7100%n/a010101
lambda$size$0(int, int)4100%n/a010101
getTraits()3100%n/a010101
getIndexToIndexAccessPattern()3100%n/a010101
isVirtual()2100%n/a010101
\ No newline at end of file +NDConfiguration

NDConfiguration

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total5 of 21097%0 of 34100%131133114
has(NDTrait)50%n/a111111
getLayout()58100%18100%01001101
asInlineArray()55100%n/a010801
isSimple()23100%6100%040401
isCompact()20100%4100%030301
lambda$isCompact$1(int)13100%4100%030101
lambda$isCompact$2(int)8100%2100%020101
of(int[], int[], int[], int[], int[])7100%n/a010101
size()7100%n/a010101
lambda$size$0(int, int)4100%n/a010101
getTraits()3100%n/a010101
getIndexToIndexAccessPattern()3100%n/a010101
none()2100%n/a010101
isVirtual()2100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.ndim.config/NDConfiguration.java.html b/docs/coverage/test/html/neureka.ndim.config/NDConfiguration.java.html index a51cfc7c7..cf4deaffb 100644 --- a/docs/coverage/test/html/neureka.ndim.config/NDConfiguration.java.html +++ b/docs/coverage/test/html/neureka.ndim.config/NDConfiguration.java.html @@ -45,6 +45,11 @@ */ public interface NDConfiguration { + /** + * @return A {@link NDConfiguration} instance which represents the absence of a configuration. + */ + static NDConfiguration none() { return NoOpNDConfig.INSTANCE; } + static NDConfiguration of( int[] shape, // The shape of the tensor. int[] strides, // Strides are the distances between elements of a tensor in each dimension. @@ -52,7 +57,7 @@ int[] spread, int[] offset ) { - return AbstractNDC.construct(shape, strides, indicesMap, spread, offset); + return AbstractNDC.construct(shape, strides, indicesMap, spread, offset); } /** @@ -87,54 +92,54 @@ * </li> * </ul> */ - enum Layout + enum Layout { - ROW_MAJOR, - COLUMN_MAJOR, - SYMMETRIC, // Both row- and column-major compatible! - UNSPECIFIC; // Possibly a slice or something reshaped/permuted or whatnot... + ROW_MAJOR, + COLUMN_MAJOR, + SYMMETRIC, // Both row- and column-major compatible! + UNSPECIFIC; // Possibly a slice or something reshaped/permuted or whatnot... public boolean isCompatible(Layout other) { - if (this == UNSPECIFIC || other == UNSPECIFIC) return false; - if (this == SYMMETRIC || other == SYMMETRIC) return true; - return this == other; + if (this == UNSPECIFIC || other == UNSPECIFIC) return false; + if (this == SYMMETRIC || other == SYMMETRIC) return true; + return this == other; } public int[] newStridesFor(int[] shape) { - int[] order = new int[shape.length]; - for ( int i = 0; i < shape.length; i++ ) - order[i] = shape.length - 1 - i; + int[] order = new int[shape.length]; + for ( int i = 0; i < shape.length; i++ ) + order[i] = shape.length - 1 - i; - if ( this == COLUMN_MAJOR && shape.length > 1 ) { + if ( this == COLUMN_MAJOR && shape.length > 1 ) { // Swap the first two elements of the order array: - int tmp = order[0]; - order[0] = order[1]; - order[1] = tmp; + int tmp = order[0]; + order[0] = order[1]; + order[1] = tmp; } - int[] strides = new int[shape.length]; - int prod = 1; - if ( this == COLUMN_MAJOR || this == ROW_MAJOR || this == UNSPECIFIC || this == SYMMETRIC) { - for ( int i : order ) { - strides[i] = prod; - prod *= shape[i]; + int[] strides = new int[shape.length]; + int prod = 1; + if ( this == COLUMN_MAJOR || this == ROW_MAJOR || this == UNSPECIFIC || this == SYMMETRIC) { + for ( int i : order ) { + strides[i] = prod; + prod *= shape[i]; } } else - throw new IllegalStateException("Unknown data layout!"); + throw new IllegalStateException("Unknown data layout!"); - return strides; + return strides; } public int[] rearrange(int[] tln, int[] shape, int[] newForm) { - int[] shpTln = this.newStridesFor(shape); - int[] newTln = new int[newForm.length]; - for (int i = 0; i < newForm.length; i++) { - if (newForm[i] < 0) newTln[i] = shpTln[i]; - else if (newForm[i] >= 0) newTln[i] = tln[newForm[i]]; + int[] shpTln = this.newStridesFor(shape); + int[] newTln = new int[newForm.length]; + for (int i = 0; i < newForm.length; i++) { + if (newForm[i] < 0) newTln[i] = shpTln[i]; + else if (newForm[i] >= 0) newTln[i] = tln[newForm[i]]; } - return newTln; + return newTln; } } @@ -150,26 +155,26 @@ * @return The layout of the underlying data array of a tensor. 
*/ default Layout getLayout() { - if ( !this.isCompact() ) // Non-compact tensors have at least 1 step/spread greater than 1 AND at least 1 offset greater than 0! - return Layout.UNSPECIFIC; + if ( !this.isCompact() ) // Non-compact tensors have at least 1 step/spread greater than 1 AND at least 1 offset greater than 0! + return Layout.UNSPECIFIC; else { - int[] stridesRM = Layout.ROW_MAJOR.newStridesFor(this.shape()); - boolean hasRMIndices = Arrays.equals(stridesRM, indicesMap()); - boolean isRM = (Arrays.equals(stridesRM, strides()) && hasRMIndices); + int[] stridesRM = Layout.ROW_MAJOR.newStridesFor(this.shape()); + boolean hasRMIndices = Arrays.equals(stridesRM, indicesMap()); + boolean isRM = (Arrays.equals(stridesRM, strides()) && hasRMIndices); - int[] stridesCM = Layout.COLUMN_MAJOR.newStridesFor(this.shape()); - boolean isCM = (Arrays.equals(stridesCM, strides()) && hasRMIndices); + int[] stridesCM = Layout.COLUMN_MAJOR.newStridesFor(this.shape()); + boolean isCM = (Arrays.equals(stridesCM, strides()) && hasRMIndices); - if ( isRM && isCM ) return Layout.SYMMETRIC; - if ( isRM ) return Layout.ROW_MAJOR; - if ( isCM ) return Layout.COLUMN_MAJOR; + if ( isRM && isCM ) return Layout.SYMMETRIC; + if ( isRM ) return Layout.ROW_MAJOR; + if ( isCM ) return Layout.COLUMN_MAJOR; } - return Layout.UNSPECIFIC; + return Layout.UNSPECIFIC; } - default List<NDTrait> getTraits() { return NDTrait.traitsOf(this); } + default List<NDTrait> getTraits() { return NDTrait.traitsOf(this); } - default boolean has( NDTrait trait ) { return NDTrait.traitsOf(this).contains(trait); } + default boolean has( NDTrait trait ) { return NDTrait.traitsOf(this).contains(trait); } /** * This method returns the number of axis of @@ -180,7 +185,7 @@ */ int rank(); - default int size() { return Arrays.stream(shape()).reduce(1, (a, b) -> a * b); } + default int size() { return Arrays.stream(shape()).reduce(1, (a, b) -> a * b); } /** * This method returns an array of axis sizes. @@ -328,15 +333,15 @@ * @return An array of flattened arrays which define this nd-configuration in a compact manner. 
*/ default int[] asInlineArray() { - int rank = rank(); - int[] inline = new int[rank * 5]; + int rank = rank(); + int[] inline = new int[rank * 5]; //config format: [ shape | translation | indicesMap | offsets | spreads ] - System.arraycopy(shape(), 0, inline, rank * 0, rank); //=> SHAPE - System.arraycopy(strides(), 0, inline, rank * 1, rank); //=> TRANSLATION (translates n-dimensional indices to an index) - System.arraycopy(indicesMap(), 0, inline, rank * 2, rank); //=> INDICES MAP (translates scalar to n-dimensional index) - System.arraycopy(offset(), 0, inline, rank * 3, rank); //=> SPREAD / STRIDES (step size for dimensions in underlying parent tensor) - System.arraycopy(spread(), 0, inline, rank * 4, rank); //=> OFFSET (nd-position inside underlying parent tensor) - return inline; + System.arraycopy(shape(), 0, inline, rank * 0, rank); //=> SHAPE + System.arraycopy(strides(), 0, inline, rank * 1, rank); //=> TRANSLATION (translates n-dimensional indices to an index) + System.arraycopy(indicesMap(), 0, inline, rank * 2, rank); //=> INDICES MAP (translates scalar to n-dimensional index) + System.arraycopy(offset(), 0, inline, rank * 3, rank); //=> SPREAD / STRIDES (step size for dimensions in underlying parent tensor) + System.arraycopy(spread(), 0, inline, rank * 4, rank); //=> OFFSET (nd-position inside underlying parent tensor) + return inline; } int hashCode(); @@ -369,12 +374,12 @@ * @return The truth value determining if this configuration is not modeling more complex indices like permuted views or slices... */ default boolean isSimple() { - int[] simpleTranslation = this.getLayout().newStridesFor(this.shape()); - return Arrays.equals(this.strides(), simpleTranslation) + int[] simpleTranslation = this.getLayout().newStridesFor(this.shape()); + return Arrays.equals(this.strides(), simpleTranslation) && - Arrays.equals(this.indicesMap(), simpleTranslation) + Arrays.equals(this.indicesMap(), simpleTranslation) && - isCompact(); + isCompact(); } /** @@ -385,22 +390,22 @@ * @return The truth value determining if this configuration has no offset and spread/steps larger than 1. */ default boolean isCompact() { - return - IntStream.range(0, this.rank()).allMatch( i -> this.spread(i) == 1 || this.spread(i) == 0 ) + return + IntStream.range(0, this.rank()).allMatch( i -> this.spread(i) == 1 || this.spread(i) == 0 ) && - IntStream.range(0, this.rank()).allMatch(i -> this.offset(i) == 0); + IntStream.range(0, this.rank()).allMatch(i -> this.offset(i) == 0); } /** * @return The truth value determining if this {@link NDConfiguration} * represents virtual tensors (see {@link Tensor#isVirtual()}). */ - default boolean isVirtual() { return false; } + default boolean isVirtual() { return false; } /** * @return A function which can map tensor indices to the indices of its data array. */ - default IndexToIndexFunction getIndexToIndexAccessPattern() { return this::indexOfIndex; } + default IndexToIndexFunction getIndexToIndexAccessPattern() { return this::indexOfIndex; } /** * Implementations of this are produced and returned by the {@link #getIndexToIndexAccessPattern()} @@ -416,62 +421,62 @@ * for nd-configuration related operations like reshaping, * incrementing or decrementing index arrays... 
*/ - class Utility { + class Utility { public static int[] rearrange(int[] array, int[] pointers) { - int[] newShp = new int[pointers.length]; - for (int i = 0; i < pointers.length; i++) { - if (pointers[i] < 0) newShp[i] = Math.abs(pointers[i]); - else if (pointers[i] >= 0) newShp[i] = array[pointers[i]]; + int[] newShp = new int[pointers.length]; + for (int i = 0; i < pointers.length; i++) { + if (pointers[i] < 0) newShp[i] = Math.abs(pointers[i]); + else if (pointers[i] >= 0) newShp[i] = array[pointers[i]]; } - return newShp; + return newShp; } public static void increment(int[] indices, int[] shape) { - int i = shape.length - 1; - while (i >= 0 && i < shape.length) i = _incrementAt(i, indices, shape); - } + int i = shape.length - 1; + while (i >= 0 && i < shape.length) i = _incrementAt(i, indices, shape); + } private static int _incrementAt(int i, int[] indices, int[] shape) { - if (indices[i] < shape[i]) { - indices[i]++; - if (indices[i] == shape[i]) { - indices[i] = 0; - i--; - } else i = -1; - } else i--; - return i; + if (indices[i] < shape[i]) { + indices[i]++; + if (indices[i] == shape[i]) { + indices[i] = 0; + i--; + } else i = -1; + } else i--; + return i; } public static void decrement(int[] indices, int[] shape) { - int i = shape.length - 1; - while (i >= 0 && i < shape.length) i = _decrementAt(i, indices, shape); - } + int i = shape.length - 1; + while (i >= 0 && i < shape.length) i = _decrementAt(i, indices, shape); + } private static int _decrementAt(int i, int[] indices, int[] shape) { - if (indices[i] >= 0) { - indices[i]--; - if (indices[i] == -1) { - indices[i] = shape[i] - 1; - i--; - } else i = -1; - } else i--; - return i; + if (indices[i] >= 0) { + indices[i]--; + if (indices[i] == -1) { + indices[i] = shape[i] - 1; + i--; + } else i = -1; + } else i--; + return i; } public static int sizeOfShape( int[] shape ) { - int size = 1; - for (int i : shape) size *= i; - return size; + int size = 1; + for (int i : shape) size *= i; + return size; } } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.ndim.config/NDTrait.html b/docs/coverage/test/html/neureka.ndim.config/NDTrait.html index 8cc079a83..82f000eb4 100644 --- a/docs/coverage/test/html/neureka.ndim.config/NDTrait.html +++ b/docs/coverage/test/html/neureka.ndim.config/NDTrait.html @@ -1 +1 @@ -NDTrait

NDTrait

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total36 of 40191%28 of 7663%2752159014
_isOffsetMatrix(NDConfiguration)83380%4660%461801
_isMatrix(NDConfiguration)53587%4450%450701
lambda$_isCompact$0(NDConfiguration, int)5861%3125%230101
_isContinuousMatrix(NDConfiguration)43288%3562%350601
_isRM(NDConfiguration)33892%3770%360501
_isCM(NDConfiguration)33892%3770%360501
_last2DimensionsAreNotPermuted(NDConfiguration)33090%2466%240501
_rightSpreadPadding(NDConfiguration)31583%2250%230401
_isCompact(NDConfiguration)11995%2250%230301
lambda$_isCompact$1(NDConfiguration, int)1787%1150%120101
static {...}45100%n/a010701
traitsOf(NDConfiguration)32100%4100%030401
_isSimple(NDConfiguration)23100%1583%140401
NDTrait(String, int, Predicate)10100%n/a010101
\ No newline at end of file +NDTrait

NDTrait

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total36 of 42691%28 of 7663%2752159014
_isOffsetMatrix(NDConfiguration)83380%4660%461801
_isMatrix(NDConfiguration)53587%4450%450701
lambda$_isCompact$0(NDConfiguration, int)5861%3125%230101
_isContinuousMatrix(NDConfiguration)43288%3562%350601
_isRM(NDConfiguration)33892%3770%360501
_isCM(NDConfiguration)33892%3770%360501
_last2DimensionsAreNotPermuted(NDConfiguration)33090%2466%240501
_rightSpreadPadding(NDConfiguration)31583%2250%230401
_isCompact(NDConfiguration)11995%2250%230301
lambda$_isCompact$1(NDConfiguration, int)1787%1150%120101
static {...}70100%n/a010701
traitsOf(NDConfiguration)32100%4100%030401
_isSimple(NDConfiguration)23100%1583%140401
NDTrait(String, int, Predicate)10100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.ndim.config/NDTrait.java.html b/docs/coverage/test/html/neureka.ndim.config/NDTrait.java.html index 1860c0d64..92620d2a7 100644 --- a/docs/coverage/test/html/neureka.ndim.config/NDTrait.java.html +++ b/docs/coverage/test/html/neureka.ndim.config/NDTrait.java.html @@ -139,4 +139,4 @@ return spreadCol == 1 && spreadRow == 1; } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.ndim.config/NoOpNDConfig.html b/docs/coverage/test/html/neureka.ndim.config/NoOpNDConfig.html new file mode 100644 index 000000000..7c914e14d --- /dev/null +++ b/docs/coverage/test/html/neureka.ndim.config/NoOpNDConfig.html @@ -0,0 +1 @@ +NoOpNDConfig

NoOpNDConfig

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total38 of 4311%0 of 0n/a161716171617
shape()30%n/a111111
indicesMap()30%n/a111111
strides()30%n/a111111
spread()30%n/a111111
offset()30%n/a111111
indicesOfIndex(int)30%n/a111111
rank()20%n/a111111
shape(int)20%n/a111111
indicesMap(int)20%n/a111111
strides(int)20%n/a111111
spread(int)20%n/a111111
offset(int)20%n/a111111
indexOfIndex(int)20%n/a111111
indexOfIndices(int[])20%n/a111111
equals(NDConfiguration)20%n/a111111
newReshaped(int[])20%n/a111111
static {...}5100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.ndim.config/NoOpNDConfig.java.html b/docs/coverage/test/html/neureka.ndim.config/NoOpNDConfig.java.html new file mode 100644 index 000000000..e5f72e9eb --- /dev/null +++ b/docs/coverage/test/html/neureka.ndim.config/NoOpNDConfig.java.html @@ -0,0 +1,90 @@ +NoOpNDConfig.java

NoOpNDConfig.java

package neureka.ndim.config;
+
+final class NoOpNDConfig implements NDConfiguration
+{
+
+    static final NoOpNDConfig INSTANCE = new NoOpNDConfig();
+
+    private NoOpNDConfig() {}
+
+    @Override
+    public int rank() {
+        return 0;
+    }
+
+    @Override
+    public int[] shape() {
+        return new int[0];
+    }
+
+    @Override
+    public int shape(int i) {
+        return 0;
+    }
+
+    @Override
+    public int[] indicesMap() {
+        return new int[0];
+    }
+
+    @Override
+    public int indicesMap(int i) {
+        return 0;
+    }
+
+    @Override
+    public int[] strides() {
+        return new int[0];
+    }
+
+    @Override
+    public int strides(int i) {
+        return 0;
+    }
+
+    @Override
+    public int[] spread() {
+        return new int[0];
+    }
+
+    @Override
+    public int spread(int i) {
+        return 0;
+    }
+
+    @Override
+    public int[] offset() {
+        return new int[0];
+    }
+
+    @Override
+    public int offset(int i) {
+        return 0;
+    }
+
+    @Override
+    public int indexOfIndex(int index) {
+        return 0;
+    }
+
+    @Override
+    public int[] indicesOfIndex(int index) {
+        return new int[0];
+    }
+
+    @Override
+    public int indexOfIndices(int[] indices) {
+        return 0;
+    }
+
+    @Override
+    public boolean equals(NDConfiguration ndc) {
+        return false;
+    }
+
+    @Override
+    public NDConfiguration newReshaped(int[] newForm) {
+        return this;
+    }
+}
+
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.ndim.config/index.html b/docs/coverage/test/html/neureka.ndim.config/index.html index 43f0d1cba..09aa67162 100644 --- a/docs/coverage/test/html/neureka.ndim.config/index.html +++ b/docs/coverage/test/html/neureka.ndim.config/index.html @@ -1 +1 @@ -neureka.ndim.config

neureka.ndim.config

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total105 of 1,54593%49 of 25280%501771125125105
AbstractNDC5153491%117587%1156510001301
NDTrait3636591%284863%275215901401
NDConfiguration.Layout817795%52784%5201290401
NDConfiguration520397%34100%13013211301
NDConfiguration.Utility516196%51979%6193311701
\ No newline at end of file +neureka.ndim.config

neureka.ndim.config

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total143 of 1,63291%49 of 25280%6619527269186906
AbstractNDC5153491%117587%1156510001301
NoOpNDConfig38511%n/a16171617161701
NDTrait3639091%284863%275215901401
NDConfiguration.Layout819496%52784%5201290401
NDConfiguration520597%34100%13113311401
NDConfiguration.Utility516196%51979%6193311701
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.ndim.config/index.source.html b/docs/coverage/test/html/neureka.ndim.config/index.source.html index 17782046b..5e9a149ad 100644 --- a/docs/coverage/test/html/neureka.ndim.config/index.source.html +++ b/docs/coverage/test/html/neureka.ndim.config/index.source.html @@ -1 +1 @@ -neureka.ndim.config

neureka.ndim.config

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total105 of 1,54593%49 of 25280%501771125125105
AbstractNDC.java5153491%117587%1156510001301
NDTrait.java3636591%284863%275215901401
NDConfiguration.java1854196%108088%126959222403
\ No newline at end of file +neureka.ndim.config

neureka.ndim.config

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total143 of 1,63291%49 of 25280%6619527269186906
AbstractNDC.java5153491%117587%1156510001301
NoOpNDConfig.java38511%n/a16171617161701
NDTrait.java3639091%284863%275215901401
NDConfiguration.java1856096%108088%127059322503
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.ndim.iterator.types.permuted/Permuted2DCIterator.html b/docs/coverage/test/html/neureka.ndim.iterator.types.permuted/Permuted2DCIterator.html index f81393047..5ce8b87e6 100644 --- a/docs/coverage/test/html/neureka.ndim.iterator.types.permuted/Permuted2DCIterator.html +++ b/docs/coverage/test/html/neureka.ndim.iterator.types.permuted/Permuted2DCIterator.html @@ -1 +1 @@ -Permuted2DCIterator

Permuted2DCIterator

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total18 of 10883%4 of 850%41242328
set(int, int)100%20%223311
get(int)80%20%221111
decrement()23100%2100%020501
increment()21100%2100%020501
Permuted2DCIterator(Permuted2DConfiguration)15100%n/a010401
get()13100%n/a010101
set(int[])11100%n/a010301
i()7100%n/a010101
\ No newline at end of file +Permuted2DCIterator

Permuted2DCIterator

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total18 of 10883%4 of 850%41242328
set(int, int)100%20%223311
get(int)80%20%221111
decrement()23100%2100%020501
increment()21100%2100%020501
Permuted2DCIterator(Permuted2DConfiguration)15100%n/a010401
get()13100%n/a010101
set(int[])11100%n/a010301
i()7100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.ndim.iterator.types.permuted/Permuted2DCIterator.java.html b/docs/coverage/test/html/neureka.ndim.iterator.types.permuted/Permuted2DCIterator.java.html index b9b8518a2..f43fff6b1 100644 --- a/docs/coverage/test/html/neureka.ndim.iterator.types.permuted/Permuted2DCIterator.java.html +++ b/docs/coverage/test/html/neureka.ndim.iterator.types.permuted/Permuted2DCIterator.java.html @@ -52,4 +52,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.ndim.iterator.types.permuted/Permuted3DCIterator.html b/docs/coverage/test/html/neureka.ndim.iterator.types.permuted/Permuted3DCIterator.html index 4c41a623c..490c6ad64 100644 --- a/docs/coverage/test/html/neureka.ndim.iterator.types.permuted/Permuted3DCIterator.html +++ b/docs/coverage/test/html/neureka.ndim.iterator.types.permuted/Permuted3DCIterator.html @@ -1 +1 @@ -Permuted3DCIterator

Permuted3DCIterator

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 166100%0 of 16100%01603208
decrement()39100%4100%030801
increment()35100%4100%030801
Permuted3DCIterator(Permuted3DConfiguration)18100%n/a010501
get()18100%n/a010101
set(int, int)17100%4100%030401
set(int[])16100%n/a010401
get(int)14100%4100%030101
i()9100%n/a010101
\ No newline at end of file +Permuted3DCIterator

Permuted3DCIterator

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 166100%0 of 16100%01603208
decrement()39100%4100%030801
increment()35100%4100%030801
Permuted3DCIterator(Permuted3DConfiguration)18100%n/a010501
get()18100%n/a010101
set(int, int)17100%4100%030401
set(int[])16100%n/a010401
get(int)14100%4100%030101
i()9100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.ndim.iterator.types.permuted/Permuted3DCIterator.java.html b/docs/coverage/test/html/neureka.ndim.iterator.types.permuted/Permuted3DCIterator.java.html index 33c40ba55..6fb693b97 100644 --- a/docs/coverage/test/html/neureka.ndim.iterator.types.permuted/Permuted3DCIterator.java.html +++ b/docs/coverage/test/html/neureka.ndim.iterator.types.permuted/Permuted3DCIterator.java.html @@ -64,4 +64,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.ndim.iterator.types.permuted/index.html b/docs/coverage/test/html/neureka.ndim.iterator.types.permuted/index.html index bbd18258f..8237e38aa 100644 --- a/docs/coverage/test/html/neureka.ndim.iterator.types.permuted/index.html +++ b/docs/coverage/test/html/neureka.ndim.iterator.types.permuted/index.html @@ -1 +1 @@ -neureka.ndim.iterator.types.permuted

neureka.ndim.iterator.types.permuted

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total18 of 27493%4 of 2483%42845521602
Permuted2DCIterator189083%4450%4124232801
Permuted3DCIterator166100%16100%0160320801
\ No newline at end of file +neureka.ndim.iterator.types.permuted

neureka.ndim.iterator.types.permuted

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total18 of 27493%4 of 2483%42845521602
Permuted2DCIterator189083%4450%4124232801
Permuted3DCIterator166100%16100%0160320801
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.ndim.iterator.types.permuted/index.source.html b/docs/coverage/test/html/neureka.ndim.iterator.types.permuted/index.source.html index 9475c8211..8ed5d2c54 100644 --- a/docs/coverage/test/html/neureka.ndim.iterator.types.permuted/index.source.html +++ b/docs/coverage/test/html/neureka.ndim.iterator.types.permuted/index.source.html @@ -1 +1 @@ -neureka.ndim.iterator.types.permuted

neureka.ndim.iterator.types.permuted

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total18 of 27493%4 of 2483%42845521602
Permuted2DCIterator.java189083%4450%4124232801
Permuted3DCIterator.java166100%16100%0160320801
\ No newline at end of file +neureka.ndim.iterator.types.permuted

neureka.ndim.iterator.types.permuted

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total18 of 27493%4 of 2483%42845521602
Permuted2DCIterator.java189083%4450%4124232801
Permuted3DCIterator.java166100%16100%0160320801
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.ndim.iterator.types.simple/Simple1DCIterator.html b/docs/coverage/test/html/neureka.ndim.iterator.types.simple/Simple1DCIterator.html index a17a219bb..d64c892bd 100644 --- a/docs/coverage/test/html/neureka.ndim.iterator.types.simple/Simple1DCIterator.html +++ b/docs/coverage/test/html/neureka.ndim.iterator.types.simple/Simple1DCIterator.html @@ -1 +1 @@ -Simple1DCIterator

Simple1DCIterator

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 49100%0 of 0n/a080908
Simple1DCIterator(Simple1DConfiguration)9100%n/a010201
get()8100%n/a010101
increment()7100%n/a010101
decrement()7100%n/a010101
set(int[])6100%n/a010101
i()5100%n/a010101
set(int, int)4100%n/a010101
get(int)3100%n/a010101
\ No newline at end of file +Simple1DCIterator

Simple1DCIterator

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 49100%0 of 0n/a080908
Simple1DCIterator(Simple1DConfiguration)9100%n/a010201
get()8100%n/a010101
increment()7100%n/a010101
decrement()7100%n/a010101
set(int[])6100%n/a010101
i()5100%n/a010101
set(int, int)4100%n/a010101
get(int)3100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.ndim.iterator.types.simple/Simple1DCIterator.java.html b/docs/coverage/test/html/neureka.ndim.iterator.types.simple/Simple1DCIterator.java.html index 763a62452..90ea01e6d 100644 --- a/docs/coverage/test/html/neureka.ndim.iterator.types.simple/Simple1DCIterator.java.html +++ b/docs/coverage/test/html/neureka.ndim.iterator.types.simple/Simple1DCIterator.java.html @@ -35,4 +35,4 @@ @Override public final void set( int[] indices) { _d1 = indices[0]; } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.ndim.iterator.types.simple/Simple2DCIterator.html b/docs/coverage/test/html/neureka.ndim.iterator.types.simple/Simple2DCIterator.html index de4428b76..2c4a291bb 100644 --- a/docs/coverage/test/html/neureka.ndim.iterator.types.simple/Simple2DCIterator.html +++ b/docs/coverage/test/html/neureka.ndim.iterator.types.simple/Simple2DCIterator.html @@ -1 +1 @@ -Simple2DCIterator

Simple2DCIterator

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 106100%0 of 8100%01202308
decrement()23100%2100%020501
increment()21100%2100%020501
Simple2DCIterator(Simple2DConfiguration)13100%n/a010401
get()13100%n/a010101
set(int[])11100%n/a010301
set(int, int)10100%2100%020301
get(int)8100%2100%020101
i()7100%n/a010101
\ No newline at end of file +Simple2DCIterator

Simple2DCIterator

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 106100%0 of 8100%01202308
decrement()23100%2100%020501
increment()21100%2100%020501
Simple2DCIterator(Simple2DConfiguration)13100%n/a010401
get()13100%n/a010101
set(int[])11100%n/a010301
set(int, int)10100%2100%020301
get(int)8100%2100%020101
i()7100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.ndim.iterator.types.simple/Simple2DCIterator.java.html b/docs/coverage/test/html/neureka.ndim.iterator.types.simple/Simple2DCIterator.java.html index 77e60c0aa..64174c47f 100644 --- a/docs/coverage/test/html/neureka.ndim.iterator.types.simple/Simple2DCIterator.java.html +++ b/docs/coverage/test/html/neureka.ndim.iterator.types.simple/Simple2DCIterator.java.html @@ -52,4 +52,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.ndim.iterator.types.simple/Simple3DCIterator.html b/docs/coverage/test/html/neureka.ndim.iterator.types.simple/Simple3DCIterator.html index d763a773e..501586bfe 100644 --- a/docs/coverage/test/html/neureka.ndim.iterator.types.simple/Simple3DCIterator.html +++ b/docs/coverage/test/html/neureka.ndim.iterator.types.simple/Simple3DCIterator.html @@ -1 +1 @@ -Simple3DCIterator

Simple3DCIterator

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 164100%0 of 16100%01603208
decrement()39100%4100%030801
increment()35100%4100%030801
get()18100%n/a010101
set(int, int)17100%4100%030401
Simple3DCIterator(Simple3DConfiguration)16100%n/a010501
set(int[])16100%n/a010401
get(int)14100%4100%030101
i()9100%n/a010101
\ No newline at end of file +Simple3DCIterator

Simple3DCIterator

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 164100%0 of 16100%01603208
decrement()39100%4100%030801
increment()35100%4100%030801
get()18100%n/a010101
set(int, int)17100%4100%030401
Simple3DCIterator(Simple3DConfiguration)16100%n/a010501
set(int[])16100%n/a010401
get(int)14100%4100%030101
i()9100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.ndim.iterator.types.simple/Simple3DCIterator.java.html b/docs/coverage/test/html/neureka.ndim.iterator.types.simple/Simple3DCIterator.java.html index 2476777e4..e63d1b035 100644 --- a/docs/coverage/test/html/neureka.ndim.iterator.types.simple/Simple3DCIterator.java.html +++ b/docs/coverage/test/html/neureka.ndim.iterator.types.simple/Simple3DCIterator.java.html @@ -70,4 +70,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.ndim.iterator.types.simple/index.html b/docs/coverage/test/html/neureka.ndim.iterator.types.simple/index.html index e560eb69e..24264422f 100644 --- a/docs/coverage/test/html/neureka.ndim.iterator.types.simple/index.html +++ b/docs/coverage/test/html/neureka.ndim.iterator.types.simple/index.html @@ -1 +1 @@ -neureka.ndim.iterator.types.simple

neureka.ndim.iterator.types.simple

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total0 of 319100%0 of 24100%03606402403
Simple3DCIterator164100%16100%0160320801
Simple2DCIterator106100%8100%0120230801
Simple1DCIterator49100%n/a08090801
\ No newline at end of file +neureka.ndim.iterator.types.simple

neureka.ndim.iterator.types.simple

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total0 of 319100%0 of 24100%03606402403
Simple3DCIterator164100%16100%0160320801
Simple2DCIterator106100%8100%0120230801
Simple1DCIterator49100%n/a08090801
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.ndim.iterator.types.simple/index.source.html b/docs/coverage/test/html/neureka.ndim.iterator.types.simple/index.source.html index b095b61bf..ffab7bdd6 100644 --- a/docs/coverage/test/html/neureka.ndim.iterator.types.simple/index.source.html +++ b/docs/coverage/test/html/neureka.ndim.iterator.types.simple/index.source.html @@ -1 +1 @@ -neureka.ndim.iterator.types.simple

neureka.ndim.iterator.types.simple

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total0 of 319100%0 of 24100%03606402403
Simple3DCIterator.java164100%16100%0160320801
Simple2DCIterator.java106100%8100%0120230801
Simple1DCIterator.java49100%n/a08090801
\ No newline at end of file +neureka.ndim.iterator.types.simple

neureka.ndim.iterator.types.simple

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total0 of 319100%0 of 24100%03606402403
Simple3DCIterator.java164100%16100%0160320801
Simple2DCIterator.java106100%8100%0120230801
Simple1DCIterator.java49100%n/a08090801
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.ndim.iterator.types.sliced/Sliced1DCIterator.html b/docs/coverage/test/html/neureka.ndim.iterator.types.sliced/Sliced1DCIterator.html index 414104238..ffa65d3ac 100644 --- a/docs/coverage/test/html/neureka.ndim.iterator.types.sliced/Sliced1DCIterator.html +++ b/docs/coverage/test/html/neureka.ndim.iterator.types.sliced/Sliced1DCIterator.html @@ -1 +1 @@ -Sliced1DCIterator

Sliced1DCIterator

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total7 of 5887%0 of 0n/a282928
set(int, int)40%n/a111111
get(int)30%n/a111111
Sliced1DCIterator(Sliced1DConfiguration)18100%n/a010201
get()8100%n/a010101
increment()7100%n/a010101
decrement()7100%n/a010101
set(int[])6100%n/a010101
i()5100%n/a010101
\ No newline at end of file +Sliced1DCIterator

Sliced1DCIterator

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total7 of 5887%0 of 0n/a282928
set(int, int)40%n/a111111
get(int)30%n/a111111
Sliced1DCIterator(Sliced1DConfiguration)18100%n/a010201
get()8100%n/a010101
increment()7100%n/a010101
decrement()7100%n/a010101
set(int[])6100%n/a010101
i()5100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.ndim.iterator.types.sliced/Sliced1DCIterator.java.html b/docs/coverage/test/html/neureka.ndim.iterator.types.sliced/Sliced1DCIterator.java.html index 80f5ddc52..26b75e729 100644 --- a/docs/coverage/test/html/neureka.ndim.iterator.types.sliced/Sliced1DCIterator.java.html +++ b/docs/coverage/test/html/neureka.ndim.iterator.types.sliced/Sliced1DCIterator.java.html @@ -33,4 +33,4 @@ @Override public final void set( int[] indices ) { _d1 = indices[0]; } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.ndim.iterator.types.sliced/Sliced2DCIterator.html b/docs/coverage/test/html/neureka.ndim.iterator.types.sliced/Sliced2DCIterator.html index 177bbf769..0976be399 100644 --- a/docs/coverage/test/html/neureka.ndim.iterator.types.sliced/Sliced2DCIterator.html +++ b/docs/coverage/test/html/neureka.ndim.iterator.types.sliced/Sliced2DCIterator.html @@ -1 +1 @@ -Sliced2DCIterator

Sliced2DCIterator

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 112100%0 of 8100%01202308
decrement()23100%2100%020501
increment()21100%2100%020501
Sliced2DCIterator(Sliced2DConfiguration)19100%n/a010401
get()13100%n/a010101
set(int[])11100%n/a010301
set(int, int)10100%2100%020301
get(int)8100%2100%020101
i()7100%n/a010101
\ No newline at end of file +Sliced2DCIterator

Sliced2DCIterator

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 112100%0 of 8100%01202308
decrement()23100%2100%020501
increment()21100%2100%020501
Sliced2DCIterator(Sliced2DConfiguration)19100%n/a010401
get()13100%n/a010101
set(int[])11100%n/a010301
set(int, int)10100%2100%020301
get(int)8100%2100%020101
i()7100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.ndim.iterator.types.sliced/Sliced2DCIterator.java.html b/docs/coverage/test/html/neureka.ndim.iterator.types.sliced/Sliced2DCIterator.java.html index bd5c34f7a..39d9f9c94 100644 --- a/docs/coverage/test/html/neureka.ndim.iterator.types.sliced/Sliced2DCIterator.java.html +++ b/docs/coverage/test/html/neureka.ndim.iterator.types.sliced/Sliced2DCIterator.java.html @@ -53,4 +53,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.ndim.iterator.types.sliced/Sliced3DCIterator.html b/docs/coverage/test/html/neureka.ndim.iterator.types.sliced/Sliced3DCIterator.html index fd0b21c31..8d21ae677 100644 --- a/docs/coverage/test/html/neureka.ndim.iterator.types.sliced/Sliced3DCIterator.html +++ b/docs/coverage/test/html/neureka.ndim.iterator.types.sliced/Sliced3DCIterator.html @@ -1 +1 @@ -Sliced3DCIterator

Sliced3DCIterator

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total31 of 17081%8 of 1650%61653228
set(int, int)170%40%334411
get(int)140%40%331111
decrement()39100%4100%030801
increment()35100%4100%030801
Sliced3DCIterator(Sliced3DConfiguration)22100%n/a010501
get()18100%n/a010101
set(int[])16100%n/a010401
i()9100%n/a010101
\ No newline at end of file +Sliced3DCIterator

Sliced3DCIterator

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total31 of 17081%8 of 1650%61653228
set(int, int)170%40%334411
get(int)140%40%331111
decrement()39100%4100%030801
increment()35100%4100%030801
Sliced3DCIterator(Sliced3DConfiguration)22100%n/a010501
get()18100%n/a010101
set(int[])16100%n/a010401
i()9100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.ndim.iterator.types.sliced/Sliced3DCIterator.java.html b/docs/coverage/test/html/neureka.ndim.iterator.types.sliced/Sliced3DCIterator.java.html index 8113f8b93..29ad3da12 100644 --- a/docs/coverage/test/html/neureka.ndim.iterator.types.sliced/Sliced3DCIterator.java.html +++ b/docs/coverage/test/html/neureka.ndim.iterator.types.sliced/Sliced3DCIterator.java.html @@ -64,4 +64,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.ndim.iterator.types.sliced/SlicedNDIterator.html b/docs/coverage/test/html/neureka.ndim.iterator.types.sliced/SlicedNDIterator.html index 526399afe..8e19d8412 100644 --- a/docs/coverage/test/html/neureka.ndim.iterator.types.sliced/SlicedNDIterator.html +++ b/docs/coverage/test/html/neureka.ndim.iterator.types.sliced/SlicedNDIterator.html @@ -1 +1 @@ -SlicedNDIterator

SlicedNDIterator

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total3 of 6995%0 of 0n/a111115111
shape()30%n/a111111
SlicedNDIterator(NDConfiguration)16100%n/a010501
set(int[])9100%n/a010101
increment()6100%n/a010101
decrement()6100%n/a010101
i()6100%n/a010101
set(int, int)6100%n/a010101
shape(int)5100%n/a010101
get(int)5100%n/a010101
rank()4100%n/a010101
get()3100%n/a010101
\ No newline at end of file +SlicedNDIterator

SlicedNDIterator

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total3 of 6995%0 of 0n/a111115111
shape()30%n/a111111
SlicedNDIterator(NDConfiguration)16100%n/a010501
set(int[])9100%n/a010101
increment()6100%n/a010101
decrement()6100%n/a010101
i()6100%n/a010101
set(int, int)6100%n/a010101
shape(int)5100%n/a010101
get(int)5100%n/a010101
rank()4100%n/a010101
get()3100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.ndim.iterator.types.sliced/SlicedNDIterator.java.html b/docs/coverage/test/html/neureka.ndim.iterator.types.sliced/SlicedNDIterator.java.html index 53d432fef..471e7169d 100644 --- a/docs/coverage/test/html/neureka.ndim.iterator.types.sliced/SlicedNDIterator.java.html +++ b/docs/coverage/test/html/neureka.ndim.iterator.types.sliced/SlicedNDIterator.java.html @@ -48,4 +48,4 @@ } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.ndim.iterator.types.sliced/index.html b/docs/coverage/test/html/neureka.ndim.iterator.types.sliced/index.html index d0e7d821c..1544254bb 100644 --- a/docs/coverage/test/html/neureka.ndim.iterator.types.sliced/index.html +++ b/docs/coverage/test/html/neureka.ndim.iterator.types.sliced/index.html @@ -1 +1 @@ -neureka.ndim.iterator.types.sliced

neureka.ndim.iterator.types.sliced

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total41 of 40989%8 of 2466%94787953504
Sliced3DCIterator3113981%8850%6165322801
Sliced1DCIterator75187%n/a28292801
SlicedNDIterator36695%n/a11111511101
Sliced2DCIterator112100%8100%0120230801
\ No newline at end of file +neureka.ndim.iterator.types.sliced

neureka.ndim.iterator.types.sliced

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total41 of 40989%8 of 2466%94787953504
Sliced3DCIterator3113981%8850%6165322801
Sliced1DCIterator75187%n/a28292801
SlicedNDIterator36695%n/a11111511101
Sliced2DCIterator112100%8100%0120230801
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.ndim.iterator.types.sliced/index.source.html b/docs/coverage/test/html/neureka.ndim.iterator.types.sliced/index.source.html index ad9eb6a16..50160bfaf 100644 --- a/docs/coverage/test/html/neureka.ndim.iterator.types.sliced/index.source.html +++ b/docs/coverage/test/html/neureka.ndim.iterator.types.sliced/index.source.html @@ -1 +1 @@ -neureka.ndim.iterator.types.sliced

neureka.ndim.iterator.types.sliced

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total41 of 40989%8 of 2466%94787953504
Sliced3DCIterator.java3113981%8850%6165322801
Sliced1DCIterator.java75187%n/a28292801
SlicedNDIterator.java36695%n/a11111511101
Sliced2DCIterator.java112100%8100%0120230801
\ No newline at end of file +neureka.ndim.iterator.types.sliced

neureka.ndim.iterator.types.sliced

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total41 of 40989%8 of 2466%94787953504
Sliced3DCIterator.java3113981%8850%6165322801
Sliced1DCIterator.java75187%n/a28292801
SlicedNDIterator.java36695%n/a11111511101
Sliced2DCIterator.java112100%8100%0120230801
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.ndim.iterator.types.virtual/VirtualNDIterator.html b/docs/coverage/test/html/neureka.ndim.iterator.types.virtual/VirtualNDIterator.html index 1942dd00c..73fcaf206 100644 --- a/docs/coverage/test/html/neureka.ndim.iterator.types.virtual/VirtualNDIterator.html +++ b/docs/coverage/test/html/neureka.ndim.iterator.types.virtual/VirtualNDIterator.html @@ -1 +1 @@ -VirtualNDIterator

VirtualNDIterator

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total25 of 3528%0 of 0n/a711711711
shape(int)50%n/a111111
get(int)50%n/a111111
get()50%n/a111111
shape()40%n/a111111
rank()40%n/a111111
decrement()10%n/a111111
set(int, int)10%n/a111111
VirtualNDIterator(VirtualNDConfiguration)6100%n/a010101
i()2100%n/a010101
increment()1100%n/a010101
set(int[])1100%n/a010101
\ No newline at end of file +VirtualNDIterator

VirtualNDIterator

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total25 of 3528%0 of 0n/a711711711
shape(int)50%n/a111111
get(int)50%n/a111111
get()50%n/a111111
shape()40%n/a111111
rank()40%n/a111111
decrement()10%n/a111111
set(int, int)10%n/a111111
VirtualNDIterator(VirtualNDConfiguration)6100%n/a010101
i()2100%n/a010101
increment()1100%n/a010101
set(int[])1100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.ndim.iterator.types.virtual/VirtualNDIterator.java.html b/docs/coverage/test/html/neureka.ndim.iterator.types.virtual/VirtualNDIterator.java.html index 6864e67ec..cbc05b49a 100644 --- a/docs/coverage/test/html/neureka.ndim.iterator.types.virtual/VirtualNDIterator.java.html +++ b/docs/coverage/test/html/neureka.ndim.iterator.types.virtual/VirtualNDIterator.java.html @@ -61,4 +61,4 @@ @Override public final int rank() { return _conf.rank(); } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.ndim.iterator.types.virtual/index.html b/docs/coverage/test/html/neureka.ndim.iterator.types.virtual/index.html index dd743c47d..c3047c8a2 100644 --- a/docs/coverage/test/html/neureka.ndim.iterator.types.virtual/index.html +++ b/docs/coverage/test/html/neureka.ndim.iterator.types.virtual/index.html @@ -1 +1 @@ -neureka.ndim.iterator.types.virtual

neureka.ndim.iterator.types.virtual

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total25 of 3528%0 of 0n/a71171171101
VirtualNDIterator251028%n/a71171171101
\ No newline at end of file +neureka.ndim.iterator.types.virtual

neureka.ndim.iterator.types.virtual

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total25 of 3528%0 of 0n/a71171171101
VirtualNDIterator251028%n/a71171171101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.ndim.iterator.types.virtual/index.source.html b/docs/coverage/test/html/neureka.ndim.iterator.types.virtual/index.source.html index 4c92b51a4..377eb2ad9 100644 --- a/docs/coverage/test/html/neureka.ndim.iterator.types.virtual/index.source.html +++ b/docs/coverage/test/html/neureka.ndim.iterator.types.virtual/index.source.html @@ -1 +1 @@ -neureka.ndim.iterator.types.virtual

neureka.ndim.iterator.types.virtual

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total25 of 3528%0 of 0n/a71171171101
VirtualNDIterator.java251028%n/a71171171101
\ No newline at end of file +neureka.ndim.iterator.types.virtual

neureka.ndim.iterator.types.virtual

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total25 of 3528%0 of 0n/a71171171101
VirtualNDIterator.java251028%n/a71171171101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.ndim.iterator/NDIterator$NonVirtual.html b/docs/coverage/test/html/neureka.ndim.iterator/NDIterator$NonVirtual.html index e7fc2fa1a..e3c1d4ada 100644 --- a/docs/coverage/test/html/neureka.ndim.iterator/NDIterator$NonVirtual.html +++ b/docs/coverage/test/html/neureka.ndim.iterator/NDIterator$NonVirtual.html @@ -1 +1 @@ -NDIterator.NonVirtual

NDIterator.NonVirtual

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 15100%0 of 0n/a010101
static {...}15100%n/a010101
\ No newline at end of file +NDIterator.NonVirtual

NDIterator.NonVirtual

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 24100%0 of 0n/a010101
static {...}24100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.ndim.iterator/NDIterator.html b/docs/coverage/test/html/neureka.ndim.iterator/NDIterator.html index 85383f4a8..2503fb025 100644 --- a/docs/coverage/test/html/neureka.ndim.iterator/NDIterator.html +++ b/docs/coverage/test/html/neureka.ndim.iterator/NDIterator.html @@ -1 +1 @@ -NDIterator

NDIterator

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 105100%0 of 20100%01401604
of(NDConfiguration, NDIterator.NonVirtual)89100%20100%01101101
getIndexAndIncrement()7100%n/a010301
of(Tensor, NDIterator.NonVirtual)5100%n/a010101
of(Tensor)4100%n/a010101
\ No newline at end of file +NDIterator

NDIterator

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 105100%0 of 20100%01401604
of(NDConfiguration, NDIterator.NonVirtual)89100%20100%01101101
getIndexAndIncrement()7100%n/a010301
of(Tensor, NDIterator.NonVirtual)5100%n/a010101
of(Tensor)4100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.ndim.iterator/NDIterator.java.html b/docs/coverage/test/html/neureka.ndim.iterator/NDIterator.java.html index dcd3d5095..5818e1e2e 100644 --- a/docs/coverage/test/html/neureka.ndim.iterator/NDIterator.java.html +++ b/docs/coverage/test/html/neureka.ndim.iterator/NDIterator.java.html @@ -149,4 +149,4 @@ int rank(); } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.ndim.iterator/index.html b/docs/coverage/test/html/neureka.ndim.iterator/index.html index da973cf93..f7ffecb58 100644 --- a/docs/coverage/test/html/neureka.ndim.iterator/index.html +++ b/docs/coverage/test/html/neureka.ndim.iterator/index.html @@ -1 +1 @@ -neureka.ndim.iterator

neureka.ndim.iterator

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total0 of 120100%0 of 20100%0150170502
NDIterator105100%20100%0140160401
NDIterator.NonVirtual15100%n/a01010101
\ No newline at end of file +neureka.ndim.iterator

neureka.ndim.iterator

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total0 of 129100%0 of 20100%0150170502
NDIterator105100%20100%0140160401
NDIterator.NonVirtual24100%n/a01010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.ndim.iterator/index.source.html b/docs/coverage/test/html/neureka.ndim.iterator/index.source.html index 3fa4d0a84..98dae68dc 100644 --- a/docs/coverage/test/html/neureka.ndim.iterator/index.source.html +++ b/docs/coverage/test/html/neureka.ndim.iterator/index.source.html @@ -1 +1 @@ -neureka.ndim.iterator

neureka.ndim.iterator

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total0 of 120100%0 of 20100%0150170502
NDIterator.java120100%20100%0150170502
\ No newline at end of file +neureka.ndim.iterator

neureka.ndim.iterator

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total0 of 129100%0 of 20100%0150170502
NDIterator.java129100%20100%0150170502
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.ndim/NDConstructor$1.html b/docs/coverage/test/html/neureka.ndim/NDConstructor$1.html index 8509ca04c..36e2c762a 100644 --- a/docs/coverage/test/html/neureka.ndim/NDConstructor$1.html +++ b/docs/coverage/test/html/neureka.ndim/NDConstructor$1.html @@ -1 +1 @@ -NDConstructor.new NDConstructor() {...}

NDConstructor.new NDConstructor() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total28 of 5952%5 of 837%6831124
produceNDC(boolean)201948%5337%451601
getSize()40%n/a111111
getShape()40%n/a111111
{...}12100%n/a010301
\ No newline at end of file +NDConstructor.new NDConstructor() {...}

NDConstructor.new NDConstructor() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total28 of 5952%5 of 837%6831124
produceNDC(boolean)201948%5337%451601
getSize()40%n/a111111
getShape()40%n/a111111
{...}12100%n/a010301
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.ndim/NDConstructor$2.html b/docs/coverage/test/html/neureka.ndim/NDConstructor$2.html index 3e3138814..31c1b8fa3 100644 --- a/docs/coverage/test/html/neureka.ndim/NDConstructor$2.html +++ b/docs/coverage/test/html/neureka.ndim/NDConstructor$2.html @@ -1 +1 @@ -NDConstructor.new NDConstructor() {...}

NDConstructor.new NDConstructor() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total13 of 8484%4 of 850%3811704
produceNDC(boolean)134878%4450%3511201
{...}15100%n/a010301
getShape()5100%n/a010101
getSize()3100%n/a010101
\ No newline at end of file +NDConstructor.new NDConstructor() {...}

NDConstructor.new NDConstructor() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total13 of 8484%4 of 850%3811704
produceNDC(boolean)134878%4450%3511201
{...}15100%n/a010301
getShape()5100%n/a010101
getSize()3100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.ndim/NDConstructor.html b/docs/coverage/test/html/neureka.ndim/NDConstructor.html index 4f2b1754a..8f5258e1f 100644 --- a/docs/coverage/test/html/neureka.ndim/NDConstructor.html +++ b/docs/coverage/test/html/neureka.ndim/NDConstructor.html @@ -1 +1 @@ -NDConstructor

NDConstructor

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total25 of 5352%1 of 250%153904
of(int[])251130%1150%123601
of(int[], int[], int[], int[], int[])8100%n/a010101
of(NDConfiguration)5100%n/a010101
of(Shape)4100%n/a010101
\ No newline at end of file +NDConstructor

NDConstructor

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total25 of 5352%1 of 250%153904
of(int[])251130%1150%123601
of(int[], int[], int[], int[], int[])8100%n/a010101
of(NDConfiguration)5100%n/a010101
of(Shape)4100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.ndim/NDConstructor.java.html b/docs/coverage/test/html/neureka.ndim/NDConstructor.java.html index 1193068b4..4b0133f4d 100644 --- a/docs/coverage/test/html/neureka.ndim/NDConstructor.java.html +++ b/docs/coverage/test/html/neureka.ndim/NDConstructor.java.html @@ -98,4 +98,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.ndim/NDUtil.html b/docs/coverage/test/html/neureka.ndim/NDUtil.html index 2fa621bca..d92e9d4c6 100644 --- a/docs/coverage/test/html/neureka.ndim/NDUtil.html +++ b/docs/coverage/test/html/neureka.ndim/NDUtil.html @@ -1 +1 @@ -NDUtil

NDUtil

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total3 of 7295%0 of 6100%161813
NDUtil()30%n/a111111
shapeString(int[])39100%4100%030401
asList(int[])30100%2100%020301
\ No newline at end of file +NDUtil

NDUtil

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total3 of 7295%0 of 6100%161813
NDUtil()30%n/a111111
shapeString(int[])39100%4100%030401
asList(int[])30100%2100%020301
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.ndim/NDUtil.java.html b/docs/coverage/test/html/neureka.ndim/NDUtil.java.html index b958fec45..c1c79f485 100644 --- a/docs/coverage/test/html/neureka.ndim/NDUtil.java.html +++ b/docs/coverage/test/html/neureka.ndim/NDUtil.java.html @@ -25,4 +25,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.ndim/NDimensional.html b/docs/coverage/test/html/neureka.ndim/NDimensional.html index 691230f4c..2cb551652 100644 --- a/docs/coverage/test/html/neureka.ndim/NDimensional.html +++ b/docs/coverage/test/html/neureka.ndim/NDimensional.html @@ -1 +1 @@ -NDimensional

NDimensional

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 63100%0 of 0n/a014014014
shape()5100%n/a010101
indicesMap()5100%n/a010101
strides()5100%n/a010101
spread()5100%n/a010101
offset()5100%n/a010101
shape(int)5100%n/a010101
size()5100%n/a010101
indexOfIndex(int)5100%n/a010101
indicesOfIndex(int)5100%n/a010101
indexOfIndices(int[])5100%n/a010101
rank()4100%n/a010101
getRank()3100%n/a010101
getShape()3100%n/a010101
getSize()3100%n/a010101
\ No newline at end of file +NDimensional

NDimensional

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 63100%0 of 0n/a014014014
shape()5100%n/a010101
indicesMap()5100%n/a010101
strides()5100%n/a010101
spread()5100%n/a010101
offset()5100%n/a010101
shape(int)5100%n/a010101
size()5100%n/a010101
indexOfIndex(int)5100%n/a010101
indicesOfIndex(int)5100%n/a010101
indexOfIndices(int[])5100%n/a010101
rank()4100%n/a010101
getRank()3100%n/a010101
getShape()3100%n/a010101
getSize()3100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.ndim/NDimensional.java.html b/docs/coverage/test/html/neureka.ndim/NDimensional.java.html index ff78a6d61..eaca36dc5 100644 --- a/docs/coverage/test/html/neureka.ndim/NDimensional.java.html +++ b/docs/coverage/test/html/neureka.ndim/NDimensional.java.html @@ -117,4 +117,4 @@ default int indexOfIndices( int[] indices ) { return getNDConf().indexOfIndices( indices ); } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.ndim/index.html b/docs/coverage/test/html/neureka.ndim/index.html index dc02c4c2b..f257420c2 100644 --- a/docs/coverage/test/html/neureka.ndim/index.html +++ b/docs/coverage/test/html/neureka.ndim/index.html @@ -1 +1 @@ -neureka.ndim

neureka.ndim

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total69 of 33179%10 of 2458%114185732905
NDConstructor.new NDConstructor() {...}283152%5337%683112401
NDConstructor252852%1150%15390401
NDConstructor.new NDConstructor() {...}137184%4450%381170401
NDUtil36995%6100%16181301
NDimensional63100%n/a01401401401
\ No newline at end of file +neureka.ndim

neureka.ndim

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total69 of 33179%10 of 2458%114185732905
NDConstructor.new NDConstructor() {...}283152%5337%683112401
NDConstructor252852%1150%15390401
NDConstructor.new NDConstructor() {...}137184%4450%381170401
NDUtil36995%6100%16181301
NDimensional63100%n/a01401401401
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.ndim/index.source.html b/docs/coverage/test/html/neureka.ndim/index.source.html index 52538a1ce..fe3dbff65 100644 --- a/docs/coverage/test/html/neureka.ndim/index.source.html +++ b/docs/coverage/test/html/neureka.ndim/index.source.html @@ -1 +1 @@ -neureka.ndim

neureka.ndim

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total69 of 33179%10 of 2458%114185732905
NDConstructor.java6613066%10844%102173521203
NDUtil.java36995%6100%16181301
NDimensional.java63100%n/a01401401401
\ No newline at end of file +neureka.ndim

neureka.ndim

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total69 of 33179%10 of 2458%114185732905
NDConstructor.java6613066%10844%102173521203
NDUtil.java36995%6100%16181301
NDimensional.java63100%n/a01401401401
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.optimization.implementations/ADAM.html b/docs/coverage/test/html/neureka.optimization.implementations/ADAM.html index c79318985..2b5f76dca 100644 --- a/docs/coverage/test/html/neureka.optimization.implementations/ADAM.html +++ b/docs/coverage/test/html/neureka.optimization.implementations/ADAM.html @@ -1 +1 @@ -ADAM

ADAM

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total17 of 25193%0 of 0n/a5843158
lambda$optimize$0()50%n/a111111
getMomentum()30%n/a111111
getVelocity()30%n/a111111
getTime()30%n/a111111
getLearningRate()30%n/a111111
optimize(Tensor)188100%n/a0101501
ADAM(long, double, Tensor, Tensor)27100%n/a010801
ADAM(long, double, Tensor)19100%n/a010401
\ No newline at end of file +ADAM

ADAM

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total17 of 25193%0 of 0n/a5843158
lambda$optimize$0()50%n/a111111
getMomentum()30%n/a111111
getVelocity()30%n/a111111
getTime()30%n/a111111
getLearningRate()30%n/a111111
optimize(Tensor)188100%n/a0101501
ADAM(long, double, Tensor, Tensor)27100%n/a010801
ADAM(long, double, Tensor)19100%n/a010401
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.optimization.implementations/ADAM.java.html b/docs/coverage/test/html/neureka.optimization.implementations/ADAM.java.html index fb31e5fc8..e9a416061 100644 --- a/docs/coverage/test/html/neureka.optimization.implementations/ADAM.java.html +++ b/docs/coverage/test/html/neureka.optimization.implementations/ADAM.java.html @@ -106,4 +106,4 @@ public final double getLearningRate() { return lr; } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.optimization.implementations/ADAMFactory.html b/docs/coverage/test/html/neureka.optimization.implementations/ADAMFactory.html index 79aac8f1e..5cf7e0b5b 100644 --- a/docs/coverage/test/html/neureka.optimization.implementations/ADAMFactory.html +++ b/docs/coverage/test/html/neureka.optimization.implementations/ADAMFactory.html @@ -1 +1 @@ -ADAMFactory

ADAMFactory

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total29 of 5648%1 of 250%4731036
create(Tensor, Tensor)100%n/a111111
withLearningRate(double)70%n/a111111
withTime(long)70%n/a111111
ADAMFactory(double, long)51372%1150%120501
create(Tensor)9100%n/a010101
ADAMFactory()5100%n/a010101
\ No newline at end of file +ADAMFactory

ADAMFactory

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total29 of 5648%1 of 250%4731036
create(Tensor, Tensor)100%n/a111111
withLearningRate(double)70%n/a111111
withTime(long)70%n/a111111
ADAMFactory(double, long)51372%1150%120501
create(Tensor)9100%n/a010101
ADAMFactory()5100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.optimization.implementations/ADAMFactory.java.html b/docs/coverage/test/html/neureka.optimization.implementations/ADAMFactory.java.html index 5a3aba2f3..d5badd551 100644 --- a/docs/coverage/test/html/neureka.optimization.implementations/ADAMFactory.java.html +++ b/docs/coverage/test/html/neureka.optimization.implementations/ADAMFactory.java.html @@ -33,4 +33,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.optimization.implementations/AdaGrad.html b/docs/coverage/test/html/neureka.optimization.implementations/AdaGrad.html index b6b920ad9..fc853372e 100644 --- a/docs/coverage/test/html/neureka.optimization.implementations/AdaGrad.html +++ b/docs/coverage/test/html/neureka.optimization.implementations/AdaGrad.html @@ -1 +1 @@ -AdaGrad

AdaGrad

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 84100%0 of 0n/a0201002
optimize(Tensor)58100%n/a010401
AdaGrad(Tensor, double)26100%n/a010601
\ No newline at end of file +AdaGrad

AdaGrad

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 84100%0 of 0n/a0201002
optimize(Tensor)58100%n/a010401
AdaGrad(Tensor, double)26100%n/a010601
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.optimization.implementations/AdaGrad.java.html b/docs/coverage/test/html/neureka.optimization.implementations/AdaGrad.java.html index bafe541bf..367e7ccb4 100644 --- a/docs/coverage/test/html/neureka.optimization.implementations/AdaGrad.java.html +++ b/docs/coverage/test/html/neureka.optimization.implementations/AdaGrad.java.html @@ -35,4 +35,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.optimization.implementations/AdaGradFactory.html b/docs/coverage/test/html/neureka.optimization.implementations/AdaGradFactory.html index 475e361b4..82b6aa8e7 100644 --- a/docs/coverage/test/html/neureka.optimization.implementations/AdaGradFactory.html +++ b/docs/coverage/test/html/neureka.optimization.implementations/AdaGradFactory.html @@ -1 +1 @@ -AdaGradFactory

AdaGradFactory

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total11 of 2454%0 of 0n/a244624
AdaGradFactory(double)60%n/a113311
withLearningRate(double)50%n/a111111
create(Tensor)7100%n/a010101
AdaGradFactory()6100%n/a010101
\ No newline at end of file +AdaGradFactory

AdaGradFactory

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total11 of 2454%0 of 0n/a244624
AdaGradFactory(double)60%n/a113311
withLearningRate(double)50%n/a111111
create(Tensor)7100%n/a010101
AdaGradFactory()6100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.optimization.implementations/AdaGradFactory.java.html b/docs/coverage/test/html/neureka.optimization.implementations/AdaGradFactory.java.html index 0cd45262f..ae40a5237 100644 --- a/docs/coverage/test/html/neureka.optimization.implementations/AdaGradFactory.java.html +++ b/docs/coverage/test/html/neureka.optimization.implementations/AdaGradFactory.java.html @@ -26,4 +26,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.optimization.implementations/Momentum.html b/docs/coverage/test/html/neureka.optimization.implementations/Momentum.html index e3f3b8b84..27d0408d7 100644 --- a/docs/coverage/test/html/neureka.optimization.implementations/Momentum.html +++ b/docs/coverage/test/html/neureka.optimization.implementations/Momentum.html @@ -1 +1 @@ -Momentum

Momentum

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 78100%0 of 0n/a0201202
optimize(Tensor)49100%n/a010501
Momentum(Tensor, double, double)29100%n/a010701
\ No newline at end of file +Momentum

Momentum

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 78100%0 of 0n/a0201202
optimize(Tensor)49100%n/a010501
Momentum(Tensor, double, double)29100%n/a010701
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.optimization.implementations/Momentum.java.html b/docs/coverage/test/html/neureka.optimization.implementations/Momentum.java.html index 1384ada03..1d04997bb 100644 --- a/docs/coverage/test/html/neureka.optimization.implementations/Momentum.java.html +++ b/docs/coverage/test/html/neureka.optimization.implementations/Momentum.java.html @@ -29,4 +29,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.optimization.implementations/MomentumFactory.html b/docs/coverage/test/html/neureka.optimization.implementations/MomentumFactory.html index 78a6b6237..202b65fae 100644 --- a/docs/coverage/test/html/neureka.optimization.implementations/MomentumFactory.html +++ b/docs/coverage/test/html/neureka.optimization.implementations/MomentumFactory.html @@ -1 +1 @@ -MomentumFactory

MomentumFactory

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total23 of 4143%0 of 0n/a3561135
MomentumFactory(double, double)90%n/a114411
withLearningRate(double)70%n/a111111
withDecayRate(double)70%n/a111111
MomentumFactory()9100%n/a010401
create(Tensor)9100%n/a010101
\ No newline at end of file +MomentumFactory

MomentumFactory

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total23 of 4143%0 of 0n/a3561135
MomentumFactory(double, double)90%n/a114411
withLearningRate(double)70%n/a111111
withDecayRate(double)70%n/a111111
MomentumFactory()9100%n/a010401
create(Tensor)9100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.optimization.implementations/MomentumFactory.java.html b/docs/coverage/test/html/neureka.optimization.implementations/MomentumFactory.java.html index b81cd6265..373bfba4c 100644 --- a/docs/coverage/test/html/neureka.optimization.implementations/MomentumFactory.java.html +++ b/docs/coverage/test/html/neureka.optimization.implementations/MomentumFactory.java.html @@ -35,4 +35,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.optimization.implementations/RMSProp.html b/docs/coverage/test/html/neureka.optimization.implementations/RMSProp.html index 1adac35f8..8f8667ce9 100644 --- a/docs/coverage/test/html/neureka.optimization.implementations/RMSProp.html +++ b/docs/coverage/test/html/neureka.optimization.implementations/RMSProp.html @@ -1 +1 @@ -RMSProp

RMSProp

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 96100%0 of 0n/a0201102
optimize(Tensor)72100%n/a010501
RMSProp(Tensor, double, double)24100%n/a010601
\ No newline at end of file +RMSProp

RMSProp

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 96100%0 of 0n/a0201102
optimize(Tensor)72100%n/a010501
RMSProp(Tensor, double, double)24100%n/a010601
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.optimization.implementations/RMSProp.java.html b/docs/coverage/test/html/neureka.optimization.implementations/RMSProp.java.html index a40ebcd23..a964638aa 100644 --- a/docs/coverage/test/html/neureka.optimization.implementations/RMSProp.java.html +++ b/docs/coverage/test/html/neureka.optimization.implementations/RMSProp.java.html @@ -36,4 +36,4 @@ return Tensor.of("-" + lr + " * ", g, " / ( ( ", h, " ** 0.5 ) + 1e-8 )"); } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.optimization.implementations/RMSPropFactory.html b/docs/coverage/test/html/neureka.optimization.implementations/RMSPropFactory.html index 9b68e6576..23c904f72 100644 --- a/docs/coverage/test/html/neureka.optimization.implementations/RMSPropFactory.html +++ b/docs/coverage/test/html/neureka.optimization.implementations/RMSPropFactory.html @@ -1 +1 @@ -RMSPropFactory

RMSPropFactory

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total7 of 4182%0 of 0n/a1511115
withDecayRate(double)70%n/a111111
RMSPropFactory()9100%n/a010401
RMSPropFactory(double, double)9100%n/a010401
create(Tensor)9100%n/a010101
withLearningRate(double)7100%n/a010101
\ No newline at end of file +RMSPropFactory

RMSPropFactory

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total7 of 4182%0 of 0n/a1511115
withDecayRate(double)70%n/a111111
RMSPropFactory()9100%n/a010401
RMSPropFactory(double, double)9100%n/a010401
create(Tensor)9100%n/a010101
withLearningRate(double)7100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.optimization.implementations/RMSPropFactory.java.html b/docs/coverage/test/html/neureka.optimization.implementations/RMSPropFactory.java.html index c00d5e8fe..5a92534fe 100644 --- a/docs/coverage/test/html/neureka.optimization.implementations/RMSPropFactory.java.html +++ b/docs/coverage/test/html/neureka.optimization.implementations/RMSPropFactory.java.html @@ -35,4 +35,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.optimization.implementations/SGD.html b/docs/coverage/test/html/neureka.optimization.implementations/SGD.html index a34958594..26c42f324 100644 --- a/docs/coverage/test/html/neureka.optimization.implementations/SGD.html +++ b/docs/coverage/test/html/neureka.optimization.implementations/SGD.html @@ -1 +1 @@ -SGD

SGD

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total8 of 4080%0 of 0n/a241724
lambda$optimize$0()50%n/a111111
learningRate()30%n/a111111
optimize(Tensor)26100%n/a010301
SGD(double)6100%n/a010301
\ No newline at end of file +SGD

SGD

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total8 of 4080%0 of 0n/a241724
lambda$optimize$0()50%n/a111111
learningRate()30%n/a111111
optimize(Tensor)26100%n/a010301
SGD(double)6100%n/a010301
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.optimization.implementations/SGD.java.html b/docs/coverage/test/html/neureka.optimization.implementations/SGD.java.html index 301c6c55e..316e67b32 100644 --- a/docs/coverage/test/html/neureka.optimization.implementations/SGD.java.html +++ b/docs/coverage/test/html/neureka.optimization.implementations/SGD.java.html @@ -68,4 +68,4 @@ public double learningRate() { return _lr; } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.optimization.implementations/SGDFactory.html b/docs/coverage/test/html/neureka.optimization.implementations/SGDFactory.html index 317c4f031..ebcf267b6 100644 --- a/docs/coverage/test/html/neureka.optimization.implementations/SGDFactory.html +++ b/docs/coverage/test/html/neureka.optimization.implementations/SGDFactory.html @@ -1 +1 @@ -SGDFactory

SGDFactory

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 23100%0 of 0n/a040604
SGDFactory()6100%n/a010101
SGDFactory(double)6100%n/a010301
create(Tensor)6100%n/a010101
withLearningRate(double)5100%n/a010101
\ No newline at end of file +SGDFactory

SGDFactory

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 23100%0 of 0n/a040604
SGDFactory()6100%n/a010101
SGDFactory(double)6100%n/a010301
create(Tensor)6100%n/a010101
withLearningRate(double)5100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.optimization.implementations/SGDFactory.java.html b/docs/coverage/test/html/neureka.optimization.implementations/SGDFactory.java.html index dabe787cf..f70b96b35 100644 --- a/docs/coverage/test/html/neureka.optimization.implementations/SGDFactory.java.html +++ b/docs/coverage/test/html/neureka.optimization.implementations/SGDFactory.java.html @@ -26,4 +26,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.optimization.implementations/index.html b/docs/coverage/test/html/neureka.optimization.implementations/index.html index bb3ef7222..5f59f06cc 100644 --- a/docs/coverage/test/html/neureka.optimization.implementations/index.html +++ b/docs/coverage/test/html/neureka.optimization.implementations/index.html @@ -1 +1 @@ -neureka.optimization.implementations

neureka.optimization.implementations

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total95 of 73487%1 of 250%1743191151642010
ADAMFactory292748%1150%473103601
MomentumFactory231843%n/a356113501
ADAM1723493%n/a584315801
AdaGradFactory111354%n/a24462401
SGD83280%n/a24172401
RMSPropFactory73482%n/a151111501
RMSProp96100%n/a020110201
AdaGrad84100%n/a020100201
Momentum78100%n/a020120201
SGDFactory23100%n/a04060401
\ No newline at end of file +neureka.optimization.implementations

neureka.optimization.implementations

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total95 of 73487%1 of 250%1743191151642010
ADAMFactory292748%1150%473103601
MomentumFactory231843%n/a356113501
ADAM1723493%n/a584315801
AdaGradFactory111354%n/a24462401
SGD83280%n/a24172401
RMSPropFactory73482%n/a151111501
RMSProp96100%n/a020110201
AdaGrad84100%n/a020100201
Momentum78100%n/a020120201
SGDFactory23100%n/a04060401
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.optimization.implementations/index.source.html b/docs/coverage/test/html/neureka.optimization.implementations/index.source.html index 7133cf81b..f9e1a2eeb 100644 --- a/docs/coverage/test/html/neureka.optimization.implementations/index.source.html +++ b/docs/coverage/test/html/neureka.optimization.implementations/index.source.html @@ -1 +1 @@ -neureka.optimization.implementations

neureka.optimization.implementations

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total95 of 73487%1 of 250%1743191151642010
ADAMFactory.java292748%1150%473103601
MomentumFactory.java231843%n/a356113501
ADAM.java1723493%n/a584315801
AdaGradFactory.java111354%n/a24462401
SGD.java83280%n/a24172401
RMSPropFactory.java73482%n/a151111501
RMSProp.java96100%n/a020110201
AdaGrad.java84100%n/a020100201
Momentum.java78100%n/a020120201
SGDFactory.java23100%n/a04060401
\ No newline at end of file +neureka.optimization.implementations

neureka.optimization.implementations

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total95 of 73487%1 of 250%1743191151642010
ADAMFactory.java292748%1150%473103601
MomentumFactory.java231843%n/a356113501
ADAM.java1723493%n/a584315801
AdaGradFactory.java111354%n/a24462401
SGD.java83280%n/a24172401
RMSPropFactory.java73482%n/a151111501
RMSProp.java96100%n/a020110201
AdaGrad.java84100%n/a020100201
Momentum.java78100%n/a020120201
SGDFactory.java23100%n/a04060401
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.optimization/Optimizer$1.html b/docs/coverage/test/html/neureka.optimization/Optimizer$1.html index 364cdf3b6..93187542e 100644 --- a/docs/coverage/test/html/neureka.optimization/Optimizer$1.html +++ b/docs/coverage/test/html/neureka.optimization/Optimizer$1.html @@ -1 +1 @@ -Optimizer.new Optimizer() {...}

Optimizer.new Optimizer() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total13 of 130%0 of 0n/a333333
{...}60%n/a111111
optimize(Tensor)50%n/a111111
update(Component.OwnerChangeRequest)20%n/a111111
\ No newline at end of file +Optimizer.new Optimizer() {...}

Optimizer.new Optimizer() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total13 of 130%0 of 0n/a333333
{...}60%n/a111111
optimize(Tensor)50%n/a111111
update(Component.OwnerChangeRequest)20%n/a111111
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.optimization/Optimizer$2.html b/docs/coverage/test/html/neureka.optimization/Optimizer$2.html index c5e537e1d..a42b5fa05 100644 --- a/docs/coverage/test/html/neureka.optimization/Optimizer$2.html +++ b/docs/coverage/test/html/neureka.optimization/Optimizer$2.html @@ -1 +1 @@ -Optimizer.new Optimizer() {...}

Optimizer.new Optimizer() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total5 of 2277%0 of 0n/a140314
lambda$optimize$0()50%n/a111111
optimize(Tensor)9100%n/a010101
{...}6100%n/a010101
update(Component.OwnerChangeRequest)2100%n/a010101
\ No newline at end of file +Optimizer.new Optimizer() {...}

Optimizer.new Optimizer() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total5 of 2277%0 of 0n/a140314
lambda$optimize$0()50%n/a111111
optimize(Tensor)9100%n/a010101
{...}6100%n/a010101
update(Component.OwnerChangeRequest)2100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.optimization/Optimizer.html b/docs/coverage/test/html/neureka.optimization/Optimizer.html index cb04debb6..edc7dcc1c 100644 --- a/docs/coverage/test/html/neureka.optimization/Optimizer.html +++ b/docs/coverage/test/html/neureka.optimization/Optimizer.html @@ -1 +1 @@ -Optimizer

Optimizer

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total5 of 3183%0 of 0n/a131713
of(Optimization)50%n/a111111
static {...}21100%n/a010501
ofGradient(Optimization)5100%n/a010101
\ No newline at end of file +Optimizer

Optimizer

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total5 of 3183%0 of 0n/a131713
of(Optimization)50%n/a111111
static {...}21100%n/a010501
ofGradient(Optimization)5100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.optimization/Optimizer.java.html b/docs/coverage/test/html/neureka.optimization/Optimizer.java.html index 63e4176b5..613ee5ea1 100644 --- a/docs/coverage/test/html/neureka.optimization/Optimizer.java.html +++ b/docs/coverage/test/html/neureka.optimization/Optimizer.java.html @@ -100,4 +100,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.optimization/index.html b/docs/coverage/test/html/neureka.optimization/index.html index 9497b1813..dfcbd0b20 100644 --- a/docs/coverage/test/html/neureka.optimization/index.html +++ b/docs/coverage/test/html/neureka.optimization/index.html @@ -1 +1 @@ -neureka.optimization

neureka.optimization

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total23 of 6665%0 of 0n/a51031151013
Optimizer.new Optimizer() {...}130%n/a33333311
Optimizer52683%n/a13171301
Optimizer.new Optimizer() {...}51777%n/a14031401
\ No newline at end of file +neureka.optimization

neureka.optimization

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total23 of 6665%0 of 0n/a51031151013
Optimizer.new Optimizer() {...}130%n/a33333311
Optimizer52683%n/a13171301
Optimizer.new Optimizer() {...}51777%n/a14031401
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.optimization/index.source.html b/docs/coverage/test/html/neureka.optimization/index.source.html index ea02c3645..46548639a 100644 --- a/docs/coverage/test/html/neureka.optimization/index.source.html +++ b/docs/coverage/test/html/neureka.optimization/index.source.html @@ -1 +1 @@ -neureka.optimization

neureka.optimization

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total23 of 6665%0 of 0n/a51031151013
Optimizer.java234365%n/a51031151013
\ No newline at end of file +neureka.optimization

neureka.optimization

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total23 of 6665%0 of 0n/a51031151013
Optimizer.java234365%n/a51031151013
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.view/NDPrintSettings.html b/docs/coverage/test/html/neureka.view/NDPrintSettings.html index 92fcaf7e4..706853226 100644 --- a/docs/coverage/test/html/neureka.view/NDPrintSettings.html +++ b/docs/coverage/test/html/neureka.view/NDPrintSettings.html @@ -1 +1 @@ -NDPrintSettings

NDPrintSettings

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total34 of 44692%19 of 4658%19591113036
with(String)127486%8850%8911701
setCellSize(int)21184%1150%120301
setRowLimit(int)21184%1150%120301
setIsMultiline(boolean)21184%1150%120301
setHasValue(boolean)21184%1150%120301
setHasShape(boolean)21184%1150%120301
setHasRecursiveGraph(boolean)21184%1150%120301
setHasDerivatives(boolean)21184%1150%120301
setIsCellBound(boolean)21184%1150%120301
setPrefix(String)21184%1150%120301
setPostfix(String)21184%1150%120301
setIndent(String)21184%1150%120301
_imposeOn(NDPrintSettings)57100%n/a0101501
NDPrintSettings(Supplier)45100%n/a0101601
setHasGradient(boolean)13100%2100%020301
setIsScientific(boolean)13100%2100%020301
setHasSlimNumbers(boolean)13100%2100%020301
setIsLegacy(boolean)13100%2100%020301
clone()10100%n/a010301
with(NDPrintSettings)5100%n/a010201
getCellSize()3100%n/a010101
getRowLimit()3100%n/a010101
getHasGradient()3100%n/a010101
getIsScientific()3100%n/a010101
getIsMultiline()3100%n/a010101
getHasSlimNumbers()3100%n/a010101
getHasValue()3100%n/a010101
getHasShape()3100%n/a010101
getHasRecursiveGraph()3100%n/a010101
getHasDerivatives()3100%n/a010101
getIsCellBound()3100%n/a010101
getPrefix()3100%n/a010101
getPostfix()3100%n/a010101
getIndent()3100%n/a010101
getIsLegacy()3100%n/a010101
lambda$clone$0()3100%n/a010101
\ No newline at end of file +NDPrintSettings

NDPrintSettings

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total34 of 44692%19 of 4658%19591113036
with(String)127486%8850%8911701
setCellSize(int)21184%1150%120301
setRowLimit(int)21184%1150%120301
setIsMultiline(boolean)21184%1150%120301
setHasValue(boolean)21184%1150%120301
setHasShape(boolean)21184%1150%120301
setHasRecursiveGraph(boolean)21184%1150%120301
setHasDerivatives(boolean)21184%1150%120301
setIsCellBound(boolean)21184%1150%120301
setPrefix(String)21184%1150%120301
setPostfix(String)21184%1150%120301
setIndent(String)21184%1150%120301
_imposeOn(NDPrintSettings)57100%n/a0101501
NDPrintSettings(Supplier)45100%n/a0101601
setHasGradient(boolean)13100%2100%020301
setIsScientific(boolean)13100%2100%020301
setHasSlimNumbers(boolean)13100%2100%020301
setIsLegacy(boolean)13100%2100%020301
clone()10100%n/a010301
with(NDPrintSettings)5100%n/a010201
getCellSize()3100%n/a010101
getRowLimit()3100%n/a010101
getHasGradient()3100%n/a010101
getIsScientific()3100%n/a010101
getIsMultiline()3100%n/a010101
getHasSlimNumbers()3100%n/a010101
getHasValue()3100%n/a010101
getHasShape()3100%n/a010101
getHasRecursiveGraph()3100%n/a010101
getHasDerivatives()3100%n/a010101
getIsCellBound()3100%n/a010101
getPrefix()3100%n/a010101
getPostfix()3100%n/a010101
getIndent()3100%n/a010101
getIsLegacy()3100%n/a010101
lambda$clone$0()3100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.view/NDPrintSettings.java.html b/docs/coverage/test/html/neureka.view/NDPrintSettings.java.html index 0899db540..6d45decb3 100644 --- a/docs/coverage/test/html/neureka.view/NDPrintSettings.java.html +++ b/docs/coverage/test/html/neureka.view/NDPrintSettings.java.html @@ -373,4 +373,4 @@ } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.view/NdaAsString$1.html b/docs/coverage/test/html/neureka.view/NdaAsString$1.html index 03b7a33ea..9e8490f39 100644 --- a/docs/coverage/test/html/neureka.view/NdaAsString$1.html +++ b/docs/coverage/test/html/neureka.view/NdaAsString$1.html @@ -1 +1 @@ -NdaAsString.new NdaAsString.Builder() {...}

NdaAsString.new NdaAsString.Builder() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 31100%0 of 0n/a040404
withConfig(String)10100%n/a010101
withConfig(NDPrintSettings)8100%n/a010101
byDefaults()7100%n/a010101
{...}6100%n/a010101
\ No newline at end of file +NdaAsString.new NdaAsString.Builder() {...}

NdaAsString.new NdaAsString.Builder() {...}

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 31100%0 of 0n/a040404
withConfig(String)10100%n/a010101
withConfig(NDPrintSettings)8100%n/a010101
byDefaults()7100%n/a010101
{...}6100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.view/NdaAsString$Util.html b/docs/coverage/test/html/neureka.view/NdaAsString$Util.html index abc7b6e1c..9cd2000b6 100644 --- a/docs/coverage/test/html/neureka.view/NdaAsString$Util.html +++ b/docs/coverage/test/html/neureka.view/NdaAsString$Util.html @@ -1 +1 @@ -NdaAsString.Util

NdaAsString.Util

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total3 of 4192%0 of 0n/a151515
NdaAsString.Util()30%n/a111111
pad(int, String)13100%n/a010101
pad(String, int)13100%n/a010101
indent(int)6100%n/a010101
spaces(int)6100%n/a010101
\ No newline at end of file +NdaAsString.Util

NdaAsString.Util

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total3 of 4192%0 of 0n/a151515
NdaAsString.Util()30%n/a111111
pad(int, String)13100%n/a010101
pad(String, int)13100%n/a010101
indent(int)6100%n/a010101
spaces(int)6100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.view/NdaAsString.html b/docs/coverage/test/html/neureka.view/NdaAsString.html index cb8adceb5..0200e8bd9 100644 --- a/docs/coverage/test/html/neureka.view/NdaAsString.html +++ b/docs/coverage/test/html/neureka.view/NdaAsString.html @@ -1 +1 @@ -NdaAsString

NdaAsString

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total70 of 1,75196%19 of 23691%201555255137
toString()1020095%54189%52403101
_recursiveFormatting(int[], int)843598%46293%43426001
lambda$_createStringItemFilter$2(String)82676%1583%140301
lambda$_createStringItemFilter$3(String)81869%1375%130201
lambda$_createBasicStringifierFor$10(boolean, Function, Object, int)81361%1150%121301
lambda$_createStringItemFilter$4(String)8642%1150%120101
_createBasicStringifierFor(Object, boolean)56893%11794%11012001
lambda$toString$16(String, String, String, GraphNode, ADAction)56392%1150%120501
NdaAsString(Nda, NDPrintSettings)410496%1150%1212501
lambda$_recursiveFormatting$19(NdaAsString.ValStringifier, int[])40%n/a111111
_buildSingleLabel(NDFrame, int, int[])7797%3975%3701001
formatFP(double)97100%12100%0701401
_stringifyAllValues()80100%12100%0701201
_buildRow(int, int, int, int[], NdaAsString.NDValStringifier, String)61100%10100%060701
_strShape()47100%8100%050701
_createStringItemFilter()35100%12100%0701001
lambda$_createValStringifierAndFormatter$0(NdaAsString.ValStringifier, int, int)35100%2100%020701
lambda$toString$17(GraphNode, ADAction)34100%2100%020301
_createValStringifierAndFormatter(Object)32100%4100%030801
_postProcessed(String)30100%2100%020501
lambda$_createValStringifierAndFormatter$1(NdaAsString.ValStringifier, int, int)29100%2100%020501
lambda$_createBasicStringifierFor$9(boolean, Function, Object, int)21100%2100%020301
lambda$_createBasicStringifierFor$8(boolean, Function, Object, int)21100%2100%020301
lambda$_createBasicStringifierFor$7(boolean, Function, Object, int)21100%2100%020301
lambda$_createBasicStringifierFor$6(boolean, Function, Object, int)21100%2100%020301
lambda$_createBasicStringifierFor$5(boolean, Function, Object, int)20100%2100%020301
lambda$toString$14(NDPrintSettings)14100%n/a010401
_breakAndIndent()10100%n/a010101
lambda$toString$15(Tensor)10100%n/a010101
lambda$_recursiveFormatting$18(NdaAsString.ValStringifier, int[])9100%n/a010101
_$(String)7100%n/a010101
_$(int)7100%n/a010101
lambda$_recursiveFormatting$20(NdaAsString.ValStringifier, int[])7100%n/a010101
lambda$_createBasicStringifierFor$13(Object, int)6100%n/a010101
lambda$_createBasicStringifierFor$12(Object, int)6100%n/a010101
lambda$_createBasicStringifierFor$11(Object, int)6100%n/a010101
representing(Nda)5100%n/a010101
\ No newline at end of file +NdaAsString

NdaAsString

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total70 of 1,75196%19 of 23691%201555255137
toString()1020095%54189%52403101
_recursiveFormatting(int[], int)843598%46293%43426001
lambda$_createStringItemFilter$2(String)82676%1583%140301
lambda$_createStringItemFilter$3(String)81869%1375%130201
lambda$_createBasicStringifierFor$10(boolean, Function, Object, int)81361%1150%121301
lambda$_createStringItemFilter$4(String)8642%1150%120101
_createBasicStringifierFor(Object, boolean)56893%11794%11012001
lambda$toString$16(String, String, String, GraphNode, ADAction)56392%1150%120501
NdaAsString(Nda, NDPrintSettings)410496%1150%1212501
lambda$_recursiveFormatting$19(NdaAsString.ValStringifier, int[])40%n/a111111
_buildSingleLabel(NDFrame, int, int[])7797%3975%3701001
formatFP(double)97100%12100%0701401
_stringifyAllValues()80100%12100%0701201
_buildRow(int, int, int, int[], NdaAsString.NDValStringifier, String)61100%10100%060701
_strShape()47100%8100%050701
_createStringItemFilter()35100%12100%0701001
lambda$_createValStringifierAndFormatter$0(NdaAsString.ValStringifier, int, int)35100%2100%020701
lambda$toString$17(GraphNode, ADAction)34100%2100%020301
_createValStringifierAndFormatter(Object)32100%4100%030801
_postProcessed(String)30100%2100%020501
lambda$_createValStringifierAndFormatter$1(NdaAsString.ValStringifier, int, int)29100%2100%020501
lambda$_createBasicStringifierFor$9(boolean, Function, Object, int)21100%2100%020301
lambda$_createBasicStringifierFor$8(boolean, Function, Object, int)21100%2100%020301
lambda$_createBasicStringifierFor$7(boolean, Function, Object, int)21100%2100%020301
lambda$_createBasicStringifierFor$6(boolean, Function, Object, int)21100%2100%020301
lambda$_createBasicStringifierFor$5(boolean, Function, Object, int)20100%2100%020301
lambda$toString$14(NDPrintSettings)14100%n/a010401
_breakAndIndent()10100%n/a010101
lambda$toString$15(Tensor)10100%n/a010101
lambda$_recursiveFormatting$18(NdaAsString.ValStringifier, int[])9100%n/a010101
_$(String)7100%n/a010101
_$(int)7100%n/a010101
lambda$_recursiveFormatting$20(NdaAsString.ValStringifier, int[])7100%n/a010101
lambda$_createBasicStringifierFor$13(Object, int)6100%n/a010101
lambda$_createBasicStringifierFor$12(Object, int)6100%n/a010101
lambda$_createBasicStringifierFor$11(Object, int)6100%n/a010101
representing(Nda)5100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.view/NdaAsString.java.html b/docs/coverage/test/html/neureka.view/NdaAsString.java.html index d05a7e029..925e1997c 100644 --- a/docs/coverage/test/html/neureka.view/NdaAsString.java.html +++ b/docs/coverage/test/html/neureka.view/NdaAsString.java.html @@ -608,4 +608,4 @@ } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka.view/index.html b/docs/coverage/test/html/neureka.view/index.html index 5fd2a7bcb..808d663a6 100644 --- a/docs/coverage/test/html/neureka.view/index.html +++ b/docs/coverage/test/html/neureka.view/index.html @@ -1 +1 @@ -neureka.view

neureka.view

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total107 of 2,26995%38 of 28286%40223737628204
NdaAsString701,68196%1921791%20155525513701
NDPrintSettings3441292%192758%1959111303601
NdaAsString.Util3892%n/a15151501
NdaAsString.new NdaAsString.Builder() {...}31100%n/a04040401
\ No newline at end of file +neureka.view

neureka.view

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total107 of 2,26995%38 of 28286%40223737628204
NdaAsString701,68196%1921791%20155525513701
NDPrintSettings3441292%192758%1959111303601
NdaAsString.Util3892%n/a15151501
NdaAsString.new NdaAsString.Builder() {...}31100%n/a04040401
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka.view/index.source.html b/docs/coverage/test/html/neureka.view/index.source.html index 1895d4896..0f5a479cc 100644 --- a/docs/coverage/test/html/neureka.view/index.source.html +++ b/docs/coverage/test/html/neureka.view/index.source.html @@ -1 +1 @@ -neureka.view

neureka.view

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total107 of 2,26995%38 of 28286%40223737628204
NdaAsString.java731,75095%1921791%21164626324603
NDPrintSettings.java3441292%192758%1959111303601
\ No newline at end of file +neureka.view

neureka.view

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethodsMissedClasses
Total107 of 2,26995%38 of 28286%40223737628204
NdaAsString.java731,75095%1921791%21164626324603
NDPrintSettings.java3441292%192758%1959111303601
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka/AbstractNda.html b/docs/coverage/test/html/neureka/AbstractNda.html index f2ac23666..e2e93db7e 100644 --- a/docs/coverage/test/html/neureka/AbstractNda.html +++ b/docs/coverage/test/html/neureka/AbstractNda.html @@ -1 +1 @@ -AbstractNda

AbstractNda

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total36 of 63694%11 of 8286%12748101133
_allocateVirtual()140%n/a116611
_setData(Data)74285%21285%281801
_convertedDataOfType(Class)72376%2250%231701
_setNDConf(NDConfiguration)43589%1583%140701
_getRawData()2777%1150%120101
getItemType()1694%2250%230101
getRepresentativeItemClass()1694%2250%230101
_initDataArrayFrom(Filler)121100%16100%0902001
_setDataAndCountUsage(Data)21100%6100%040601
lambda$_initDataArrayFrom$7(Object, Filler, int, int)21100%2100%020301
lambda$_initDataArrayFrom$6(Object, Filler, int, int)21100%2100%020301
lambda$_initDataArrayFrom$5(Object, Filler, int, int)21100%2100%020301
lambda$_initDataArrayFrom$4(Object, Filler, int, int)21100%2100%020301
lambda$_initDataArrayFrom$3(Object, Filler, int, int)21100%2100%020301
lambda$_initDataArrayFrom$2(Object, Filler, int, int)21100%2100%020301
lambda$_initDataArrayFrom$1(Object, Filler, int, int)21100%2100%020301
lambda$_initDataArrayFrom$0(Object, Filler, int, int)21100%2100%020301
lambda$_initDataArrayFrom$8(Object, Filler, int, int)19100%2100%020301
getDataType()16100%1375%130101
_guardGet(String)13100%n/a010101
_guardSet(String)13100%n/a010101
_guardMod(String)13100%n/a010101
_guard(String)12100%2100%020401
is(Class)12100%2100%020201
constructFor(AbstractNda, Device, NDConstructor)10100%n/a010101
_virtualize()8100%n/a010101
_actualize()8100%n/a010101
AbstractNda()6100%n/a010201
getNDConf()6100%n/a010101
_getData()6100%n/a010101
lambda$_setNDConf$10(int, int)4100%n/a010101
lambda$_setNDConf$9(int, int)4100%n/a010101
static {...}100%n/a010101
\ No newline at end of file +AbstractNda

AbstractNda

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total48 of 60292%14 of 8082%13701097130
_setData(Data)212857%5964%383801
_allocateVirtual()140%n/a116611
_convertedDataOfType(Class)72376%2250%231701
is(Class)21083%1150%120201
_getRawData()2777%1150%120101
getItemType()1694%2250%230101
getRepresentativeItemClass()1694%2250%230101
_initDataArrayFrom(Filler)121100%16100%0902001
_setDataAndCountUsage(Data)25100%8100%050601
lambda$_initDataArrayFrom$7(Object, Filler, int, int)21100%2100%020301
lambda$_initDataArrayFrom$6(Object, Filler, int, int)21100%2100%020301
lambda$_initDataArrayFrom$5(Object, Filler, int, int)21100%2100%020301
lambda$_initDataArrayFrom$4(Object, Filler, int, int)21100%2100%020301
lambda$_initDataArrayFrom$3(Object, Filler, int, int)21100%2100%020301
lambda$_initDataArrayFrom$2(Object, Filler, int, int)21100%2100%020301
lambda$_initDataArrayFrom$1(Object, Filler, int, int)21100%2100%020301
lambda$_initDataArrayFrom$0(Object, Filler, int, int)21100%2100%020301
lambda$_initDataArrayFrom$8(Object, Filler, int, int)19100%2100%020301
getDataType()16100%1375%130101
_guardGet(String)13100%n/a010101
_guardSet(String)13100%n/a010101
_guardMod(String)13100%n/a010101
_guard(String)12100%2100%020401
_setNDConf(NDConfiguration)11100%2100%020301
constructFor(Device, NDConstructor)9100%n/a010101
_virtualize()8100%n/a010101
_actualize()8100%n/a010101
AbstractNda()6100%n/a010201
getNDConf()6100%n/a010101
_getData()6100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka/AbstractNda.java.html b/docs/coverage/test/html/neureka/AbstractNda.java.html index cdd3b1ae8..930a086ea 100644 --- a/docs/coverage/test/html/neureka/AbstractNda.java.html +++ b/docs/coverage/test/html/neureka/AbstractNda.java.html @@ -153,9 +153,9 @@ _guardSet( "data object" ); Object data = newData == null ? null : newData.getOrNull(); // Note: If the data is null, this might mean the tensor is outsourced (data is somewhere else) - if ( _data != null && _data.getOrNull() != data && data != null && _data.getOrNull() != null ) { - boolean isProbablyDeviceTransfer = ( _data.getOrNull().getClass().isArray() != data.getClass().isArray() ); - if ( !isProbablyDeviceTransfer) + if ( _data != null && _data.getOrNull() != data && data != null && _data.getOrNull() != null ) { + boolean isProbablyDeviceTransfer = ( _data.getOrNull().getClass().isArray() != data.getClass().isArray() ); + if ( !isProbablyDeviceTransfer) _version++; // Autograd must be warned! } _setDataAndCountUsage( newData ); @@ -167,7 +167,7 @@ if ( newData instanceof DeviceData ) ( (DeviceData<?>) newData ).incrementUsageCount(); - _data = newData; // This must be the only place where the data is set!!! + _data = ( newData != null ? newData : (Data<V>) Data.none()); // This must be the only place where the data is set!!! } protected <T> void _initDataArrayFrom( Filler<T> filler ) @@ -254,17 +254,13 @@ * * @return An {@link TensorConstructor} exposing a simple API for configuring a new {@link Tensor} instance. */ - protected static TensorConstructor constructFor(AbstractNda<?, ?> nda, Device<?> targetDevice, NDConstructor ndConstructor ) + protected static TensorConstructor constructFor( Device<?> targetDevice, NDConstructor ndConstructor ) { return - new TensorConstructor( - targetDevice, ndConstructor, - new TensorConstructor.API() { - @Override public void setConf( NDConfiguration conf ) { nda.mut().setNDConf( conf ); } - @Override public void setData( Data o ) { nda._setData( o ); } - @Override public void setIsVirtual( boolean isVirtual ) { nda._setIsVirtual( isVirtual ); } - } - ); + new TensorConstructor( + targetDevice, ndConstructor, + new TensorConstructor.Args() + ); } /** @@ -276,7 +272,7 @@ * It would be unreasonable to allocate an arrays filled entirely with one and the same value item! * <br> */ - protected final void _virtualize() { _setDataAndCountUsage(getDevice().access(this).virtualize()); } + protected final void _virtualize() { _setDataAndCountUsage(getDevice().access(this).virtualize()); } /** * An actual NDArray (tensor) is the opposite to a virtual one. <br> @@ -288,21 +284,21 @@ * This method turns the data of a virtual NDArray into a newly allocated data array matching the * size of the nd-array type... 
<br> */ - protected final void _actualize() { _setDataAndCountUsage(getDevice().access(this).actualize()); } + protected final void _actualize() { _setDataAndCountUsage(getDevice().access(this).actualize()); } protected Object _convertedDataOfType( Class<?> typeClass ) { - DataType<?> newDT = DataType.of( typeClass ); - if ( - newDT.typeClassImplements( NumericType.class ) + DataType<?> newDT = DataType.of( typeClass ); + if ( + newDT.typeClassImplements( NumericType.class ) && - getDataType().typeClassImplements( NumericType.class ) + getDataType().typeClassImplements( NumericType.class ) ) { - NumericType<?,Object, ?, Object> targetType = (NumericType<?, Object,?, Object>) newDT.getTypeClassInstance(NumericType.class); - return targetType.readForeignDataFrom( iterator(), this.size() ); + NumericType<?,Object, ?, Object> targetType = (NumericType<?, Object,?, Object>) newDT.getTypeClassInstance(NumericType.class); + return targetType.readForeignDataFrom( iterator(), this.size() ); } else - return DataConverter.get().convert( _getRawData(), newDT.getRepresentativeType() ); + return DataConverter.get().convert( _getRawData(), newDT.getRepresentativeType() ); } /** @@ -310,8 +306,8 @@ */ @Override public boolean is( Class<?> typeClass ) { - DataType<?> type = DataType.of( typeClass ); - return type == _getData().dataType(); + DataType<?> type = DataType.of( typeClass ); + return type == _getData().dataType(); } /** @@ -322,14 +318,9 @@ */ protected void _setNDConf( NDConfiguration ndConfiguration ) { - _guardSet( "ND-Configuration" ); - if ( _NDConf != null && ndConfiguration != null ) { - int s1 = Arrays.stream( _NDConf.shape() ).map( Math::abs ).reduce( 1, ( a, b ) -> a * b ); - int s2 = Arrays.stream( ndConfiguration.shape() ).map( Math::abs ).reduce( 1, ( a, b ) -> a * b ); - assert s1 == s2; - } - _NDConf = ndConfiguration; - } + _guardSet( "ND-Configuration" ); + _NDConf = ( ndConfiguration != null ? ndConfiguration : NDConfiguration.none() ); + } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka/Data.html b/docs/coverage/test/html/neureka/Data.html index 502e477c7..4c558014b 100644 --- a/docs/coverage/test/html/neureka/Data.html +++ b/docs/coverage/test/html/neureka/Data.html @@ -1 +1 @@ -Data

Data

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total54 of 9543%3 of 650%915717612
as(Class)191442%2250%231401
get()5758%1150%120301
of(Class, Object[])50%n/a111111
of(byte[])50%n/a111111
of(short[])50%n/a111111
of(boolean[])50%n/a111111
of(char[])50%n/a111111
of(String[])50%n/a111111
of(float[])5100%n/a010101
of(double[])5100%n/a010101
of(int[])5100%n/a010101
of(long[])5100%n/a010101
\ No newline at end of file +Data

Data

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total54 of 9744%3 of 650%916718613
as(Class)191442%2250%231401
get()5758%1150%120301
of(Class, Object[])50%n/a111111
of(byte[])50%n/a111111
of(short[])50%n/a111111
of(boolean[])50%n/a111111
of(char[])50%n/a111111
of(String[])50%n/a111111
of(float[])5100%n/a010101
of(double[])5100%n/a010101
of(int[])5100%n/a010101
of(long[])5100%n/a010101
none()2100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka/Data.java.html b/docs/coverage/test/html/neureka/Data.java.html index 0ca98622c..434327a35 100644 --- a/docs/coverage/test/html/neureka/Data.java.html +++ b/docs/coverage/test/html/neureka/Data.java.html @@ -16,25 +16,36 @@ */ public interface Data<V> { - static <V> Data<V> of( Class<V> type, V... data ) { return CPU.get().allocate( type, data ); } + /** + * This is a static factory method which returns a {@link Data} object + * which does not contain any data. It is a sort of no-operation null object + * which can be used to represent the absence of data. + * A deleted tensor will typically have a {@link Data} object which does not contain any data. + * + * @return A {@link Data} object which does not contain any data. + */ + static Data<Void> none() { return NoOpData.INSTANCE; } + + + static <V> Data<V> of( Class<V> type, V... data ) { return CPU.get().allocate( type, data ); } - static Data<Float> of( float... items ) { return CPU.get().allocate( Float.class, items ); } + static Data<Float> of( float... items ) { return CPU.get().allocate( Float.class, items ); } - static Data<Double> of( double... items ) { return CPU.get().allocate( Double.class, items ); } + static Data<Double> of( double... items ) { return CPU.get().allocate( Double.class, items ); } - static Data<Integer> of( int... items ) { return CPU.get().allocate( Integer.class, items ); } + static Data<Integer> of( int... items ) { return CPU.get().allocate( Integer.class, items ); } - static Data<Long> of( long... items ) { return CPU.get().allocate( Long.class, items ); } + static Data<Long> of( long... items ) { return CPU.get().allocate( Long.class, items ); } - static Data<Byte> of( byte... items ) { return CPU.get().allocate( Byte.class, items ); } + static Data<Byte> of( byte... items ) { return CPU.get().allocate( Byte.class, items ); } - static Data<Short> of( short... items ) { return CPU.get().allocate( Short.class, items ); } + static Data<Short> of( short... items ) { return CPU.get().allocate( Short.class, items ); } - static Data<Boolean> of( boolean... items ) { return CPU.get().allocate( Boolean.class, items ); } + static Data<Boolean> of( boolean... items ) { return CPU.get().allocate( Boolean.class, items ); } - static Data<Character> of( char... items ) { return CPU.get().allocate( Character.class, items ); } + static Data<Character> of( char... items ) { return CPU.get().allocate( Character.class, items ); } - static Data<String> of( String... items ) { return CPU.get().allocate( String.class, items ); } + static Data<String> of( String... items ) { return CPU.get().allocate( String.class, items ); } /** * @return The owner of this data array wrapper (the device which allocated the memory). @@ -63,9 +74,9 @@ * @return The raw data object underlying a nd-array/tensor. */ default Object get() { - Object data = getOrNull(); - if ( data == null ) throw new NullPointerException("The data reference is missing!"); - return data; + Object data = getOrNull(); + if ( data == null ) throw new NullPointerException("The data reference is missing!"); + return data; } /** @@ -78,10 +89,10 @@ * @return The raw data object underlying a nd-array/tensor. 
*/ default <D> D as( Class<D> dataType ) { - Object data = getOrNull(); - if ( data != null && !dataType.isAssignableFrom(data.getClass()) ) - throw new IllegalArgumentException("Provided data type '"+dataType+"' is not assignable from '"+data.getClass()+"'."); - return dataType.cast(data); + Object data = getOrNull(); + if ( data != null && !dataType.isAssignableFrom(data.getClass()) ) + throw new IllegalArgumentException("Provided data type '"+dataType+"' is not assignable from '"+data.getClass()+"'."); + return dataType.cast(data); } /** @@ -100,4 +111,4 @@ */ int usages(); } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka/MutateNda.html b/docs/coverage/test/html/neureka/MutateNda.html index 4dfcd26b5..c4bc88a3d 100644 --- a/docs/coverage/test/html/neureka/MutateNda.html +++ b/docs/coverage/test/html/neureka/MutateNda.html @@ -1 +1 @@ -MutateNda

MutateNda

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total32 of 320%0 of 0n/a222222
set(int, int, int, Object)180%n/a111111
set(int, int, Object)140%n/a111111
\ No newline at end of file +MutateNda

MutateNda

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total32 of 320%0 of 0n/a222222
set(int, int, int, Object)180%n/a111111
set(int, int, Object)140%n/a111111
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka/MutateNda.java.html b/docs/coverage/test/html/neureka/MutateNda.java.html index 044d0c0a9..b2c0e8824 100644 --- a/docs/coverage/test/html/neureka/MutateNda.java.html +++ b/docs/coverage/test/html/neureka/MutateNda.java.html @@ -283,4 +283,4 @@ void set( V value ); } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka/MutateTensor.html b/docs/coverage/test/html/neureka/MutateTensor.html index 505a4f542..c41077c40 100644 --- a/docs/coverage/test/html/neureka/MutateTensor.html +++ b/docs/coverage/test/html/neureka/MutateTensor.html @@ -1 +1 @@ -MutateTensor

MutateTensor

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total29 of 9770%3 of 650%3831305
getDataForWriting(Class)292647%3350%343901
set(int, int, int, Object)18100%n/a010101
set(int, int, Object)14100%n/a010101
set(int[], Object)5100%n/a010101
set(int, Object)5100%n/a010101
\ No newline at end of file +MutateTensor

MutateTensor

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total29 of 9770%3 of 650%3831305
getDataForWriting(Class)292647%3350%343901
set(int, int, int, Object)18100%n/a010101
set(int, int, Object)14100%n/a010101
set(int[], Object)5100%n/a010101
set(int, Object)5100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka/MutateTensor.java.html b/docs/coverage/test/html/neureka/MutateTensor.java.html index 699c06142..681f29e2e 100644 --- a/docs/coverage/test/html/neureka/MutateTensor.java.html +++ b/docs/coverage/test/html/neureka/MutateTensor.java.html @@ -328,4 +328,4 @@ Tensor<T> setIsVirtual(boolean isVirtual ); } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka/Nda$Item.html b/docs/coverage/test/html/neureka/Nda$Item.html index 0743ad982..4c015e3c2 100644 --- a/docs/coverage/test/html/neureka/Nda$Item.html +++ b/docs/coverage/test/html/neureka/Nda$Item.html @@ -1 +1 @@ -Nda.Item

Nda.Item

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total14 of 6478%4 of 1471%41321416
map(Function)120%20%222211
exists()1685%1150%120101
doesNotExist()1685%1150%120101
orElse(Object)16100%4100%030401
get()12100%2100%020401
toOptional()10100%2100%020201
\ No newline at end of file +Nda.Item

Nda.Item

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total14 of 6478%4 of 1471%41321416
map(Function)120%20%222211
exists()1685%1150%120101
doesNotExist()1685%1150%120101
orElse(Object)16100%4100%030401
get()12100%2100%020401
toOptional()10100%2100%020201
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka/Nda.html b/docs/coverage/test/html/neureka/Nda.html index 779f79db5..9a6caf13d 100644 --- a/docs/coverage/test/html/neureka/Nda.html +++ b/docs/coverage/test/html/neureka/Nda.html @@ -1 +1 @@ -Nda

Nda

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total152 of 62075%15 of 5070%289019991665
of(float[])120%n/a111111
of(byte[])120%n/a111111
of(long[])120%n/a111111
of(short[])120%n/a111111
count(Predicate)101152%3125%230201
lambda$flatMap$5(Function, Object)91359%3125%233601
none(Predicate)9950%3125%230201
of(Shape, Object[])60%n/a111111
getDataAs(Class)60%n/a111111
lambda$shaped$6(List, List)60%n/a111111
item(int[])55691%1583%140801
any(Predicate)5964%1150%120201
of(Shape, double[])50%n/a111111
of(Shape, float[])50%n/a111111
of(Shape, byte[])50%n/a111111
of(Shape, int[])50%n/a111111
of(Shape, long[])50%n/a111111
of(Shape, short[])50%n/a111111
of(Shape, boolean[])50%n/a111111
minItem(Comparator)31178%1150%120201
maxItem(Comparator)31178%1150%120201
of(Iterable)30%n/a111111
mut()30%n/a111111
isFullSlice()1990%2250%230101
stream()110100%20100%01102001
toString(Consumer)15100%n/a010301
every(Predicate)14100%2100%020201
of(double)12100%n/a010101
of(double[])12100%n/a010101
of(int[])12100%n/a010101
of(boolean[])12100%n/a010101
getLabel()10100%n/a010101
shaped(int[])9100%n/a010101
flatMap(Function)6100%n/a010101
getItems()6100%n/a010101
item(int)6100%n/a010101
getItemsAs(Class)6100%n/a010101
toString(NDPrintSettings)6100%n/a010101
lambda$stream$4(Object, int)6100%n/a010101
lambda$stream$3(Object, int)6100%n/a010101
lambda$stream$2(Object, int)6100%n/a010101
lambda$stream$1(Object, int)6100%n/a010101
lambda$stream$0(Object, int)6100%n/a010101
of(Class)5100%n/a010101
filter(Predicate)5100%n/a010101
lambda$shaped$7(int[], List)5100%n/a010101
item()4100%n/a010101
ofStrings()3100%n/a010101
ofInts()3100%n/a010101
ofDoubles()3100%n/a010101
ofFloats()3100%n/a010101
ofLongs()3100%n/a010101
ofBooleans()3100%n/a010101
ofChars()3100%n/a010101
ofBytes()3100%n/a010101
ofShorts()3100%n/a010101
ofObjects()3100%n/a010101
ofNumbers()3100%n/a010101
ofBigDecimals()3100%n/a010101
of(Object[])3100%n/a010101
of(List)3100%n/a010101
label()3100%n/a010101
itemType()3100%n/a010101
items()3100%n/a010101
getItem()3100%n/a010101
\ No newline at end of file +Nda

Nda

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total158 of 62074%17 of 5066%309019991665
of(float[])120%n/a111111
of(byte[])120%n/a111111
of(long[])120%n/a111111
of(short[])120%n/a111111
count(Predicate)101152%3125%230201
lambda$flatMap$5(Function, Object)91359%3125%233601
none(Predicate)9950%3125%230201
stream()610494%21890%21102001
of(Shape, Object[])60%n/a111111
getDataAs(Class)60%n/a111111
lambda$shaped$6(List, List)60%n/a111111
item(int[])55691%1583%140801
any(Predicate)5964%1150%120201
of(Shape, double[])50%n/a111111
of(Shape, float[])50%n/a111111
of(Shape, byte[])50%n/a111111
of(Shape, int[])50%n/a111111
of(Shape, long[])50%n/a111111
of(Shape, short[])50%n/a111111
of(Shape, boolean[])50%n/a111111
minItem(Comparator)31178%1150%120201
maxItem(Comparator)31178%1150%120201
of(Iterable)30%n/a111111
mut()30%n/a111111
isFullSlice()1990%2250%230101
toString(Consumer)15100%n/a010301
every(Predicate)14100%2100%020201
of(double)12100%n/a010101
of(double[])12100%n/a010101
of(int[])12100%n/a010101
of(boolean[])12100%n/a010101
getLabel()10100%n/a010101
shaped(int[])9100%n/a010101
flatMap(Function)6100%n/a010101
getItems()6100%n/a010101
item(int)6100%n/a010101
getItemsAs(Class)6100%n/a010101
toString(NDPrintSettings)6100%n/a010101
lambda$stream$4(Object, int)6100%n/a010101
lambda$stream$3(Object, int)6100%n/a010101
lambda$stream$2(Object, int)6100%n/a010101
lambda$stream$1(Object, int)6100%n/a010101
lambda$stream$0(Object, int)6100%n/a010101
of(Class)5100%n/a010101
filter(Predicate)5100%n/a010101
lambda$shaped$7(int[], List)5100%n/a010101
item()4100%n/a010101
ofStrings()3100%n/a010101
ofInts()3100%n/a010101
ofDoubles()3100%n/a010101
ofFloats()3100%n/a010101
ofLongs()3100%n/a010101
ofBooleans()3100%n/a010101
ofChars()3100%n/a010101
ofBytes()3100%n/a010101
ofShorts()3100%n/a010101
ofObjects()3100%n/a010101
ofNumbers()3100%n/a010101
ofBigDecimals()3100%n/a010101
of(Object[])3100%n/a010101
of(List)3100%n/a010101
label()3100%n/a010101
itemType()3100%n/a010101
items()3100%n/a010101
getItem()3100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka/Nda.java.html b/docs/coverage/test/html/neureka/Nda.java.html index 8be774bd0..502f5ee76 100644 --- a/docs/coverage/test/html/neureka/Nda.java.html +++ b/docs/coverage/test/html/neureka/Nda.java.html @@ -504,8 +504,8 @@ else stream = (Stream<V>) Arrays.stream( (Object[]) rawItems ); - boolean executeInParallel = ( this.size() > 1_000 ); - return executeInParallel ? stream.parallel() : stream; + boolean executeInParallel = ( this.size() > 1_000 ); + return executeInParallel ? stream.parallel() : stream; } /** @@ -1178,4 +1178,4 @@ String toString(); } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka/Neureka$Settings$AutoGrad.html b/docs/coverage/test/html/neureka/Neureka$Settings$AutoGrad.html index 2f86021eb..1d0c9ff7f 100644 --- a/docs/coverage/test/html/neureka/Neureka$Settings$AutoGrad.html +++ b/docs/coverage/test/html/neureka/Neureka$Settings$AutoGrad.html @@ -1 +1 @@ -Neureka.Settings.AutoGrad

Neureka.Settings.AutoGrad

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 108100%3 of 1478%317026010
toString()27100%n/a010501
Neureka.Settings.AutoGrad(Neureka.Settings)18100%n/a010501
setIsPreventingInlineOperations(boolean)14100%1375%130301
setIsRetainingPendingErrorForJITProp(boolean)14100%1375%130301
setIsApplyingGradientWhenTensorIsUsed(boolean)14100%1375%130301
setIsApplyingGradientWhenRequested(boolean)9100%2100%020301
isPreventingInlineOperations()3100%n/a010101
isRetainingPendingErrorForJITProp()3100%n/a010101
isApplyingGradientWhenTensorIsUsed()3100%n/a010101
isApplyingGradientWhenRequested()3100%n/a010101
\ No newline at end of file +Neureka.Settings.AutoGrad

Neureka.Settings.AutoGrad

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 108100%3 of 1478%317026010
toString()27100%n/a010501
Neureka.Settings.AutoGrad(Neureka.Settings)18100%n/a010501
setIsPreventingInlineOperations(boolean)14100%1375%130301
setIsRetainingPendingErrorForJITProp(boolean)14100%1375%130301
setIsApplyingGradientWhenTensorIsUsed(boolean)14100%1375%130301
setIsApplyingGradientWhenRequested(boolean)9100%2100%020301
isPreventingInlineOperations()3100%n/a010101
isRetainingPendingErrorForJITProp()3100%n/a010101
isApplyingGradientWhenTensorIsUsed()3100%n/a010101
isApplyingGradientWhenRequested()3100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka/Neureka$Settings$DType.html b/docs/coverage/test/html/neureka/Neureka$Settings$DType.html index fbe1068f6..8457ab028 100644 --- a/docs/coverage/test/html/neureka/Neureka$Settings$DType.html +++ b/docs/coverage/test/html/neureka/Neureka$Settings$DType.html @@ -1 +1 @@ -Neureka.Settings.DType

Neureka.Settings.DType

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total2 of 5796%2 of 450%2901407
setDefaultDataTypeClass(Class)1888%1150%120301
setIsAutoConvertingExternalDataToJVMTypes(boolean)1888%1150%120301
toString()17100%n/a010201
Neureka.Settings.DType(Neureka.Settings)12100%n/a010301
getDefaultDataType()4100%n/a010101
getDefaultDataTypeClass()3100%n/a010101
getIsAutoConvertingExternalDataToJVMTypes()3100%n/a010101
\ No newline at end of file +Neureka.Settings.DType

Neureka.Settings.DType

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total2 of 5796%2 of 450%2901407
setDefaultDataTypeClass(Class)1888%1150%120301
setIsAutoConvertingExternalDataToJVMTypes(boolean)1888%1150%120301
toString()17100%n/a010201
Neureka.Settings.DType(Neureka.Settings)12100%n/a010301
getDefaultDataType()4100%n/a010101
getDefaultDataTypeClass()3100%n/a010101
getIsAutoConvertingExternalDataToJVMTypes()3100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka/Neureka$Settings$Debug.html b/docs/coverage/test/html/neureka/Neureka$Settings$Debug.html index 708d7e9eb..74daec89c 100644 --- a/docs/coverage/test/html/neureka/Neureka$Settings$Debug.html +++ b/docs/coverage/test/html/neureka/Neureka$Settings$Debug.html @@ -1 +1 @@ -Neureka.Settings.Debug

Neureka.Settings.Debug

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total1 of 4897%1 of 475%1801306
setIsDeletingIntermediateTensors(boolean)1888%1150%120301
Neureka.Settings.Debug(Neureka.Settings)12100%n/a010301
toString()12100%n/a010201
setIsKeepingDerivativeTargetPayloads(boolean)9100%2100%020301
isKeepingDerivativeTargetPayloads()3100%n/a010101
isDeletingIntermediateTensors()3100%n/a010101
\ No newline at end of file +Neureka.Settings.Debug

Neureka.Settings.Debug

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total1 of 4897%1 of 475%1801306
setIsDeletingIntermediateTensors(boolean)1888%1150%120301
Neureka.Settings.Debug(Neureka.Settings)12100%n/a010301
toString()12100%n/a010201
setIsKeepingDerivativeTargetPayloads(boolean)9100%2100%020301
isKeepingDerivativeTargetPayloads()3100%n/a010101
isDeletingIntermediateTensors()3100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka/Neureka$Settings$NDim.html b/docs/coverage/test/html/neureka/Neureka$Settings$NDim.html index 52d2cff03..5bcf51b8a 100644 --- a/docs/coverage/test/html/neureka/Neureka$Settings$NDim.html +++ b/docs/coverage/test/html/neureka/Neureka$Settings$NDim.html @@ -1 +1 @@ -Neureka.Settings.NDim

Neureka.Settings.NDim

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 33100%0 of 2100%050804
toString()12100%n/a010201
Neureka.Settings.NDim(Neureka.Settings)9100%n/a010201
setIsOnlyUsingDefaultNDConfiguration(boolean)9100%2100%020301
isOnlyUsingDefaultNDConfiguration()3100%n/a010101
\ No newline at end of file +Neureka.Settings.NDim

Neureka.Settings.NDim

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 33100%0 of 2100%050804
toString()12100%n/a010201
Neureka.Settings.NDim(Neureka.Settings)9100%n/a010201
setIsOnlyUsingDefaultNDConfiguration(boolean)9100%2100%020301
isOnlyUsingDefaultNDConfiguration()3100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka/Neureka$Settings$View.html b/docs/coverage/test/html/neureka/Neureka$Settings$View.html index 57b5d13f4..5a758e09b 100644 --- a/docs/coverage/test/html/neureka/Neureka$Settings$View.html +++ b/docs/coverage/test/html/neureka/Neureka$Settings$View.html @@ -1 +1 @@ -Neureka.Settings.View

Neureka.Settings.View

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 37100%0 of 0n/a050505
Neureka.Settings.View(Neureka.Settings)13100%n/a010101
toString()12100%n/a010201
ndArrays(Consumer)5100%n/a010101
lambda$new$0(Neureka.Settings)4100%n/a010101
getNDPrintSettings()3100%n/a010101
\ No newline at end of file +Neureka.Settings.View

Neureka.Settings.View

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total0 of 37100%0 of 0n/a050505
Neureka.Settings.View(Neureka.Settings)13100%n/a010101
toString()12100%n/a010201
ndArrays(Consumer)5100%n/a010101
lambda$new$0(Neureka.Settings)4100%n/a010101
getNDPrintSettings()3100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka/Neureka$Settings.html b/docs/coverage/test/html/neureka/Neureka$Settings.html index 800615a5b..1cecc2575 100644 --- a/docs/coverage/test/html/neureka/Neureka$Settings.html +++ b/docs/coverage/test/html/neureka/Neureka$Settings.html @@ -1 +1 @@ -Neureka.Settings

Neureka.Settings

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total3 of 15998%2 of 666%218133015
notModifiable()31885%2466%241601
Neureka.Settings(Neureka)39100%n/a010801
toString()37100%n/a010201
debug(Object)8100%n/a010201
autograd(Object)8100%n/a010201
view(Object)8100%n/a010201
ndim(Object)8100%n/a010201
dtype(Object)8100%n/a010201
setIsLocked(boolean)4100%n/a010101
debug()3100%n/a010101
autograd()3100%n/a010101
view()3100%n/a010101
ndim()3100%n/a010101
dtype()3100%n/a010101
isLocked()3100%n/a010101
\ No newline at end of file +Neureka.Settings

Neureka.Settings

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total3 of 15998%2 of 666%218133015
notModifiable()31885%2466%241601
Neureka.Settings(Neureka)39100%n/a010801
toString()37100%n/a010201
debug(Object)8100%n/a010201
autograd(Object)8100%n/a010201
view(Object)8100%n/a010201
ndim(Object)8100%n/a010201
dtype(Object)8100%n/a010201
setIsLocked(boolean)4100%n/a010101
debug()3100%n/a010101
autograd()3100%n/a010101
view()3100%n/a010101
ndim()3100%n/a010101
dtype()3100%n/a010101
isLocked()3100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka/Neureka$Utility.html b/docs/coverage/test/html/neureka/Neureka$Utility.html index e20c958d1..29fb79d67 100644 --- a/docs/coverage/test/html/neureka/Neureka$Utility.html +++ b/docs/coverage/test/html/neureka/Neureka$Utility.html @@ -1 +1 @@ -Neureka.Utility

Neureka.Utility

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total53 of 11654%2 of 875%2772403
readResource(String)293957%1583%1441301
isPresent(String, Supplier)242146%1150%1231001
Neureka.Utility()3100%n/a010101
\ No newline at end of file +Neureka.Utility

Neureka.Utility

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total34 of 11670%2 of 875%2762403
readResource(String)293957%1583%1441301
isPresent(String, Supplier)54088%1150%1221001
Neureka.Utility()3100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka/Neureka.html b/docs/coverage/test/html/neureka/Neureka.html index 3b37e5355..d755780e3 100644 --- a/docs/coverage/test/html/neureka/Neureka.html +++ b/docs/coverage/test/html/neureka/Neureka.html @@ -1 +1 @@ -Neureka

Neureka

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total101 of 36372%15 of 3658%16391681121
backend()307370%5964%5852301
reset()21725%n/a015901
get()152865%1375%1321101
set(Neureka)15937%1150%122501
_illegalStateFor(String)140%n/a112211
_currentThreadIsNotAuthorized()2777%1150%120101
canAccessOpenCLDevice()11894%2250%230501
canAccessOpenCL()11794%2250%230501
lambda$canAccessOpenCLDevice$2(CLBackend)1787%1150%120101
lambda$canAccessOpenCL$1(CLBackend)1787%1150%120101
toString()22100%n/a010101
Neureka()15100%n/a010401
static {...}14100%n/a010501
configure(Object)12100%1150%120301
settings(Object)8100%n/a010201
setBackend(BackendContext)4100%n/a010101
settings()3100%n/a010101
utility()3100%n/a010101
getBackend()3100%n/a010101
lambda$static$0()3100%n/a010101
version()2100%n/a010101
\ No newline at end of file +Neureka

Neureka

ElementMissed InstructionsCov.Missed BranchesCov.MissedCxtyMissedLinesMissedMethods
Total146 of 36359%19 of 3647%18392281321
backend()356866%5964%5852301
reset()21725%n/a015901
get()152865%1375%1321101
set(Neureka)15937%1150%122501
canAccessOpenCLDevice()14526%3125%233501
canAccessOpenCL()14422%3125%233501
_illegalStateFor(String)140%n/a112211
lambda$canAccessOpenCLDevice$2(CLBackend)80%20%221111
lambda$canAccessOpenCL$1(CLBackend)80%20%221111
_currentThreadIsNotAuthorized()2777%1150%120101
toString()22100%n/a010101
Neureka()15100%n/a010401
static {...}14100%n/a010501
configure(Object)12100%1150%120301
settings(Object)8100%n/a010201
setBackend(BackendContext)4100%n/a010101
settings()3100%n/a010101
utility()3100%n/a010101
getBackend()3100%n/a010101
lambda$static$0()3100%n/a010101
version()2100%n/a010101
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka/Neureka.java.html b/docs/coverage/test/html/neureka/Neureka.java.html index dcb130b14..d025d4d73 100644 --- a/docs/coverage/test/html/neureka/Neureka.java.html +++ b/docs/coverage/test/html/neureka/Neureka.java.html @@ -78,7 +78,7 @@ /** * The current semantic version of this library build. */ - private static String _VERSION = "1.0.0"; + private static String _VERSION = "1.0.1"; /** * The truth value determining if OpenCL is available or not. @@ -139,9 +139,9 @@ _backend.set( new CPUBackend() ); // CPU (JVM) is always available! if ( _OPENCL_AVAILABLE ) - _backend.set( new CLBackend() ); // OpenCL is available if the jocl dependency can find OpenCL drivers! + _backend.set( new CLBackend() ); // OpenCL is available if the jocl dependency can find OpenCL drivers! else - _LOG.debug( Messages.clContextCreationFailed() ); + _LOG.debug( Messages.clContextCreationFailed() ); } return _backend; } @@ -217,10 +217,10 @@ */ public boolean canAccessOpenCL() { return _OPENCL_AVAILABLE && - get().backend() - .find(CLBackend.class) - .map( it -> it.getTotalNumberOfDevices() > 0 ) - .orElse(false); + get().backend() + .find(CLBackend.class) + .map( it -> it.getTotalNumberOfDevices() > 0 ) + .orElse(false); } /** @@ -228,10 +228,10 @@ */ public boolean canAccessOpenCLDevice() { return canAccessOpenCL() && - get().backend() - .find(CLBackend.class) - .map( it -> it.getTotalNumberOfDevices() > 0 ) - .orElse(false); + get().backend() + .find(CLBackend.class) + .map( it -> it.getTotalNumberOfDevices() > 0 ) + .orElse(false); } /** @@ -772,14 +772,14 @@ boolean found = false; String cause = " unknown "; try { - Class.forName( className ); - found = true; - } catch ( Throwable ex ) {// Class or one of its dependencies is not present... - cause = ex.getMessage(); + Class.forName( className ); + found = true; + } catch ( Throwable ex ) {// Class or one of its dependencies is not present... + cause = ex.getMessage(); } finally { String tipMessage = tip.get().replace("\n", "\n "+" ").trim(); if ( !found ) - _LOG.debug( + _LOG.debug( "Neureka:\n" + " info: Failed to load class '" + className + "'!" + "\n" + " cause: " + cause + "\n" + @@ -792,4 +792,4 @@ } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka/NoOpData.html b/docs/coverage/test/html/neureka/NoOpData.html new file mode 100644 index 000000000..51cf14ca7 --- /dev/null +++ b/docs/coverage/test/html/neureka/NoOpData.html @@ -0,0 +1 @@ +NoOpData

NoOpData — class coverage table. Total: 5 of 14 instructions missed (64%), no branches, 5 methods; dataType() and usages() are uncovered, static {...}, owner() and getOrNull() are fully covered.
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka/NoOpData.java.html b/docs/coverage/test/html/neureka/NoOpData.java.html
new file mode 100644
index 000000000..1026170e3
--- /dev/null
+++ b/docs/coverage/test/html/neureka/NoOpData.java.html
@@ -0,0 +1,33 @@
+NoOpData.java

NoOpData.java

package neureka;
+
+import neureka.devices.Device;
+import neureka.devices.host.CPU;
+import neureka.dtype.DataType;
+
+final class NoOpData implements Data<Void>
+{
+    static final NoOpData INSTANCE = new NoOpData();
+
+    private NoOpData() {}
+
+    @Override
+    public Device<Void> owner() {
+        return (Device) CPU.get();
+    }
+
+    @Override
+    public Object getOrNull() {
+        return null;
+    }
+
+    @Override
+    public DataType<Void> dataType() {
+        return DataType.of(Void.class);
+    }
+
+    @Override
+    public int usages() {
+        return 0;
+    }
+}
+
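The NoOpData class added above is a null-object implementation of Data<Void>. The lines below are an illustration only (not part of this diff, and assuming package-internal access to the INSTANCE field) of what its overrides guarantee to any caller that receives it instead of a real data buffer:

    Data<Void> none = NoOpData.INSTANCE;
    assert none.getOrNull() == null;                          // there is no underlying data array
    assert none.usages() == 0;                                // the placeholder is never reference counted
    assert none.owner() == CPU.get();                         // it reports the CPU (JVM) device as its owner
    assert none.dataType().getItemTypeClass() == Void.class;  // and a Void item type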
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka/Shape$1.html b/docs/coverage/test/html/neureka/Shape$1.html
index 558d15c38..d22b053b9 100644
--- a/docs/coverage/test/html/neureka/Shape$1.html
+++ b/docs/coverage/test/html/neureka/Shape$1.html
@@ -1 +1 @@
-Shape.new Shape() {...} — class coverage table: 2 of 101 instructions missed (98%), 2 of 18 branches missed (88%), 6 methods; only equals(Object) (96%) is not fully covered.
\ No newline at end of file
+Shape.new Shape() {...} — regenerated with identical coverage figures.
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka/Shape$2.html b/docs/coverage/test/html/neureka/Shape$2.html
index 416b6fefb..5d3b5291f 100644
--- a/docs/coverage/test/html/neureka/Shape$2.html
+++ b/docs/coverage/test/html/neureka/Shape$2.html
@@ -1 +1 @@
-Shape.new Iterator() {...} — class coverage table: 0 of 31 instructions missed (100%), 3 methods (next(), hasNext(), {...}), all fully covered.
\ No newline at end of file
+Shape.new Iterator() {...} — regenerated with identical coverage figures.
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka/Shape.html b/docs/coverage/test/html/neureka/Shape.html
index a822e67d0..e087ee770 100644
--- a/docs/coverage/test/html/neureka/Shape.html
+++ b/docs/coverage/test/html/neureka/Shape.html
@@ -1 +1 @@
-Shape — interface coverage table: 49 of 250 instructions missed (80%), 10 of 30 branches missed (66%), 16 methods; filter(Predicate) and slice(int) are uncovered, slice(int, int) (88%), every(Predicate) (78%) and any(Predicate) (89%) are partially covered, the rest fully.
\ No newline at end of file
+Shape — regenerated with identical coverage figures.
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka/Shape.java.html b/docs/coverage/test/html/neureka/Shape.java.html
index cc6d5a03b..f48f11035 100644
--- a/docs/coverage/test/html/neureka/Shape.java.html
+++ b/docs/coverage/test/html/neureka/Shape.java.html
@@ -207,4 +207,4 @@
 }
 }
-
\ No newline at end of file
+
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka/Tensor$ImageType.html b/docs/coverage/test/html/neureka/Tensor$ImageType.html
index 918f08d8f..db57af31c 100644
--- a/docs/coverage/test/html/neureka/Tensor$ImageType.html
+++ b/docs/coverage/test/html/neureka/Tensor$ImageType.html
@@ -1 +1 @@
-Tensor.ImageType — class coverage table: 0 of 117 instructions missed (100%), 2 methods (static initializer 102 instructions, constructor 15), both fully covered.
\ No newline at end of file
+Tensor.ImageType — regenerated: 0 of 162 instructions missed (100%); the static initializer grew to 147 instructions, the constructor stays at 15.
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka/Tensor.html b/docs/coverage/test/html/neureka/Tensor.html
index a8c86de2b..a4f84a23d 100644
--- a/docs/coverage/test/html/neureka/Tensor.html
+++ b/docs/coverage/test/html/neureka/Tensor.html
@@ -1 +1 @@
-Tensor — interface coverage table: 246 of 2,919 instructions missed (91%), 37 of 156 branches missed (76%), 200 methods; the uncovered or partially covered entries are the of(Tensor, char, Tensor[, char, Tensor]) factories, xor(Tensor)/xor(double), relu(), get(Object)/get(Number), of(Iterable), of(Class, Shape, List), lambda$shaped$5, sum()/min()/max() (46%), matMul(Tensor) (62%) and the toString overloads, while the bulk of the factory and operator methods are at 100%.
\ No newline at end of file
+Tensor — regenerated table with the same 200 methods; totals now 248 of 2,919 instructions missed (91%) and 38 of 156 branches missed (75%), the only per-method change being toString(Consumer), which drops from 100% to 77%.
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka/Tensor.java.html b/docs/coverage/test/html/neureka/Tensor.java.html
index 39baf3e15..5cacea591 100644
--- a/docs/coverage/test/html/neureka/Tensor.java.html
+++ b/docs/coverage/test/html/neureka/Tensor.java.html
@@ -2642,7 +2642,7 @@
 /** {@inheritDoc} */
 @Override
 default String toString( Consumer<NDPrintSettings> configurator ) {
-    if ( this.isDeleted() ) return "deleted";
+    if ( this.isDeleted() ) return "deleted";
     return Nda.super.toString( configurator );
 }
@@ -2704,4 +2704,4 @@
 }
 }
-
\ No newline at end of file
+
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka/TensorConstructor$Args.html b/docs/coverage/test/html/neureka/TensorConstructor$Args.html
new file mode 100644
index 000000000..b6f221a7e
--- /dev/null
+++ b/docs/coverage/test/html/neureka/TensorConstructor$Args.html
@@ -0,0 +1 @@
+TensorConstructor.Args — class coverage table: 0 of 25 instructions missed (100%), 7 methods (setIsVirtual, setConf, setData, the constructor, getConf, getData, isVirtual), all fully covered.
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka/TensorConstructor.html b/docs/coverage/test/html/neureka/TensorConstructor.html
index df6639e80..740c843fe 100644
--- a/docs/coverage/test/html/neureka/TensorConstructor.html
+++ b/docs/coverage/test/html/neureka/TensorConstructor.html
@@ -1 +1 @@
-TensorConstructor — class coverage table: 5 of 254 instructions missed (98%), 3 of 22 branches missed (86%), 7 methods; newSeeded(Class, Arg.Seed) (89%) and newPopulatedFromOne(Object, Class) (97%) are the only partially covered entries, and the constructor is listed as TensorConstructor(Device, NDConstructor, TensorConstructor.API).
\ No newline at end of file
+TensorConstructor — regenerated table: 4 of 263 instructions missed (98%), 2 of 20 branches missed (90%), 7 methods; newPopulatedFromOne(Object, Class) is now fully covered, newSeeded(Class, Arg.Seed) stays at 90%, and the constructor is now listed with the new TensorConstructor.Args parameter.
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka/TensorConstructor.java.html b/docs/coverage/test/html/neureka/TensorConstructor.java.html index 9063c30e6..7c24e4af5 100644 --- a/docs/coverage/test/html/neureka/TensorConstructor.java.html +++ b/docs/coverage/test/html/neureka/TensorConstructor.java.html @@ -23,17 +23,7 @@ */ final class TensorConstructor { - /** - * An interface defining methods for configuring a {@link Tensor} - * in the making... - */ - public interface API { - void setConf( NDConfiguration conf ); - void setData( Data<?> o ); - void setIsVirtual( boolean isVirtual ); - } - - private final API _API; + private final Args _Args; private final Device<Object> _targetDevice; private final NDConstructor _ndConstructor; @@ -41,15 +31,15 @@ * * @param targetDevice The {@link Device} to be used for the construction of the {@link Tensor} * @param ndConstructor A producer of the {@link NDConfiguration} interface implementation. - * @param API An implementation of the {@link API} interface. + * @param Args An implementation of the {@link Args} interface. */ - public TensorConstructor(Device<?> targetDevice, NDConstructor ndConstructor, API API ) { - LogUtil.nullArgCheck( targetDevice, "targetDevice", Device.class, "Cannot construct a tensor without target device." ); - LogUtil.nullArgCheck( ndConstructor, "ndConstructor", NDConstructor.class, "Cannot construct tensor without shape information." ); - _targetDevice = (Device<Object>) targetDevice; - _ndConstructor = ndConstructor; - _API = API; - } + public TensorConstructor(Device<?> targetDevice, NDConstructor ndConstructor, Args Args) { + LogUtil.nullArgCheck( targetDevice, "targetDevice", Device.class, "Cannot construct a tensor without target device." ); + LogUtil.nullArgCheck( ndConstructor, "ndConstructor", NDConstructor.class, "Cannot construct tensor without shape information." ); + _targetDevice = (Device<Object>) targetDevice; + _ndConstructor = ndConstructor; + _Args = Args; + } /** * Constructs the tensor without any initial (filled) {@link Data}. @@ -57,73 +47,103 @@ * @param makeVirtual A flag determining if the tensor should be actual or virtual (not fully allocated). * @param autoAllocate Determines if the underlying data array should be allocated or not. 
*/ - void unpopulated( + Args unpopulated( boolean makeVirtual, boolean autoAllocate, DataType<?> type ) { - NDConfiguration ndc = _ndConstructor.produceNDC( makeVirtual ); - _API.setIsVirtual( makeVirtual ); - _API.setConf( ndc ); - if ( autoAllocate ) _API.setData( _targetDevice.allocate( type, ndc ) ); - } - - public void constructTrusted( Data<?> data ) { - _API.setConf( _ndConstructor.produceNDC( false ) ); - _API.setData( data ); - } - - public void tryConstructing( + NDConfiguration ndc = _ndConstructor.produceNDC( makeVirtual ); + _Args.setIsVirtual( makeVirtual ); + _Args.setConf( ndc ); + if ( autoAllocate ) + _Args.setData( _targetDevice.allocate( type, ndc ) ); + return _Args; + } + + public Args constructTrusted(Data<?> data ) { + _Args.setConf( _ndConstructor.produceNDC( false ) ); + _Args.setData( data ); + return _Args; + } + + public Args tryConstructing( DataType<?> dataType, Object data ) { - LogUtil.nullArgCheck( _ndConstructor, "ndConstructor", NDConstructor.class ); - LogUtil.nullArgCheck( _ndConstructor.getShape(), "shape", int[].class ); - LogUtil.nullArgCheck( dataType, "dataType", DataType.class ); - LogUtil.nullArgCheck( data, "data", Object.class ); - - int size = _ndConstructor.getSize(); - if ( data instanceof Object[] ) - data = CPU.get().allocate( dataType.getItemTypeClass(), size, data ).getOrNull(); + LogUtil.nullArgCheck( _ndConstructor, "ndConstructor", NDConstructor.class ); + LogUtil.nullArgCheck( _ndConstructor.getShape(), "shape", int[].class ); + LogUtil.nullArgCheck( dataType, "dataType", DataType.class ); + LogUtil.nullArgCheck( data, "data", Object.class ); + + int size = _ndConstructor.getSize(); + if ( data instanceof Object[] ) + data = CPU.get().allocate( dataType.getItemTypeClass(), size, data ).getOrNull(); else { - boolean isDefinitelyScalarValue = ( dataType == DataType.of(data.getClass()) ); + boolean isDefinitelyScalarValue = ( dataType == DataType.of(data.getClass()) ); - if ( data instanceof Number && !isDefinitelyScalarValue ) { - data = DataConverter.get().convert( data, dataType.getItemTypeClass() ); - isDefinitelyScalarValue = true; + if ( data instanceof Number && !isDefinitelyScalarValue ) { + data = DataConverter.get().convert( data, dataType.getItemTypeClass() ); + isDefinitelyScalarValue = true; } - if ( isDefinitelyScalarValue ) // This means that "data" is a single value! - if ( newPopulatedFromOne( data, dataType.getItemTypeClass() ) ) return; + if ( isDefinitelyScalarValue ) { // This means that "data" is a single value! 
+ newPopulatedFromOne( data, dataType.getItemTypeClass() ); + if ( data != null ) + return _Args; + } } - NDConfiguration ndc = _ndConstructor.produceNDC( false ); - _API.setIsVirtual( false ); - _API.setConf( ndc ); - _API.setData( _targetDevice.allocateFromAll( dataType, ndc, data) ); - } + NDConfiguration ndc = _ndConstructor.produceNDC( false ); + _Args.setIsVirtual( false ); + _Args.setConf( ndc ); + _Args.setData( _targetDevice.allocateFromAll( dataType, ndc, data) ); + return _Args; + } - public boolean newPopulatedFromOne( Object singleItem, Class<?> type ) + public Args newPopulatedFromOne(Object singleItem, Class<?> type ) { - int size = _ndConstructor.getSize(); - NDConfiguration ndc = _ndConstructor.produceNDC(_ndConstructor.getSize() > 1); - DataType<Object> dataType = (DataType<Object>) DataType.of( type ); - Data<?> array = _targetDevice.allocateFromOne( dataType, ndc, singleItem ); - _API.setIsVirtual( size > 1 ); - _API.setConf( ndc ); - _API.setData( array ); - return singleItem != null; + int size = _ndConstructor.getSize(); + NDConfiguration ndc = _ndConstructor.produceNDC(_ndConstructor.getSize() > 1); + DataType<Object> dataType = (DataType<Object>) DataType.of( type ); + Data<?> array = _targetDevice.allocateFromOne( dataType, ndc, singleItem ); + _Args.setIsVirtual( size > 1 ); + _Args.setConf( ndc ); + _Args.setData( array ); + return _Args; } - public <V> void newSeeded( Class<V> valueType, Arg.Seed seed ) + public <V> Args newSeeded(Class<V> valueType, Arg.Seed seed ) { - NDConfiguration ndc = _ndConstructor.produceNDC( false ); - Data<?> data = _targetDevice.allocate( DataType.of( valueType ), ndc ); - Object out = CPURandomization.fillRandomly( data.getOrNull(), seed ); - assert out == data.getOrNull(); - _API.setIsVirtual( false ); - _API.setConf( ndc ); - _API.setData( data ); - } + NDConfiguration ndc = _ndConstructor.produceNDC( false ); + Data<?> data = _targetDevice.allocate( DataType.of( valueType ), ndc ); + Object out = CPURandomization.fillRandomly( data.getOrNull(), seed ); + assert out == data.getOrNull(); + _Args.setIsVirtual( false ); + _Args.setConf( ndc ); + _Args.setData( data ); + return _Args; + } + + /** + * An interface defining methods for configuring a {@link Tensor} + * in the making... + */ + static class Args { + private NDConfiguration _conf; + private Data<?> _data; + private Boolean _isVirtual; + + public void setConf( NDConfiguration conf ) { _conf = conf; } + + public void setData( Data<?> o ) { _data = o; } + + public void setIsVirtual( boolean isVirtual ) { _isVirtual = isVirtual; } + + public NDConfiguration getConf() { return _conf; } + + public Data<?> getData() { return _data; } + + public Boolean isVirtual() { return _isVirtual; } + } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka/TensorImpl$1.html b/docs/coverage/test/html/neureka/TensorImpl$1.html index 895266498..df0dcafb8 100644 --- a/docs/coverage/test/html/neureka/TensorImpl$1.html +++ b/docs/coverage/test/html/neureka/TensorImpl$1.html @@ -1 +1 @@ -TensorImpl.new Data() {...}
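The TensorConstructor.java changes above replace the old callback-style API interface with the value-holding Args class, so each construction method now returns the populated Args instead of writing into the tensor directly. The following is only a simplified, package-internal sketch of that flow (the shape, data type and values are made up for illustration); the actual call sites are in the TensorImpl.java diff further below:

    // Build the construction state for a small vector of doubles on the CPU ...
    TensorConstructor.Args args =
        new TensorConstructor( CPU.get(), NDConstructor.of( 3 ), new TensorConstructor.Args() )
            .tryConstructing( DataType.of( Double.class ), new double[]{ 1, 2, 3 } );
    // ... and let the new TensorImpl(TensorConstructor.Args) constructor copy
    // the NDConfiguration, Data and virtual flag that it carries:
    Tensor<Double> t = new TensorImpl<>( args );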

TensorImpl.new Data() {...} — class coverage table: 2 of 17 instructions missed (88%), 5 methods; only usages() is uncovered.
\ No newline at end of file
+TensorImpl.new Data() {...} — regenerated: 4 of 17 instructions missed (76%); owner() and usages() are now uncovered, the remaining methods stay at 100%.
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka/TensorImpl$2.html b/docs/coverage/test/html/neureka/TensorImpl$2.html
index 2c1576405..535d5b6a4 100644
--- a/docs/coverage/test/html/neureka/TensorImpl$2.html
+++ b/docs/coverage/test/html/neureka/TensorImpl$2.html
@@ -1 +1 @@
-TensorImpl.new MutateNda.Item() {...} — class coverage table: 20 of 63 instructions missed (68%), 5 of 8 branches missed (37%), 6 methods; hashCode() and toString() are uncovered, equals(Object) is at 76%.
\ No newline at end of file
+TensorImpl.new MutateNda.Item() {...} — regenerated with identical coverage figures.
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka/TensorImpl$3.html b/docs/coverage/test/html/neureka/TensorImpl$3.html
index d95b6d243..da426ab57 100644
--- a/docs/coverage/test/html/neureka/TensorImpl$3.html
+++ b/docs/coverage/test/html/neureka/TensorImpl$3.html
@@ -1 +1 @@
-TensorImpl.new Iterator() {...} — class coverage table: 0 of 44 instructions missed (100%), 3 methods (next(), {...}, hasNext()), all fully covered.
\ No newline at end of file
+TensorImpl.new Iterator() {...} — regenerated with identical coverage figures.
\ No newline at end of file
diff --git a/docs/coverage/test/html/neureka/TensorImpl.html b/docs/coverage/test/html/neureka/TensorImpl.html
index 82d5554f6..12cd68909 100644
--- a/docs/coverage/test/html/neureka/TensorImpl.html
+++ b/docs/coverage/test/html/neureka/TensorImpl.html
@@ -1 +1 @@
-TensorImpl — class coverage table: 346 of 2,948 instructions missed (88%), 42 of 286 branches missed (85%), 114 methods; the least covered entries are modAssign(Tensor) and assign(Object) (0%), upcast(Class) (40%), withLabel(String) (48%), _removeOrReject(Component) (50%), _setDataAt(int, Object) (58%), _of(Object[]) (78%) and a handful of setIsVirtual/_delete lambdas.
\ No newline at end of file
+TensorImpl — regenerated table with 116 methods; totals now 380 of 2,988 instructions missed (87%) and 53 of 298 branches missed (82%). New entries TensorImpl(TensorConstructor.Args) (100%) and _setState(TensorConstructor.Args) (88%) appear, while _putAt(Tensor, Tensor) (44%), _removeOrReject(Component) (20%) and lambda$_delete$7(Device) (0%) lose coverage.
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka/TensorImpl.java.html b/docs/coverage/test/html/neureka/TensorImpl.java.html index d2950289c..8d16d2dbf 100644 --- a/docs/coverage/test/html/neureka/TensorImpl.java.html +++ b/docs/coverage/test/html/neureka/TensorImpl.java.html @@ -114,25 +114,22 @@ { if ( args == null || args.length == 0 ) return new TensorImpl<>(); if ( args.length == 1 ) { - TensorImpl<T> t = new TensorImpl<>(); - boolean success = constructFor(t, CPU.get(), NDConstructor.of(1)).newPopulatedFromOne( args[ 0 ], args[ 0 ].getClass() ); - if ( !success ) { - String message = "Cannot create tensor from argument of type '" + args[ 0 ].getClass().getName() + "'!"; - _LOG.error( message ); - throw new IllegalArgumentException( message ); + TensorImpl<T> t = new TensorImpl<>(constructFor(CPU.get(), NDConstructor.of(1)).newPopulatedFromOne( args[ 0 ], args[ 0 ].getClass() )); + if ( args[ 0 ] == null ) { + String message = "Cannot create tensor from argument of type '" + args[ 0 ].getClass().getName() + "'!"; + _LOG.error( message ); + throw new IllegalArgumentException( message ); } - return t; + return t; } - Class<?> commonType = _extractCommonType(args); - if ( commonType != null ) { - TensorImpl<T> t = new TensorImpl<>(); - constructFor(t, CPU.get(), NDConstructor.of( args.length )) - .tryConstructing( - DataType.of(commonType), - args - ); - return t; + Class<?> commonType = _extractCommonType(args); + if ( commonType != null ) { + return new TensorImpl<>(constructFor(CPU.get(), NDConstructor.of( args.length )) + .tryConstructing( + DataType.of(commonType), + args + )); } /* EXPRESSION BASED CONSTRUCTION: @@ -142,50 +139,47 @@ Tensor<?> t = Tensor.of( "tanh(", x, ") * 7 **", y ); */ - boolean containsString = false; - int numberOfTensors = 0; - for ( Object o : args ) { - containsString = ( o instanceof String ) || containsString; - if ( o instanceof TensorImpl) - numberOfTensors++; + boolean containsString = false; + int numberOfTensors = 0; + for ( Object o : args ) { + containsString = ( o instanceof String ) || containsString; + if ( o instanceof TensorImpl) + numberOfTensors++; } - TensorImpl<T>[] tensors = new TensorImpl[ numberOfTensors ]; - StringBuilder f = new StringBuilder(); - int ti = 0; - for ( Object o : args ) { - if ( o instanceof TensorImpl) { - tensors[ ti ] = ( (TensorImpl<T>) o ); - f.append( "I[" ).append( ti ).append( "]" ); - ti++; + TensorImpl<T>[] tensors = new TensorImpl[ numberOfTensors ]; + StringBuilder f = new StringBuilder(); + int ti = 0; + for ( Object o : args ) { + if ( o instanceof TensorImpl) { + tensors[ ti ] = ( (TensorImpl<T>) o ); + f.append( "I[" ).append( ti ).append( "]" ); + ti++; } - else if ( o instanceof String ) f.append( (String) o ); + else if ( o instanceof String ) f.append( (String) o ); else - _LOG.debug( - "Unexpected tensor construction argument of type '"+o.getClass().getSimpleName()+"'" + _LOG.debug( + "Unexpected tensor construction argument of type '"+o.getClass().getSimpleName()+"'" ); } - if ( tensors.length == 0 || tensors[0] == null) return new TensorImpl<>(); - return Function.of( f.toString(), true ).call( tensors ); + if ( tensors.length == 0 || tensors[0] == null) return new TensorImpl<>(); + return Function.of( f.toString(), true ).call( tensors ); } static <T> Tensor<T> _of( Iterable<T> iterable ) { - List<T> list = new ArrayList<>(); - iterable.forEach( list::add ); - return _of( list ); + List<T> list = new ArrayList<>(); + iterable.forEach( list::add ); + return _of( list ); } 
static <T> Tensor<T> _of( List<T> list ) { - TensorImpl<T> t = new TensorImpl<>(); - Class<?> commonType = _extractCommonType( list.toArray() ); - // We construct the tensor: - constructFor(t, CPU.get(), NDConstructor.of( list.size() )) - .tryConstructing( - DataType.of(commonType), - list.toArray() - ); - return t; + return new TensorImpl<>( + constructFor(CPU.get(), NDConstructor.of( list.size() )) + .tryConstructing( + DataType.of(_extractCommonType( list.toArray() )), + list.toArray() + )); } @@ -194,14 +188,14 @@ * @return A common type or null if they are not all of the same type. */ private static Class<?> _extractCommonType( Object... args ) { - Class<?> commonType = null; - for ( Object o : args ) - if ( o != null ) { - if ( commonType == null ) commonType = o.getClass(); - else if ( !commonType.equals(o.getClass()) ) return null; + Class<?> commonType = null; + for ( Object o : args ) + if ( o != null ) { + if ( commonType == null ) commonType = o.getClass(); + else if ( !commonType.equals(o.getClass()) ) return null; } - return commonType; + return commonType; } // Constructors: @@ -215,80 +209,83 @@ * second parameter into this {@link Tensor} instance. * This constructor will be called by the {@link Tensor#newInstance()} factory method. */ - TensorImpl() { - _setData(new Data<V>() { - @Override public Device<V> owner() { return (Device<V>) CPU.get(); } - @Override public Object getOrNull() { return null;} + TensorImpl() { + _setData(new Data<V>() { + @Override public Device<V> owner() { return (Device<V>) CPU.get(); } + @Override public Object getOrNull() { return null;} @Override public DataType<V> dataType() { - return (DataType<V>) Neureka.get().settings().dtype().getDefaultDataType(); + return (DataType<V>) Neureka.get().settings().dtype().getDefaultDataType(); } - @Override public int usages() { return 1; } + @Override public int usages() { return 1; } }); - } + } + + TensorImpl( TensorConstructor.Args args ) { + NDConfiguration ndc = args.getConf(); + Boolean isVirtual = args.isVirtual(); + Data<V> data = (Data<V>) args.getData(); + if ( isVirtual != null ) + _setIsVirtual( isVirtual ); + if ( ndc != null ) + _setNDConf( ndc ); + if ( data != null ) + _setData( data ); + } public static <V> TensorImpl<V> _of( NDConstructor ndConstructor, Device device, DataType<V> dataType, Object value ) { - Object data = value; - if ( List.class.isAssignableFrom( dataType.getItemTypeClass() ) ) - data = new Object[]{ value }; // Make a nd-array of lists possible - if ( Object[].class.isAssignableFrom( dataType.getItemTypeClass() ) ) - data = new Object[]{ value }; // Make a nd-array of arrays possible - if ( Object.class == dataType.getItemTypeClass() ) { - if ( value.getClass() != Object[].class ) - data = new Object[]{ value }; + Object data = value; + if ( List.class.isAssignableFrom( dataType.getItemTypeClass() ) ) + data = new Object[]{ value }; // Make a nd-array of lists possible + if ( Object[].class.isAssignableFrom( dataType.getItemTypeClass() ) ) + data = new Object[]{ value }; // Make a nd-array of arrays possible + if ( Object.class == dataType.getItemTypeClass() ) { + if ( value.getClass() != Object[].class ) + data = new Object[]{ value }; } - if ( data instanceof List<?> ) { - List<?> range = (List<?>) data; - data = range.toArray();// TODO: This is probably wrong! + if ( data instanceof List<?> ) { + List<?> range = (List<?>) data; + data = range.toArray();// TODO: This is probably wrong! 
} - TensorImpl<V> t = new TensorImpl<>(); - constructFor(t, device, ndConstructor).tryConstructing( dataType, data ); - return t; + return new TensorImpl<>(constructFor(device, ndConstructor).tryConstructing( dataType, data )); } static <V> TensorImpl<V> _of( NDConstructor ndConstructor, DataType<V> dataType, Data<V> data ) { // We check if the type of the data is compatible with the type of the tensor: - if ( !dataType.getItemTypeClass().isAssignableFrom( data.dataType().getItemTypeClass() ) ) - throw new IllegalArgumentException( + if ( !dataType.getItemTypeClass().isAssignableFrom( data.dataType().getItemTypeClass() ) ) + throw new IllegalArgumentException( "The data type of the data is not compatible with the data type of the tensor!" ); - TensorImpl<V> t = new TensorImpl<>(); - constructFor(t, data.owner(), ndConstructor).constructTrusted( data ); - return t; + return new TensorImpl<>(constructFor(data.owner(), ndConstructor).constructTrusted( data )); } /** * see {@link Tensor#of(DataType, Shape, Filler)} */ static <V> TensorImpl<V> _of( NDConstructor ndConstructor, DataType<V> type, Filler<V> filler ) { - LogUtil.nullArgCheck(ndConstructor, "ndcProducer", NDConstructor.class ); - LogUtil.nullArgCheck( type, "type", DataType.class ); - LogUtil.nullArgCheck( type, "filler", Filler.class ); - TensorImpl<V> t = new TensorImpl<>(); - constructFor(t, CPU.get(), ndConstructor).unpopulated( false, true, type ); - t._initDataArrayFrom( filler ); - return t; + LogUtil.nullArgCheck(ndConstructor, "ndcProducer", NDConstructor.class ); + LogUtil.nullArgCheck( type, "type", DataType.class ); + LogUtil.nullArgCheck( type, "filler", Filler.class ); + TensorImpl<V> t = new TensorImpl<>(constructFor(CPU.get(), ndConstructor).unpopulated( false, true, type )); + t._initDataArrayFrom( filler ); + return t; } /** * See {@link Tensor#of(Class, Shape, neureka.math.args.Arg.Seed)} and {@link #of(List, String)} */ static <V> TensorImpl<V> _of( Class<V> valueType, NDConstructor ndConstructor, Arg.Seed seed ) { - LogUtil.nullArgCheck( valueType, "valueType", Class.class ); - LogUtil.nullArgCheck(ndConstructor, "ndcProducer", NDConstructor.class ); - LogUtil.nullArgCheck( seed, "seed", Arg.Seed.class ); - TensorImpl<V> t = new TensorImpl<>(); - constructFor(t, CPU.get(), ndConstructor).newSeeded( valueType, seed ); - return t; + LogUtil.nullArgCheck( valueType, "valueType", Class.class ); + LogUtil.nullArgCheck(ndConstructor, "ndcProducer", NDConstructor.class ); + LogUtil.nullArgCheck( seed, "seed", Arg.Seed.class ); + return new TensorImpl<>(constructFor(CPU.get(), ndConstructor).newSeeded( valueType, seed )); } static <V> TensorImpl<V> _of( NDConstructor ndConstructor, DataType<?> type ) { - LogUtil.nullArgCheck(ndConstructor, "ndcProducer", NDConstructor.class ); - LogUtil.nullArgCheck( type, "type", DataType.class ); - TensorImpl<V> t = new TensorImpl<>(); - constructFor(t, CPU.get(), ndConstructor).unpopulated( true, true, type ); - return t; + LogUtil.nullArgCheck(ndConstructor, "ndcProducer", NDConstructor.class ); + LogUtil.nullArgCheck( type, "type", DataType.class ); + return new TensorImpl<>(constructFor(CPU.get(), ndConstructor).unpopulated( true, true, type )); } /*================================================================================================================== @@ -300,34 +297,34 @@ /** {@inheritDoc} */ @Override public Tensor<V> setRqsGradient( boolean rqsGradient ) { - if ( rqsGradient() != rqsGradient ) { - if ( !rqsGradient ) this.remove( TensorImpl.class ); - else if ( 
has(GraphNode.class) ) { - if ( getGraphNode().map( n -> n.getMode() == 0 ).orElse(false) ) - remove(GraphNode.class); + if ( rqsGradient() != rqsGradient ) { + if ( !rqsGradient ) this.remove( TensorImpl.class ); + else if ( has(GraphNode.class) ) { + if ( getGraphNode().map( n -> n.getMode() == 0 ).orElse(false) ) + remove(GraphNode.class); else - throw new IllegalArgumentException( + throw new IllegalArgumentException( "This tensor is already part of a gradient dependent graph as " + "branch node and therefore cannot be removed from it." ); } } - _setRqsGradient( rqsGradient ); - return this; + _setRqsGradient( rqsGradient ); + return this; } /** {@inheritDoc} */ - @Override public boolean rqsGradient() { return ( _flags & RQS_GRADIENT_MASK ) == RQS_GRADIENT_MASK; } + @Override public boolean rqsGradient() { return ( _flags & RQS_GRADIENT_MASK ) == RQS_GRADIENT_MASK; } private void _setRqsGradient( boolean rqsGradient ) { - if ( rqsGradient() != rqsGradient ) { - if ( rqsGradient ) _flags += RQS_GRADIENT_MASK; - else _flags -= RQS_GRADIENT_MASK; + if ( rqsGradient() != rqsGradient ) { + if ( rqsGradient ) _flags += RQS_GRADIENT_MASK; + else _flags -= RQS_GRADIENT_MASK; } - } + } /** {@inheritDoc} */ - @Override public boolean isIntermediate() { return ( _flags & IS_INTERMEDIATE_MASK ) == IS_INTERMEDIATE_MASK; } + @Override public boolean isIntermediate() { return ( _flags & IS_INTERMEDIATE_MASK ) == IS_INTERMEDIATE_MASK; } /** * Intermediate tensors are internal non-user tensors which may be eligible @@ -338,52 +335,66 @@ * tensor which may be eligible for deletion by {@link Function}s consuming it. */ private Tensor<V> _setIsIntermediate( boolean isIntermediate ) { - if ( isIntermediate() != isIntermediate ) { - if ( isIntermediate ) _flags += IS_INTERMEDIATE_MASK; - else _flags -= IS_INTERMEDIATE_MASK; + if ( isIntermediate() != isIntermediate ) { + if ( isIntermediate ) _flags += IS_INTERMEDIATE_MASK; + else _flags -= IS_INTERMEDIATE_MASK; } - return this; + return this; } /** {@inheritDoc} */ @Override - public boolean isVirtual() { return ( _flags & IS_VIRTUAL_MASK ) == IS_VIRTUAL_MASK; } + public boolean isVirtual() { return ( _flags & IS_VIRTUAL_MASK ) == IS_VIRTUAL_MASK; } /** {@inheritDoc} */ @Override public Tensor<V> setIsVirtual(boolean isVirtual ) { - if ( getNDConf() == null ) - throw new IllegalStateException( + if ( getNDConf() == null ) + throw new IllegalStateException( "Cannot set the virtual flag of a tensor which has not been constructed yet!" ); - if ( isVirtual() != isVirtual ) + if ( isVirtual() != isVirtual ) { - if ( isVirtual ) - _virtualize(); + if ( isVirtual ) + _virtualize(); else - _actualize(); + _actualize(); // Virtual and actual tensors require a different mapping from a given index to the underlying data.. 
// Therefore, we need to re-initialize the NDConfiguration object: - constructFor(this, getDevice(),NDConstructor.of(getNDConf().shape())).unpopulated( isVirtual, false, getDataType() ); - if ( isVirtual ) - this.find( Relation.class ) - .ifPresent( r -> - r.getChildren().forEach(c -> { - ((TensorImpl<V>)c)._setData( _getData() ); - ((TensorImpl<V>)c).setIsVirtual( true ); - }) + TensorConstructor.Args args = constructFor(getDevice(),NDConstructor.of(getNDConf().shape())).unpopulated( isVirtual, false, getDataType() ); + _setState( args ); + + if ( isVirtual ) + this.find( Relation.class ) + .ifPresent( r -> + r.getChildren().forEach(c -> { + ((TensorImpl<V>)c)._setData( _getData() ); + ((TensorImpl<V>)c).setIsVirtual( true ); + }) ); else - this.find(Relation.class) - .map( relation -> ((Relation<V>)relation).getParent().orElse(null) ) - .map( parent -> parent.get(Relation.class) ) - .ifPresent( parentRelation -> parentRelation.removeChild( this ) ); - } - else if ( isVirtual ) _allocateVirtual(); //> Only a single value representing the rest. - return this; - } + this.find(Relation.class) + .map( relation -> ((Relation<V>)relation).getParent().orElse(null) ) + .map( parent -> parent.get(Relation.class) ) + .ifPresent( parentRelation -> parentRelation.removeChild( this ) ); + } + else if ( isVirtual ) _allocateVirtual(); //> Only a single value representing the rest. + return this; + } + + private void _setState(TensorConstructor.Args args) { + Boolean isVirtual = args.isVirtual(); + NDConfiguration ndc = args.getConf(); + Data<V> data = (Data<V>) args.getData(); + if ( isVirtual != null ) + _setIsVirtual( isVirtual ); + if ( ndc != null ) + _setNDConf( ndc ); + if ( data != null ) + _setData( data ); + } /** * This method is the inner counterpart to the public "{@link MutateTensor#setIsVirtual}" method. 
@@ -393,36 +404,36 @@ */ @Override protected void _setIsVirtual( boolean isVirtual ) { - if ( isVirtual() != isVirtual ) { - if ( isVirtual ) _flags += IS_VIRTUAL_MASK; - else _flags -= IS_VIRTUAL_MASK; + if ( isVirtual() != isVirtual ) { + if ( isVirtual ) _flags += IS_VIRTUAL_MASK; + else _flags -= IS_VIRTUAL_MASK; } - } + } /** {@inheritDoc} */ @Override - public boolean isDeleted() { return ( _flags & IS_DELETED_MASK ) == IS_DELETED_MASK; } + public boolean isDeleted() { return ( _flags & IS_DELETED_MASK ) == IS_DELETED_MASK; } /** {@inheritDoc} */ @Override - public boolean gradientApplyRequested() { return ( _flags & GRADIENT_APPLY_RQD_MASK ) == GRADIENT_APPLY_RQD_MASK; } + public boolean gradientApplyRequested() { return ( _flags & GRADIENT_APPLY_RQD_MASK ) == GRADIENT_APPLY_RQD_MASK; } /** {@inheritDoc} */ @Override public Tensor<V> setGradientApplyRequested(boolean applyRequested ) { - if ( gradientApplyRequested() != applyRequested ) { - if ( applyRequested ) { + if ( gradientApplyRequested() != applyRequested ) { + if ( applyRequested ) { if ( - Neureka.get().settings().autograd().isApplyingGradientWhenRequested() && - !Neureka.get().settings().autograd().isApplyingGradientWhenTensorIsUsed() + Neureka.get().settings().autograd().isApplyingGradientWhenRequested() && + !Neureka.get().settings().autograd().isApplyingGradientWhenTensorIsUsed() ) - this.applyGradient(); + this.applyGradient(); else - _flags += GRADIENT_APPLY_RQD_MASK; + _flags += GRADIENT_APPLY_RQD_MASK; } - else _flags -= GRADIENT_APPLY_RQD_MASK; + else _flags -= GRADIENT_APPLY_RQD_MASK; } - return this; + return this; } /** @@ -438,22 +449,22 @@ */ private Tensor<V> _delete() { - if ( isDeleted() ) return this; - getGraphNode().ifPresent( n -> { - if ( !n.canBeDeleted() ) { - String message = "Cannot delete a tensor which is used as derivative by the AD computation graph!"; - _LOG.error( message ); - throw new IllegalStateException( message ); + if ( isDeleted() ) return this; + getGraphNode().ifPresent( n -> { + if ( !n.canBeDeleted() ) { + String message = "Cannot delete a tensor which is used as derivative by the AD computation graph!"; + _LOG.error( message ); + throw new IllegalStateException( message ); } - }); - this.find( Device.class ).ifPresent( device -> device.free( this ) ); - _setData( null ); - _setNDConf( null ); - _flags = 0; - this.find( TensorImpl.class ).ifPresent(t -> t.mut().delete() ); - _deleteComponents(); - _flags += IS_DELETED_MASK; - return this; + }); + this.find( Device.class ).ifPresent( device -> device.free( this ) ); + _setData( null ); + _setNDConf( null ); + _flags = 0; + this.find( TensorImpl.class ).ifPresent(t -> t.mut().delete() ); + _deleteComponents(); + _flags += IS_DELETED_MASK; + return this; } /*================================================================================================================== @@ -465,14 +476,14 @@ /** {@inheritDoc} */ @Override public <T extends Component<?>> T get( Class<T> componentClass ) { - LogUtil.nullArgCheck( componentClass, "componentClass", Class.class ); + LogUtil.nullArgCheck( componentClass, "componentClass", Class.class ); - if ( GraphNode.class.isAssignableFrom(componentClass) ) - _guardGet(componentClass.getSimpleName()); - else if ( NDFrame.class.isAssignableFrom(componentClass) ) - _guardGet(componentClass.getSimpleName()); + if ( GraphNode.class.isAssignableFrom(componentClass) ) + _guardGet(componentClass.getSimpleName()); + else if ( NDFrame.class.isAssignableFrom(componentClass) ) + 
_guardGet(componentClass.getSimpleName()); - return super.get(componentClass); + return super.get(componentClass); } /** @@ -485,7 +496,7 @@ * @return The unchanged object or maybe in future versions: null (component rejected) */ @Override - protected < T extends Component<Tensor<V>> > T _setOrReject(T newComponent ) { return newComponent; } + protected < T extends Component<Tensor<V>> > T _setOrReject(T newComponent ) { return newComponent; } /** * This method is executed when a component is being removed from the tensor. @@ -499,28 +510,28 @@ @Override protected <T extends Component<Tensor<V>>> T _removeOrReject(T newComponent ) { - if ( newComponent instanceof Device ) { - Device<V> device = (Device<V>) newComponent; + if ( newComponent instanceof Device ) { + Device<V> device = (Device<V>) newComponent; /* The following seems like a redundant check, however often times a tensor will be removed from a Device implementation inside the "restore" method when the tensor has already been removed from the device... Without the condition below a stack overflow would occur! */ - if ( device.has( this ) ) { + if ( device.has( this ) ) { try { - device.restore( this ); - } catch ( Exception exception ) { - _LOG.error( + device.restore( this ); + } catch ( Exception exception ) { + _LOG.error( "Removing device from tensor / tensor from device failed.\n" + "Restoring tensor from device threw exception.\n", exception ); - throw exception; - } + throw exception; + } } } - return newComponent; + return newComponent; } @@ -534,7 +545,7 @@ * {@inheritDoc} */ @Override - public int getVersion() { return _version; } + public int getVersion() { return _version; } /*================================================================================================================== @@ -553,58 +564,58 @@ * @param call The context object containing all relevant information that defines a call for tensor execution. */ private void _incrementVersionBecauseOf( ExecutionCall<?> call ) { - if ( Neureka.get().settings().autograd().isPreventingInlineOperations() ) { - _version++; // Autograd must be warned! - GraphNode<?> node = get( GraphNode.class ); - if ( node != null && node.getPayloadReferenceVersion() != _version ) { - if ( node.usesAD() || node.isUsedAsDerivative() ) { - String error = "Inline operation occurred on tensor which is part of a computation graph node with autograd support!\n" + - "The following OperationType caused an internal version mismatch: '"+call.getOperation().getIdentifier()+"'"; - _LOG.error( error ); - throw new IllegalStateException( error ); + if ( Neureka.get().settings().autograd().isPreventingInlineOperations() ) { + _version++; // Autograd must be warned! + GraphNode<?> node = get( GraphNode.class ); + if ( node != null && node.getPayloadReferenceVersion() != _version ) { + if ( node.usesAD() || node.isUsedAsDerivative() ) { + String error = "Inline operation occurred on tensor which is part of a computation graph node with autograd support!\n" + + "The following OperationType caused an internal version mismatch: '"+call.getOperation().getIdentifier()+"'"; + _LOG.error( error ); + throw new IllegalStateException( error ); } } } - } + } /** * {@inheritDoc} */ @Override public MutateTensor<V> getMut() { - _guardGet("unsafe API"); - return this; + _guardGet("unsafe API"); + return this; } /** {@inheritDoc} */ @Override public MutateNda.Item<V> at(int... 
indices ) { - return new MutateNda.Item<V>() { - @Override public V orElseNull() { return item( indices ); } - @Override public void set( V value ) { getMut().putAt( indices, value ); } + return new MutateNda.Item<V>() { + @Override public V orElseNull() { return item( indices ); } + @Override public void set( V value ) { getMut().putAt( indices, value ); } @Override public boolean equals( Object o ) { - if ( o == null ) return false; - if ( o == this ) return true; - if ( o.getClass() != this.getClass() ) return false; - Nda.Item<V> other = (Nda.Item<V>) o; - return this.get().equals( other.get() ); + if ( o == null ) return false; + if ( o == this ) return true; + if ( o.getClass() != this.getClass() ) return false; + Nda.Item<V> other = (Nda.Item<V>) o; + return this.get().equals( other.get() ); } - @Override public int hashCode() { V item = get(); return ( item == null ? 0 : item.hashCode() ); } - @Override public String toString() { return String.valueOf( get() ); } + @Override public int hashCode() { V item = get(); return ( item == null ? 0 : item.hashCode() ); } + @Override public String toString() { return String.valueOf( get() ); } }; } /** * {@inheritDoc} */ @Override - public Tensor<V> setNDConf(NDConfiguration configuration ) { TensorImpl.this._setNDConf( configuration ); return TensorImpl.this; } + public Tensor<V> setNDConf(NDConfiguration configuration ) { TensorImpl.this._setNDConf( configuration ); return TensorImpl.this; } /** * {@inheritDoc} */ @Override public <V> Tensor<V> toType(Class<V> typeClass ) { - LogUtil.nullArgCheck( typeClass, "typeClass", Class.class, "Cannot convert tensor to 'null' data type." ); - return TensorImpl.this._toType( typeClass ); + LogUtil.nullArgCheck( typeClass, "typeClass", Class.class, "Cannot convert tensor to 'null' data type." 
); + return TensorImpl.this._toType( typeClass ); } /** @@ -612,11 +623,11 @@ */ @Override public <U> Tensor<U> upcast(Class<U> superType ) { - LogUtil.nullArgCheck( superType, "superType", Class.class ); - if ( superType.isAssignableFrom(TensorImpl.this.itemType()) ) - return (Tensor<U>) TensorImpl.this; + LogUtil.nullArgCheck( superType, "superType", Class.class ); + if ( superType.isAssignableFrom(TensorImpl.this.itemType()) ) + return (Tensor<U>) TensorImpl.this; else - throw new IllegalArgumentException("Provided type '"+superType+"' is not a super type of '"+ TensorImpl.this.itemType()+"'."); + throw new IllegalArgumentException("Provided type '"+superType+"' is not a super type of '"+ TensorImpl.this.itemType()+"'."); } /** @@ -624,8 +635,8 @@ */ @Override public Tensor<V> toLayout(NDConfiguration.Layout layout ) { - ReLayout.toLayout( this, layout ); - return TensorImpl.this; + ReLayout.toLayout( this, layout ); + return TensorImpl.this; } /** @@ -633,33 +644,33 @@ */ @Override public Tensor<V> incrementVersion(ExecutionCall<?> call ) { - LogUtil.nullArgCheck( call, "call", ExecutionCall.class ); - _incrementVersionBecauseOf( call ); - return TensorImpl.this; + LogUtil.nullArgCheck( call, "call", ExecutionCall.class ); + _incrementVersionBecauseOf( call ); + return TensorImpl.this; } /** * {@inheritDoc} */ @Override - public Tensor<V> setIsIntermediate(boolean isIntermediate ) { return _setIsIntermediate( isIntermediate ); } + public Tensor<V> setIsIntermediate(boolean isIntermediate ) { return _setIsIntermediate( isIntermediate ); } /** * {@inheritDoc} */ - @Override public Tensor<V> delete() { return TensorImpl.this._delete(); } + @Override public Tensor<V> delete() { return TensorImpl.this._delete(); } /** * {@inheritDoc} */ - @Override public Data<V> getData() { return _getData(); } + @Override public Data<V> getData() { return _getData(); } /** * {@inheritDoc} */ @Override public <A> A getDataAs( Class<A> arrayTypeClass ) { - return DataConverter.get().convert( _getData(false), arrayTypeClass ); + return DataConverter.get().convert( _getData(false), arrayTypeClass ); } /** @@ -667,9 +678,9 @@ */ @Override public Tensor<V> setDataAt(int i, V o ) { - _guardMod("data object"); - _setDataAt( i, o ); - return TensorImpl.this; + _guardMod("data object"); + _setDataAt( i, o ); + return TensorImpl.this; } /** @@ -677,131 +688,131 @@ */ @Override public Tensor<V> setData(Data<V> data ) { - TensorImpl.this._setData( data ); - return TensorImpl.this; + TensorImpl.this._setData( data ); + return TensorImpl.this; } /** * {@inheritDoc} */ - @Override public Tensor<V> detach() { TensorImpl.this.remove( GraphNode.class ); return TensorImpl.this; } + @Override public Tensor<V> detach() { TensorImpl.this.remove( GraphNode.class ); return TensorImpl.this; } /** {@inheritDoc} */ @Override public Tensor<V> timesAssign(Tensor<V> other ) { - LogUtil.nullArgCheck(other, "other", Tensor.class, "Cannot multiply-assign 'null' to a tensor!"); - return Neureka.get().backend().getFunction().mulAssign().call( TensorImpl.this, other ); + LogUtil.nullArgCheck(other, "other", Tensor.class, "Cannot multiply-assign 'null' to a tensor!"); + return Neureka.get().backend().getFunction().mulAssign().call( TensorImpl.this, other ); } /** {@inheritDoc} */ @Override public Tensor<V> timesAssign(V other ) { - LogUtil.nullArgCheck(other, "other", TensorImpl.this.getItemType(), "Cannot multiply-assign 'null' to a tensor!"); - return this.timesAssign( Tensor.of( getItemType(), this.shape(), other ) ); + 
LogUtil.nullArgCheck(other, "other", TensorImpl.this.getItemType(), "Cannot multiply-assign 'null' to a tensor!"); + return this.timesAssign( Tensor.of( getItemType(), this.shape(), other ) ); } /** {@inheritDoc} */ @Override public Tensor<V> divAssign(Tensor<V> other ) { - LogUtil.nullArgCheck(other, "other", Tensor.class, "Cannot divide-assign a tensor by 'null' (In any sense of the word)!"); - return Neureka.get().backend().getFunction().divAssign().call( TensorImpl.this, other ); + LogUtil.nullArgCheck(other, "other", Tensor.class, "Cannot divide-assign a tensor by 'null' (In any sense of the word)!"); + return Neureka.get().backend().getFunction().divAssign().call( TensorImpl.this, other ); } /** {@inheritDoc} */ @Override public Tensor<V> modAssign(Tensor<V> other ) { - LogUtil.nullArgCheck(other, "other", Tensor.class, "Cannot perform tensor modulo 'null'!"); - return Neureka.get().backend().getFunction().modAssign().call( TensorImpl.this, other ); + LogUtil.nullArgCheck(other, "other", Tensor.class, "Cannot perform tensor modulo 'null'!"); + return Neureka.get().backend().getFunction().modAssign().call( TensorImpl.this, other ); } /** {@inheritDoc} */ @Override public Tensor<V> plusAssign(Tensor<V> other ) { - LogUtil.nullArgCheck(other, "other", Tensor.class, "Cannot add-assign 'null' to a tensor!"); - return Neureka.get().backend().getFunction().plusAssign().call( TensorImpl.this, other ); + LogUtil.nullArgCheck(other, "other", Tensor.class, "Cannot add-assign 'null' to a tensor!"); + return Neureka.get().backend().getFunction().plusAssign().call( TensorImpl.this, other ); } /** {@inheritDoc} */ @Override public Tensor<V> minusAssign(Tensor<V> other ) { - LogUtil.nullArgCheck(other, "other", Tensor.class, "Cannot subtract-assign 'null' from a tensor!"); - return Neureka.get().backend().getFunction().minusAssign().call( TensorImpl.this, other ); + LogUtil.nullArgCheck(other, "other", Tensor.class, "Cannot subtract-assign 'null' from a tensor!"); + return Neureka.get().backend().getFunction().minusAssign().call( TensorImpl.this, other ); } /** {@inheritDoc} */ @Override public Tensor<V> minusAssign(V other ) { - LogUtil.nullArgCheck(other, "other", TensorImpl.this.getItemType(), "Cannot subtract-assign 'null' from a tensor!"); - return minusAssign( - Tensor.of( TensorImpl.this.getDataType().getItemTypeClass() ) - .withShape(TensorImpl.this.getNDConf().shape()) - .all(other) + LogUtil.nullArgCheck(other, "other", TensorImpl.this.getItemType(), "Cannot subtract-assign 'null' from a tensor!"); + return minusAssign( + Tensor.of( TensorImpl.this.getDataType().getItemTypeClass() ) + .withShape(TensorImpl.this.getNDConf().shape()) + .all(other) ); } @Override public Tensor<V> assign(V other ) { - LogUtil.nullArgCheck(other, "other", TensorImpl.this.getItemType(), "Cannot subtract-assign 'null' from a tensor!"); - return assign( - Tensor.of( TensorImpl.this.getDataType().getItemTypeClass() ) - .withShape(TensorImpl.this.getNDConf().shape()) - .all(other) + LogUtil.nullArgCheck(other, "other", TensorImpl.this.getItemType(), "Cannot subtract-assign 'null' from a tensor!"); + return assign( + Tensor.of( TensorImpl.this.getDataType().getItemTypeClass() ) + .withShape(TensorImpl.this.getNDConf().shape()) + .all(other) ); } @Override public Tensor<V> assign(Nda<V> other ) { - LogUtil.nullArgCheck(other, "other", Tensor.class, "Cannot assign 'null' to a tensor!"); - return Neureka.get().backend().getFunction().idy().call( TensorImpl.this, (Tensor<V>) other ); + LogUtil.nullArgCheck(other, 
"other", Tensor.class, "Cannot assign 'null' to a tensor!"); + return Neureka.get().backend().getFunction().idy().call( TensorImpl.this, (Tensor<V>) other ); } @Override public Tensor<V> labelAxes(String[]... labels ) { - LogUtil.nullArgCheck(labels, "labels", String[][].class, "Tensors cannot be labeled 'null'!"); - if ( labels.length > this.rank() ) - throw new IllegalArgumentException( + LogUtil.nullArgCheck(labels, "labels", String[][].class, "Tensors cannot be labeled 'null'!"); + if ( labels.length > this.rank() ) + throw new IllegalArgumentException( "Number of the provided axes labels is larger than the total number of axes (rank) of the nd-array." ); - NDFrame<V> frame = get( NDFrame.class ); - if ( frame == null ) { - frame = new NDFrame<>( this, null); - this.set(frame); + NDFrame<V> frame = get( NDFrame.class ); + if ( frame == null ) { + frame = new NDFrame<>( this, null); + this.set(frame); } - for ( int i = 0; i < labels.length; i++ ) { - if ( labels[ i ] != null ) { - AxisFrame<Integer, V> atAxis = frame.atAxis( i ); - for ( int ii = 0; ii < labels[ i ].length; ii++ ) { - if ( labels[ i ][ ii ] != null ) - atAxis.atIndexAlias( labels[ i ][ ii ] ).setIndex( ii ); + for ( int i = 0; i < labels.length; i++ ) { + if ( labels[ i ] != null ) { + AxisFrame<Integer, V> atAxis = frame.atAxis( i ); + for ( int ii = 0; ii < labels[ i ].length; ii++ ) { + if ( labels[ i ][ ii ] != null ) + atAxis.atIndexAlias( labels[ i ][ ii ] ).setIndex( ii ); } } } - return this; + return this; } /** {@inheritDoc} */ @Override public Tensor<V> labelAxes(List<List<Object>> labels ) { - LogUtil.nullArgCheck(labels, "labels", List.class, "Tensors cannot be labeled 'null'!"); - NDFrame<V> frame = get( NDFrame.class ); - if ( frame == null ) set( new NDFrame<>( labels, this, null ) ); - else set( frame.withAxesLabels( labels ) ); - return TensorImpl.this; + LogUtil.nullArgCheck(labels, "labels", List.class, "Tensors cannot be labeled 'null'!"); + NDFrame<V> frame = get( NDFrame.class ); + if ( frame == null ) set( new NDFrame<>( labels, this, null ) ); + else set( frame.withAxesLabels( labels ) ); + return TensorImpl.this; } /** {@inheritDoc} */ @Override public Tensor<V> label(String label ) { - LogUtil.nullArgCheck( label, "label", List.class, "Tensors cannot be labeled 'null'!" ); - NDFrame<V> frame = get( NDFrame.class ); - if ( frame == null ) set( new NDFrame<>( Collections.emptyList(), this, label ) ); - else set( frame.withLabel(label) ); - return TensorImpl.this; + LogUtil.nullArgCheck( label, "label", List.class, "Tensors cannot be labeled 'null'!" ); + NDFrame<V> frame = get( NDFrame.class ); + if ( frame == null ) set( new NDFrame<>( Collections.emptyList(), this, label ) ); + else set( frame.withLabel(label) ); + return TensorImpl.this; } /** {@inheritDoc} */ @Override public Tensor<V> labelAxes(Map<Object, List<Object>> labels ) { - LogUtil.nullArgCheck(labels, "labels", Map.class, "Tensors cannot be labeled 'null'!"); - String label = getLabel(); - label = label == null || label.isEmpty() ? null : label; - TensorImpl.this.set( new NDFrame<>( labels, TensorImpl.this, label ) ); - return TensorImpl.this; + LogUtil.nullArgCheck(labels, "labels", Map.class, "Tensors cannot be labeled 'null'!"); + String label = getLabel(); + label = label == null || label.isEmpty() ? 
null : label; + TensorImpl.this.set( new NDFrame<>( labels, TensorImpl.this, label ) ); + return TensorImpl.this; } /*================================================================================================================== @@ -819,20 +830,20 @@ @Override public Iterator<V> iterator() { - NDIterator _ndi = NDIterator.of( this ); - return new Iterator<V>() - { - private final int _size = TensorImpl.this.size(); - private int _count = 0; + NDIterator _ndi = NDIterator.of( this ); + return new Iterator<V>() + { + private final int _size = TensorImpl.this.size(); + private int _count = 0; - @Override public boolean hasNext() { return _count != _size; } + @Override public boolean hasNext() { return _count != _size; } @Override public V next() { - V value = TensorImpl.this.getDataAt( _ndi.i() ); - _ndi.increment(); - _count ++; - return value; + V value = TensorImpl.this.getDataAt( _ndi.i() ); + _ndi.increment(); + _count ++; + return value; } }; } @@ -849,8 +860,8 @@ */ @Override public Tensor<V> to(Device<?> device ){ - if ( this.getDevice() != device ) super._set( device ); - return this; + if ( this.getDevice() != device ) super._set( device ); + return this; } /** @@ -859,50 +870,50 @@ * This is to avoid unnecessary allocations and computations. */ void _backward( LazyRef<Tensor<V>> error ) { - LogUtil.nullArgCheck(error, "error", Tensor.class, "Cannot back-propagate 'null'!"); - LazyRef<Tensor<V>> errorRef = this.isOutsourced() - ? LazyRef.of(()->error.get().deepCopy().to(this.getDevice())) - : error; + LogUtil.nullArgCheck(error, "error", Tensor.class, "Cannot back-propagate 'null'!"); + LazyRef<Tensor<V>> errorRef = this.isOutsourced() + ? LazyRef.of(()->error.get().deepCopy().to(this.getDevice())) + : error; - find( GraphNode.class ).ifPresent( node -> node.backward(errorRef.get()) ); + find( GraphNode.class ).ifPresent( node -> node.backward(errorRef.get()) ); - if ( this.rqsGradient() ) - mut().addToGradient( errorRef.get() ); - } + if ( this.rqsGradient() ) + mut().addToGradient( errorRef.get() ); + } @Override public Tensor<V> withLabel(String label ) { - Tensor<V> copy = this.shallowCopy(); - if ( copy.label().endsWith(":slice") ) // We remove the slice postfix if it exists... - copy = copy.shallowClone().mut().label( copy.label().substring(0, copy.label().length()-6) ); - return copy.mut().label( label ); + Tensor<V> copy = this.shallowCopy(); + if ( copy.label().endsWith(":slice") ) // We remove the slice postfix if it exists... + copy = copy.shallowClone().mut().label( copy.label().substring(0, copy.label().length()-6) ); + return copy.mut().label( label ); } /** {@inheritDoc} */ @Override public Tensor<V> withLabels(String[]... labels ) { - Tensor<V> copy = this.shallowCopy(); - if ( copy.label().endsWith(":slice") ) // We remove the slice postfix if it exists... - copy = copy.shallowClone().mut().label( copy.label().substring(0, copy.label().length()-6) ); - return copy.mut().labelAxes( labels ); + Tensor<V> copy = this.shallowCopy(); + if ( copy.label().endsWith(":slice") ) // We remove the slice postfix if it exists... + copy = copy.shallowClone().mut().label( copy.label().substring(0, copy.label().length()-6) ); + return copy.mut().labelAxes( labels ); } /** {@inheritDoc} */ @Override public Tensor<V> withLabels(List<List<Object>> labels ) { - Tensor<V> copy = this.shallowCopy(); - if ( copy.label().endsWith(":slice") ) // We remove the slice postfix if it exists... 
- copy = copy.shallowClone().mut().label( copy.label().substring(0, copy.label().length()-6) ); - return copy.getMut().labelAxes( labels ); + Tensor<V> copy = this.shallowCopy(); + if ( copy.label().endsWith(":slice") ) // We remove the slice postfix if it exists... + copy = copy.shallowClone().mut().label( copy.label().substring(0, copy.label().length()-6) ); + return copy.getMut().labelAxes( labels ); } /** {@inheritDoc} */ @Override public Tensor<V> withLabels(Map<Object, List<Object>> labels ) { - Tensor<V> copy = this.shallowCopy(); - if ( copy.label().endsWith(":slice") ) // We remove the slice postfix if it exists... - copy = copy.shallowClone().mut().label( copy.label().substring(0, copy.label().length()-6) ); - return copy.getMut().labelAxes( labels ); + Tensor<V> copy = this.shallowCopy(); + if ( copy.label().endsWith(":slice") ) // We remove the slice postfix if it exists... + copy = copy.shallowClone().mut().label( copy.label().substring(0, copy.label().length()-6) ); + return copy.getMut().labelAxes( labels ); } /*================================================================================================================== @@ -915,10 +926,10 @@ /** {@inheritDoc} */ @Override public boolean isCase( Tensor<V> other ) { - LogUtil.nullArgCheck(other, "other", Tensor.class, "Cannot perform 'is case' operation when second operand is 'null'!"); - return this.find( Relation.class ) - .map( r -> ((Relation<?>)r).getChildren().stream().anyMatch( (Tensor<?> c) -> c.equals(other) )) - .orElse(false); + LogUtil.nullArgCheck(other, "other", Tensor.class, "Cannot perform 'is case' operation when second operand is 'null'!"); + return this.find( Relation.class ) + .map( r -> ((Relation<?>)r).getChildren().stream().anyMatch( (Tensor<?> c) -> c.equals(other) )) + .orElse(false); } /*================================================================================================================== @@ -931,16 +942,16 @@ /** {@inheritDoc} */ @Override public Tensor<V> getAt(int... indices ) { - LogUtil.nullArgCheck(indices, "indices", int[].class, "Indices array must not be 'null'!"); - return getAt( Arrays.stream( indices ).boxed().toArray() ); + LogUtil.nullArgCheck(indices, "indices", int[].class, "Indices array must not be 'null'!"); + return getAt( Arrays.stream( indices ).boxed().toArray() ); } /** {@inheritDoc} */ @Override public Tensor<V> getAt(Map<?,Integer> rangToSteps) { - LogUtil.nullArgCheck(rangToSteps, "rankToSteps", Map.class, "Rank-to-steps map must not be 'null'!"); + LogUtil.nullArgCheck(rangToSteps, "rankToSteps", Map.class, "Rank-to-steps map must not be 'null'!"); // ...not a simple slice... Advanced: - return SmartSlicer.slice(new Object[]{rangToSteps}, this); + return SmartSlicer.slice(new Object[]{rangToSteps}, this); } /** @@ -948,35 +959,35 @@ */ @Override public Tensor<V> getAt(List<?> key ) { - LogUtil.nullArgCheck( key, "key", List.class ); - if ( key.stream().anyMatch( i -> i == null ) ) - throw new IllegalArgumentException("List of indices/ranges may not contain entries which are null!"); - if ( key.isEmpty() ) { + LogUtil.nullArgCheck( key, "key", List.class ); + if ( key.stream().anyMatch( i -> i == null ) ) + throw new IllegalArgumentException("List of indices/ranges may not contain entries which are null!"); + if ( key.isEmpty() ) { /* An empty List instance is being interpreted as the request to create an identical slice, meaning that the resulting tensor views the same data as its parent while not being the same instance. (In a sense, its a shallow copy!) 
*/ - return shallowCopy(); + return shallowCopy(); } - Object[] indices = key.toArray(); - - boolean allInt = true; - for ( Object o : indices ) allInt = allInt && o instanceof Integer; - if ( allInt && indices.length == rank() ) { - int[] newOffset = DataConverter.get().convert(indices, int[].class); - for ( int i = 0; i < this.rank(); i++ ) - newOffset[ i ] = ( newOffset[ i ] < 0 ) ? getNDConf().shape( i ) + newOffset[ i ] : newOffset[ i ]; - for ( int i = 0; i < this.rank(); i++ ) - indices[ i ] = newOffset[ i ]; - allInt = false; + Object[] indices = key.toArray(); + + boolean allInt = true; + for ( Object o : indices ) allInt = allInt && o instanceof Integer; + if ( allInt && indices.length == rank() ) { + int[] newOffset = DataConverter.get().convert(indices, int[].class); + for ( int i = 0; i < this.rank(); i++ ) + newOffset[ i ] = ( newOffset[ i ] < 0 ) ? getNDConf().shape( i ) + newOffset[ i ] : newOffset[ i ]; + for ( int i = 0; i < this.rank(); i++ ) + indices[ i ] = newOffset[ i ]; + allInt = false; } - boolean hasScale = false; - for ( Object o : indices ) hasScale = hasScale || o instanceof Map; - return SmartSlicer.slice( - ( allInt ? new Object[]{ DataConverter.get().convert(indices, int[].class) } : indices ), + boolean hasScale = false; + for ( Object o : indices ) hasScale = hasScale || o instanceof Map; + return SmartSlicer.slice( + ( allInt ? new Object[]{ DataConverter.get().convert(indices, int[].class) } : indices ), this ); } @@ -984,50 +995,50 @@ /** {@inheritDoc} */ @Override public TensorImpl<V> deepCopy() { - return _clone( false ); + return _clone( false ); } /** {@inheritDoc} */ @Override public Tensor<V> deepClone() { - return _clone( true ); + return _clone( true ); } private TensorImpl<V> _clone(boolean autograd) { - Function cloner = autograd ? Neureka.get().backend().getAutogradFunction().idy() : Neureka.get().backend().getFunction().idy(); - boolean thisIsIntermediate = this.isIntermediate(); - _setIsIntermediate( false ); - Tensor<V> clone = Tensor.like( this ) - .all( (V) Double.valueOf(0.0) ); + Function cloner = autograd ? Neureka.get().backend().getAutogradFunction().idy() : Neureka.get().backend().getFunction().idy(); + boolean thisIsIntermediate = this.isIntermediate(); + _setIsIntermediate( false ); + Tensor<V> clone = Tensor.like( this ) + .all( (V) Double.valueOf(0.0) ); - if ( clone.itemType() != this.itemType() ) - throw new IllegalStateException("Item type of clone must be the same as the item type of the original!"); + if ( clone.itemType() != this.itemType() ) + throw new IllegalStateException("Item type of clone must be the same as the item type of the original!"); - clone = cloner.call( clone, this ); - clone.getMut().setIsIntermediate( thisIsIntermediate ); - _setIsIntermediate( thisIsIntermediate ); - return (TensorImpl<V>) clone; + clone = cloner.call( clone, this ); + clone.getMut().setIsIntermediate( thisIsIntermediate ); + _setIsIntermediate( thisIsIntermediate ); + return (TensorImpl<V>) clone; } /** * {@inheritDoc} */ @Override - public AxisOrGetTensor<V> slice() { return new SliceBuilder<>( this ); } + public AxisOrGetTensor<V> slice() { return new SliceBuilder<>( this ); } /** * {@inheritDoc} */ @Override public Tensor<V> putAt(List<?> key, Nda<V> value ) { - _putAtCheckFor( (Tensor<?>) value ); - Tensor<V> slice = ( key == null ) ? this : getAt( key ); - Data<V> thisData = this.getMut().getData(); - Object thisDataRef = ( thisData != null ? 
thisData.getOrNull() : null ); - if ( thisDataRef != null && !thisDataRef.equals(slice.getMut().getData().getOrNull()) ) - throw new IllegalStateException("Failed to isolate slice for inline assignment!"); + _putAtCheckFor( (Tensor<?>) value ); + Tensor<V> slice = ( key == null ) ? this : getAt( key ); + Data<V> thisData = this.getMut().getData(); + Object thisDataRef = ( thisData != null ? thisData.getOrNull() : null ); + if ( thisDataRef != null && !thisDataRef.equals(slice.getMut().getData().getOrNull()) ) + throw new IllegalStateException("Failed to isolate slice for inline assignment!"); - return _putAt( slice, (Tensor<V>) value ); + return _putAt( slice, (Tensor<V>) value ); } /** @@ -1035,22 +1046,22 @@ */ @Override public Tensor<V> putAt(int[] indices, V item ) { - if ( indices == null ) - throw new IllegalArgumentException( "Provided indices are null!" ); - if ( indices.length > this.rank() ) { - int[] correct = new int[rank()]; - System.arraycopy( indices, 0, correct, 0, indices.length ); - indices = correct; - } else if ( indices.length < rank() ) { - int[] correct = new int[rank()]; - System.arraycopy( indices, 0, correct, 0, indices.length ); - for ( int i = indices.length; i < rank(); i++ ) correct[i] = 0; - indices = correct; + if ( indices == null ) + throw new IllegalArgumentException( "Provided indices are null!" ); + if ( indices.length > this.rank() ) { + int[] correct = new int[rank()]; + System.arraycopy( indices, 0, correct, 0, indices.length ); + indices = correct; + } else if ( indices.length < rank() ) { + int[] correct = new int[rank()]; + System.arraycopy( indices, 0, correct, 0, indices.length ); + for ( int i = indices.length; i < rank(); i++ ) correct[i] = 0; + indices = correct; } - if ( this.isVirtual() && this.size() > 1 ) this.setIsVirtual( false ); - int i = getNDConf().indexOfIndices(indices); - this.getMut().setDataAt( i, item ); - return this; + if ( this.isVirtual() && this.size() > 1 ) this.setIsVirtual( false ); + int i = getNDConf().indexOfIndices(indices); + this.getMut().setDataAt( i, item ); + return this; } /** @@ -1058,77 +1069,77 @@ */ @Override public Tensor<V> putAt(Map<?,Integer> key, Nda<V> value ) { - _putAtCheckFor((Tensor<?>) value); - Tensor<V> slice = ( key == null ) ? this : getAt( key ); - return _putAt( slice, (Tensor<V>) value); + _putAtCheckFor((Tensor<?>) value); + Tensor<V> slice = ( key == null ) ? this : getAt( key ); + return _putAt( slice, (Tensor<V>) value); } private void _putAtCheckFor( Tensor<?> value ) { - if ( value.isEmpty() ) { - String message = "Provided tensor is empty! Empty tensors cannot be injected."; - _LOG.error( message ); - throw new IllegalArgumentException( message ); + if ( value.isEmpty() ) { + String message = "Provided tensor is empty! 
Empty tensors cannot be injected."; + _LOG.error( message ); + throw new IllegalArgumentException( message ); } - } + } private Tensor<V> _putAt(Tensor<V> slice, Tensor<V> value ) { - boolean valueIsDeviceVisitor = false; - if ( slice.isOutsourced() && !value.isOutsourced() ) { - Device<V> device = slice.getDevice(); + boolean valueIsDeviceVisitor = false; + if ( slice.isOutsourced() && !value.isOutsourced() ) { + Device<V> device = slice.getDevice(); try { - device.store( value ); - } catch ( Exception e ) { - _LOG.error( "Trying to migrate target slice tensor to device failed.", e ); - throw e; - } - valueIsDeviceVisitor = true; + device.store( value ); + } catch ( Exception e ) { + _LOG.error( "Trying to migrate target slice tensor to device failed.", e ); + throw e; + } + valueIsDeviceVisitor = true; } - Neureka.get().backend().getFunction().idy().call( slice, value ); + Neureka.get().backend().getFunction().idy().call( slice, value ); try { - if ( valueIsDeviceVisitor ) value.getDevice().restore( value ); - } catch ( Exception exception ) { - _LOG.error( "Trying to migrate source tensor back to original location failed.", exception ); - throw exception; - } - return this; + if ( valueIsDeviceVisitor ) value.getDevice().restore( value ); + } catch ( Exception exception ) { + _LOG.error( "Trying to migrate source tensor back to original location failed.", exception ); + throw exception; + } + return this; } /** * {@inheritDoc} */ @Override - public V getDataAt( int i ) { return getDevice().access( this ).readAt( i ); } + public V getDataAt( int i ) { return getDevice().access( this ).readAt( i ); } /** * {@inheritDoc} */ @Override public Tensor<V> setItemAt(int i, V o ) { - _guardMod("data object"); - NDConfiguration ndc = this.getNDConf(); - _setDataAt( ndc.indexOfIndex( i ), o ); - return this; + _guardMod("data object"); + NDConfiguration ndc = this.getNDConf(); + _setDataAt( ndc.indexOfIndex( i ), o ); + return this; } /** {@inheritDoc} */ @Override public Tensor<V> putAt(List<?> indices, V value ) { - if ( indices.stream().allMatch( i -> i instanceof Number ) ) - return setItemAt( indexOfIndices(indices.stream().mapToInt( i -> ((Number)i).intValue() ).toArray()), value ); + if ( indices.stream().allMatch( i -> i instanceof Number ) ) + return setItemAt( indexOfIndices(indices.stream().mapToInt( i -> ((Number)i).intValue() ).toArray()), value ); else - return this.putAt( indices, Tensor.ofAny( this.getItemType(), shape(), value ) ); + return this.putAt( indices, Tensor.ofAny( this.getItemType(), shape(), value ) ); } /** {@inheritDoc} */ - @Override public Tensor<V> putAt(int index, V value ) { return putAt( indicesOfIndex(index), value ); } + @Override public Tensor<V> putAt(int index, V value ) { return putAt( indicesOfIndex(index), value ); } private void _setDataAt( int i, V o ) { - if ( this.isVirtual() && i > 0 ) - throw new IllegalArgumentException("There is no data item at index "+i+" for this virtual tensor!"); + if ( this.isVirtual() && i > 0 ) + throw new IllegalArgumentException("There is no data item at index "+i+" for this virtual tensor!"); - getDevice().access( this ).write( o ).at( i ); - _version++; // Autograd must be warned! - } + getDevice().access( this ).write( o ).at( i ); + _version++; // Autograd must be warned! 
+ } /** * {@inheritDoc} @@ -1136,22 +1147,22 @@ @Override public Tensor<V> setItems(Object value ) { - LogUtil.nullArgCheck( value, "value", Object.class ); - boolean success = true; - if ( Number.class.isAssignableFrom(value.getClass()) ) { // A virtual tensor! - this.setIsVirtual( true ); - value = DataConverter.get().convert( value, this.itemType() ); - this.getMut().setDataAt( 0, (V) value ); + LogUtil.nullArgCheck( value, "value", Object.class ); + boolean success = true; + if ( Number.class.isAssignableFrom(value.getClass()) ) { // A virtual tensor! + this.setIsVirtual( true ); + value = DataConverter.get().convert( value, this.itemType() ); + this.getMut().setDataAt( 0, (V) value ); } - else if ( value.getClass().isArray() ) - getDevice().access(this).writeFrom( value ); + else if ( value.getClass().isArray() ) + getDevice().access(this).writeFrom( value ); - else success = false; + else success = false; - if ( !success ) - _LOG.warn( "Failed to set value of type '"+value.getClass().getSimpleName()+"'!" ); + if ( !success ) + _LOG.warn( "Failed to set value of type '"+value.getClass().getSimpleName()+"'!" ); - return this; + return this; } /** @@ -1159,14 +1170,14 @@ */ @Override public Object getRawData() { - _guardGet("data object"); - return _getData( true ); + _guardGet("data object"); + return _getData( true ); } private Object _getData( boolean clone ) { - Device<V> device = this.getDevice(); - if ( device == null ) return null; - else return device.access( this ).readAll( clone ); + Device<V> device = this.getDevice(); + if ( device == null ) return null; + else return device.access( this ).readAll( clone ); } /** @@ -1174,11 +1185,11 @@ */ @Override public Object getRawItems() { - _guardGet("value object"); - if ( this.getNDConf().isSimple() && !this.isSlice() ) - return getDevice().access(this).readAll(!this.isOutsourced()); + _guardGet("value object"); + if ( this.getNDConf().isSimple() && !this.isSlice() ) + return getDevice().access(this).readAll(!this.isOutsourced()); else - return getDevice().access( this.deepCopy().setIsVirtual( false ) ).readAll(false); + return getDevice().access( this.deepCopy().setIsVirtual( false ) ).readAll(false); } /*================================================================================================================== @@ -1194,98 +1205,98 @@ @Override public BufferedImage asImage( ImageType type ) { - switch ( type.bufferType ) + switch ( type.bufferType ) { case BufferedImage.TYPE_3BYTE_BGR: { - _checkRankForImageConversion(type, Number.class, 0, 0, 3); + _checkRankForImageConversion(type, Number.class, 0, 0, 3); // We expect a tensor of shape (height x width x 3)! 
- BufferedImage image = new BufferedImage(shape(1), shape(0), type.bufferType); - byte[] data = DataConverter.get().convert( _getRawData(), byte[].class); - _writeImgData(new DataBufferByte(data, data.length), image); - return image; + BufferedImage image = new BufferedImage(shape(1), shape(0), type.bufferType); + byte[] data = DataConverter.get().convert( _getRawData(), byte[].class); + _writeImgData(new DataBufferByte(data, data.length), image); + return image; } case BufferedImage.TYPE_4BYTE_ABGR: case BufferedImage.TYPE_4BYTE_ABGR_PRE: { - _checkRankForImageConversion(type, Number.class, 0, 0, 4); - BufferedImage image = new BufferedImage(shape(1), shape(0), type.bufferType); - byte[] data = DataConverter.get().convert( _getRawData(), byte[].class); - _writeImgData(new DataBufferByte(data, data.length), image); - return image; + _checkRankForImageConversion(type, Number.class, 0, 0, 4); + BufferedImage image = new BufferedImage(shape(1), shape(0), type.bufferType); + byte[] data = DataConverter.get().convert( _getRawData(), byte[].class); + _writeImgData(new DataBufferByte(data, data.length), image); + return image; } case BufferedImage.TYPE_INT_ARGB: { - _checkRankForImageConversion(type, Number.class, 0, 0, 1); - BufferedImage image = new BufferedImage(shape(1), shape(0), type.bufferType); - int[] data = DataConverter.get().convert( _getRawData(), int[].class); - _writeImgData(new DataBufferInt(data, data.length), image); - return image; + _checkRankForImageConversion(type, Number.class, 0, 0, 1); + BufferedImage image = new BufferedImage(shape(1), shape(0), type.bufferType); + int[] data = DataConverter.get().convert( _getRawData(), int[].class); + _writeImgData(new DataBufferInt(data, data.length), image); + return image; } } - throw new IllegalArgumentException("Image type '"+type+"' not supported."); + throw new IllegalArgumentException("Image type '"+type+"' not supported."); } private void _checkRankForImageConversion( ImageType type, Class<?> dataType, int... pattern ) { - int rank = pattern.length; // The expected rank! - if ( this.rank() != rank ) { - throw new IllegalArgumentException( - "Cannot create image of type '" + type.name() + "' from tensor of rank " + this.rank() + ". " + + int rank = pattern.length; // The expected rank! + if ( this.rank() != rank ) { + throw new IllegalArgumentException( + "Cannot create image of type '" + type.name() + "' from tensor of rank " + this.rank() + ". " + "Expected to receive tensor of rank " + rank + "." ); } - for ( int i = 0; i < pattern.length; i++ ) { - int axisSize = pattern[ i ]; // The expected axis size! - if ( axisSize > 0 ) { - if ( axisSize != this.shape(i) ) { - String shape = this.shape().stream().map( a -> a.toString() ).collect(Collectors.joining("x")); - throw new IllegalArgumentException( - "Cannot create image of type '" + type.name() + "' from tensor with shape (" + shape + "). " + + for ( int i = 0; i < pattern.length; i++ ) { + int axisSize = pattern[ i ]; // The expected axis size! + if ( axisSize > 0 ) { + if ( axisSize != this.shape(i) ) { + String shape = this.shape().stream().map( a -> a.toString() ).collect(Collectors.joining("x")); + throw new IllegalArgumentException( + "Cannot create image of type '" + type.name() + "' from tensor with shape (" + shape + "). " + "Axis " + i + " is expected to be of size " + axisSize + "." 
); } } } - if ( !dataType.isAssignableFrom(this.getItemType()) ) - throw new IllegalArgumentException( - "Cannot create image of type '" + type.name() + "' from tensor of type '" + this.getItemType().getSimpleName() + ". " + - "Expected to receive a tensor whose type is at least a sub-type of '" + dataType.getSimpleName() + "'." + if ( !dataType.isAssignableFrom(this.getItemType()) ) + throw new IllegalArgumentException( + "Cannot create image of type '" + type.name() + "' from tensor of type '" + this.getItemType().getSimpleName() + ". " + + "Expected to receive a tensor whose type is at least a sub-type of '" + dataType.getSimpleName() + "'." ); - } + } private static void _writeImgData( DataBuffer data, BufferedImage target ) { - target.setData( - Raster.createRaster( target.getSampleModel(), data, new Point() ) + target.setData( + Raster.createRaster( target.getSampleModel(), data, new Point() ) ); - } + } /** * {@inheritDoc} */ @Override public Tensor<V> addToGradient( Tensor<V> error ) { - _guardSet("gradient"); - Optional<Tensor> grad = this.find( Tensor.class ); - grad.ifPresent( gradient -> - this.set( - MemUtil.keep( gradient, error, () -> - Neureka.get() - .backend() - .getFunction() - .plusAssign() - .call(gradient, error) + _guardSet("gradient"); + Optional<Tensor> grad = this.find( Tensor.class ); + grad.ifPresent( gradient -> + this.set( + MemUtil.keep( gradient, error, () -> + Neureka.get() + .backend() + .getFunction() + .plusAssign() + .call(gradient, error) ) )); - if ( !grad.isPresent() ) { - this.set( error ); - this.find( Device.class ).ifPresent( device -> { + if ( !grad.isPresent() ) { + this.set( error ); + this.find( Device.class ).ifPresent( device -> { try { - device.store( error ) ; - } catch ( Exception exception ) { - _LOG.error( "Failed trying to store a given error to a device for gradient accumulation.", exception ); - throw exception; - } - }); + device.store( error ) ; + } catch ( Exception exception ) { + _LOG.error( "Failed trying to store a given error to a device for gradient accumulation.", exception ); + throw exception; + } + }); } - return this; + return this; } /** @@ -1294,16 +1305,16 @@ @Override public <T> T asType( Class<T> typeClass ) { - LogUtil.nullArgCheck( typeClass, "typeClass", Class.class ); - if ( typeClass == Tensor.class ) return (T) this; - if ( Number.class.isAssignableFrom( this.itemType()) && Number.class.isAssignableFrom(typeClass) ) { - DataConverter converter = DataConverter.get(); - return converter.convert( mean().at(0).get(), typeClass ); + LogUtil.nullArgCheck( typeClass, "typeClass", Class.class ); + if ( typeClass == Tensor.class ) return (T) this; + if ( Number.class.isAssignableFrom( this.itemType()) && Number.class.isAssignableFrom(typeClass) ) { + DataConverter converter = DataConverter.get(); + return converter.convert( mean().at(0).get(), typeClass ); } - if ( typeClass == String.class ) - return (T) this.toString(); + if ( typeClass == String.class ) + return (T) this.toString(); - throw new IllegalArgumentException("Failed to convert this tensor of type '"+getDataType()+"' to '"+typeClass+"'!"); + throw new IllegalArgumentException("Failed to convert this tensor of type '"+getDataType()+"' to '"+typeClass+"'!"); } /** @@ -1329,52 +1340,52 @@ */ private <T> Tensor<T> _toType( Class<T> typeClass ) { - DataType<V> newDataType = (DataType<V>) DataType.of( typeClass ); - if ( newDataType != this.getDataType() ) { - CPU.get().borrow((Tensor<Object>) this).in(()->{ - Object newData = _convertedDataOfType(typeClass); 
- _setData( null ); - _setData( getDevice().allocateFromAll( newDataType, this.getNDConf(), newData) ); - return null; + DataType<V> newDataType = (DataType<V>) DataType.of( typeClass ); + if ( newDataType != this.getDataType() ) { + CPU.get().borrow((Tensor<Object>) this).in(()->{ + Object newData = _convertedDataOfType(typeClass); + _setData( null ); + _setData( getDevice().allocateFromAll( newDataType, this.getNDConf(), newData) ); + return null; }); } - this.find( TensorImpl.class ).ifPresent(gradient -> gradient._toType( typeClass ) ); - return (Tensor<T>) this; + this.find( TensorImpl.class ).ifPresent(gradient -> gradient._toType( typeClass ) ); + return (Tensor<T>) this; } @Override public String toString() { - if ( this.isDeleted() ) return "deleted"; - else if ( this.isEmpty() ) return "empty"; - else if ( this.isUndefined() ) return "undefined"; - return NdaAsString.representing( this ).byDefaults().toString(); + if ( this.isDeleted() ) return "deleted"; + else if ( this.isEmpty() ) return "empty"; + else if ( this.isUndefined() ) return "undefined"; + return NdaAsString.representing( this ).byDefaults().toString(); } static int[][] makeFit( int[] sA, int[] sB ) { - int lastIndexOfA = 0; - for ( int i = sA.length-1; i >= 0; i-- ) { - if ( sA[ i ] != 1 ) { - lastIndexOfA = i; - break; + int lastIndexOfA = 0; + for ( int i = sA.length-1; i >= 0; i-- ) { + if ( sA[ i ] != 1 ) { + lastIndexOfA = i; + break; } } - int firstIndexOfB = 0; - for ( int i = 0; i < sB.length; i++ ) { - if ( sB[ i ] != 1 ) { - firstIndexOfB = i; - break; + int firstIndexOfB = 0; + for ( int i = 0; i < sB.length; i++ ) { + if ( sB[ i ] != 1 ) { + firstIndexOfB = i; + break; } } - int newSize = lastIndexOfA + sB.length - firstIndexOfB; - int[] rsA = new int[ newSize ]; - int[] rsB = new int[ newSize ]; - for( int i = 0; i <newSize; i++ ) { - if ( i <= lastIndexOfA ) rsA[ i ] = i; else rsA[ i ] = -1; - if ( i >= lastIndexOfA ) rsB[ i ] = i - lastIndexOfA+firstIndexOfB; else rsB[ i ] = -1; + int newSize = lastIndexOfA + sB.length - firstIndexOfB; + int[] rsA = new int[ newSize ]; + int[] rsB = new int[ newSize ]; + for( int i = 0; i <newSize; i++ ) { + if ( i <= lastIndexOfA ) rsA[ i ] = i; else rsA[ i ] = -1; + if ( i >= lastIndexOfA ) rsB[ i ] = i - lastIndexOfA+firstIndexOfB; else rsB[ i ] = -1; } - return new int[][]{ rsA, rsB }; + return new int[][]{ rsA, rsB }; } } - \ No newline at end of file + \ No newline at end of file diff --git a/docs/coverage/test/html/neureka/index.html b/docs/coverage/test/html/neureka/index.html index dd0f5bcb8..02f523a70 100644 --- a/docs/coverage/test/html/neureka/index.html +++ b/docs/coverage/test/html/neureka/index.html @@ -1 +1 @@ -neureka

[Generated JaCoCo coverage table for the neureka package (docs/coverage/test/html/neureka/index.html); the single-line HTML is not reproduced here. Old report totals: 1,147 of 9,232 instructions missed (87% instruction coverage, 78% branch coverage). New report totals: 1,233 of 9,310 instructions missed (86% instruction coverage, 76% branch coverage). Changed rows include TensorImpl, TensorConstructor (now accompanied by a TensorConstructor.Args entry) and the newly listed NoOpData class.]
\ No newline at end of file diff --git a/docs/coverage/test/html/neureka/index.source.html b/docs/coverage/test/html/neureka/index.source.html index 5dda8e5d9..5fb274068 100644 --- a/docs/coverage/test/html/neureka/index.source.html +++ b/docs/coverage/test/html/neureka/index.source.html @@ -1 +1 @@ -neureka

[Generated JaCoCo per-source-file coverage table for the neureka package (docs/coverage/test/html/neureka/index.source.html); the single-line HTML is not reproduced here. Old report totals: 1,147 of 9,232 instructions missed (87% instruction, 78% branch coverage). New report totals: 1,233 of 9,310 instructions missed (86% instruction, 76% branch coverage). Rows cover TensorImpl.java, Tensor.java, Neureka.java, Nda.java, Data.java, Shape.java, AbstractNda.java, MutateNda.java, MutateTensor.java, TensorConstructor.java and the newly added NoOpData.java.]
\ No newline at end of file diff --git a/docs/coverage/test/jacocoTestReport.xml b/docs/coverage/test/jacocoTestReport.xml index 524138c05..a739e4a62 100644 --- a/docs/coverage/test/jacocoTestReport.xml +++ b/docs/coverage/test/jacocoTestReport.xml @@ -1 +1 @@ - \ No newline at end of file + \ No newline at end of file diff --git a/docs/jdocs/allclasses-frame.html b/docs/jdocs/allclasses-frame.html new file mode 100644 index 000000000..b8e3302f1 --- /dev/null +++ b/docs/jdocs/allclasses-frame.html @@ -0,0 +1,438 @@ + + + + + +All Classes (neureka 1.0.1 API) + + + + +

All Classes

+
+ +
+ + diff --git a/docs/jdocs/allclasses-index.html b/docs/jdocs/allclasses-index.html deleted file mode 100644 index 67f77acbb..000000000 --- a/docs/jdocs/allclasses-index.html +++ /dev/null @@ -1,1418 +0,0 @@ - - - - -All Classes and Interfaces (neureka 1.0.0 API) - - - - - - - - - - - - - - -
- -
-
-
-

All Classes and Interfaces

-
-
-
-
-
-
Class
-
Description
- -
 
- -
 
- -
-
Together with the Component interface, this class defines a simple component system in which implementations of the Component interface are managed by extensions of this AbstractComponentOwner.
-
- -
 
- -
-
This is the abstract precursor class providing some useful implementations for core concepts which are most likely applicable to most concrete implementations of the Device interface.
-
- -
-
This is a partial implementation of the Algorithm interface which implements the component system for implementation instances of the ImplementationFor interface.
-
- -
 
- -
 
- -
-
This is the base class for implementations of the Algorithm interface.
-
- -
 
- -
-
The following is an abstract implementation of the NDConfiguration which offers a basis for instantiation and caching of concrete implementations extending this abstract class.
-
- -
-
This abstract Operation implementation is a useful template for creating new operations.
-
- -
-
This interface is the declaration for the lambda actions passed to the ADAction.act(ADTarget) method of the ADAction interface.
-
- -
-
Implementations of this functional interface ought to return a new instance of the ADAction class responsible for performing automatic differentiation, both for forward and backward mode differentiation.
-
-
AdaGrad<V extends Number>
-
-
Adaptive Gradients, or AdaGrad for short, is an extension of the gradient descent optimization algorithm that adjusts the step size for each parameter based on the squared gradients seen over the course of previous optimization steps.
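For reference, the per-parameter AdaGrad update in its textbook form (not quoted from this library's sources) accumulates squared gradients and divides the step by their root:

    G_t = G_{t-1} + g_t \odot g_t
    w_{t+1} = w_t - \frac{\eta}{\sqrt{G_t} + \epsilon} \odot g_t

where g_t is the current gradient, \eta the learning rate and \epsilon a small constant for numerical stability.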
-
- -
 
-
ADAM<V extends Number>
-
-
ADAM (short for Adaptive Moment Estimation) is an adaptive learning rate optimization algorithm that utilises both momentum and scaling, combining the benefits of RMSProp and SGD with respect to Momentum.
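For reference, the standard ADAM update (textbook form, not quoted from this library's sources) keeps exponential moving averages of the gradient and of its square, bias-corrects them, and scales the step accordingly:

    m_t = \beta_1 m_{t-1} + (1-\beta_1) g_t
    v_t = \beta_2 v_{t-1} + (1-\beta_2) g_t \odot g_t
    \hat{m}_t = m_t / (1-\beta_1^t), \qquad \hat{v}_t = v_t / (1-\beta_2^t)
    w_{t+1} = w_t - \eta \, \hat{m}_t / (\sqrt{\hat{v}_t} + \epsilon)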
-
- -
 
- -
 
- -
-
An ADSupportPredicate lambda checks which auto differentiation mode can be performed for a given ExecutionCall.
-
- -
-
This is simply a wrapper for useful information needed by implementations of the ADAction interface to perform error propagation.
-
- -
-
This class is the middle layer of the 3 tier compositional architecture of this backend, which consists of Operations, Algorithms and, in case of a DeviceAlgorithm, also ImplementationFor.
-
- -
 
- -
-
Extend this class to define additional meta arguments for Functions.
-
- -
 
- -
 
- -
-
This is an important argument whose role might not be clear at first: an operation can have multiple inputs, however when calculating the derivative for a forward or backward pass one must know which derivative ought to be calculated.
-
- -
 
- -
 
- -
 
- -
 
- -
 
- -
 
- -
 
- -
 
- -
 
- -
-
The following argument is relevant for a particular type of operation, namely: an "indexer".
-
- -
 
- -
 
-
At<K,R>
-
 
- -
 
- -
-
This class represents the labeled axis of an NDFrame.
-
- -
 
- -
 
- -
-
This is the starting point of the call transition graph exposed by the slice builder API.
-
- -
 
- -
 
- -
-
The ?axpy routines perform a vector-vector operation defined as y := a*x + y, where a is a scalar and x and y are vectors, each with n elements.
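A minimal Java sketch of what such a routine computes (an illustrative loop, not the library's tuned BLAS kernel):

    // y := a*x + y over n elements
    static void axpy(int n, double a, double[] x, double[] y) {
        for (int i = 0; i < n; i++)
            y[i] += a * x[i];
    }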
-
- -
-
Instances of this class are execution contexts hosting Operation instances which receive Tensor instances for execution.
-
- -
-
This is a very simple class with a single purpose: it exposes methods which receive lambda instances, executes them in a given BackendContext, and then switches back to the original context again.
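A hypothetical sketch of that pattern (the names here are made up and are not the library's actual Runner API): run a task against a temporary context and always restore the previous one afterwards.

    final class ContextSwitcher<C> {
        private C current;                       // the currently active context
        ContextSwitcher(C initial) { this.current = initial; }
        <T> T runWith(C temporary, java.util.function.Supplier<T> task) {
            C previous = current;
            current = temporary;                 // switch to the requested context
            try { return task.get(); }
            finally { current = previous; }      // always switch back afterwards
        }
    }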
-
- -
-
Implementations of this might introduce CUDA or ROCM to Neureka.
-
- -
-
This class describes an available Device implementation found for a given BackendExtension.
-
- -
 
- -
 
- -
-
How much memory, and how many threads share that memory.
-
- -
 
- -
 
- -
 
- -
-
This is a simple, fixed size cache for immutable objects which are shared throughout the library runtime...
-
- -
-
Lazy cache entries are entries whose values will be calculated - only when the entry is being stored in the cache.
-
- -
-
Instances of this class model simple execution calls to the backend.
-
-
Call.Builder<V,T extends Device<V>>
-
 
-
This is an OpenCL context component for any given BackendContext which - extends a given backend context instance for additional functionality, which in - this case is the OpenCL backend storing platform and device information.
-
-
Performs a dot product on two vectors using OpenCL.
-
- -
 
- -
-
Turns a Function into OpenCL kernel code to make - optimized just in time compilation possible.
-
- -
 
- -
-
This class is the ExecutorFor<OpenCLDevice> implementation used to properly call an OpenCLDevice instance via the ExecutionOn<OpenCLDevice> lambda implementation receiving an instance of the ExecutionCall class.
-
- -
 
-
OpenCL related settings for the CLBackend extension.
-
- -
 
- -
-
Stuff common to Hardware and ConcreteMachine.
-
- -
-
This interface, alongside the AbstractComponentOwner class, defines a simple component system.
-
- -
-
Entries of this enum represent events describing updates to the state - of the owner of a given Component instance.
-
- -
-
Component.OwnerChangeRequest implementation instances will be passed to the Component.update(OwnerChangeRequest) method, which informs a given component about a state change related to said component.
-
- -
-
A component owner is a thing holding components which can be accessed by their type class.
-
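To make the idea of components accessed by their type class concrete, here is a minimal, hypothetical sketch of such an owner backed by a map keyed on the component class. Neureka's AbstractComponentOwner is more involved (it also notifies components about owner state changes), but the lookup principle is the same; the class and method names below are made up for illustration.

    import java.util.HashMap;
    import java.util.Map;
    import java.util.Optional;

    // Minimal sketch of a type-class keyed component store (not Neureka's implementation).
    class SimpleComponentOwner {
        private final Map<Class<?>, Object> components = new HashMap<>();

        // Stores a component under its concrete class (a real system would also handle supertypes).
        <T> void set(T component) { components.put(component.getClass(), component); }

        // Looks a component up by its type class.
        <T> Optional<T> get(Class<T> type) {
            return Optional.ofNullable(type.cast(components.get(type)));
        }
    }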
- -
 
- -
 
- -
 
- -
-
The ?copy routines perform a vector-vector operation defined as y = x, where x and y are vectors.
-
- -
 
- -
-
The CPU class, one of many implementations of the Device interface, - is simply supposed to be an API for dispatching threaded workloads onto the CPU - as well as reading from or writing to tensors it stores.
-
- -
 
- -
-
The CPU.JVMExecutor offers functionality similar to the parallel stream API, however it differs in that the CPU.JVMExecutor processes CPU.RangeWorkload lambdas instead of simply exposing a single index or concrete elements for a given workload size.
-
- -
-
A simple functional interface for executing a range, whose implementations will either be executed sequentially or dispatched to a thread pool, given that the provided workload is large enough.
-
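The following stand-alone sketch illustrates the range-workload idea described above: a workload over the index range [0, n) is run inline when small, or split into chunks that are submitted to a thread pool. It is a simplified stand-in, not the CPU.JVMExecutor itself; the threshold value and the chunking strategy are assumptions made for illustration.

    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.Future;
    import java.util.function.BiConsumer;

    // Simplified range dispatcher: run small workloads inline, split large ones across a pool.
    final class RangeDispatcher {
        private static final int SEQUENTIAL_THRESHOLD = 1_000; // assumption, not Neureka's value
        private final ExecutorService pool =
                Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors());

        void dispatch(int n, BiConsumer<Integer, Integer> rangeWorkload) {
            if (n <= SEQUENTIAL_THRESHOLD) {
                rangeWorkload.accept(0, n); // small enough: execute sequentially
                return;
            }
            int chunks = Runtime.getRuntime().availableProcessors();
            int chunkSize = (n + chunks - 1) / chunks;
            List<Future<?>> futures = new ArrayList<>();
            for (int start = 0; start < n; start += chunkSize) {
                int from = start, to = Math.min(start + chunkSize, n);
                futures.add(pool.submit(() -> rangeWorkload.accept(from, to)));
            }
            for (Future<?> f : futures) {
                try { f.get(); } catch (Exception e) { throw new RuntimeException(e); }
            }
        }
    }

A caller would, for example, pass (from, to) -> { for (int i = from; i < to; i++) data[i] *= 2; } to process an array in parallel chunks.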
- -
-
This class loads the CPU operations into the Neureka library context.
-
- -
 
-
This class is a wrapper for the ImplementationFor<CPU> interface which enables a functional style of implementing the backend API. It is used merely as a simple formality and implementation type specification.
-
- -
-
This is represents the second step in the simple builder API for CPUImplementation instances.
-
- -
-
This is a library internal class, do not depend on this.
-
- -
 
- -
-
An implementation of the min and max algorithm running on the CPU.
-
- -
 
-
An implementation of the sum algorithm running on the CPU.
-
- -
-
This class is one of many extensions of the AbstractFileHandle which - is therefore ultimately an implementation of the FileHandle interface.
-
- -
-
An abstract class for NDConfigurations which are representing - tensors of rank 1, meaning the name of this class translates to "Dimension-1-Configuration".
-
- -
-
An abstract class for NDConfigurations which are representing - tensors of rank 2, meaning the name of this class translates to "Dimension-2-Configuration".
-
- -
-
An abstract class for NDConfigurations which are representing - tensors of rank 3, meaning the name of this class translates to "Dimension-3-Configuration".
-
- -
-
A wrapper type for the raw data array of a tensor/nd-array, - which is typically provided by implementations of the Device interface.
-
- -
-
This class is a singleton.
-
- -
-
This is a stateful and parallelized converter for converting the internal data array of a tensor - to another data array based on a provided lambda.
-
- -
-
This is a static utility class containing the actual conversion logic - which is usually referenced by the Converter lambdas via method signatures...
-
- -
-
This class is a Multiton implementation for wrapping and representing type classes.
-
- -
-
Implementations of this represent computational - devices for storing tensors (instances of the Tensor<V> class), which may - also expose a useful API for executing operations on tensors (used in backend operations).
-
- -
-
Implementations of this represent the access to tensors stored on a device - in order to read from or write to said tensor.
-
- -
-
The second part of the method chain of the fluent API for executing - tensors on this Device temporarily.
-
- -
-
Instances of this complete a request for writing to an accessed tensor stored on a device.
-
- -
-
A DeviceAlgorithm is an advanced form of Algorithm which - delegates the execution to implementations of ImplementationFor specific Device types.
-
- -
 
- -
-
A sub-interface of the Data interface providing - more device specific methods.
-
- -
-
A program that queries and prints information about all - available devices.
-
- -
 
- -
 
- -
 
- -
-
The ?dot routines perform a vector-vector reduction operation defined as Equation where xi and yi are - elements of vectors x and y.
-
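The reduction described above (the sum over x_i * y_i) can be written as the following reference loop; it only illustrates the contract, not the optimized routine, and ignores strides and offsets.

    // Reference behaviour of a dot routine: result = sum over i of x[i] * y[i].
    static double dot(int n, double[] x, double[] y) {
        double sum = 0.0;
        for (int i = 0; i < n; i++) sum += x[i] * y[i];
        return sum;
    }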
- -
 
- -
 
- -
-
This is a lambda based Algorithm implementation providing some basic functionality for implementing custom activation functions.
-
- -
-
Methods inside this utility class execute only some ExecutionCall arguments - in groups if their total number exceeds the arity of an operation.
-
- -
-
Implementations of this functional interface are supposed to be the final execution procedure responsible for dispatching the execution further into the backend.
-
-
ExecutionCall<D extends Device<?>>
-
-
This class is a simple container holding references to a targeted - Device, Operation and maybe some case specific - meta Args needed to execute - an array of input tensors which are also wrapped by this.
-
- -
 
- -
-
An Algorithm will typically produce a result when executing an ExecutionCall.
-
- -
 
- -
-
This is an internal class for managing the extension of any given BackendContext class.
-
- -
 
- -
 
- -
 
- -
-
The FileDevice is a Device implementation - responsible for reading tensors from and or writing them to a given directory.
-
- -
 
- -
-
Implementations of this ought to map the index of a - tensor entry to a value which should be placed at that entry position.
-
- -
 
- -
-
This is the starting point for defining the slice range of a specified axis within - the call transition graph exposed by the slice builder API.
-
- -
 
- -
 
- -
-
Besides the Tensor class, which is the core class of Neureka, this interface and its implementations represent the second most important feature of this library.
-
- -
-
An API for calling a Function after having specified - a set of Arg instances through the Function.with(Args) - method.
-
- -
-
This class is part of a given BackendContext instance - responsible for caching Function references based on - their String representation generated by Object.toString() - as well as caching of results for active functions.
-
- -
-
Instances of this implementation of the Function interface are leaf nodes within the abstract syntax tree of a function, representing constant numeric values to a function.
-
- -
-
Instances of this implementation of the Function interface are leaf nodes within the abstract syntax tree of a function, representing inputs to a function.
-
- -
-
The most common type of Function which references other Functions to - form an abstract syntax tree.
-
- -
-
The FunctionParser takes a BackendContext instance based on which - it builds Function implementation instances, usually by parsing Strings.
-
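To clarify how constants, inputs, and operation nodes fit together as leaf and inner nodes of such an abstract syntax tree, here is a small, hypothetical miniature of the idea; the type names are made up for illustration and do not match Neureka's Function classes.

    // Hypothetical miniature of a function AST: constants and inputs are leaves,
    // operation nodes reference other functions as children (illustration only).
    interface MiniFunction {
        double call(double[] inputs);
    }

    final class Constant implements MiniFunction {
        private final double value;
        Constant(double value) { this.value = value; }
        public double call(double[] inputs) { return value; }
    }

    final class Input implements MiniFunction {
        private final int index;
        Input(int index) { this.index = index; }
        public double call(double[] inputs) { return inputs[index]; }
    }

    final class Sum implements MiniFunction {
        private final MiniFunction left, right;
        Sum(MiniFunction left, MiniFunction right) { this.left = left; this.right = right; }
        public double call(double[] inputs) { return left.call(inputs) + right.call(inputs); }
    }

    // An expression like "I(0) + 3" would parse into:  new Sum(new Input(0), new Constant(3))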
- -
 
- -
-
Instances of this implementation of the Function interface are leaf nodes within the abstract syntax tree of a function, representing indexed inputs to a function.
-
- -
 
- -
-
The Self Gated Softsign Unit is based on the Softsign function - (a computationally cheap non-exponential quasi Tanh) - making it a polynomially based version of the GaTU function which - is itself based on the Tanh function.
-
- -
-
The Self Gated Tanh Unit is based on the Tanh - making it an exponentiation based version of the GaSU function which - is itself based on the Softsign function - (a computationally cheap non-exponential quasi Tanh).
-
- -
 
- -
 
- -
-
The GELU activation function is based on the standard Gaussian cumulative distribution function - and is defined as x Φ( x ) and implemented as x * sigmoid(x * 1.702).
-
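Since the description above states the approximation explicitly (x * sigmoid(x * 1.702)), here it is as a one-method reference in plain Java:

    // Fast GELU approximation as described above: gelu(x) = x * sigmoid(1.702 * x).
    static double gelu(double x) {
        return x / (1.0 + Math.exp(-1.702 * x)); // equivalent to x * sigmoid(1.702 * x)
    }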
- -
-
A collection of primitive sub-routines for matrix multiplication performed on - continuous arrays which are designed so that they can be vectorized by the - JVMs JIT compiler (AVX instructions).
-
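The key property of such sub-routines is that they work on flat, contiguous (row-major) arrays with an inner loop the JIT can auto-vectorize. The following naive kernel is a minimal stand-alone illustration of that layout, not the library's tuned implementation.

    // Naive row-major matmul over flat arrays: C[m x n] += A[m x k] * B[k x n].
    // The innermost loop walks both C and B contiguously, which JIT compilers can vectorize.
    static void matmul(double[] A, double[] B, double[] C, int m, int k, int n) {
        for (int i = 0; i < m; i++) {
            for (int p = 0; p < k; p++) {
                double a = A[i * k + p];
                for (int j = 0; j < n; j++) {
                    C[i * n + j] += a * B[p * n + j];
                }
            }
        }
    }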
- -
 
- -
 
- -
 
- -
-
Instances of the GraphNode class are components of tensors (Tensor instances) - which model and record computations / operations between them.
-
- -
 
- -
-
This models the cache levels and threads of a CPU using an array where each entry represents a memory level.
-
- -
 
- -
 
- -
 
- -
-
The following abstract class implements some basic logic which - is applicable across all final concrete classes extending this abstract one.
-
- -
-
The ?axpy routines perform a vector-vector operation defined as y := a*x + y where: a is a scalar x and y - are vectors each with a number of elements that equals n.
-
- -
 
- -
-
The ?dot routines perform a vector-vector reduction operation defined as Equation where xi and yi are - elements of vectors x and y.
-
- -
-
This class is one of many extensions of the AbstractFileHandle which - is therefore ultimately an implementation of the FileHandle interface.
-
- -
-
A collection of primitive sub-routines for matrix multiplication performed on - continuous arrays which are designed so that they can be vectorized by the - JVMs JIT compiler (AVX instructions).
-
- -
 
- -
 
- -
-
Generally speaking, this interface describes the functionality of an implementation - of an execution procedure tailored to a specific Device (interface) instance - and Algorithm (interface) instance! - Instances of implementations of the ImplementationFor interface are components - of instances of implementations of the Algorithm interface, - which themselves are components of Operation implementation instances.
-
- -
 
- -
 
- -
 
- -
-
This class keeps track of graph nodes which require - back-propagation in order - to be able to continue the process at a later point in time - (based on some configurable conditions).
-
- -
-
A fixed sized cache for ad-hoc (just in time compiled) OpenCLDevice kernels.
-
- -
-
Instances of this class are utility factories provided by OpenCLDevice instances.
-
- -
 
- -
-
Provides kernel source code for a provided ExecutionCall.
-
- -
-
This will simply fetch a variable from a lambda once and then continuously - return this one value.
-
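A minimal equivalent of that behaviour is a memoizing supplier which evaluates its lambda once and then keeps returning the cached value; the sketch below only illustrates the concept and is not the library's class.

    import java.util.function.Supplier;

    // Illustration only: evaluate the wrapped lambda once, then keep returning the cached value.
    final class Memoized<T> implements Supplier<T> {
        private Supplier<T> source;
        private T value;

        Memoized(Supplier<T> source) { this.source = source; }

        public synchronized T get() {
            if (source != null) {   // first call: evaluate and drop the lambda
                value = source.get();
                source = null;
            }
            return value;
        }
    }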
- -
-
This is a simple utility class which traverses nested data structures and converts them into - information which can be used to instantiate a tensor, - namely: A flat data array, a shape array and a type class.
-
- -
 
-
A utility class for message formatting.
-
- -
 
- -
 
- -
 
- -
-
Utility methods for deleting tensors or preventing thereof.
-
- -
-
This class validates the states of tensors with respect to memory management - before and after a lambda executes a function or some kind of algorithm on said tensors.
-
- -
 
- -
 
- -
 
- -
 
-
Momentum<V extends Number>
-
 
- -
 
- -
 
- -
-
Nd-arrays should be used as immutable data structures mostly, however sometimes it - is important to mutate their state for performance reasons.
-
- -
-
Instances of this are being returned by the Nda.at(int...) method, and they allow you to get or set individual nd-array items.
-
- -
-
Tensors should be considered immutable, however sometimes it - is important to mutate their state for performance reasons.
-
- -
-
Nda, which is an abbreviation of 'N-Dimensional-Array', represents - a multidimensional, homogeneously filled fixed-size array of items.
-
- -
-
Instances of this are being returned by the Nda.at(int...) method, and they allow you to get individual nd-array items.
-
- -
-
This class is in essence a simple wrapper class for a tensor and a StringBuilder. Methods in this class use the builder in order to construct a String representation for said tensor.
-
- -
-
A builder interface providing multiple different options for building - a NdaAsString instance in a fluent way.
-
- -
-
This class is a simple utility class which contains - a collection of static and stateless methods containing - useful functionalities for tensor stringification.
-
- -
-
This is the implementation of the fluent builder API for creating Nda/Tensor instances.
-
- -
-
This interface represents the access pattern configuration for the data array of a tensor.
-
- -
-
Implementations of this are produced and returned by the NDConfiguration.getIndexToIndexAccessPattern() - and their purpose is to translate the item index of a tensor to the index of the - item within the underlying data array of said tensor.
-
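For a plain row-major layout, the translation described above reduces to stride arithmetic: each axis stride is the product of the sizes of all later axes, and the data index is the dot product of the nd-index with those strides. The sketch below shows only that default case; permuted, sliced or virtual configurations use different translations.

    // Row-major stride computation and nd-index -> flat data index translation (default case only).
    static int[] rowMajorStrides(int[] shape) {
        int[] strides = new int[shape.length];
        int stride = 1;
        for (int i = shape.length - 1; i >= 0; i--) {
            strides[i] = stride;
            stride *= shape[i];
        }
        return strides;
    }

    static int indexOf(int[] ndIndex, int[] strides) {
        int flat = 0;
        for (int i = 0; i < ndIndex.length; i++) flat += ndIndex[i] * strides[i];
        return flat;
    }
    // Example: shape {2, 3} has strides {3, 1}, so nd-index {1, 2} maps to data position 5.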
- -
-
Types of common data layouts:
- - ROW_MAJOR
-
- -
-
This utility class provides static methods which are helpful - for nd-configuration related operations like reshaping, - incrementing or decrementing index arrays...
-
- -
 
- -
 
- -
-
Instances of this class are components of tensors, which store aliases for the indices of the tensor.
-
- -
-
This interface defines the most essential methods - of the nd-array/tensor API, which describe them - with respect to their dimensionality.
-
- -
-
An NDIterator is used to iterate over n-dimensional arrays.
-
- -
-
Defines if a new NDIterator is allowed to be a VirtualNDIterator.
-
- -
-
This is simply a mutable container for configuring how Tensor - instances ought to be converted to Strings.
-
- -
 
- -
-
Static utility methods for the NDArray.
-
- -
-
Neureka is the key access point for thread local / global library settings (see Neureka.Settings) as well as execution contexts (see BackendContext) and pre-instantiated Functions.
-
- -
 
- -
-
This interface enables "Polymorphic" utility by defining common functionalities - used for handling various numeric types.
-
- -
-
This class models OpenCL supporting accelerator hardware like GPUs or FPGAs - for storing tensors and executing operations on them.
-
- -
 
- -
 
- -
-
This class models the OpenCL concept of platforms, which refer to device - vendors / or vendor OpenCL runtime drivers.
-
- -
-
This interface is part of the backend API, and it embodies the top layer of the 3 tier backend architecture.
-
- -
-
This builder class builds instances of the Operation interface.
-
- -
 
- -
 
- -
 
- -
-
Optimizers are tensor components which implement the Optimization (functional) - interface applying various optimization algorithms to the gradients of tensors.
-
- -
 
- -
-
A set of standard levels of parallelism derived from the number of available cores and optionally capped by - reserving a specified amount of memory per thread.
-
- -
 
- -
-
Utility for parsing function expressions.
-
- -
 
-
This type of operation belongs to the same species as the - Summation operation.
-
- -
 
- -
-
This Operation takes an optional user seed, - the shape of its input tensor, and - the indices of individual elements within said tensor to generate - floats or doubles with a gaussian distribution where the mean - is 0 and the standard deviation is 1.
-
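A seeded fill with standard-normal values (mean 0, standard deviation 1) as described above can be sketched with java.util.Random; how Neureka actually combines the user seed with the element indices is an implementation detail not shown here.

    import java.util.Random;

    // Fill an array with standard-normal samples from an optional user seed (illustration only).
    static double[] gaussianFill(int size, Long seed) {
        Random rng = (seed == null) ? new Random() : new Random(seed);
        double[] out = new double[size];
        for (int i = 0; i < size; i++) {
            out[i] = rng.nextGaussian(); // mean 0, standard deviation 1
        }
        return out;
    }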
- -
 
-
This class is an important tensor component responsible for - managing the relationships between slices and the tensors from which - they have been derived.
-
- -
 
- -
 
- -
 
- -
 
- -
-
An immutable wrapper for a tensor as a result of an Execution, as well as an ADActionSupplier for providing auto-differentiation support.
-
-
RMSProp<V extends Number>
-
-
Root Mean Squared Propagation, or RMSProp, - is an extension of gradient descent and the AdaGrad version of gradient - descent that uses a decaying average of partial gradients in the adaptation of the - step size for each parameter.
-
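One RMSProp step, following the decaying-average rule described above; this only illustrates the math, and the decay and epsilon values are typical literature defaults rather than necessarily Neureka's.

    // One RMSProp update step: a decaying average of squared gradients scales the step size.
    static void rmsPropStep(double[] w, double[] g, double[] avgSq, double lr, double decay) {
        for (int i = 0; i < w.length; i++) {
            avgSq[i] = decay * avgSq[i] + (1 - decay) * g[i] * g[i];
            w[i] -= lr * g[i] / (Math.sqrt(avgSq[i]) + 1e-8);
        }
    }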
-
The Self Gated ScalarSoftsign Unit is based on the ScalarSoftsign function - (a computationally cheap non-exponential quasi ScalarTanh) - making it a polynomially based version of the ScalarGaTU function which - is itself based on the ScalarTanh function.
-
- -
-
The Self Gated ScalarTanh Unit is based on the ScalarTanh - making it an exponentiation based version of the ScalarGaSU function which - is itself based on the ScalarSoftsign function - (a computationally cheap non-exponential quasi ScalarTanh).
-
- -
 
- -
 
- -
-
The GELU activation function is based on the standard Gaussian cumulative distribution function - and is defined as x Φ( x ) and implemented as x * sigmoid(x * 1.702).
-
- -
 
- -
 
- -
 
- -
 
- -
 
- -
-
The Scaled Exponential Linear Unit, or SELU, is an activation - function that induces self-normalizing properties.
-
- -
 
- -
-
The SiLu activation function, also known as the swish function, is defined as x * sigmoid(x).
-
- -
 
- -
-
SoftPlus is a smooth approximation to the ReLU function and can be used - to constrain the output of a machine to always be positive.
-
- -
-
The softsign function, defined as x / ( 1 + Math.abs( x ) ), - is a computationally cheap 0 centered activation function - which rescales the inputs between -1 and 1, very much like the ScalarTanh function.
-
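The three activation functions described above (SiLU, SoftPlus and softsign) are fully specified by their standard formulas, so a direct plain-Java reference is straightforward; these are illustrative one-liners, not the library's implementations.

    // Reference formulas for the activations described above.
    static double silu(double x)     { return x / (1.0 + Math.exp(-x)); }    // x * sigmoid(x)
    static double softplus(double x) { return Math.log(1.0 + Math.exp(x)); } // smooth ReLU
    static double softsign(double x) { return x / (1.0 + Math.abs(x)); }     // rescales to (-1, 1)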
- -
 
- -
 
- -
 
- -
 
- -
-
The Scaled Exponential Linear Unit, or SELU, is an activation function that induces self-normalizing properties.
-
- -
 
- -
-
This class is a helper class for Neureka instances (Thread local singletons).
-
- -
-
Stochastic Gradient Descent is an iterative optimization technique - that uses the gradient of a weight variable to adjust said variable, - in order to reduce the error used to calculate said gradient.
-
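The rule described above is the simplest member of the optimizer family: each weight moves against its gradient, scaled by a learning rate (illustration of the math only).

    // One plain stochastic-gradient-descent step: w := w - lr * g.
    static void sgdStep(double[] w, double[] g, double lr) {
        for (int i = 0; i < w.length; i++) w[i] -= lr * g[i];
    }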
- -
 
- -
-
Basically a tuple of integers which is used to describe the shape of an array.
-
- -
 
- -
-
The SiLu activation function, also known as the swish function, is defined as x * sigmoid(x).
-
- -
 
-
This class is the heart of the slice builder API, collecting range configurations by - exposing an API consisting of multiple interfaces which form a call state transition graph.
-
-
This class is responsible for receiving any input and trying to interpret it so that a - slice can be formed.
-
- -
-
SoftPlus is a smooth approximation to the ReLU function and can be used - to constrain the output of a machine to always be positive.
-
- -
-
The softsign function, defined as x / ( 1 + Math.abs( x ) ), - is a computationally cheap 0 centered activation function - which rescales the inputs between -1 and 1, very much like the Tanh function.
-
- -
 
- -
 
- -
-
This interface defines the last step in the call transition graph of the fluent builder API when - building a Tensor instance populated based on the values within a defined range.
-
- -
 
- -
-
This interface extends the AxisOrGet interface which provides the option to either continue - slicing another axis or simply trigger the creation and return of a slice instance based on the - already provided slice configuration.
-
- -
 
- -
-
This is an abstract interface which simply describes "a thing that stores tensors".
-
- -
 
- -
-
The SuitabilityPredicate checks if a given instance of an ExecutionCall is suitable to be executed by the ImplementationFor instances residing in this Algorithm as components.
-
- -
 
- -
 
- -
-
This type of operation belongs to the same species as the - Product operation.
-
- -
 
- -
 
- -
-
A Tensor is a mathematical concept and type of multidimensional - data-structure with certain transformation properties.
-
- -
-
Use this enum as argument for the Tensor.asImage(Tensor.ImageType) method to - specify the type of image that should be returned.
-
-
To<V>
-
-
This step in the call transition graph of the fluent builder API is a followup call - from the IterByOrIterFromOrAll.andFillFrom(Object) method which - expects a range to be specified whose values will be used to populate the Tensor instance.
-
-
To<V>
-
-
This is the second part for defining the slice range of a specified axis within - the call transition graph exposed by the slice builder API.
-
-
VirtualNDConfigurations represent tensors which - are filled homogeneously with a single value exclusively, - like for example a tensor filled with only zeros.
-
- -
 
-
An API for registering workloads which will be divided into smaller workloads so that they can - be executed efficiently by a thread pool...
-
- -
-
Divides workloads until they can be processed efficiently - and then submits them to a thread pool for execution...
-
- -
 
- - diff --git a/docs/jdocs/allclasses.html b/docs/jdocs/allclasses-noframe.html similarity index 96% rename from docs/jdocs/allclasses.html rename to docs/jdocs/allclasses-noframe.html index 5da1d2a23..db92e8e0d 100644 --- a/docs/jdocs/allclasses.html +++ b/docs/jdocs/allclasses-noframe.html @@ -1,24 +1,15 @@ - + -All Classes (neureka 0.21.0 API) - +All Classes (neureka 1.0.1 API) - - - - - -

All Classes

-
+
-
+ diff --git a/docs/jdocs/allpackages-index.html b/docs/jdocs/allpackages-index.html deleted file mode 100644 index 7e2872b77..000000000 --- a/docs/jdocs/allpackages-index.html +++ /dev/null @@ -1,261 +0,0 @@ - - - - -All Packages (neureka 1.0.0 API) - - - - - - - - - - - - - - -
- -
-
-
-

All Packages

-
-
Package Summary
-
-
Package
-
Description
-
Everything in this package should be considered library-private! - DO NOT DEPEND ON CLASSES INSIDE THIS PACKAGE! - Code inside this package or any sub-packages might change frequently...
-
- -
 
- -
-
Everything in this package should be considered library-private! - DO NOT DEPEND ON CLASSES INSIDE THIS PACKAGE! - Code inside this package or any sub-packages might change frequently...
-
- -
 
-
Everything in this package should be considered library-private! - DO NOT DEPEND ON CLASSES INSIDE THIS PACKAGE! - Code inside this package or any sub-packages might change frequently...
-
- -
-
Everything in this package should be considered library-private! - DO NOT DEPEND ON CLASSES INSIDE THIS PACKAGE! - Code inside this package or any sub-packages might change frequently...
-
- -
-
Everything in this package should be considered library-private! - DO NOT DEPEND ON CLASSES INSIDE THIS PACKAGE! - Code inside this package or any sub-packages might change frequently...
-
- -
-
Everything in this package should be considered library-private! - DO NOT DEPEND ON CLASSES INSIDE THIS PACKAGE! - Code inside this package or any sub-packages might change frequently...
-
- -
-
Everything in this package should be considered library-private! - DO NOT DEPEND ON CLASSES INSIDE THIS PACKAGE! - Code inside this package or any sub-packages might change frequently...
-
- -
-
Everything in this package should be considered library-private! - DO NOT DEPEND ON CLASSES INSIDE THIS PACKAGE! - Code inside this package or any sub-packages might change frequently...
-
- -
-
Everything in this package should be considered library-private! - DO NOT USE CLASSES INSIDE THIS PACKAGE!
-
- -
 
- -
-
Everything in this package should be considered library-private! - DO NOT DEPEND ON CLASSES INSIDE THIS PACKAGE! - Code inside this package or any sub-packages might change frequently...
-
- -
-
Everything in this package should be considered library-private! - DO NOT DEPEND ON CLASSES INSIDE THIS PACKAGE! - Code inside this package or any sub-packages might change frequently...
-
- -
 
- -
 
- -
 
- -
 
- -
 
- -
 
- -
 
- -
-
Everything in this package should be considered library-private! - DO NOT USE CLASSES INSIDE THIS PACKAGE!
-
- -
-
Everything in this package should be considered library-private! - DO NOT USE CLASSES INSIDE THIS PACKAGE!
-
- -
 
- -
 
- -
 
- -
-
Everything in this package should be considered library-private! - DO NOT DEPEND ON CLASSES INSIDE THIS PACKAGE! - Code inside this package or any sub-packages might change frequently...
-
- -
 
- -
 
- -
 
- -
 
- -
 
- -
 
- -
 
- -
 
- -
-
Everything in this package should be considered library-private! - DO NOT DEPEND ON CLASSES INSIDE THIS PACKAGE! - Code inside this package or any sub-packages might change frequently...
-
- -
-
Everything in this package should be considered library-private! - DO NOT DEPEND ON CLASSES INSIDE THIS PACKAGE! - Code inside this package or any sub-packages might change frequently...
-
- -
 
- - diff --git a/docs/jdocs/constant-values.html b/docs/jdocs/constant-values.html index 4ab02cace..d27e6c3db 100644 --- a/docs/jdocs/constant-values.html +++ b/docs/jdocs/constant-values.html @@ -1,160 +1,300 @@ - + + - -Constant Field Values (neureka 1.0.0 API) - - - - + +Constant Field Values (neureka 1.0.1 API) - - - - - - + + -
- -
-

Constant Field Values

-

Contents

-
-
+
+ +

neureka.backend.*

-
    -
  • -
    neureka.backend.api.fun.SuitabilityPredicate
    -
    -
    Modifier and Type
    -
    Constant Field
    -
    Value
    -
    public static final float
    - -
    0.25f
    -
    public static final float
    - -
    0.875f
    -
    public static final float
    - -
    0.625f
    -
    public static final float
    - -
    0.375f
    -
    public static final float
    - -
    0.5f
    -
    public static final float
    - -
    1.0f
    -
    public static final float
    - -
    0.125f
    -
    public static final float
    - -
    0.0f
    -
    public static final float
    - -
    0.75f
    -
    +
      +
    • + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
      neureka.backend.api.fun.SuitabilityPredicate 
Modifier and Type | Constant Field | Value
+ public static final float | BAD | 0.25f
+ public static final float | EXCELLENT | 0.875f
+ public static final float | GOOD | 0.625f
+ public static final float | NOT_GOOD | 0.375f
+ public static final float | OKAY | 0.5f
+ public static final float | PERFECT | 1.0f
+ public static final float | TERRIBLE | 0.125f
+ public static final float | UNSUITABLE | 0.0f
+ public static final float | VERY_GOOD | 0.75f
    -
      -
    • -
      neureka.backend.main.implementations.broadcast.CLScalarBroadcast
      -
      -
      Modifier and Type
      -
      Constant Field
      -
      Value
      -
      protected static final String
      - -
      "#DATA_TYPE#"
      -
      +
        +
      • + + + + + + + + + + + + + + +
        neureka.backend.main.implementations.broadcast.CLScalarBroadcast 
Modifier and Type | Constant Field | Value
+ protected static final java.lang.String | TYPE | "#DATA_TYPE#"
      -
-
+ + +

neureka.devices.*

-
    -
  • -
    neureka.devices.host.CPU
    -
    -
    Modifier and Type
    -
    Constant Field
    -
    Value
    -
    public static final int
    - -
    32
    -
    public static final String
    - -
    "neureka-daemon"
    -
    + -
      -
    • -
      neureka.devices.host.machine.CommonMachine
      -
      -
      Modifier and Type
      -
      Constant Field
      -
      Value
      -
      protected static final long
      - -
      1024L
      -
      +
        +
      • + + + + + + + + + + + + + + +
        neureka.devices.host.machine.CommonMachine 
Modifier and Type | Constant Field | Value
+ protected static final long | K | 1024L
      • -
      • -
        neureka.devices.host.machine.Hardware
        -
        -
        Modifier and Type
        -
        Constant Field
        -
        Value
        -
        public static final long
        - -
        4096L
        -
        +
      • + + + + + + + + + + + + + + +
        neureka.devices.host.machine.Hardware 
Modifier and Type | Constant Field | Value
+ public static final long | OS_MEMORY_PAGE_SIZE | 4096L
      -
-
+ +
+ + + + + + + +
+ + diff --git a/docs/jdocs/deprecated-list.html b/docs/jdocs/deprecated-list.html index ee94d58e2..1b6cd5bc5 100644 --- a/docs/jdocs/deprecated-list.html +++ b/docs/jdocs/deprecated-list.html @@ -1,74 +1,143 @@ - + + - -Deprecated List (neureka 1.0.0 API) - - - - + +Deprecated List (neureka 1.0.1 API) - - - - - - + + -
- -
-

Deprecated API

Contents

-
+ +
+ + + + + + + +
+ + diff --git a/docs/jdocs/help-doc.html b/docs/jdocs/help-doc.html index 43586fd99..44f0adfd1 100644 --- a/docs/jdocs/help-doc.html +++ b/docs/jdocs/help-doc.html @@ -1,186 +1,222 @@ - + + - -API Help (neureka 1.0.0 API) - - - - + +API Help (neureka 1.0.1 API) - - - - - - + + -
- -
-
-

JavaDoc Help

-
    -
  • Navigation: -
      -
    • Search
    • + - -
    • Kinds of Pages: - -
      -
      -

      Navigation

      -Starting from the Overview page, you can browse the documentation using the links in each page, and in the navigation bar at the top of each page. The Index and Search box allow you to navigate to specific declarations and summary pages, including: All Packages, All Classes and Interfaces - +
      + +
      + + +
      + +
      +

      How This API Document Is Organized

      +
      This API (Application Programming Interface) document has pages corresponding to the items in the navigation bar, described as follows.
      -
      -
      -

      Kinds of Pages

      -The following sections describe the different kinds of pages in this collection. -
      -

      Overview

      -

      The Overview page is the front page of this API document and provides a list of all packages with a summary for each. This page can also contain an overall description of the set of packages.

      -
      -
      -

      Package

      -

      Each package has a page that contains a list of its classes and interfaces, with a summary for each. These pages may contain the following categories:

      -
        -
      • Interfaces
      • +
        +
          +
        • +

          Overview

          +

          The Overview page is the front page of this API document and provides a list of all packages with a summary for each. This page can also contain an overall description of the set of packages.

          +
        • +
        • +

          Package

          +

          Each package has a page that contains a list of its classes and interfaces, with a summary for each. This page can contain six categories:

          +
            +
          • Interfaces (italic)
          • Classes
          • -
          • Enum Classes
          • +
          • Enums
          • Exceptions
          • Errors
          • -
          • Annotation Interfaces
          • -
          -
      -
      -

      Class or Interface

      -

      Each class, interface, nested class and nested interface has its own separate page. Each of these pages has three sections consisting of a declaration and description, member summary tables, and detailed member descriptions. Entries in each of these sections are omitted if they are empty or not applicable.

      -
        -
      • Class Inheritance Diagram
      • +
      • Annotation Types
      • +
      +
    • +
    • +

      Class/Interface

      +

      Each class, interface, nested class and nested interface has its own separate page. Each of these pages has three sections consisting of a class/interface description, summary tables, and detailed member descriptions:

      +
        +
      • Class inheritance diagram
      • Direct Subclasses
      • All Known Subinterfaces
      • All Known Implementing Classes
      • -
      • Class or Interface Declaration
      • -
      • Class or Interface Description
      • +
      • Class/interface declaration
      • +
      • Class/interface description
      -
      -
        +
        • Nested Class Summary
        • -
        • Enum Constant Summary
        • Field Summary
        • -
        • Property Summary
        • Constructor Summary
        • Method Summary
        • +
        +
          +
        • Field Detail
        • +
        • Constructor Detail
        • +
        • Method Detail
        • +
        +

        Each summary entry contains the first sentence from the detailed description for that item. The summary entries are alphabetical, while the detailed descriptions are in the order they appear in the source code. This preserves the logical groupings established by the programmer.

        + +
      • +

        Annotation Type

        +

        Each annotation type has its own separate page with the following sections:

        +
          +
        • Annotation Type declaration
        • +
        • Annotation Type description
        • Required Element Summary
        • Optional Element Summary
        • +
        • Element Detail
        -
        -
          -
        • Enum Constant Details
        • -
        • Field Details
        • -
        • Property Details
        • -
        • Constructor Details
        • -
        • Method Details
        • -
        • Element Details
        • -
        -

        Note: Annotation interfaces have required and optional elements, but not methods. Only enum classes have enum constants. The components of a record class are displayed as part of the declaration of the record class. Properties are a feature of JavaFX.

        -

        The summary entries are alphabetical, while the detailed descriptions are in the order they appear in the source code. This preserves the logical groupings established by the programmer.

        - -
        -

        Other Files

        -

        Packages and modules may contain pages with additional information related to the declarations nearby.

        -
        -
        -

        Tree (Class Hierarchy)

        -

        There is a Class Hierarchy page for all packages, plus a hierarchy for each package. Each hierarchy page contains a list of classes and a list of interfaces. Classes are organized by inheritance structure starting with java.lang.Object. Interfaces do not inherit from java.lang.Object.

        -
          -
        • When viewing the Overview page, clicking on TREE displays the hierarchy for all packages.
        • -
        • When viewing a particular package, class or interface page, clicking on TREE displays the hierarchy for only that package.
        • -
        -
        -
        -

        Deprecated API

        -

        The Deprecated API page lists all of the API that have been deprecated. A deprecated API is not recommended for use, generally due to shortcomings, and a replacement API is usually given. Deprecated APIs may be removed in future implementations.

        -
        -
        -

        Constant Field Values

        +
      • +
      • +

        Enum

        +

        Each enum has its own separate page with the following sections:

        +
          +
        • Enum declaration
        • +
        • Enum description
        • +
        • Enum Constant Summary
        • +
        • Enum Constant Detail
        • +
        +
      • +
      • +

        Tree (Class Hierarchy)

        +

        There is a Class Hierarchy page for all packages, plus a hierarchy for each package. Each hierarchy page contains a list of classes and a list of interfaces. The classes are organized by inheritance structure starting with java.lang.Object. The interfaces do not inherit from java.lang.Object.

        +
          +
        • When viewing the Overview page, clicking on "Tree" displays the hierarchy for all packages.
        • +
        • When viewing a particular package, class or interface page, clicking "Tree" displays the hierarchy for only that package.
        • +
        +
      • +
      • +

        Deprecated API

        +

        The Deprecated API page lists all of the API that have been deprecated. A deprecated API is not recommended for use, generally due to improvements, and a replacement API is usually given. Deprecated APIs may be removed in future implementations.

        +
      • +
      • +

        Index

        +

        The Index contains an alphabetic list of all classes, interfaces, constructors, methods, and fields.

        +
      • +
      • +

        Prev/Next

        +

        These links take you to the next or previous class, interface, package, or related page.

        +
      • +
      • +

        Frames/No Frames

        +

        These links show and hide the HTML frames. All pages are available with or without frames.

        +
      • +
      • +

        All Classes

        +

        The All Classes link shows all classes and interfaces except non-static nested types.

        +
      • +
      • +

        Serialized Form

        +

        Each serializable or externalizable class has a description of its serialization fields and methods. This information is of interest to re-implementors, not to developers using the API. While there is no link in the navigation bar, you can get to this information by going to any serialized class and clicking "Serialized Form" in the "See also" section of the class description.

        +
      • +
      • +

        Constant Field Values

        The Constant Field Values page lists the static final fields and their values.

        - -
        -

        All Packages

        -

        The All Packages page contains an alphabetic index of all packages contained in the documentation.

        -
        -
        -

        All Classes and Interfaces

        -

        The All Classes and Interfaces page contains an alphabetic index of all classes and interfaces contained in the documentation, including annotation interfaces, enum classes, and record classes.

        -
        -
        -

        Index

        -

        The Index contains an alphabetic index of all classes, interfaces, constructors, methods, and fields in the documentation, as well as summary pages such as All Packages, All Classes and Interfaces.

        -
        -
-
-This help file applies to API documentation generated by the standard doclet. + + +This help file applies to API documentation generated using the standard doclet.
+ +
+ + + + + + +
+ + diff --git a/docs/jdocs/index-all.html b/docs/jdocs/index-all.html index 35df86c86..fcc508198 100644 --- a/docs/jdocs/index-all.html +++ b/docs/jdocs/index-all.html @@ -1,491 +1,500 @@ - + + - -Index (neureka 1.0.0 API) - - - - + +Index (neureka 1.0.1 API) - - - - - - + + -
- -
-
-
-

Index

-
-A B C D E F G H I J K L M N O P Q R S T U V W X _ 
All Classes and Interfaces|All Packages|Constant Field Values -

A

-
-
ABGR_4BYTE - Enum constant in enum class neureka.Tensor.ImageType
-
 
-
ABGR_PRE_4BYTE - Enum constant in enum class neureka.Tensor.ImageType
+
A B C D E F G H I J K L M N O P Q R S T U V W X _  + + +

A

+
+
abs() - Method in class neureka.math.Functions
 
-
abs() - Method in class neureka.math.Functions
-
 
-
abs() - Method in interface neureka.Tensor
+
abs() - Method in interface neureka.Tensor
This method is a functionally identical to the following alternatives:
-
Absolute - Class in neureka.backend.main.operations.functions
+
ABSOLUTE - Static variable in interface neureka.backend.main.implementations.fun.api.ScalarFun
 
-
Absolute() - Constructor for class neureka.backend.main.operations.functions.Absolute
+
Absolute - Class in neureka.backend.main.operations.functions
 
-
ABSOLUTE - Static variable in interface neureka.backend.main.implementations.fun.api.ScalarFun
+
Absolute() - Constructor for class neureka.backend.main.operations.functions.Absolute
 
-
AbstractBaseDevice<V> - Class in neureka.devices
+
AbstractBaseDevice<V> - Class in neureka.devices
 
-
AbstractBaseDevice() - Constructor for class neureka.devices.AbstractBaseDevice
+
AbstractBaseDevice() - Constructor for class neureka.devices.AbstractBaseDevice
 
-
AbstractComponentOwner<C> - Class in neureka.common.composition
+
AbstractComponentOwner<C> - Class in neureka.common.composition
Together with the Component interface, this class defines a simple component system in which implementations of the Component interface are managed by extensions of this AbstractComponentOwner.
-
AbstractComponentOwner() - Constructor for class neureka.common.composition.AbstractComponentOwner
+
AbstractComponentOwner() - Constructor for class neureka.common.composition.AbstractComponentOwner
 
-
AbstractCPUConvolution - Class in neureka.backend.main.implementations.convolution
+
AbstractCPUConvolution - Class in neureka.backend.main.implementations.convolution
 
-
AbstractCPUConvolution() - Constructor for class neureka.backend.main.implementations.convolution.AbstractCPUConvolution
+
AbstractCPUConvolution() - Constructor for class neureka.backend.main.implementations.convolution.AbstractCPUConvolution
 
-
AbstractDevice<V> - Class in neureka.devices
+
AbstractDevice<V> - Class in neureka.devices
This is the abstract precursor class providing some useful implementations for core concepts which are most likely applicable to most concrete implementations of the Device interface.
-
AbstractDevice() - Constructor for class neureka.devices.AbstractDevice
+
AbstractDevice() - Constructor for class neureka.devices.AbstractDevice
 
-
AbstractDeviceAlgorithm<C extends DeviceAlgorithm<C>> - Class in neureka.backend.api.template.algorithms
+
AbstractDeviceAlgorithm<C extends DeviceAlgorithm<C>> - Class in neureka.backend.api.template.algorithms
This is a partial implementation of the Algorithm interface which implements the component system for implementation instances of the ImplementationFor interface.
-
AbstractDeviceAlgorithm(String) - Constructor for class neureka.backend.api.template.algorithms.AbstractDeviceAlgorithm
+
AbstractDeviceAlgorithm(String) - Constructor for class neureka.backend.api.template.algorithms.AbstractDeviceAlgorithm
 
-
AbstractDeviceData<T> - Class in neureka.devices
+
AbstractDeviceData<T> - Class in neureka.devices
 
-
AbstractDeviceData(AbstractBaseDevice<?>, Object, DataType<T>, Runnable) - Constructor for class neureka.devices.AbstractDeviceData
+
AbstractDeviceData(AbstractBaseDevice<?>, Object, DataType<T>, Runnable) - Constructor for class neureka.devices.AbstractDeviceData
 
-
AbstractFunAlgorithm - Class in neureka.backend.api.template.algorithms
+
AbstractFunAlgorithm - Class in neureka.backend.api.template.algorithms
 
-
AbstractFunAlgorithm(String) - Constructor for class neureka.backend.api.template.algorithms.AbstractFunAlgorithm
+
AbstractFunAlgorithm(String) - Constructor for class neureka.backend.api.template.algorithms.AbstractFunAlgorithm
 
-
AbstractFunDeviceAlgorithm<C extends DeviceAlgorithm<C>> - Class in neureka.backend.api.template.algorithms
+
AbstractFunDeviceAlgorithm<C extends DeviceAlgorithm<C>> - Class in neureka.backend.api.template.algorithms
This is the base class for implementations of the Algorithm interface.
-
AbstractFunDeviceAlgorithm(String) - Constructor for class neureka.backend.api.template.algorithms.AbstractFunDeviceAlgorithm
+
AbstractFunDeviceAlgorithm(String) - Constructor for class neureka.backend.api.template.algorithms.AbstractFunDeviceAlgorithm
 
-
AbstractImplementationFor<D extends Device<?>> - Class in neureka.backend.api.template.implementations
+
AbstractImplementationFor<D extends Device<?>> - Class in neureka.backend.api.template.implementations
 
-
AbstractImplementationFor(ImplementationFor<D>, int) - Constructor for class neureka.backend.api.template.implementations.AbstractImplementationFor
+
AbstractImplementationFor(ImplementationFor<D>, int) - Constructor for class neureka.backend.api.template.implementations.AbstractImplementationFor
 
-
AbstractNDC - Class in neureka.ndim.config
+
AbstractNDC - Class in neureka.ndim.config
The following is an abstract implementation of the NDConfiguration which offers a basis for instantiation and caching of concrete implementations extending this abstract class.
-
AbstractNDC() - Constructor for class neureka.ndim.config.AbstractNDC
+
AbstractNDC() - Constructor for class neureka.ndim.config.AbstractNDC
 
-
AbstractOperation - Class in neureka.backend.api.template.operations
+
AbstractOperation - Class in neureka.backend.api.template.operations
This abstract Operation implementation is a useful template for creating new operations.
-
AbstractOperation(OperationBuilder) - Constructor for class neureka.backend.api.template.operations.AbstractOperation
-
 
-
ACCELERATOR - Enum constant in enum class neureka.devices.opencl.OpenCLDevice.Type
+
AbstractOperation(OperationBuilder) - Constructor for class neureka.backend.api.template.operations.AbstractOperation
 
-
accept(Class<? extends Operation>, Class<? extends DeviceAlgorithm>, Class<? extends D>, Function<LoadingContext, ImplementationFor<D>>) - Method in interface neureka.backend.api.ini.ImplementationReceiver
+
accept(Class<? extends Operation>, Class<? extends DeviceAlgorithm>, Class<? extends D>, Function<LoadingContext, ImplementationFor<D>>) - Method in interface neureka.backend.api.ini.ImplementationReceiver
 
-
access(Tensor<T>) - Method in class neureka.devices.AbstractDevice
+
access(Tensor<T>) - Method in class neureka.devices.AbstractDevice
This method exposes the tensor access API for reading from or writing to a tensor stored on this device.
-
access(Tensor<T>) - Method in interface neureka.devices.Device
+
access(Tensor<T>) - Method in interface neureka.devices.Device
This method exposes the tensor access API for reading from or writing to a tensor stored on this device.
-
access(Tensor<T>) - Method in class neureka.devices.file.FileDevice
+
access(Tensor<T>) - Method in class neureka.devices.file.FileDevice
 
-
act(ADTarget<?>) - Method in interface neureka.autograd.ADAction
+
act(ADTarget<?>) - Method in interface neureka.autograd.ADAction
The auto-differentiation forward or backward pass of an ADAction propagate partial differentiations forward along the computation graph.
-
activationCode() - Method in interface neureka.backend.main.implementations.fun.api.ScalarFun
+
activationCode() - Method in interface neureka.backend.main.implementations.fun.api.ScalarFun
 
-
activationCode() - Method in class neureka.backend.main.implementations.fun.ScalarAbsolute
+
activationCode() - Method in class neureka.backend.main.implementations.fun.ScalarAbsolute
 
-
activationCode() - Method in class neureka.backend.main.implementations.fun.ScalarCbrt
+
activationCode() - Method in class neureka.backend.main.implementations.fun.ScalarCbrt
 
-
activationCode() - Method in class neureka.backend.main.implementations.fun.ScalarCosinus
+
activationCode() - Method in class neureka.backend.main.implementations.fun.ScalarCosinus
 
-
activationCode() - Method in class neureka.backend.main.implementations.fun.ScalarExp
+
activationCode() - Method in class neureka.backend.main.implementations.fun.ScalarExp
 
-
activationCode() - Method in class neureka.backend.main.implementations.fun.ScalarGaSU
+
activationCode() - Method in class neureka.backend.main.implementations.fun.ScalarGaSU
 
-
activationCode() - Method in class neureka.backend.main.implementations.fun.ScalarGaTU
+
activationCode() - Method in class neureka.backend.main.implementations.fun.ScalarGaTU
 
-
activationCode() - Method in class neureka.backend.main.implementations.fun.ScalarGaussian
+
activationCode() - Method in class neureka.backend.main.implementations.fun.ScalarGaussian
 
-
activationCode() - Method in class neureka.backend.main.implementations.fun.ScalarGaussianFast
+
activationCode() - Method in class neureka.backend.main.implementations.fun.ScalarGaussianFast
 
-
activationCode() - Method in class neureka.backend.main.implementations.fun.ScalarGeLU
+
activationCode() - Method in class neureka.backend.main.implementations.fun.ScalarGeLU
 
-
activationCode() - Method in class neureka.backend.main.implementations.fun.ScalarIdentity
+
activationCode() - Method in class neureka.backend.main.implementations.fun.ScalarIdentity
 
-
activationCode() - Method in class neureka.backend.main.implementations.fun.ScalarLog10
+
activationCode() - Method in class neureka.backend.main.implementations.fun.ScalarLog10
 
-
activationCode() - Method in class neureka.backend.main.implementations.fun.ScalarLogarithm
+
activationCode() - Method in class neureka.backend.main.implementations.fun.ScalarLogarithm
 
-
activationCode() - Method in class neureka.backend.main.implementations.fun.ScalarQuadratic
+
activationCode() - Method in class neureka.backend.main.implementations.fun.ScalarQuadratic
 
-
activationCode() - Method in class neureka.backend.main.implementations.fun.ScalarReLU
+
activationCode() - Method in class neureka.backend.main.implementations.fun.ScalarReLU
 
-
activationCode() - Method in class neureka.backend.main.implementations.fun.ScalarSeLU
+
activationCode() - Method in class neureka.backend.main.implementations.fun.ScalarSeLU
 
-
activationCode() - Method in class neureka.backend.main.implementations.fun.ScalarSigmoid
+
activationCode() - Method in class neureka.backend.main.implementations.fun.ScalarSigmoid
 
-
activationCode() - Method in class neureka.backend.main.implementations.fun.ScalarSiLU
+
activationCode() - Method in class neureka.backend.main.implementations.fun.ScalarSiLU
 
-
activationCode() - Method in class neureka.backend.main.implementations.fun.ScalarSinus
+
activationCode() - Method in class neureka.backend.main.implementations.fun.ScalarSinus
 
-
activationCode() - Method in class neureka.backend.main.implementations.fun.ScalarSoftplus
+
activationCode() - Method in class neureka.backend.main.implementations.fun.ScalarSoftplus
 
-
activationCode() - Method in class neureka.backend.main.implementations.fun.ScalarSoftsign
+
activationCode() - Method in class neureka.backend.main.implementations.fun.ScalarSoftsign
 
-
activationCode() - Method in class neureka.backend.main.implementations.fun.ScalarSqrt
+
activationCode() - Method in class neureka.backend.main.implementations.fun.ScalarSqrt
 
-
activationCode() - Method in class neureka.backend.main.implementations.fun.ScalarTanh
+
activationCode() - Method in class neureka.backend.main.implementations.fun.ScalarTanh
 
-
activationCode() - Method in class neureka.backend.main.implementations.fun.ScalarTanhFast
+
activationCode() - Method in class neureka.backend.main.implementations.fun.ScalarTanhFast
 
-
actualize() - Method in interface neureka.devices.Device.Access
+
actualize() - Method in interface neureka.devices.Device.Access
 
-
ADAction - Interface in neureka.autograd
+
ADAction - Interface in neureka.autograd
This interface is the declaration for - lambda actions for both the ADAction.act(ADTarget) method of the ADAction interface.
+ lambda actions for both the ADAction.act(ADTarget) method of the ADAction interface.
-
ADAction(Function, ExecutionCall<? extends Device<?>>) - Static method in class neureka.backend.api.template.algorithms.FallbackAlgorithm
+
ADAction(Function, ExecutionCall<? extends Device<?>>) - Static method in class neureka.backend.api.template.algorithms.FallbackAlgorithm
 
-
ADActionSupplier - Interface in neureka.backend.api.fun
+
ADActionSupplier - Interface in neureka.backend.api.fun
Implementations of this functional interface ought to return a new instance of the ADAction class responsible for performing automatic differentiation both for forward and backward mode differentiation.
-
AdaGrad<V extends Number> - Class in neureka.optimization.implementations
+
AdaGrad<V extends java.lang.Number> - Class in neureka.optimization.implementations
Adaptive Gradients, or AdaGrad for short, is an extension of the gradient descent optimization algorithm that adjusts the step size for each parameter based on the squared gradients seen over the course of previous optimization steps.
-
AdaGrad - Static variable in interface neureka.optimization.Optimizer
+
AdaGrad - Static variable in interface neureka.optimization.Optimizer
 
-
AdaGradFactory - Class in neureka.optimization.implementations
+
AdaGradFactory - Class in neureka.optimization.implementations
 
-
AdaGradFactory() - Constructor for class neureka.optimization.implementations.AdaGradFactory
+
AdaGradFactory() - Constructor for class neureka.optimization.implementations.AdaGradFactory
 
-
ADAM<V extends Number> - Class in neureka.optimization.implementations
+
ADAM<V extends java.lang.Number> - Class in neureka.optimization.implementations
ADAM (short for Adaptive Moment Estimation) is an adaptive learning rate optimization algorithm that utilises both momentum and scaling, combining the benefits of RMSProp and SGD with respect to Momentum.
-
ADAM - Static variable in interface neureka.optimization.Optimizer
+
ADAM - Static variable in interface neureka.optimization.Optimizer
 
-
ADAMFactory - Class in neureka.optimization.implementations
+
ADAMFactory - Class in neureka.optimization.implementations
 
-
ADAMFactory() - Constructor for class neureka.optimization.implementations.ADAMFactory
+
ADAMFactory() - Constructor for class neureka.optimization.implementations.ADAMFactory
 
-
add() - Method in class neureka.math.Functions
+
add() - Method in class neureka.math.Functions
 
-
addAssign() - Method in class neureka.math.Functions
+
addAssign() - Method in class neureka.math.Functions
 
-
addChild(Tensor<V>) - Method in class neureka.framing.Relation
+
addChild(Tensor<V>) - Method in class neureka.framing.Relation
 
-
ADDED - Enum constant in enum class neureka.common.composition.Component.IsBeing
+
Addition - Class in neureka.backend.main.operations.operator
 
-
Addition - Class in neureka.backend.main.operations.operator
+
Addition() - Constructor for class neureka.backend.main.operations.operator.Addition
 
-
Addition() - Constructor for class neureka.backend.main.operations.operator.Addition
-
 
-
addOperation(Operation) - Method in class neureka.backend.api.BackendContext
+
addOperation(Operation) - Method in class neureka.backend.api.BackendContext
This method registers Operation implementation instances in this BackendContext which is the thread local execution context receiving and processing Tensor instances...
-
addPending(Set<GraphNode<V>>) - Method in class neureka.autograd.JITProp
+
addPending(Set<GraphNode<V>>) - Method in class neureka.autograd.JITProp
 
-
addPermuteRelationFor(Tensor<V>, int[]) - Method in class neureka.framing.Relation
+
addPermuteRelationFor(Tensor<V>, int[]) - Method in class neureka.framing.Relation
When creating permuted versions of slices then there must be a translation between the shape configuration between this new slice and the original parent tensor from which both slices have been derived.
-
addToGradient(Tensor<T>) - Method in interface neureka.MutateTensor
+
addToGradient(Tensor<T>) - Method in interface neureka.MutateTensor
This method takes the provided Tensor instance and adds its contents to the contents of the Tensor which is set as gradient of this very Tensor.
-
ADSupportPredicate - Interface in neureka.backend.api.fun
+
ADSupportPredicate - Interface in neureka.backend.api.fun
A ADSupportPredicate lambda checks which auto differentiation mode can be performed for a given ExecutionCall.
-
ADTarget<V> - Class in neureka.autograd
+
ADTarget<V> - Class in neureka.autograd
This is simply a wrapper for useful information needed by implementations of the ADAction and ADAction interfaces to perform error propagation.
-
Algorithm - Interface in neureka.backend.api
+
Algorithm - Interface in neureka.backend.api
This class is the middle layer of the 3 tier compositional architecture of this backend, which consists of Operations, Algorithms and in case of a DeviceAlgorithm also ImplementationFor.
-
all() - Method in class neureka.fluent.slicing.AxisSliceBuilder
-
 
-
all() - Method in interface neureka.fluent.slicing.states.FromOrAt
-
-
This is a convenience method replacing "from(0).to(axisSize-1)", meaning that - it simply slices the whole current axis from the original tensor.
-
-
all() - Method in interface neureka.fluent.slicing.states.FromOrAtTensor
-
-
This is a convenience method replacing "from(0).to(axisSize-1)", meaning that it simply slices the whole current axis from the original tensor.
-
-
all(Call.TensorCompare) - Method in class neureka.backend.api.Call.Validator
+
all(Call.TensorCondition) - Method in class neureka.backend.api.Call.Validator
 
-
all(Call.TensorCondition) - Method in class neureka.backend.api.Call.Validator
+
all(Call.TensorCompare) - Method in class neureka.backend.api.Call.Validator
 
-
all(V) - Method in class neureka.fluent.building.NdaBuilder
+
all(V) - Method in class neureka.fluent.building.NdaBuilder
 
-
all(V) - Method in interface neureka.fluent.building.states.IterByOrIterFromOrAll
+
all(V) - Method in interface neureka.fluent.building.states.IterByOrIterFromOrAll
This method creates and returns a Tensor instance which will be homogeneously filled by the provided value irrespective of the previously defined shape.
-
all(V) - Method in interface neureka.fluent.building.states.IterByOrIterFromOrAllTensor
+
all(V) - Method in interface neureka.fluent.building.states.IterByOrIterFromOrAllTensor
This method creates and returns a Tensor instance which will be homogeneously filled by the provided value irrespective of the previously defined shape.
-
ALL - Enum constant in enum class neureka.devices.opencl.OpenCLDevice.Type
+
all() - Method in class neureka.fluent.slicing.AxisSliceBuilder
+
 
+
all() - Method in interface neureka.fluent.slicing.states.FromOrAt
+
+
This is a convenience method replacing "from(0).to(axisSize-1)", meaning that it simply slices the whole current axis from the original tensor.
+
+
all() - Method in interface neureka.fluent.slicing.states.FromOrAtTensor
+
+
This is a convenience method replacing "from(0).to(axisSize-1)", meaning that it simply slices the whole current axis from the original tensor.
+
+
allAliasGetter(Supplier<List<Object>>) - Method in class neureka.framing.fluent.AxisFrame.Builder
 
-
allAliasGetter(Supplier<List<Object>>) - Method in class neureka.framing.fluent.AxisFrame.Builder
+
allAliasGetterFor(Function<Integer, List<Object>>) - Method in class neureka.framing.fluent.AxisFrame.Builder
 
-
allAliasGetterFor(Function<Integer, List<Object>>) - Method in class neureka.framing.fluent.AxisFrame.Builder
+
allMetaArgs() - Method in class neureka.backend.api.Call
 
-
allMetaArgs() - Method in class neureka.backend.api.Call
+
allNotNull(Call.TensorCondition) - Method in class neureka.backend.api.Call.Validator
 
-
allNotNull(Call.TensorCondition) - Method in class neureka.backend.api.Call.Validator
+
allNotNullHaveSame(Call.TensorProperty) - Method in class neureka.backend.api.Call.Validator
 
-
allNotNullHaveSame(Call.TensorProperty) - Method in class neureka.backend.api.Call.Validator
+
allocate(DataType<T>, NDConfiguration) - Method in interface neureka.devices.Device
 
-
allocate(Class<T>, int, Object) - Method in class neureka.devices.host.CPU
+
allocate(DataType<T>, int) - Method in interface neureka.devices.Device
 
-
allocate(Class<T>, Object) - Method in class neureka.devices.host.CPU
+
allocate(DataType<V>, NDConfiguration) - Method in class neureka.devices.file.FileDevice
 
-
allocate(DataType<T>, int) - Method in interface neureka.devices.Device
+
allocate(Class<T>, Object) - Method in class neureka.devices.host.CPU
 
-
allocate(DataType<T>, NDConfiguration) - Method in interface neureka.devices.Device
+
allocate(Class<T>, int, Object) - Method in class neureka.devices.host.CPU
 
-
allocate(DataType<T>, NDConfiguration) - Method in class neureka.devices.host.CPU
+
allocate(DataType<T>, NDConfiguration) - Method in class neureka.devices.host.CPU
 
-
allocate(DataType<T>, NDConfiguration) - Method in class neureka.devices.opencl.OpenCLDevice
+
allocate(DataType<T>, NDConfiguration) - Method in class neureka.devices.opencl.OpenCLDevice
 
-
allocate(DataType<V>, NDConfiguration) - Method in class neureka.devices.file.FileDevice
+
allocateFromAll(DataType<T>, NDConfiguration, Object) - Method in interface neureka.devices.Device
 
-
allocateFromAll(DataType<T>, NDConfiguration, Object) - Method in interface neureka.devices.Device
+
allocateFromAll(DataType<T>, NDConfiguration, Object) - Method in class neureka.devices.file.FileDevice
 
-
allocateFromAll(DataType<T>, NDConfiguration, Object) - Method in class neureka.devices.file.FileDevice
+
allocateFromAll(DataType<T>, NDConfiguration, Object) - Method in class neureka.devices.host.CPU
 
-
allocateFromAll(DataType<T>, NDConfiguration, Object) - Method in class neureka.devices.host.CPU
+
allocateFromAll(DataType<T>, NDConfiguration, Object) - Method in class neureka.devices.opencl.OpenCLDevice
 
-
allocateFromAll(DataType<T>, NDConfiguration, Object) - Method in class neureka.devices.opencl.OpenCLDevice
+
allocateFromOne(DataType<T>, NDConfiguration, T) - Method in interface neureka.devices.Device
 
-
allocateFromOne(DataType<T>, NDConfiguration, T) - Method in interface neureka.devices.Device
+
allocateFromOne(DataType<V>, NDConfiguration, V) - Method in class neureka.devices.file.FileDevice
 
-
allocateFromOne(DataType<T>, NDConfiguration, T) - Method in class neureka.devices.host.CPU
+
allocateFromOne(DataType<T>, NDConfiguration, T) - Method in class neureka.devices.host.CPU
 
-
allocateFromOne(DataType<T>, NDConfiguration, T) - Method in class neureka.devices.opencl.OpenCLDevice
+
allocateFromOne(DataType<T>, NDConfiguration, T) - Method in class neureka.devices.opencl.OpenCLDevice
 
-
allocateFromOne(DataType<V>, NDConfiguration, V) - Method in class neureka.devices.file.FileDevice
+
allowsBackward() - Method in enum neureka.backend.api.AutoDiffMode
 
-
allowsBackward() - Method in enum class neureka.backend.api.AutoDiffMode
+
allowsForward() - Method in enum neureka.backend.api.AutoDiffMode
 
-
allowsForward() - Method in enum class neureka.backend.api.AutoDiffMode
+
allShare(Function<Tensor<?>, T>) - Method in class neureka.backend.api.Call.Validator
 
-
allShare(Function<Tensor<?>, T>) - Method in class neureka.backend.api.Call.Validator
+
and(F) - Method in interface neureka.backend.main.algorithms.internal.AndBackward
 
-
and(F) - Method in interface neureka.backend.main.algorithms.internal.AndBackward
+
andArgs(List<Arg>) - Method in class neureka.backend.api.Call.Builder
 
-
andArgs(List<Arg>) - Method in class neureka.backend.api.Call.Builder
+
andArgs(Arg<?>...) - Method in class neureka.backend.api.Call.Builder
 
-
andArgs(List<Arg>) - Method in class neureka.backend.api.ExecutionCall.Builder
+
andArgs(List<Arg>) - Method in class neureka.backend.api.ExecutionCall.Builder
 
-
andArgs(Arg<?>...) - Method in class neureka.backend.api.Call.Builder
+
andArgs(Arg<?>...) - Method in class neureka.backend.api.ExecutionCall.Builder
 
-
andArgs(Arg<?>...) - Method in class neureka.backend.api.ExecutionCall.Builder
+
AndBackward<F> - Interface in neureka.backend.main.algorithms.internal
 
-
AndBackward<F> - Interface in neureka.backend.main.algorithms.internal
+
andFill(V...) - Method in class neureka.fluent.building.NdaBuilder
 
-
andFill(List<V>) - Method in interface neureka.fluent.building.states.IterByOrIterFromOrAll
+
andFill(V...) - Method in interface neureka.fluent.building.states.IterByOrIterFromOrAll
-
Provide a list of values which will be used to fill the Tensor instance returned by this last fluent builder method.
+
Provide an array of values which will be used to fill the Tensor instance returned by this last fluent builder method.
-
andFill(List<V>) - Method in interface neureka.fluent.building.states.IterByOrIterFromOrAllTensor
+
andFill(List<V>) - Method in interface neureka.fluent.building.states.IterByOrIterFromOrAll
Provide a list of values which will be used to fill the Tensor instance returned by this last fluent builder method.
-
andFill(V...) - Method in class neureka.fluent.building.NdaBuilder
-
 
-
andFill(V...) - Method in interface neureka.fluent.building.states.IterByOrIterFromOrAll
+
andFill(V...) - Method in interface neureka.fluent.building.states.IterByOrIterFromOrAllTensor
Provide an array of values which will be used to fill the Tensor instance returned by this last fluent builder method.
-
andFill(V...) - Method in interface neureka.fluent.building.states.IterByOrIterFromOrAllTensor
+
andFill(List<V>) - Method in interface neureka.fluent.building.states.IterByOrIterFromOrAllTensor
-
Provide an array of values which will be used to fill the Tensor instance returned by this last fluent builder method.
+
Provide a list of values which will be used to fill the Tensor instance returned by this last fluent builder method.
-
andFillFrom(V) - Method in class neureka.fluent.building.NdaBuilder
+
andFillFrom(V) - Method in class neureka.fluent.building.NdaBuilder
 
-
andFillFrom(V) - Method in interface neureka.fluent.building.states.IterByOrIterFromOrAll
+
andFillFrom(V) - Method in interface neureka.fluent.building.states.IterByOrIterFromOrAll
This part of the builder API allows for specifying a range which starts from the provided value and will end at the value specified in the next builder step returned by this method.
-
andFillFrom(V) - Method in interface neureka.fluent.building.states.IterByOrIterFromOrAllTensor
+
andFillFrom(V) - Method in interface neureka.fluent.building.states.IterByOrIterFromOrAllTensor
This part of the builder API allows for specifying a range which starts from the provided value and will end at the value specified in the next builder step returned by this method.
-
andImplementation(ImplementationFor<CPU>) - Method in interface neureka.backend.main.implementations.CPUImplementation.AndImplementation
+
andImplementation(ImplementationFor<CPU>) - Method in interface neureka.backend.main.implementations.CPUImplementation.AndImplementation
 
-
andSeed(Object) - Method in class neureka.fluent.building.NdaBuilder
+
andSeed(Object) - Method in class neureka.fluent.building.NdaBuilder
 
-
andSeed(Object) - Method in interface neureka.fluent.building.states.IterByOrIterFromOrAll
+
andSeed(Object) - Method in interface neureka.fluent.building.states.IterByOrIterFromOrAll
This method creates and returns a Tensor instance which will be filled based on the provided seed object.
-
andSeed(Object) - Method in interface neureka.fluent.building.states.IterByOrIterFromOrAllTensor
+
andSeed(Object) - Method in interface neureka.fluent.building.states.IterByOrIterFromOrAllTensor
This method creates and returns a Tensor instance which will be filled based on the provided seed object.
-
andWhere(Filler<V>) - Method in class neureka.fluent.building.NdaBuilder
+
andWhere(Filler<V>) - Method in class neureka.fluent.building.NdaBuilder
This method receives a Filler lambda which will be used to populate the Tensor instance produced by this API with values.
-
andWhere(Filler<V>) - Method in interface neureka.fluent.building.states.IterByOrIterFromOrAll
+
andWhere(Filler<V>) - Method in interface neureka.fluent.building.states.IterByOrIterFromOrAll
Pass a lambda to this method which will be used to populate the Tensor built by this fluent builder API based on the indices of the tensor.
-
andWhere(Filler<V>) - Method in interface neureka.fluent.building.states.IterByOrIterFromOrAllTensor
+
andWhere(Filler<V>) - Method in interface neureka.fluent.building.states.IterByOrIterFromOrAllTensor
Pass a lambda to this method which will be used to populate the Tensor built by this fluent builder API based on the indices of the tensor.
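Taken together, the building entries above suggest fluent chains roughly like the following sketch; withShape(...) is assumed as the step preceding these terminal methods, and the concrete types, values and the Filler lambda signature are illustrative:

    import neureka.Tensor;

    Tensor<Integer> a = Tensor.of(Integer.class).withShape(2, 3).all(7);                     // homogeneous fill
    Tensor<Double>  b = Tensor.of(Double.class).withShape(2, 2).andFill(1.0, 2.0, 3.0, 4.0); // explicit values
    Tensor<Double>  c = Tensor.of(Double.class)
                              .withShape(3, 3)
                              .andWhere((i, indices) -> (double) (indices[0] + indices[1])); // index based population (signature assumed)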
-
any(String...) - Static method in interface neureka.devices.Device
+
any(Call.TensorCondition) - Method in class neureka.backend.api.Call.Validator
+
 
+
any(String...) - Static method in interface neureka.devices.Device
This method returns Device instances matching the given search parameter.
-
any(Predicate<Integer>) - Method in interface neureka.Shape
-
 
-
any(Predicate<V>) - Method in interface neureka.Nda
+
any(Predicate<V>) - Method in interface neureka.Nda
Iterates over every element of this nd-array, and checks whether any element matches the provided lambda.
-
any(Call.TensorCondition) - Method in class neureka.backend.api.Call.Validator
+
any(Predicate<Integer>) - Method in interface neureka.Shape
 
-
anyNotNull(Call.TensorCondition) - Method in class neureka.backend.api.Call.Validator
+
anyNotNull(Call.TensorCondition) - Method in class neureka.backend.api.Call.Validator
 
-
applyGradient() - Method in interface neureka.Tensor
+
applyGradient() - Method in interface neureka.Tensor
If this tensor owns a gradient tensor as component, then it can be applied by this method.
-
approve(ExecutionCall<? extends Device<?>>) - Method in class neureka.devices.AbstractDevice
+
approve(ExecutionCall<? extends Device<?>>) - Method in class neureka.devices.AbstractDevice
This method plays an important role in approving a provided ExecutionCall. When implementing custom operations or such for the backend of this library, then one may use this in order to check if the provided call is suitable for this Device.
-
approve(ExecutionCall<? extends Device<?>>) - Method in interface neureka.devices.Device
+
approve(ExecutionCall<? extends Device<?>>) - Method in interface neureka.devices.Device
This method is used internally to give Device implementations the opportunity to perform some exception handling before the ExecutionCall will be dispatched.
-
approve(ExecutionCall<? extends Device<?>>) - Method in class neureka.devices.file.FileDevice
+
approve(ExecutionCall<? extends Device<?>>) - Method in class neureka.devices.file.FileDevice
 
-
architecture - Variable in class neureka.devices.host.machine.CommonMachine
+
architecture - Variable in class neureka.devices.host.machine.CommonMachine
 
-
Arg<T> - Class in neureka.math.args
+
Arg<T> - Class in neureka.math.args
Extend this class to define additional meta arguments for Functions.
-
Arg(T) - Constructor for class neureka.math.args.Arg
+
Arg(T) - Constructor for class neureka.math.args.Arg
 
-
Arg.Axis - Class in neureka.math.args
+
Arg.Axis - Class in neureka.math.args
 
-
Arg.Derivative<V> - Class in neureka.math.args
+
Arg.Derivative<V> - Class in neureka.math.args
 
-
Arg.DerivIdx - Class in neureka.math.args
+
Arg.DerivIdx - Class in neureka.math.args
This is an important argument whose role might not be clear at first: when calculating the derivative for a forward or backward pass one must know which derivative ought to be calculated.
-
Arg.Ends - Class in neureka.math.args
+
Arg.Ends - Class in neureka.math.args
 
-
Arg.Indices - Class in neureka.math.args
+
Arg.Indices - Class in neureka.math.args
 
-
Arg.Layout - Class in neureka.math.args
+
Arg.Layout - Class in neureka.math.args
 
-
Arg.MinRank - Class in neureka.math.args
+
Arg.MinRank - Class in neureka.math.args
 
-
Arg.Offset - Class in neureka.math.args
+
Arg.Offset - Class in neureka.math.args
 
-
Arg.Seed - Class in neureka.math.args
+
Arg.Seed - Class in neureka.math.args
 
-
Arg.Shape - Class in neureka.math.args
+
Arg.Shape - Class in neureka.math.args
 
-
Arg.Stride - Class in neureka.math.args
+
Arg.Stride - Class in neureka.math.args
 
-
Arg.TargetDevice - Class in neureka.math.args
+
Arg.TargetDevice - Class in neureka.math.args
 
-
Arg.VarIdx - Class in neureka.math.args
+
Arg.VarIdx - Class in neureka.math.args
The following argument is relevant for a particular type of operation, namely: an "indexer".
-
ARGB_1INT - Enum constant in enum class neureka.Tensor.ImageType
-
 
-
ARGB_PRE_1INT - Enum constant in enum class neureka.Tensor.ImageType
-
 
-
Args - Class in neureka.math.args
+
Args - Class in neureka.math.args
 
-
Args() - Constructor for class neureka.math.args.Args
+
Args() - Constructor for class neureka.math.args.Args
 
-
arity() - Method in class neureka.backend.api.Call
+
arity() - Method in class neureka.backend.api.Call
 
-
arity(int) - Method in class neureka.backend.api.template.operations.OperationBuilder
+
arity(int) - Method in class neureka.backend.api.template.operations.OperationBuilder
 
-
as(Class<D>) - Method in interface neureka.Data
+
as(Class<D>) - Method in interface neureka.Data
This returns the underlying raw data object of a nd-array or tensor.
-
asDerivative(Function[], int) - Method in interface neureka.backend.api.Operation
+
asDerivative(Function[], int) - Method in interface neureka.backend.api.Operation
Operation implementations and Function implementations are in a tight relationship where the Function describes an abstract syntax tree based on the syntactic information provided by the Operation (through methods like Operation.getOperator() or Operation.getIdentifier()).
-
asDerivative(Function[], int) - Method in class neureka.backend.api.template.operations.AbstractOperation
+
asDerivative(Function[], int) - Method in class neureka.backend.api.template.operations.AbstractOperation
Operation implementations and Function implementations are in a tight relationship where the Function describes an abstract syntax tree based on the syntactic information provided by the Operation (through methods like Operation.getOperator() or Operation.getIdentifier()).
-
asDerivative(Function[], int) - Method in interface neureka.backend.api.template.operations.OperationBuilder.Derivation
+
asDerivative(Function[], int) - Method in interface neureka.backend.api.template.operations.OperationBuilder.Derivation
 
-
asDerivative(Function[], int) - Method in class neureka.backend.main.operations.functions.Logarithm
+
asDerivative(Function[], int) - Method in class neureka.backend.main.operations.functions.Logarithm
 
-
asDerivative(Function[], int) - Method in class neureka.backend.main.operations.operator.Addition
+
asDerivative(Function[], int) - Method in class neureka.backend.main.operations.operator.Addition
 
-
asDerivative(Function[], int) - Method in class neureka.backend.main.operations.operator.Division
+
asDerivative(Function[], int) - Method in class neureka.backend.main.operations.operator.Division
 
-
asDerivative(Function[], int) - Method in class neureka.backend.main.operations.operator.Modulo
+
asDerivative(Function[], int) - Method in class neureka.backend.main.operations.operator.Modulo
 
-
asDerivative(Function[], int) - Method in class neureka.backend.main.operations.operator.Multiplication
+
asDerivative(Function[], int) - Method in class neureka.backend.main.operations.operator.Multiplication
 
-
asDerivative(Function[], int) - Method in class neureka.backend.main.operations.operator.Power
+
asDerivative(Function[], int) - Method in class neureka.backend.main.operations.operator.Power
 
-
asDerivative(Function[], int) - Method in class neureka.backend.main.operations.operator.Subtraction
+
asDerivative(Function[], int) - Method in class neureka.backend.main.operations.operator.Subtraction
 
-
asImage(Tensor.ImageType) - Method in interface neureka.Tensor
+
asImage(Tensor.ImageType) - Method in interface neureka.Tensor
Turns this tensor into a BufferedImage based on the provided Tensor.ImageType formatting choice.
-
asInlineArray() - Method in interface neureka.ndim.config.NDConfiguration
+
asInlineArray() - Method in interface neureka.ndim.config.NDConfiguration
This method returns an array of flattened arrays which define this nd-configuration in a compact manner.
-
assign(Nda<T>) - Method in interface neureka.MutateNda
+
assign(T) - Method in interface neureka.MutateNda
-
Use this to assign the provided nd-array to this nd-array! This method is an inline operation which changes the underlying data of the nd-array.
+
Use this to assign the provided item to all elements of this nd-array! This method is an inline operation which changes the underlying data of the nd-array.
-
assign(Nda<T>) - Method in interface neureka.MutateTensor
-
 
-
assign(T) - Method in interface neureka.MutateNda
+
assign(Nda<T>) - Method in interface neureka.MutateNda
-
Use this to assign the provided item to all elements of this nd-array! This method is an inline operation which changes the underlying data of the nd-array.
+
Use this to assign the provided nd-array to this nd-array! This method is an inline operation which changes the underlying data of the nd-array.
-
assign(T) - Method in interface neureka.MutateTensor
+
assign(T) - Method in interface neureka.MutateTensor
+
 
+
assign(Nda<T>) - Method in interface neureka.MutateTensor
 
-
AssignLeft - Class in neureka.backend.main.operations.other
+
AssignLeft - Class in neureka.backend.main.operations.other
 
-
AssignLeft() - Constructor for class neureka.backend.main.operations.other.AssignLeft
+
AssignLeft() - Constructor for class neureka.backend.main.operations.other.AssignLeft
 
-
assumptionBasedOn(String) - Static method in class neureka.math.parsing.ParseUtil
+
assumptionBasedOn(String) - Static method in class neureka.math.parsing.ParseUtil
This method tries to find the next best operation String the user might have meant.
-
asType(Class<T>) - Method in interface neureka.Tensor
+
asType(Class<T>) - Method in interface neureka.Tensor
 
-
at(int) - Method in interface neureka.devices.Device.Writer
+
at(int) - Method in interface neureka.devices.Device.Writer
Writes whatever kind of data was previously specified, to the tensors' data at the position targeted by the provided index.
-
at(int) - Method in class neureka.fluent.slicing.AxisSliceBuilder
+
at(String) - Static method in class neureka.devices.file.FileDevice
+
 
+
at(int) - Method in class neureka.fluent.slicing.AxisSliceBuilder
This method returns an instance of this very AxisSliceBuilder instance disguised by the AxisOrGet interface.
-
at(int) - Method in interface neureka.fluent.slicing.states.FromOrAt
+
at(int) - Method in interface neureka.fluent.slicing.states.FromOrAt
This is a convenience method replacing "from(i).to(i)", meaning that it simply slices a single axis from the original tensor at the specified index.
-
at(int) - Method in interface neureka.fluent.slicing.states.FromOrAtTensor
+
at(int) - Method in interface neureka.fluent.slicing.states.FromOrAtTensor
This is a convenience method replacing "from(i).to(i)", meaning that it simply slices a single axis from the original tensor at the specified index.
-
at(int...) - Method in interface neureka.MutateNda
+
At<K,R> - Interface in neureka.framing.fluent
+
 
+
at(K) - Method in interface neureka.framing.fluent.At
+
 
+
at(int...) - Method in interface neureka.MutateNda
Exposes the MutateNda.Item interface which allows you to get or set individual nd-array items.
-
at(int...) - Method in interface neureka.Nda
+
at(int...) - Method in interface neureka.Nda
This method exposes the Nda.Item API which allows you to get or set individual items within this nd-array targeted by an array of provided indices.
-
at(String) - Static method in class neureka.devices.file.FileDevice
-
 
-
at(K) - Method in interface neureka.framing.fluent.At
-
 
-
At<K,R> - Interface in neureka.framing.fluent
-
 
-
atAxis(Object) - Method in class neureka.framing.NDFrame
+
atAxis(Object) - Method in class neureka.framing.NDFrame
A NDFrame exposes aliases for axes as well as aliases for individual positions within an axis.
-
atIndexAlias(Object) - Method in class neureka.framing.fluent.AxisFrame
+
atIndexAlias(Object) - Method in class neureka.framing.fluent.AxisFrame
 
-
autoDelete(Tensor<?>...) - Static method in class neureka.backend.main.memory.MemUtil
+
autoDelete(Tensor<?>...) - Static method in class neureka.backend.main.memory.MemUtil
This method will try to delete the provided array of tensors if the tensors are not important computation graph components (like derivatives for example).
-
AutoDiffMode - Enum Class in neureka.backend.api
+
AutoDiffMode - Enum in neureka.backend.api
 
-
autoDiffModeFrom(ExecutionCall<? extends Device<?>>) - Method in interface neureka.backend.api.fun.ADSupportPredicate
+
autoDiffModeFrom(ExecutionCall<? extends Device<?>>) - Method in interface neureka.backend.api.fun.ADSupportPredicate
Implementations of this ought to check which auto differentiation mode can be performed for a given ExecutionCall.
-
autoDiffModeFrom(ExecutionCall<? extends Device<?>>) - Method in class neureka.backend.api.template.algorithms.AbstractFunAlgorithm
+
autoDiffModeFrom(ExecutionCall<? extends Device<?>>) - Method in class neureka.backend.api.template.algorithms.AbstractFunAlgorithm
 
-
autoDiffModeFrom(ExecutionCall<? extends Device<?>>) - Method in class neureka.backend.api.template.algorithms.AbstractFunDeviceAlgorithm
+
autoDiffModeFrom(ExecutionCall<? extends Device<?>>) - Method in class neureka.backend.api.template.algorithms.AbstractFunDeviceAlgorithm
 
-
autoDiffModeFrom(ExecutionCall<? extends Device<?>>) - Method in class neureka.backend.api.template.algorithms.FallbackAlgorithm
+
autoDiffModeFrom(ExecutionCall<? extends Device<?>>) - Method in class neureka.backend.api.template.algorithms.FallbackAlgorithm
 
-
autograd() - Method in class neureka.Neureka.Settings
+
autograd() - Method in class neureka.Neureka.Settings
 
-
autograd(Object) - Method in class neureka.Neureka.Settings
+
autograd(Object) - Method in class neureka.Neureka.Settings
This allows you to configure Neureka using a Groovy DSL.
-
AutoGrad() - Constructor for class neureka.Neureka.Settings.AutoGrad
+
AutoGrad() - Constructor for class neureka.Neureka.Settings.AutoGrad
 
-
autogradMode() - Method in class neureka.backend.api.ExecutionCall
+
autogradMode() - Method in class neureka.backend.api.ExecutionCall
This method queries the underlying Operation for a suitable Algorithm for this ExecutionCall to see what kind of auto differentiation can be performed.
-
axis(int) - Method in class neureka.fluent.slicing.AxisSliceBuilder
+
axis(int) - Method in class neureka.fluent.slicing.AxisSliceBuilder
This method returns an instance of the AxisSliceBuilder targeted by the provided index.
-
axis(int) - Method in class neureka.fluent.slicing.SliceBuilder
+
axis(int) - Method in class neureka.fluent.slicing.SliceBuilder
This method returns an instance of the AxisSliceBuilder disguised by the FromOrAt interface.
-
axis(int) - Method in interface neureka.fluent.slicing.states.AxisOrGet
+
axis(int) - Method in interface neureka.fluent.slicing.states.AxisOrGet
Slicing a tensor ultimately means slicing one or more of its axes! This method allows one to specify which axis should be sliced next.
-
axis(int) - Method in interface neureka.fluent.slicing.states.AxisOrGetTensor
+
axis(int) - Method in interface neureka.fluent.slicing.states.AxisOrGetTensor
Slicing a tensor ultimately means slicing one or more of its axes! This method allows one to specify which axis should be sliced next.
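A sketch of how these slicing steps might chain; the slice() entry point and the terminal get() are assumptions, while axis(int), from/to, at(int) and all() are the entries documented in this index:

    import neureka.Tensor;

    Tensor<Double> t = Tensor.of(Double.class).withShape(4, 4).all(0.0); // illustrative source tensor
    Tensor<Double> s = t.slice()                 // entry point into the slice builder (assumed)
                        .axis(0).from(1).to(2)   // rows 1..2
                        .axis(1).all()           // every column, i.e. from(0).to(axisSize-1)
                        .get();                  // terminal step producing the slice (assumed)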
-
AxisFrame<G,V> - Class in neureka.framing.fluent
+
AxisFrame<G,V> - Class in neureka.framing.fluent
This class represents the labeled axis of an NDFrame.
-
AxisFrame.Builder<SetType,GetType,ValueType> - Class in neureka.framing.fluent
+
AxisFrame.Builder<SetType,GetType,ValueType> - Class in neureka.framing.fluent
 
-
AxisFrame.Set<V> - Interface in neureka.framing.fluent
+
AxisFrame.Set<V> - Interface in neureka.framing.fluent
 
-
AxisOrGet<V> - Interface in neureka.fluent.slicing.states
+
AxisOrGet<V> - Interface in neureka.fluent.slicing.states
This is the starting point of the call transition graph exposed by the slice builder API.
-
AxisOrGetTensor<V> - Interface in neureka.fluent.slicing.states
+
AxisOrGetTensor<V> - Interface in neureka.fluent.slicing.states
 
-
AxisSliceBuilder<V> - Class in neureka.fluent.slicing
+
AxisSliceBuilder<V> - Class in neureka.fluent.slicing
 
-
AXPY - Class in neureka.backend.main.operations.linear.internal.blas
+
AXPY - Class in neureka.backend.main.operations.linear.internal.blas
The ?axpy routines perform a vector-vector operation defined as y := a*x + y where: a is a scalar x and y are vectors each with a number of elements that equals n.
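Written out as a plain Java loop, the routine described above computes the following; this is only the mathematical definition, not the library's tuned implementation:

    // y := a*x + y, element by element
    static void axpy(double a, double[] x, double[] y) {
        for (int i = 0; i < y.length; i++)
            y[i] += a * x[i];
    }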
-
AXPY() - Constructor for class neureka.backend.main.operations.linear.internal.blas.AXPY
+
AXPY() - Constructor for class neureka.backend.main.operations.linear.internal.blas.AXPY
 
-

B

-
backend() - Method in class neureka.Neureka
+
backend() - Method in class neureka.Neureka
 
-
BackendContext - Class in neureka.backend.api
+
BackendContext - Class in neureka.backend.api
Instances of this class are execution contexts hosting Operation instances which receive Tensor instances for execution.
-
BackendContext() - Constructor for class neureka.backend.api.BackendContext
+
BackendContext() - Constructor for class neureka.backend.api.BackendContext
This creates a new context which is completely void of any Operation implementation instances.
-
BackendContext.Runner - Class in neureka.backend.api
+
BackendContext.Runner - Class in neureka.backend.api
This is a very simple class with a single purpose, namely it exposes methods which receive lambda instances in order to then execute them in a given BackendContext, just to then switch back to the original context again.
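A sketch of temporarily executing code inside a specific context; the runner() factory method is an assumption, whereas the Runner's call(Supplier<T>) method is listed further down in this index:

    import neureka.Neureka;
    import neureka.Tensor;
    import neureka.backend.api.BackendContext;
    import neureka.math.Function;

    BackendContext context = Neureka.get().backend();   // the context to switch into (assumed accessor)
    Tensor<Double> result =
        context.runner()                                 // runner() is an assumed factory method
               .call(() -> Function.of("i0 + 1").call(Tensor.of(1.0))); // runs inside 'context', then switches back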
-
BackendExtension - Interface in neureka.backend.api
+
BackendExtension - Interface in neureka.backend.api
Implementations of this might introduce CUDA or ROCM to Neureka.
-
BackendExtension.DeviceOption - Class in neureka.backend.api
+
BackendExtension.DeviceOption - Class in neureka.backend.api
This class describes an available Device implementation found for a given BackendExtension.
-
BackendLoader - Interface in neureka.backend.api.ini
+
BackendLoader - Interface in neureka.backend.api.ini
 
-
BackendRegistry - Class in neureka.backend.api.ini
+
BackendRegistry - Class in neureka.backend.api.ini
 
-
backward() - Method in interface neureka.Tensor
+
backward(Tensor<V>) - Method in class neureka.autograd.GraphNode
-
Use this to back-propagate an error signal of 1.0 through the recorded computation graph.
+
This method is the entry-point for the back-propagation process.
-
backward(double) - Method in interface neureka.Tensor
+
backward(Tensor<V>) - Method in interface neureka.Tensor
Tensors which are used or produced by the autograd system will have a GraphNode component attached to them.
-
backward(Tensor<V>) - Method in class neureka.autograd.GraphNode
+
backward(double) - Method in interface neureka.Tensor
-
This method is the entry-point for the back-propagation process.
+
Tensors which are used or produced by the autograd system will have a GraphNode component attached to them.
-
backward(Tensor<V>) - Method in interface neureka.Tensor
+
backward() - Method in interface neureka.Tensor
-
Tensors which are used or produced by the autograd system will have a GraphNode component attached to them.
+
Use this to back-propagate an error signal of 1.0 through the recorded computation graph.
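A minimal autograd sketch tying these entries together; Tensor.of(double), setRqsGradient(true) and Function.of(String) are assumed from elsewhere in the Neureka docs and are not part of this excerpt:

    import neureka.Tensor;
    import neureka.math.Function;

    Tensor<Double> w = Tensor.of(2.0).setRqsGradient(true);  // request gradient tracking (assumed setter)
    Tensor<Double> y = Function.of("i0 * i0").call(w);       // illustrative forward pass
    y.backward();                                            // back-propagate an error signal of 1.0 (entry above)
    w.applyGradient();                                       // apply (or hand to an attached optimizer) the accumulated gradient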
-
BACKWARD_ONLY - Enum constant in enum class neureka.backend.api.AutoDiffMode
-
 
-
backwardJIT(Tensor<V>) - Method in class neureka.autograd.GraphNode
+
backwardJIT(Tensor<V>) - Method in class neureka.autograd.GraphNode
This method is called only when JITProp is active.
-
BAD - Static variable in interface neureka.backend.api.fun.SuitabilityPredicate
+
BAD - Static variable in interface neureka.backend.api.fun.SuitabilityPredicate
 
-
badIfAll(Call.TensorCompare) - Method in class neureka.backend.api.Call.Validator.Estimator
+
badIfAll(Call.TensorCondition) - Method in class neureka.backend.api.Call.Validator.Estimator
 
-
badIfAll(Call.TensorCondition) - Method in class neureka.backend.api.Call.Validator.Estimator
+
badIfAll(Call.TensorCompare) - Method in class neureka.backend.api.Call.Validator.Estimator
 
-
badIfAny(Call.TensorCondition) - Method in class neureka.backend.api.Call.Validator.Estimator
+
badIfAny(Call.TensorCondition) - Method in class neureka.backend.api.Call.Validator.Estimator
 
-
badIfAnyNonNull(Call.TensorCondition) - Method in class neureka.backend.api.Call.Validator.Estimator
+
badIfAnyNonNull(Call.TensorCondition) - Method in class neureka.backend.api.Call.Validator.Estimator
 
-
BasicMachine - Class in neureka.devices.host.machine
+
BasicMachine - Class in neureka.devices.host.machine
How much memory, and how many threads share that memory.
-
BasicMachine(long, int) - Constructor for class neureka.devices.host.machine.BasicMachine
+
BasicMachine(long, int) - Constructor for class neureka.devices.host.machine.BasicMachine
 
-
basicSuitability() - Method in class neureka.backend.api.Call.Validator
+
basicSuitability() - Method in class neureka.backend.api.Call.Validator
The validity as a float, where a value greater than 0 means valid (true) and 0 means invalid (false).
-
belongsToGraph() - Method in interface neureka.Tensor
+
belongsToGraph() - Method in interface neureka.Tensor
Tensors which are used or produced by the autograd system will have a GraphNode component attached to them.
-
BGR_1INT - Enum constant in enum class neureka.Tensor.ImageType
-
 
-
BGR_3BYTE - Enum constant in enum class neureka.Tensor.ImageType
+
BiElementwise - Class in neureka.backend.main.algorithms
 
-
BiElementwise - Class in neureka.backend.main.algorithms
+
BiElementwise() - Constructor for class neureka.backend.main.algorithms.BiElementwise
 
-
BiElementwise() - Constructor for class neureka.backend.main.algorithms.BiElementwise
+
BiScalarBroadcast - Class in neureka.backend.main.algorithms
 
-
BiScalarBroadcast - Class in neureka.backend.main.algorithms
+
BiScalarBroadcast() - Constructor for class neureka.backend.main.algorithms.BiScalarBroadcast
 
-
BiScalarBroadcast() - Constructor for class neureka.backend.main.algorithms.BiScalarBroadcast
+
boolToDouble(boolean[]) - Static method in class neureka.common.utility.DataConverter.Utility
 
-
boolToDouble(boolean[]) - Static method in class neureka.common.utility.DataConverter.Utility
+
boolToFloat(boolean[]) - Static method in class neureka.common.utility.DataConverter.Utility
 
-
boolToFloat(boolean[]) - Static method in class neureka.common.utility.DataConverter.Utility
+
bootstrapTip() - Method in enum neureka.devices.opencl.utility.Messages.Tips
 
-
bootstrapTip() - Method in enum class neureka.devices.opencl.utility.Messages.Tips
-
 
-
borrow(Tensor<V>, Tensor<V>...) - Method in interface neureka.devices.Device
+
borrow(Tensor<V>, Tensor<V>...) - Method in interface neureka.devices.Device
This is a very simple fluent API for temporarily storing a number of tensors on this Device, executing a provided lambda action, and then migrating all the tensors back to their original devices.
-
Broadcast - Class in neureka.backend.main.algorithms
+
Broadcast - Class in neureka.backend.main.algorithms
 
-
Broadcast() - Constructor for class neureka.backend.main.algorithms.Broadcast
+
Broadcast() - Constructor for class neureka.backend.main.algorithms.Broadcast
 
-
bufferType - Variable in enum class neureka.Tensor.ImageType
+
bufferType - Variable in enum neureka.Tensor.ImageType
 
-
build() - Method in class neureka.backend.api.template.operations.OperationBuilder
+
build() - Method in class neureka.backend.api.template.operations.OperationBuilder
 
-
build() - Method in class neureka.framing.fluent.AxisFrame.Builder
+
build() - Method in class neureka.framing.fluent.AxisFrame.Builder
 
-
builder() - Static method in interface neureka.backend.api.Operation
+
builder() - Static method in interface neureka.backend.api.Operation
 
-
builder() - Static method in class neureka.framing.fluent.AxisFrame
+
builder() - Static method in class neureka.framing.fluent.AxisFrame
 
-
buildFunAlgorithm() - Method in class neureka.backend.api.template.algorithms.AbstractFunAlgorithm
+
buildFunAlgorithm() - Method in class neureka.backend.api.template.algorithms.AbstractFunAlgorithm
 
-
buildFunAlgorithm() - Method in class neureka.backend.api.template.algorithms.AbstractFunDeviceAlgorithm
+
buildFunAlgorithm() - Method in class neureka.backend.api.template.algorithms.AbstractFunDeviceAlgorithm
 
-
byDefaults() - Method in interface neureka.view.NdaAsString.Builder
+
byDefaults() - Method in interface neureka.view.NdaAsString.Builder
 
-
byteToBigInteger(byte[]) - Static method in class neureka.common.utility.DataConverter.Utility
+
byteToBigInteger(byte[]) - Static method in class neureka.common.utility.DataConverter.Utility
 
-
byteToDouble(byte[]) - Static method in class neureka.common.utility.DataConverter.Utility
+
byteToDouble(byte[]) - Static method in class neureka.common.utility.DataConverter.Utility
 
-
byteToFloat(byte[]) - Static method in class neureka.common.utility.DataConverter.Utility
+
byteToFloat(byte[]) - Static method in class neureka.common.utility.DataConverter.Utility
 
-
byteToInt(byte[]) - Static method in class neureka.common.utility.DataConverter.Utility
+
byteToInt(byte[]) - Static method in class neureka.common.utility.DataConverter.Utility
 
-
byteToLong(byte[]) - Static method in class neureka.common.utility.DataConverter.Utility
+
byteToLong(byte[]) - Static method in class neureka.common.utility.DataConverter.Utility
 
-
byteToShort(byte[]) - Static method in class neureka.common.utility.DataConverter.Utility
+
byteToShort(byte[]) - Static method in class neureka.common.utility.DataConverter.Utility
 
-

C

-
cache - Variable in class neureka.devices.host.machine.CommonMachine
-
The size of one top level (L3 or L2) cache unit in bytes.
-
Cache<O> - Class in neureka.common.utility
+
Cache<O> - Class in neureka.common.utility
This is a simple, fixed size cache for immutable objects which are shared throughout the library runtime...
-
Cache(int) - Constructor for class neureka.common.utility.Cache
+
Cache(int) - Constructor for class neureka.common.utility.Cache
 
-
Cache.LazyEntry<K,V> - Class in neureka.common.utility
+
cache - Variable in class neureka.devices.host.machine.CommonMachine
+
+
The size of one top level (L3 or L2) cache unit in bytes.
+
+
Cache.LazyEntry<K,V> - Class in neureka.common.utility
Lazy cache entries are entries whose values will be calculated only when the entry is being stored in the cache.
-
calculate(double[], int, int, Function[]) - Method in interface neureka.backend.api.Operation
+
calculate(double[], int, int, Function[]) - Method in interface neureka.backend.api.Operation
This method mainly ought to serve as a reference- and fallback- implementation for tensor backends and also as the backend for handling the calculation of scalar inputs passed to a given abstract syntax tree of Function instances...
-
calculate(double[], int, int, Function[]) - Method in class neureka.backend.main.operations.functions.SiLU
-
 
-
calculate(double[], int, int, Function[]) - Method in class neureka.backend.main.operations.indexer.Product
+
calculate(double, boolean) - Method in interface neureka.backend.main.implementations.fun.api.ScalarFun
 
-
calculate(double[], int, int, Function[]) - Method in class neureka.backend.main.operations.indexer.Summation
+
calculate(double[], int, int, Function[]) - Method in class neureka.backend.main.operations.indexer.Product
 
-
calculate(double[], int, int, Function[]) - Method in class neureka.backend.main.operations.linear.Convolution
+
calculate(double[], int, Function[]) - Static method in class neureka.backend.main.operations.indexer.Product
 
-
calculate(double[], int, int, Function[]) - Method in class neureka.backend.main.operations.linear.DotProduct
+
calculate(double[], int, int, Function[]) - Method in class neureka.backend.main.operations.indexer.Summation
 
-
calculate(double[], int, int, Function[]) - Method in class neureka.backend.main.operations.linear.MatMul
+
calculate(double[], int, Function[]) - Static method in class neureka.backend.main.operations.indexer.Summation
 
-
calculate(double[], int, int, Function[]) - Method in class neureka.backend.main.operations.linear.XConvLeft
+
calculate(double[], int, int, Function[]) - Method in class neureka.backend.main.operations.linear.Convolution
 
-
calculate(double[], int, int, Function[]) - Method in class neureka.backend.main.operations.linear.XConvRight
+
calculate(double[], int, int, Function[]) - Method in class neureka.backend.main.operations.linear.DotProduct
 
-
calculate(double[], int, int, Function[]) - Method in class neureka.backend.main.operations.operator.Addition
+
calculate(double[], int, int, Function[]) - Method in class neureka.backend.main.operations.linear.MatMul
 
-
calculate(double[], int, int, Function[]) - Method in class neureka.backend.main.operations.operator.Division
+
calculate(double[], int, int, Function[]) - Method in class neureka.backend.main.operations.linear.XConvLeft
 
-
calculate(double[], int, int, Function[]) - Method in class neureka.backend.main.operations.operator.Modulo
+
calculate(double[], int, int, Function[]) - Method in class neureka.backend.main.operations.linear.XConvRight
 
-
calculate(double[], int, int, Function[]) - Method in class neureka.backend.main.operations.operator.Multiplication
+
calculate(double[], int, int, Function[]) - Method in class neureka.backend.main.operations.operator.Addition
 
-
calculate(double[], int, int, Function[]) - Method in class neureka.backend.main.operations.operator.Power
+
calculate(double[], int, Function[]) - Static method in class neureka.backend.main.operations.operator.Addition
 
-
calculate(double[], int, int, Function[]) - Method in class neureka.backend.main.operations.operator.Subtraction
+
calculate(double[], int, int, Function[]) - Method in class neureka.backend.main.operations.operator.Division
 
-
calculate(double[], int, int, Function[]) - Method in class neureka.backend.main.operations.other.AssignLeft
+
calculate(double[], int, Function[]) - Static method in class neureka.backend.main.operations.operator.Division
 
-
calculate(double[], int, int, Function[]) - Method in class neureka.backend.main.operations.other.Cat
+
calculate(double[], int, Function[]) - Static method in class neureka.backend.main.operations.operator.Modulo
 
-
calculate(double[], int, int, Function[]) - Method in class neureka.backend.main.operations.other.DimFit
+
calculate(double[], int, int, Function[]) - Method in class neureka.backend.main.operations.operator.Modulo
 
-
calculate(double[], int, int, Function[]) - Method in class neureka.backend.main.operations.other.DimTrim
+
calculate(double[], int, int, Function[]) - Method in class neureka.backend.main.operations.operator.Multiplication
 
-
calculate(double[], int, int, Function[]) - Method in class neureka.backend.main.operations.other.Max
+
calculate(double[], int, Function[]) - Static method in class neureka.backend.main.operations.operator.Multiplication
 
-
calculate(double[], int, int, Function[]) - Method in class neureka.backend.main.operations.other.Min
+
calculate(double[], int, int, Function[]) - Method in class neureka.backend.main.operations.operator.Power
 
-
calculate(double[], int, int, Function[]) - Method in class neureka.backend.main.operations.other.Permute
+
calculate(double[], int, Function[]) - Static method in class neureka.backend.main.operations.operator.Power
 
-
calculate(double[], int, int, Function[]) - Method in class neureka.backend.main.operations.other.Randomization
+
calculate(double[], int, int, Function[]) - Method in class neureka.backend.main.operations.operator.Subtraction
 
-
calculate(double[], int, int, Function[]) - Method in class neureka.backend.main.operations.other.ReLayout
+
calculate(double[], int, Function[]) - Static method in class neureka.backend.main.operations.operator.Subtraction
 
-
calculate(double[], int, int, Function[]) - Method in class neureka.backend.main.operations.other.Reshape
+
calculate(double[], int, int, Function[]) - Method in class neureka.backend.main.operations.other.AssignLeft
 
-
calculate(double[], int, int, Function[]) - Method in class neureka.backend.main.operations.other.Slice
+
calculate(double[], int, int, Function[]) - Method in class neureka.backend.main.operations.other.Cat
 
-
calculate(double[], int, int, Function[]) - Method in class neureka.backend.main.operations.other.Sum
+
calculate(double[], int, int, Function[]) - Method in class neureka.backend.main.operations.other.DimFit
 
-
calculate(double[], int, Function[]) - Static method in class neureka.backend.main.operations.indexer.Product
+
calculate(double[], int, int, Function[]) - Method in class neureka.backend.main.operations.other.DimTrim
 
-
calculate(double[], int, Function[]) - Static method in class neureka.backend.main.operations.indexer.Summation
+
calculate(double[], int, int, Function[]) - Method in class neureka.backend.main.operations.other.Max
 
-
calculate(double[], int, Function[]) - Static method in class neureka.backend.main.operations.operator.Addition
+
calculate(double[], int, int, Function[]) - Method in class neureka.backend.main.operations.other.Min
 
-
calculate(double[], int, Function[]) - Static method in class neureka.backend.main.operations.operator.Division
+
calculate(double[], int, int, Function[]) - Method in class neureka.backend.main.operations.other.Permute
 
-
calculate(double[], int, Function[]) - Static method in class neureka.backend.main.operations.operator.Modulo
+
calculate(double[], int, int, Function[]) - Method in class neureka.backend.main.operations.other.Randomization
 
-
calculate(double[], int, Function[]) - Static method in class neureka.backend.main.operations.operator.Multiplication
+
calculate(double[], int, int, Function[]) - Method in class neureka.backend.main.operations.other.ReLayout
 
-
calculate(double[], int, Function[]) - Static method in class neureka.backend.main.operations.operator.Power
+
calculate(double[], int, int, Function[]) - Method in class neureka.backend.main.operations.other.Reshape
 
-
calculate(double[], int, Function[]) - Static method in class neureka.backend.main.operations.operator.Subtraction
+
calculate(double[], int, int, Function[]) - Method in class neureka.backend.main.operations.other.Slice
 
-
calculate(double, boolean) - Method in interface neureka.backend.main.implementations.fun.api.ScalarFun
+
calculate(double[], int, int, Function[]) - Method in class neureka.backend.main.operations.other.Sum
 
-
call(double) - Method in interface neureka.math.Function
+
call(Supplier<T>) - Method in class neureka.backend.api.BackendContext.Runner
-
Invokes this Function with the provided scalar as a single input and returns the scalar result.
-
-
call(double...) - Method in interface neureka.math.Function
-
-
Invokes this Function with the provided array of inputs.
+
Use this method to supply a lambda which will be executed in the BackendContext which produced this very BackendContext.Runner instance.
-
call(double[], int) - Method in interface neureka.math.Function
+
Call<D> - Class in neureka.backend.api
-
Invokes this Function with the provided array of inputs and an index for input dependent indexing.
+
Instances of this class model simple execution calls to the backend.
-
call(double[], int) - Method in class neureka.math.implementations.FunctionConstant
+
Call(Tensor<?>[], D, List<Arg>) - Constructor for class neureka.backend.api.Call
 
-
call(double[], int) - Method in class neureka.math.implementations.FunctionInput
+
call(int) - Method in class neureka.devices.opencl.KernelCaller
 
-
call(double[], int) - Method in class neureka.math.implementations.FunctionNode
-
 
-
call(double[], int) - Method in class neureka.math.implementations.FunctionVariable
-
 
-
call(int) - Method in class neureka.devices.opencl.KernelCaller
-
 
-
call(long[], long[]) - Method in class neureka.devices.opencl.KernelCaller
+
call(long[], long[]) - Method in class neureka.devices.opencl.KernelCaller
Use this to call the kernel with 2 long arrays defining how the kernel should be indexed and parallelized.
-
call(Supplier<T>) - Method in class neureka.backend.api.BackendContext.Runner
+
call(double) - Method in interface neureka.math.Function
-
Use this method to supply a lambda which will be executed in the BackendContext which produced this very BackendContext.Runner instance.
+
Invokes this Function with the provided scalar as a single input and returns the scalar result.
-
call(List<Tensor<T>>) - Method in interface neureka.math.Function
-
 
-
call(Call.Builder<T, D>) - Method in interface neureka.math.Function
+
call(double[], int) - Method in interface neureka.math.Function
+
+
Invokes this Function with the provided array of inputs and an index for input dependent indexing.
+
+
call(double...) - Method in interface neureka.math.Function
+
+
Invokes this Function with the provided array of inputs.
+
+
call(Call.Builder<T, D>) - Method in interface neureka.math.Function
Use this for more control over the execution, which is especially useful when interfacing with more complex types of operations, requiring more context information.
-
call(Call<D>) - Method in interface neureka.math.Function
+
call(Call<D>) - Method in interface neureka.math.Function
Use this for more control over the execution, which is very helpful when interfacing with more complex types of operations, requiring more context information.
-
call(Args, Tensor<T>...) - Method in interface neureka.math.Function
+
call(Args, Tensor<T>...) - Method in interface neureka.math.Function
Use this to call this Function alongside with some additional meta-arguments which will be passed to the underlying Operation(s).
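A sketch of forwarding meta-arguments with a call; Function.of(String), Args.of(...) and Arg.DerivIdx.of(int) are assumed helper factories, and only the call(Args, Tensor<T>...) signature itself comes from this index:

    import neureka.Tensor;
    import neureka.math.Function;
    import neureka.math.args.Arg;
    import neureka.math.args.Args;

    Function mul = Function.of("i0 * i1");                            // assumed string-parsing factory
    Tensor<Double> a = Tensor.of(3.0), b = Tensor.of(4.0);
    Tensor<Double> out = mul.call(Args.of(Arg.DerivIdx.of(0)), a, b); // DerivIdx forwarded to the Operation (assumed helpers and semantics)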
-
call(Tensor<T>) - Method in interface neureka.math.Function
+
call(Tensor<T>) - Method in interface neureka.math.Function
 
-
call(Tensor<T>...) - Method in interface neureka.math.Function
+
call(List<Tensor<T>>) - Method in interface neureka.math.Function
 
-
call(Tensor<T>...) - Method in interface neureka.math.Function.Callable
+
call(Tensor<T>[], int) - Method in interface neureka.math.Function
 
-
call(Tensor<T>[], int) - Method in interface neureka.math.Function
+
call(Tensor<T>...) - Method in interface neureka.math.Function
 
-
Call<D> - Class in neureka.backend.api
-
-
Instances of this class model simple execution calls to the backend.
-
-
Call(Tensor<?>[], D, List<Arg>) - Constructor for class neureka.backend.api.Call
+
call(Tensor<T>...) - Method in interface neureka.math.Function.Callable
 
-
Call.Builder<V,T extends Device<V>> - Class in neureka.backend.api
+
call(double[], int) - Method in class neureka.math.implementations.FunctionConstant
 
-
Call.DeviceCondition - Interface in neureka.backend.api
+
call(double[], int) - Method in class neureka.math.implementations.FunctionInput
 
-
Call.Else<T> - Interface in neureka.backend.api
+
call(double[], int) - Method in class neureka.math.implementations.FunctionNode
 
-
Call.OperationCondition - Interface in neureka.backend.api
+
call(double[], int) - Method in class neureka.math.implementations.FunctionVariable
 
-
Call.TensorCompare - Interface in neureka.backend.api
+
Call.Builder<V,T extends Device<V>> - Class in neureka.backend.api
 
-
Call.TensorCondition - Interface in neureka.backend.api
+
Call.DeviceCondition - Interface in neureka.backend.api
 
-
Call.TensorProperty - Interface in neureka.backend.api
+
Call.Else<T> - Interface in neureka.backend.api
 
-
Call.TensorsCondition - Interface in neureka.backend.api
+
Call.OperationCondition - Interface in neureka.backend.api
 
-
Call.Validator - Class in neureka.backend.api
+
Call.TensorCompare - Interface in neureka.backend.api
+
 
+
Call.TensorCondition - Interface in neureka.backend.api
+
 
+
Call.TensorProperty - Interface in neureka.backend.api
+
 
+
Call.TensorsCondition - Interface in neureka.backend.api
+
 
+
Call.Validator - Class in neureka.backend.api
This is a simple nested class offering various lambda based methods for validating the tensor arguments stored inside this ExecutionCall.
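A hedged sketch of how an Algorithm implementation might use this validator; obtaining it via call.validate() is an assumption, while allNotNull(...) and basicSuitability() are entries in this index and isVirtual() is assumed to exist on Tensor:

    float suitability = call.validate()                          // 'call' is an ExecutionCall handed to the algorithm (assumed accessor)
                            .allNotNull( t -> !t.isVirtual() )   // illustrative condition on every non-null tensor argument
                            .basicSuitability();                 // collapses the checks into a suitability float (>0 passes)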
-
Call.Validator.Estimator - Class in neureka.backend.api
+
Call.Validator.Estimator - Class in neureka.backend.api
 
-
canAccessOpenCL() - Method in class neureka.Neureka
+
canAccessOpenCL() - Method in class neureka.Neureka
 
-
canAccessOpenCLDevice() - Method in class neureka.Neureka
+
canAccessOpenCLDevice() - Method in class neureka.Neureka
 
-
canBeDeleted() - Method in class neureka.autograd.GraphNode
+
canBeDeleted() - Method in class neureka.autograd.GraphNode
 
-
Cat - Class in neureka.backend.main.operations.other
+
Cat - Class in neureka.backend.main.operations.other
 
-
Cat() - Constructor for class neureka.backend.main.operations.other.Cat
+
Cat() - Constructor for class neureka.backend.main.operations.other.Cat
 
-
cbrt() - Method in class neureka.math.Functions
+
CBRT - Static variable in interface neureka.backend.main.implementations.fun.api.ScalarFun
 
-
cbrt() - Method in interface neureka.Tensor
-
-
This method is functionally identical to the following alternatives:
-
-
Cbrt - Class in neureka.backend.main.operations.functions
+
Cbrt - Class in neureka.backend.main.operations.functions
 
-
Cbrt() - Constructor for class neureka.backend.main.operations.functions.Cbrt
+
Cbrt() - Constructor for class neureka.backend.main.operations.functions.Cbrt
 
-
CBRT - Static variable in interface neureka.backend.main.implementations.fun.api.ScalarFun
+
cbrt() - Method in class neureka.math.Functions
 
-
change() - Method in class neureka.devices.ReferenceCounter.ChangeEvent
+
cbrt() - Method in interface neureka.Tensor
+
+
This method is functionally identical to the following alternatives:
+
+
change() - Method in class neureka.devices.ReferenceCounter.ChangeEvent
 
-
ChangeEvent(ReferenceCounter.ChangeType, int, int) - Constructor for class neureka.devices.ReferenceCounter.ChangeEvent
+
ChangeEvent(ReferenceCounter.ChangeType, int, int) - Constructor for class neureka.devices.ReferenceCounter.ChangeEvent
 
-
check(Operation) - Method in interface neureka.backend.api.Call.OperationCondition
+
check(Device<?>) - Method in interface neureka.backend.api.Call.DeviceCondition
 
-
check(Device<?>) - Method in interface neureka.backend.api.Call.DeviceCondition
+
check(Operation) - Method in interface neureka.backend.api.Call.OperationCondition
 
-
check(Tensor<?>) - Method in interface neureka.backend.api.Call.TensorCondition
+
check(Tensor<?>, Tensor<?>) - Method in interface neureka.backend.api.Call.TensorCompare
 
-
check(Tensor<?>[]) - Method in interface neureka.backend.api.Call.TensorsCondition
+
check(Tensor<?>) - Method in interface neureka.backend.api.Call.TensorCondition
 
-
check(Tensor<?>, Tensor<?>) - Method in interface neureka.backend.api.Call.TensorCompare
+
check(Tensor<?>[]) - Method in interface neureka.backend.api.Call.TensorsCondition
 
-
checkArity() - Method in class neureka.backend.api.ExecutionCall
+
checkArity() - Method in class neureka.backend.api.ExecutionCall
 
-
childCount() - Method in class neureka.framing.Relation
+
childCount() - Method in class neureka.framing.Relation
 
-
CLBackend - Class in neureka.backend.ocl
+
CLBackend - Class in neureka.backend.ocl
This is an OpenCL context component for any given BackendContext which extends a given backend context instance for additional functionality, which in this case is the OpenCL backend storing platform and device information.
-
CLBackend() - Constructor for class neureka.backend.ocl.CLBackend
+
CLBackend() - Constructor for class neureka.backend.ocl.CLBackend
Use this constructor if you want to create a new OpenCL world in which there are unique OpenCLPlatform and OpenCLDevice instances.
-
CLBiElementwise - Class in neureka.backend.main.implementations.elementwise
+
CLBiElementwise - Class in neureka.backend.main.implementations.elementwise
 
-
CLBiElementwise(String, String, String) - Constructor for class neureka.backend.main.implementations.elementwise.CLBiElementwise
+
CLBiElementwise(String, String, String) - Constructor for class neureka.backend.main.implementations.elementwise.CLBiElementwise
 
-
CLBiElementwiseAddition - Class in neureka.backend.main.implementations.elementwise
+
CLBiElementwiseAddition - Class in neureka.backend.main.implementations.elementwise
 
-
CLBiElementwiseAddition(String) - Constructor for class neureka.backend.main.implementations.elementwise.CLBiElementwiseAddition
+
CLBiElementwiseAddition(String) - Constructor for class neureka.backend.main.implementations.elementwise.CLBiElementwiseAddition
 
-
CLBiElementwiseDivision - Class in neureka.backend.main.implementations.elementwise
+
CLBiElementwiseDivision - Class in neureka.backend.main.implementations.elementwise
 
-
CLBiElementwiseDivision(String) - Constructor for class neureka.backend.main.implementations.elementwise.CLBiElementwiseDivision
+
CLBiElementwiseDivision(String) - Constructor for class neureka.backend.main.implementations.elementwise.CLBiElementwiseDivision
 
-
CLBiElementwiseModulo - Class in neureka.backend.main.implementations.elementwise
+
CLBiElementwiseModulo - Class in neureka.backend.main.implementations.elementwise
 
-
CLBiElementwiseModulo(String) - Constructor for class neureka.backend.main.implementations.elementwise.CLBiElementwiseModulo
+
CLBiElementwiseModulo(String) - Constructor for class neureka.backend.main.implementations.elementwise.CLBiElementwiseModulo
 
-
CLBiElementwiseMultiplication - Class in neureka.backend.main.implementations.elementwise
+
CLBiElementwiseMultiplication - Class in neureka.backend.main.implementations.elementwise
 
-
CLBiElementwiseMultiplication(String) - Constructor for class neureka.backend.main.implementations.elementwise.CLBiElementwiseMultiplication
+
CLBiElementwiseMultiplication(String) - Constructor for class neureka.backend.main.implementations.elementwise.CLBiElementwiseMultiplication
 
-
CLBiElementwisePower - Class in neureka.backend.main.implementations.elementwise
+
CLBiElementwisePower - Class in neureka.backend.main.implementations.elementwise
 
-
CLBiElementwisePower(String) - Constructor for class neureka.backend.main.implementations.elementwise.CLBiElementwisePower
+
CLBiElementwisePower(String) - Constructor for class neureka.backend.main.implementations.elementwise.CLBiElementwisePower
 
-
CLBiElementwiseSubtraction - Class in neureka.backend.main.implementations.elementwise
+
CLBiElementwiseSubtraction - Class in neureka.backend.main.implementations.elementwise
 
-
CLBiElementwiseSubtraction(String) - Constructor for class neureka.backend.main.implementations.elementwise.CLBiElementwiseSubtraction
+
CLBiElementwiseSubtraction(String) - Constructor for class neureka.backend.main.implementations.elementwise.CLBiElementwiseSubtraction
 
-
CLBroadcast - Class in neureka.backend.main.implementations.broadcast
+
CLBroadcast - Class in neureka.backend.main.implementations.broadcast
 
-
CLBroadcast(String, String, String) - Constructor for class neureka.backend.main.implementations.broadcast.CLBroadcast
+
CLBroadcast(String, String, String) - Constructor for class neureka.backend.main.implementations.broadcast.CLBroadcast
 
-
CLBroadcastAddition - Class in neureka.backend.main.implementations.broadcast
+
CLBroadcastAddition - Class in neureka.backend.main.implementations.broadcast
 
-
CLBroadcastAddition(String) - Constructor for class neureka.backend.main.implementations.broadcast.CLBroadcastAddition
+
CLBroadcastAddition(String) - Constructor for class neureka.backend.main.implementations.broadcast.CLBroadcastAddition
 
-
CLBroadcastDivision - Class in neureka.backend.main.implementations.broadcast
+
CLBroadcastDivision - Class in neureka.backend.main.implementations.broadcast
 
-
CLBroadcastDivision(String) - Constructor for class neureka.backend.main.implementations.broadcast.CLBroadcastDivision
+
CLBroadcastDivision(String) - Constructor for class neureka.backend.main.implementations.broadcast.CLBroadcastDivision
 
-
CLBroadcastModulo - Class in neureka.backend.main.implementations.broadcast
+
CLBroadcastModulo - Class in neureka.backend.main.implementations.broadcast
 
-
CLBroadcastModulo(String) - Constructor for class neureka.backend.main.implementations.broadcast.CLBroadcastModulo
+
CLBroadcastModulo(String) - Constructor for class neureka.backend.main.implementations.broadcast.CLBroadcastModulo
 
-
CLBroadcastMultiplication - Class in neureka.backend.main.implementations.broadcast
+
CLBroadcastMultiplication - Class in neureka.backend.main.implementations.broadcast
 
-
CLBroadcastMultiplication(String) - Constructor for class neureka.backend.main.implementations.broadcast.CLBroadcastMultiplication
+
CLBroadcastMultiplication(String) - Constructor for class neureka.backend.main.implementations.broadcast.CLBroadcastMultiplication
 
-
CLBroadcastPower - Class in neureka.backend.main.implementations.broadcast
+
CLBroadcastPower - Class in neureka.backend.main.implementations.broadcast
 
-
CLBroadcastPower(String) - Constructor for class neureka.backend.main.implementations.broadcast.CLBroadcastPower
+
CLBroadcastPower(String) - Constructor for class neureka.backend.main.implementations.broadcast.CLBroadcastPower
 
-
CLBroadcastSubtraction - Class in neureka.backend.main.implementations.broadcast
+
CLBroadcastSubtraction - Class in neureka.backend.main.implementations.broadcast
 
-
CLBroadcastSubtraction(String) - Constructor for class neureka.backend.main.implementations.broadcast.CLBroadcastSubtraction
+
CLBroadcastSubtraction(String) - Constructor for class neureka.backend.main.implementations.broadcast.CLBroadcastSubtraction
 
-
clConfigOf(NDConfiguration) - Method in class neureka.devices.opencl.OpenCLDevice
+
clConfigOf(Tensor<?>) - Method in class neureka.devices.opencl.OpenCLDevice
 
-
clConfigOf(Tensor<?>) - Method in class neureka.devices.opencl.OpenCLDevice
+
clConfigOf(NDConfiguration) - Method in class neureka.devices.opencl.OpenCLDevice
 
-
clContextCouldNotFindAnyDevices() - Static method in class neureka.devices.opencl.utility.Messages
+
clContextCouldNotFindAnyDevices() - Static method in class neureka.devices.opencl.utility.Messages
 
-
clContextCreationFailed() - Static method in class neureka.devices.opencl.utility.Messages
+
clContextCreationFailed() - Static method in class neureka.devices.opencl.utility.Messages
 
-
CLConvolution - Class in neureka.backend.main.implementations.convolution
+
CLConvolution - Class in neureka.backend.main.implementations.convolution
 
-
CLConvolution(String) - Constructor for class neureka.backend.main.implementations.convolution.CLConvolution
+
CLConvolution(String) - Constructor for class neureka.backend.main.implementations.convolution.CLConvolution
 
-
CLDot - Class in neureka.backend.main.implementations.linear
+
CLDot - Class in neureka.backend.main.implementations.linear
Performs a dot product on two vectors using OpenCL.
-
CLDot() - Constructor for class neureka.backend.main.implementations.linear.CLDot
+
CLDot() - Constructor for class neureka.backend.main.implementations.linear.CLDot
 
-
cleanedHeadAndTail(String) - Static method in class neureka.math.parsing.ParseUtil
+
cleanedHeadAndTail(String) - Static method in class neureka.math.parsing.ParseUtil
 
-
cleanup(Runnable) - Method in interface neureka.devices.Device.Access
+
cleanup(Runnable) - Method in interface neureka.devices.Device.Access
Use this to perform some custom memory cleanup for when the accessed Tensor gets garbage collected.
-
CLElementwiseFunction - Class in neureka.backend.main.implementations.elementwise
+
CLElementwiseFunction - Class in neureka.backend.main.implementations.elementwise
 
-
CLElementwiseFunction(ScalarFun) - Constructor for class neureka.backend.main.implementations.elementwise.CLElementwiseFunction
+
CLElementwiseFunction(ScalarFun) - Constructor for class neureka.backend.main.implementations.elementwise.CLElementwiseFunction
 
-
CLFunctionCompiler - Class in neureka.devices.opencl.utility
+
CLFunctionCompiler - Class in neureka.devices.opencl.utility
Turns a Function into OpenCL kernel code to make optimized just-in-time compilation possible.
-
CLFunctionCompiler(OpenCLDevice, Function, String) - Constructor for class neureka.devices.opencl.utility.CLFunctionCompiler
+
CLFunctionCompiler(OpenCLDevice, Function, String) - Constructor for class neureka.devices.opencl.utility.CLFunctionCompiler
 
-
CLGEMM - Class in neureka.backend.main.operations.linear.internal.opencl
+
CLGEMM - Class in neureka.backend.main.operations.linear.internal.opencl
 
-
CLGEMM() - Constructor for class neureka.backend.main.operations.linear.internal.opencl.CLGEMM
+
CLGEMM() - Constructor for class neureka.backend.main.operations.linear.internal.opencl.CLGEMM
 
-
CLImplementation - Class in neureka.backend.main.implementations
+
CLImplementation - Class in neureka.backend.main.implementations
This class is the ExecutorFor<OpenCLDevice> implementation used to properly call an OpenCLDevice instance via the ExecutionOn<OpenCLDevice> lambda implementation receiving an instance of the ExecutionCall class.
-
CLImplementation(ImplementationFor<OpenCLDevice>, int) - Constructor for class neureka.backend.main.implementations.CLImplementation
+
CLImplementation(ImplementationFor<OpenCLDevice>, int) - Constructor for class neureka.backend.main.implementations.CLImplementation
 
-
CLMatMul - Class in neureka.backend.main.implementations.matmul
+
CLMatMul - Class in neureka.backend.main.implementations.matmul
 
-
CLMatMul() - Constructor for class neureka.backend.main.implementations.matmul.CLMatMul
+
CLMatMul() - Constructor for class neureka.backend.main.implementations.matmul.CLMatMul
 
-
clone() - Method in class neureka.backend.api.BackendContext
+
clone() - Method in class neureka.backend.api.BackendContext
This method produces a shallow copy of this BackendContext.
-
clone() - Method in class neureka.view.NDPrintSettings
+
clone() - Method in class neureka.view.NDPrintSettings
 
-
CLRandomization - Class in neureka.backend.main.implementations.elementwise
+
CLRandomization - Class in neureka.backend.main.implementations.elementwise
 
-
CLRandomization() - Constructor for class neureka.backend.main.implementations.elementwise.CLRandomization
+
CLRandomization() - Constructor for class neureka.backend.main.implementations.elementwise.CLRandomization
 
-
CLReduce - Class in neureka.backend.main.operations.linear.internal.opencl
+
CLReduce - Class in neureka.backend.main.operations.linear.internal.opencl
 
-
CLReduce(CLReduce.Type) - Constructor for class neureka.backend.main.operations.linear.internal.opencl.CLReduce
+
CLReduce(CLReduce.Type) - Constructor for class neureka.backend.main.operations.linear.internal.opencl.CLReduce
 
-
CLReduce.Type - Enum Class in neureka.backend.main.operations.linear.internal.opencl
+
CLReduce.Type - Enum in neureka.backend.main.operations.linear.internal.opencl
 
-
CLScalarBroadcast - Class in neureka.backend.main.implementations.broadcast
+
CLScalarBroadcast - Class in neureka.backend.main.implementations.broadcast
 
-
CLScalarBroadcast(String, String, String) - Constructor for class neureka.backend.main.implementations.broadcast.CLScalarBroadcast
+
CLScalarBroadcast(String, String, String) - Constructor for class neureka.backend.main.implementations.broadcast.CLScalarBroadcast
 
-
CLScalarBroadcastAddition - Class in neureka.backend.main.implementations.broadcast
+
CLScalarBroadcastAddition - Class in neureka.backend.main.implementations.broadcast
 
-
CLScalarBroadcastAddition(String) - Constructor for class neureka.backend.main.implementations.broadcast.CLScalarBroadcastAddition
+
CLScalarBroadcastAddition(String) - Constructor for class neureka.backend.main.implementations.broadcast.CLScalarBroadcastAddition
 
-
CLScalarBroadcastDivision - Class in neureka.backend.main.implementations.broadcast
+
CLScalarBroadcastDivision - Class in neureka.backend.main.implementations.broadcast
 
-
CLScalarBroadcastDivision(String) - Constructor for class neureka.backend.main.implementations.broadcast.CLScalarBroadcastDivision
+
CLScalarBroadcastDivision(String) - Constructor for class neureka.backend.main.implementations.broadcast.CLScalarBroadcastDivision
 
-
CLScalarBroadcastIdentity - Class in neureka.backend.main.implementations.broadcast
+
CLScalarBroadcastIdentity - Class in neureka.backend.main.implementations.broadcast
 
-
CLScalarBroadcastIdentity(String) - Constructor for class neureka.backend.main.implementations.broadcast.CLScalarBroadcastIdentity
+
CLScalarBroadcastIdentity(String) - Constructor for class neureka.backend.main.implementations.broadcast.CLScalarBroadcastIdentity
 
-
CLScalarBroadcastModulo - Class in neureka.backend.main.implementations.broadcast
+
CLScalarBroadcastModulo - Class in neureka.backend.main.implementations.broadcast
 
-
CLScalarBroadcastModulo(String) - Constructor for class neureka.backend.main.implementations.broadcast.CLScalarBroadcastModulo
+
CLScalarBroadcastModulo(String) - Constructor for class neureka.backend.main.implementations.broadcast.CLScalarBroadcastModulo
 
-
CLScalarBroadcastMultiplication - Class in neureka.backend.main.implementations.broadcast
+
CLScalarBroadcastMultiplication - Class in neureka.backend.main.implementations.broadcast
 
-
CLScalarBroadcastMultiplication(String) - Constructor for class neureka.backend.main.implementations.broadcast.CLScalarBroadcastMultiplication
+
CLScalarBroadcastMultiplication(String) - Constructor for class neureka.backend.main.implementations.broadcast.CLScalarBroadcastMultiplication
 
-
CLScalarBroadcastPower - Class in neureka.backend.main.implementations.broadcast
+
CLScalarBroadcastPower - Class in neureka.backend.main.implementations.broadcast
 
-
CLScalarBroadcastPower(String) - Constructor for class neureka.backend.main.implementations.broadcast.CLScalarBroadcastPower
+
CLScalarBroadcastPower(String) - Constructor for class neureka.backend.main.implementations.broadcast.CLScalarBroadcastPower
 
-
CLScalarBroadcastSubtraction - Class in neureka.backend.main.implementations.broadcast
+
CLScalarBroadcastSubtraction - Class in neureka.backend.main.implementations.broadcast
 
-
CLScalarBroadcastSubtraction(String) - Constructor for class neureka.backend.main.implementations.broadcast.CLScalarBroadcastSubtraction
+
CLScalarBroadcastSubtraction(String) - Constructor for class neureka.backend.main.implementations.broadcast.CLScalarBroadcastSubtraction
 
-
CLScalarFunction - Class in neureka.backend.main.implementations.scalar
+
CLScalarFunction - Class in neureka.backend.main.implementations.scalar
 
-
CLScalarFunction(ScalarFun) - Constructor for class neureka.backend.main.implementations.scalar.CLScalarFunction
+
CLScalarFunction(ScalarFun) - Constructor for class neureka.backend.main.implementations.scalar.CLScalarFunction
 
-
CLSettings - Class in neureka.backend.ocl
+
CLSettings - Class in neureka.backend.ocl
OpenCL related settings for the CLBackend extension.
-
CLSettings() - Constructor for class neureka.backend.ocl.CLSettings
-
 
-
CLSum - Class in neureka.backend.main.operations.linear.internal.opencl
-
 
-
CLSum() - Constructor for class neureka.backend.main.operations.linear.internal.opencl.CLSum
+
CLSettings() - Constructor for class neureka.backend.ocl.CLSettings
 
-
COL_MAJOR - Enum constant in enum class neureka.ndim.config.NDTrait
+
CLSum - Class in neureka.backend.main.operations.linear.internal.opencl
 
-
COLUMN_MAJOR - Enum constant in enum class neureka.ndim.config.NDConfiguration.Layout
+
CLSum() - Constructor for class neureka.backend.main.operations.linear.internal.opencl.CLSum
 
-
CommonMachine - Class in neureka.devices.host.machine
+
CommonMachine - Class in neureka.devices.host.machine
Stuff common to Hardware and ConcreteMachine.
-
CommonMachine(String, BasicMachine[]) - Constructor for class neureka.devices.host.machine.CommonMachine
+
CommonMachine(Hardware, Runtime) - Constructor for class neureka.devices.host.machine.CommonMachine
+
 
+
CommonMachine(String, BasicMachine[]) - Constructor for class neureka.devices.host.machine.CommonMachine
new MemoryThreads[] { SYSTEM, L3, L2, L1 } or new MemoryThreads[] { SYSTEM, L2, L1 } or in worst case new MemoryThreads[] { SYSTEM, L1 }
-
CommonMachine(Hardware, Runtime) - Constructor for class neureka.devices.host.machine.CommonMachine
-
 
-
COMPACT - Enum constant in enum class neureka.autograd.GraphNode.Print
-
 
-
COMPACT - Enum constant in enum class neureka.ndim.config.NDTrait
-
 
-
compareTo(Hardware) - Method in class neureka.devices.host.machine.Hardware
+
compareTo(Hardware) - Method in class neureka.devices.host.machine.Hardware
 
-
compileAdHocKernel(String, String) - Method in class neureka.devices.opencl.OpenCLDevice
+
compileAdHocKernel(String, String) - Method in class neureka.devices.opencl.OpenCLDevice
This method compiles a so-called "ad hoc" kernel.
-
compileAndGetAdHocKernel(String, String) - Method in class neureka.devices.opencl.OpenCLDevice
+
compileAndGetAdHocKernel(String, String) - Method in class neureka.devices.opencl.OpenCLDevice
This method compiles and returns the KernelCaller for a so-called "ad hoc" kernel.
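The two entries above describe the "ad hoc" kernel workflow of OpenCLDevice. The following is a minimal, hedged sketch of that workflow: the device is assumed to be supplied by the surrounding backend code, and the kernel name and source are purely illustrative; only the two method signatures come from this index.

import neureka.devices.opencl.OpenCLDevice;

// Hedged sketch of the "ad hoc" kernel API; kernel name and source are made up.
class AdHocKernelSketch {
    static void compile(OpenCLDevice device) {
        String name = "scale_by_two"; // hypothetical kernel name
        String source =
            "__kernel void scale_by_two(__global float* data) {\n" +
            "    unsigned int i = get_global_id(0);\n" +
            "    data[i] = data[i] * 2.0f;\n" +
            "}";
        device.compileAdHocKernel(name, source);                     // compile and cache the kernel
        var caller = device.compileAndGetAdHocKernel(name, source); // compile and obtain the kernel caller
    }
}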
-
Component<O> - Interface in neureka.common.composition
+
Component<O> - Interface in neureka.common.composition
This interface, alongside the AbstractComponentOwner class, defines a simple component system.
-
Component.IsBeing - Enum Class in neureka.common.composition
+
Component.IsBeing - Enum in neureka.common.composition
Entries of this enum represent events describing updates to the state of the owner of a given Component instance.
-
Component.OwnerChangeRequest<O> - Interface in neureka.common.composition
+
Component.OwnerChangeRequest<O> - Interface in neureka.common.composition
Component.OwnerChangeRequest implementation instances will be passed to the Component.update(OwnerChangeRequest) method, which informs a given component about a state change related to said component.
-
ComponentOwner<C> - Interface in neureka.common.composition
+
ComponentOwner<C> - Interface in neureka.common.composition
A component owner is a thing holding components which can be accessed by their type class.
-
concat() - Method in class neureka.math.Functions
+
concat() - Method in class neureka.math.Functions
 
-
concatAt(int, Nda<V>) - Method in interface neureka.Nda
+
concatAt(int, Nda<V>, Nda<V>...) - Method in interface neureka.Nda
-
This method concatenates the provided nd-array together with this nd-array along a specified axis.
+
This method concatenates the provided nd-arrays together with this nd-array along a specified axis.
-
concatAt(int, Nda<V>) - Method in interface neureka.Tensor
+
concatAt(int, Nda<V>) - Method in interface neureka.Nda
This method concatenates the provided nd-array together with this nd-array along a specified axis.
-
concatAt(int, Nda<V>, Nda<V>...) - Method in interface neureka.Nda
+
concatAt(int, Nda<V>, Nda<V>...) - Method in interface neureka.Tensor
This method concatenates the provided nd-arrays together with this nd-array along a specified axis.
-
concatAt(int, Nda<V>, Nda<V>...) - Method in interface neureka.Tensor
+
concatAt(int, Nda<V>) - Method in interface neureka.Tensor
-
This method concatenates the provided nd-arrays together with this nd-array along a specified axis.
+
This method concatenates the provided nd-array together with this nd-array along a specified axis.
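Since the concatAt overloads above differ only in how many additional nd-arrays they accept, a small hedged sketch may help; the tensors are assumed to be created elsewhere with compatible shapes, and only the concatAt signatures are taken from this index.

import neureka.Tensor;

// Hedged sketch: concatenating along axis 0; the tensors are assumed inputs.
class ConcatSketch {
    static void concatRows(Tensor<Double> a, Tensor<Double> b, Tensor<Double> c) {
        var pair  = a.concatAt(0, b);     // this tensor followed by b along axis 0
        var chain = a.concatAt(0, b, c);  // varargs overload: a, b and c along axis 0
    }
}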
-
ConcreteMachine - Class in neureka.devices.host.machine
+
ConcreteMachine - Class in neureka.devices.host.machine
 
-
confidence() - Method in class neureka.backend.api.BackendExtension.DeviceOption
+
confidence() - Method in class neureka.backend.api.BackendExtension.DeviceOption
 
-
configure(Object) - Static method in class neureka.Neureka
+
configure(Object) - Static method in class neureka.Neureka
This allows you to configure Neureka using a Groovy DSL.
-
construct() - Static method in class neureka.ndim.config.types.simple.Simple0DConfiguration
+
construct(int[], int[], int[]) - Static method in class neureka.ndim.config.types.permuted.Permuted1DConfiguration
 
-
construct(int[]) - Static method in class neureka.ndim.config.types.views.virtual.VirtualNDConfiguration
+
construct(int[], int[], int[]) - Static method in class neureka.ndim.config.types.permuted.Permuted2DConfiguration
 
-
construct(int[], int[]) - Static method in class neureka.ndim.config.types.simple.Simple1DConfiguration
+
construct(int[], int[], int[]) - Static method in class neureka.ndim.config.types.permuted.Permuted3DConfiguration
 
-
construct(int[], int[]) - Static method in class neureka.ndim.config.types.simple.Simple2DConfiguration
+
construct(int[], int[], int[]) - Static method in class neureka.ndim.config.types.permuted.PermutedNDConfiguration
 
-
construct(int[], int[]) - Static method in class neureka.ndim.config.types.simple.Simple3DConfiguration
+
construct() - Static method in class neureka.ndim.config.types.simple.Simple0DConfiguration
 
-
construct(int[], int[]) - Static method in class neureka.ndim.config.types.simple.SimpleNDConfiguration
+
construct(int[], int[]) - Static method in class neureka.ndim.config.types.simple.Simple1DConfiguration
 
-
construct(int[], int[]) - Static method in class neureka.ndim.config.types.sliced.Sliced0DConfiguration
+
construct(int[], int[]) - Static method in class neureka.ndim.config.types.simple.Simple2DConfiguration
 
-
construct(int[], int[], int[]) - Static method in class neureka.ndim.config.types.permuted.Permuted1DConfiguration
+
construct(int[], int[]) - Static method in class neureka.ndim.config.types.simple.Simple3DConfiguration
 
-
construct(int[], int[], int[]) - Static method in class neureka.ndim.config.types.permuted.Permuted2DConfiguration
+
construct(int[], int[]) - Static method in class neureka.ndim.config.types.simple.SimpleNDConfiguration
 
-
construct(int[], int[], int[]) - Static method in class neureka.ndim.config.types.permuted.Permuted3DConfiguration
+
construct(int[], int[]) - Static method in class neureka.ndim.config.types.sliced.Sliced0DConfiguration
 
-
construct(int[], int[], int[]) - Static method in class neureka.ndim.config.types.permuted.PermutedNDConfiguration
+
construct(int[], int[], int[], int[], int[]) - Static method in class neureka.ndim.config.types.sliced.Sliced1DConfiguration
 
-
construct(int[], int[], int[], int[], int[]) - Static method in class neureka.ndim.config.types.sliced.Sliced1DConfiguration
+
construct(int[], int[], int[], int[], int[]) - Static method in class neureka.ndim.config.types.sliced.Sliced2DConfiguration
 
-
construct(int[], int[], int[], int[], int[]) - Static method in class neureka.ndim.config.types.sliced.Sliced2DConfiguration
+
construct(int[], int[], int[], int[], int[]) - Static method in class neureka.ndim.config.types.sliced.Sliced3DConfiguration
 
-
construct(int[], int[], int[], int[], int[]) - Static method in class neureka.ndim.config.types.sliced.Sliced3DConfiguration
+
construct(int[], int[], int[], int[], int[]) - Static method in class neureka.ndim.config.types.sliced.SlicedNDConfiguration
 
-
construct(int[], int[], int[], int[], int[]) - Static method in class neureka.ndim.config.types.sliced.SlicedNDConfiguration
+
construct(int[]) - Static method in class neureka.ndim.config.types.views.virtual.VirtualNDConfiguration
 
-
contains(Tensor<V>) - Method in class neureka.devices.AbstractBaseDevice
+
contains(Tensor<V>) - Method in class neureka.devices.AbstractBaseDevice
 
-
contains(Tensor<Number>) - Method in class neureka.devices.file.IDXHandle
+
contains(Tensor<V>) - Method in interface neureka.devices.Storage
 
-
contains(Tensor<V>) - Method in interface neureka.devices.Storage
-
 
-
contains(Tensor<V>) - Method in interface neureka.Tensor
+
contains(Tensor<V>) - Method in interface neureka.Tensor
This method name translates to the "in" keyword in Kotlin! The same is true for the "isCase" method in Groovy.
-
CONTINUOUS_MATRIX - Enum constant in enum class neureka.ndim.config.NDTrait
-
 
-
conv() - Method in class neureka.math.Functions
+
conv() - Method in class neureka.math.Functions
 
-
conv(Tensor<V>) - Method in interface neureka.Tensor
+
conv(Tensor<V>) - Method in interface neureka.Tensor
This method performs convolution between this tensor and the one passed as argument.
-
convDot(Tensor<V>) - Method in interface neureka.Tensor
+
convDot(Tensor<V>) - Method in interface neureka.Tensor
This method performs a convolution-based dot product between the last dimension of this tensor and the first dimension of the passed tensor.
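To make the difference between the two entries above concrete, here is a hedged sketch; the tensors are assumed inputs of suitable rank, and only conv(Tensor<V>) and convDot(Tensor<V>) come from this index.

import neureka.Tensor;

// Hedged sketch: plain convolution vs. the convolution-based dot product.
class ConvSketch {
    static void run(Tensor<Double> x, Tensor<Double> kernel) {
        var convolved  = x.conv(kernel);    // convolution between this tensor and the argument
        var contracted = x.convDot(kernel); // convolves the last axis of x with the first axis of kernel
    }
}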
-
convert(Object, Class<T>) - Method in class neureka.common.utility.DataConverter
+
convert(Object, Class<T>) - Method in class neureka.common.utility.DataConverter
This method embodies the purpose of this class.
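As a hedged illustration of the entry above: the static get() accessor used here is an assumption based on the singleton note further down this index, and only convert(Object, Class<T>) is taken from the entry itself.

import neureka.common.utility.DataConverter;

// Hedged sketch: converting a raw float[] into a double[] via the singleton converter.
class ConvertSketch {
    static double[] toDoubles(float[] raw) {
        return DataConverter.get().convert(raw, double[].class); // target type passed as a class literal
    }
}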
-
convertToHolder(Object) - Method in class neureka.dtype.custom.F32
+
convertToHolder(Object) - Method in class neureka.dtype.custom.F32
 
-
convertToHolder(Object) - Method in class neureka.dtype.custom.F64
+
convertToHolder(Object) - Method in class neureka.dtype.custom.F64
 
-
convertToHolder(Object) - Method in class neureka.dtype.custom.I16
+
convertToHolder(Object) - Method in class neureka.dtype.custom.I16
 
-
convertToHolder(Object) - Method in class neureka.dtype.custom.I32
+
convertToHolder(Object) - Method in class neureka.dtype.custom.I32
 
-
convertToHolder(Object) - Method in class neureka.dtype.custom.I64
+
convertToHolder(Object) - Method in class neureka.dtype.custom.I64
 
-
convertToHolder(Object) - Method in class neureka.dtype.custom.I8
+
convertToHolder(Object) - Method in class neureka.dtype.custom.I8
 
-
convertToHolder(Object) - Method in class neureka.dtype.custom.UI16
+
convertToHolder(Object) - Method in class neureka.dtype.custom.UI16
 
-
convertToHolder(Object) - Method in class neureka.dtype.custom.UI32
+
convertToHolder(Object) - Method in class neureka.dtype.custom.UI32
 
-
convertToHolder(Object) - Method in class neureka.dtype.custom.UI64
+
convertToHolder(Object) - Method in class neureka.dtype.custom.UI64
 
-
convertToHolder(Object) - Method in class neureka.dtype.custom.UI8
+
convertToHolder(Object) - Method in class neureka.dtype.custom.UI8
 
-
convertToHolder(Object) - Method in interface neureka.dtype.NumericType
+
convertToHolder(Object) - Method in interface neureka.dtype.NumericType
This method is a generic converter from any object to an instance of the HolderType parameter specified by an implementation of this interface.
-
convertToHolderArray(Object) - Method in class neureka.dtype.custom.F32
+
convertToHolderArray(Object) - Method in class neureka.dtype.custom.F32
 
-
convertToHolderArray(Object) - Method in class neureka.dtype.custom.F64
+
convertToHolderArray(Object) - Method in class neureka.dtype.custom.F64
 
-
convertToHolderArray(Object) - Method in class neureka.dtype.custom.I16
+
convertToHolderArray(Object) - Method in class neureka.dtype.custom.I16
 
-
convertToHolderArray(Object) - Method in class neureka.dtype.custom.I32
+
convertToHolderArray(Object) - Method in class neureka.dtype.custom.I32
 
-
convertToHolderArray(Object) - Method in class neureka.dtype.custom.I64
+
convertToHolderArray(Object) - Method in class neureka.dtype.custom.I64
 
-
convertToHolderArray(Object) - Method in class neureka.dtype.custom.I8
+
convertToHolderArray(Object) - Method in class neureka.dtype.custom.I8
 
-
convertToHolderArray(Object) - Method in class neureka.dtype.custom.UI16
+
convertToHolderArray(Object) - Method in class neureka.dtype.custom.UI16
 
-
convertToHolderArray(Object) - Method in class neureka.dtype.custom.UI32
+
convertToHolderArray(Object) - Method in class neureka.dtype.custom.UI32
 
-
convertToHolderArray(Object) - Method in class neureka.dtype.custom.UI64
+
convertToHolderArray(Object) - Method in class neureka.dtype.custom.UI64
 
-
convertToHolderArray(Object) - Method in class neureka.dtype.custom.UI8
+
convertToHolderArray(Object) - Method in class neureka.dtype.custom.UI8
 
-
convertToHolderArray(Object) - Method in interface neureka.dtype.NumericType
+
convertToHolderArray(Object) - Method in interface neureka.dtype.NumericType
This method is a generic converter from any object to an instance of the HolderType parameter specified by an implementation of this interface.
-
convertToTarget(Object) - Method in class neureka.dtype.custom.F32
+
convertToTarget(Object) - Method in class neureka.dtype.custom.F32
 
-
convertToTarget(Object) - Method in class neureka.dtype.custom.F64
+
convertToTarget(Object) - Method in class neureka.dtype.custom.F64
 
-
convertToTarget(Object) - Method in class neureka.dtype.custom.I16
+
convertToTarget(Object) - Method in class neureka.dtype.custom.I16
 
-
convertToTarget(Object) - Method in class neureka.dtype.custom.I32
+
convertToTarget(Object) - Method in class neureka.dtype.custom.I32
 
-
convertToTarget(Object) - Method in class neureka.dtype.custom.I64
+
convertToTarget(Object) - Method in class neureka.dtype.custom.I64
 
-
convertToTarget(Object) - Method in class neureka.dtype.custom.I8
+
convertToTarget(Object) - Method in class neureka.dtype.custom.I8
 
-
convertToTarget(Object) - Method in class neureka.dtype.custom.UI16
+
convertToTarget(Object) - Method in class neureka.dtype.custom.UI16
 
-
convertToTarget(Object) - Method in class neureka.dtype.custom.UI32
+
convertToTarget(Object) - Method in class neureka.dtype.custom.UI32
 
-
convertToTarget(Object) - Method in class neureka.dtype.custom.UI64
+
convertToTarget(Object) - Method in class neureka.dtype.custom.UI64
 
-
convertToTarget(Object) - Method in class neureka.dtype.custom.UI8
+
convertToTarget(Object) - Method in class neureka.dtype.custom.UI8
 
-
convertToTarget(Object) - Method in interface neureka.dtype.NumericType
+
convertToTarget(Object) - Method in interface neureka.dtype.NumericType
This method is a generic converter from any object to an instance of the TargetType parameter specified by an implementation of this interface.
-
convertToTargetArray(Object) - Method in class neureka.dtype.custom.F32
+
convertToTargetArray(Object) - Method in class neureka.dtype.custom.F32
 
-
convertToTargetArray(Object) - Method in class neureka.dtype.custom.F64
+
convertToTargetArray(Object) - Method in class neureka.dtype.custom.F64
 
-
convertToTargetArray(Object) - Method in class neureka.dtype.custom.I16
+
convertToTargetArray(Object) - Method in class neureka.dtype.custom.I16
 
-
convertToTargetArray(Object) - Method in class neureka.dtype.custom.I32
+
convertToTargetArray(Object) - Method in class neureka.dtype.custom.I32
 
-
convertToTargetArray(Object) - Method in class neureka.dtype.custom.I64
+
convertToTargetArray(Object) - Method in class neureka.dtype.custom.I64
 
-
convertToTargetArray(Object) - Method in class neureka.dtype.custom.I8
+
convertToTargetArray(Object) - Method in class neureka.dtype.custom.I8
 
-
convertToTargetArray(Object) - Method in class neureka.dtype.custom.UI16
+
convertToTargetArray(Object) - Method in class neureka.dtype.custom.UI16
 
-
convertToTargetArray(Object) - Method in class neureka.dtype.custom.UI32
+
convertToTargetArray(Object) - Method in class neureka.dtype.custom.UI32
 
-
convertToTargetArray(Object) - Method in class neureka.dtype.custom.UI64
+
convertToTargetArray(Object) - Method in class neureka.dtype.custom.UI64
 
-
convertToTargetArray(Object) - Method in class neureka.dtype.custom.UI8
+
convertToTargetArray(Object) - Method in class neureka.dtype.custom.UI8
 
-
convertToTargetArray(Object) - Method in interface neureka.dtype.NumericType
+
convertToTargetArray(Object) - Method in interface neureka.dtype.NumericType
This method is a generic converter from any object to an instance of the TargetArrayType parameter specified by an implementation of this interface.
-
Convolution - Class in neureka.backend.main.operations.linear
+
Convolution - Class in neureka.backend.main.operations.linear
 
-
Convolution() - Constructor for class neureka.backend.main.operations.linear.Convolution
+
Convolution() - Constructor for class neureka.backend.main.operations.linear.Convolution
 
-
ConvUtil - Class in neureka.backend.main.operations
+
ConvUtil - Class in neureka.backend.main.operations
 
-
ConvUtil() - Constructor for class neureka.backend.main.operations.ConvUtil
+
ConvUtil() - Constructor for class neureka.backend.main.operations.ConvUtil
 
-
COPY - Class in neureka.backend.main.operations.linear.internal.blas
+
COPY - Class in neureka.backend.main.operations.linear.internal.blas
The ?copy routines perform a vector-vector operation defined as y = x, where x and y are vectors.
-
COPY() - Constructor for class neureka.backend.main.operations.linear.internal.blas.COPY
+
COPY() - Constructor for class neureka.backend.main.operations.linear.internal.blas.COPY
 
-
copyOf(T[]) - Static method in class neureka.backend.main.operations.linear.internal.blas.COPY
+
copyOf(T[]) - Static method in class neureka.backend.main.operations.linear.internal.blas.COPY
 
-
cores - Variable in class neureka.devices.host.machine.CommonMachine
+
cores - Variable in class neureka.devices.host.machine.CommonMachine
The total number of processor cores.
-
CORES - Enum constant in enum class neureka.devices.host.concurrent.Parallelism
-
-
The number of CPU cores
-
-
cos() - Method in class neureka.math.Functions
+
cos() - Method in class neureka.math.Functions
 
-
cos() - Method in interface neureka.Tensor
+
cos() - Method in interface neureka.Tensor
This method is functionally identical to the following alternatives:
-
Cosinus - Class in neureka.backend.main.operations.functions
+
COSINUS - Static variable in interface neureka.backend.main.implementations.fun.api.ScalarFun
 
-
Cosinus() - Constructor for class neureka.backend.main.operations.functions.Cosinus
+
Cosinus - Class in neureka.backend.main.operations.functions
 
-
COSINUS - Static variable in interface neureka.backend.main.implementations.fun.api.ScalarFun
+
Cosinus() - Constructor for class neureka.backend.main.operations.functions.Cosinus
 
-
count() - Method in class neureka.devices.ReferenceCounter
+
count() - Method in class neureka.devices.ReferenceCounter
 
-
count(Predicate<Integer>) - Method in interface neureka.Shape
-
 
-
count(Predicate<V>) - Method in interface neureka.Nda
+
count(Predicate<V>) - Method in interface neureka.Nda
Iterates over every element of this nd-array, and counts the number of times the provided lambda matches the items of this array.
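A hedged one-liner for the entry above; the nd-array is assumed to be supplied by the caller and the return type is left to inference, since only count(Predicate<V>) is taken from this index.

import neureka.Nda;

// Hedged sketch: counting the items that match a predicate.
class CountSketch {
    static void run(Nda<Double> nda) {
        var positives = nda.count(v -> v > 0.0); // how many items are strictly positive
    }
}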
-
CPU - Class in neureka.devices.host
+
count(Predicate<Integer>) - Method in interface neureka.Shape
+
 
+
CPU - Class in neureka.devices.host
The CPU class, one of many implementations of the Device interface, is simply supposed to be an API for dispatching threaded workloads onto the CPU as well as reading from or writing to tensors it stores.
-
CPU - Enum constant in enum class neureka.devices.opencl.OpenCLDevice.Type
+
CPU.IndexedWorkload - Interface in neureka.devices.host
 
-
CPU.IndexedWorkload - Interface in neureka.devices.host
-
 
-
CPU.JVMExecutor - Class in neureka.devices.host
+
CPU.JVMExecutor - Class in neureka.devices.host
The CPU.JVMExecutor offers a similar functionality as the parallel stream API, however it differs in that the CPU.JVMExecutor is processing CPU.RangeWorkload lambdas instead of simply exposing a single index or concrete elements for a given workload size.
-
CPU.RangeWorkload - Interface in neureka.devices.host
+
CPU.RangeWorkload - Interface in neureka.devices.host
A simple functional interface for executing a range whose implementations will either be executed sequentially or they are being dispatched to a thread-pool, given that the provided workload is large enough.
-
CPUBackend - Class in neureka.backend.cpu
+
CPUBackend - Class in neureka.backend.cpu
This class loads the CPU operations into the Neureka library context.
-
CPUBackend() - Constructor for class neureka.backend.cpu.CPUBackend
+
CPUBackend() - Constructor for class neureka.backend.cpu.CPUBackend
 
-
CPUBiElementWise - Class in neureka.backend.main.implementations.elementwise
+
CPUBiElementWise - Class in neureka.backend.main.implementations.elementwise
 
-
CPUBiElementWise() - Constructor for class neureka.backend.main.implementations.elementwise.CPUBiElementWise
+
CPUBiElementWise() - Constructor for class neureka.backend.main.implementations.elementwise.CPUBiElementWise
 
-
CPUBiElementWiseAddition - Class in neureka.backend.main.implementations.elementwise
+
CPUBiElementWiseAddition - Class in neureka.backend.main.implementations.elementwise
 
-
CPUBiElementWiseAddition() - Constructor for class neureka.backend.main.implementations.elementwise.CPUBiElementWiseAddition
+
CPUBiElementWiseAddition() - Constructor for class neureka.backend.main.implementations.elementwise.CPUBiElementWiseAddition
 
-
CPUBiElementWiseDivision - Class in neureka.backend.main.implementations.elementwise
+
CPUBiElementWiseDivision - Class in neureka.backend.main.implementations.elementwise
 
-
CPUBiElementWiseDivision() - Constructor for class neureka.backend.main.implementations.elementwise.CPUBiElementWiseDivision
+
CPUBiElementWiseDivision() - Constructor for class neureka.backend.main.implementations.elementwise.CPUBiElementWiseDivision
 
-
CPUBiElementWiseModulo - Class in neureka.backend.main.implementations.elementwise
+
CPUBiElementWiseModulo - Class in neureka.backend.main.implementations.elementwise
 
-
CPUBiElementWiseModulo() - Constructor for class neureka.backend.main.implementations.elementwise.CPUBiElementWiseModulo
+
CPUBiElementWiseModulo() - Constructor for class neureka.backend.main.implementations.elementwise.CPUBiElementWiseModulo
 
-
CPUBiElementWiseMultiplication - Class in neureka.backend.main.implementations.elementwise
+
CPUBiElementWiseMultiplication - Class in neureka.backend.main.implementations.elementwise
 
-
CPUBiElementWiseMultiplication() - Constructor for class neureka.backend.main.implementations.elementwise.CPUBiElementWiseMultiplication
+
CPUBiElementWiseMultiplication() - Constructor for class neureka.backend.main.implementations.elementwise.CPUBiElementWiseMultiplication
 
-
CPUBiElementWisePower - Class in neureka.backend.main.implementations.elementwise
+
CPUBiElementWisePower - Class in neureka.backend.main.implementations.elementwise
 
-
CPUBiElementWisePower() - Constructor for class neureka.backend.main.implementations.elementwise.CPUBiElementWisePower
+
CPUBiElementWisePower() - Constructor for class neureka.backend.main.implementations.elementwise.CPUBiElementWisePower
 
-
CPUBiElementWiseSubtraction - Class in neureka.backend.main.implementations.elementwise
+
CPUBiElementWiseSubtraction - Class in neureka.backend.main.implementations.elementwise
 
-
CPUBiElementWiseSubtraction() - Constructor for class neureka.backend.main.implementations.elementwise.CPUBiElementWiseSubtraction
+
CPUBiElementWiseSubtraction() - Constructor for class neureka.backend.main.implementations.elementwise.CPUBiElementWiseSubtraction
 
-
CPUBiFun - Interface in neureka.backend.main.implementations.fun.api
+
CPUBiFun - Interface in neureka.backend.main.implementations.fun.api
 
-
CPUBroadcast - Class in neureka.backend.main.implementations.broadcast
+
CPUBroadcast - Class in neureka.backend.main.implementations.broadcast
 
-
CPUBroadcast() - Constructor for class neureka.backend.main.implementations.broadcast.CPUBroadcast
+
CPUBroadcast() - Constructor for class neureka.backend.main.implementations.broadcast.CPUBroadcast
 
-
CPUBroadcastAddition - Class in neureka.backend.main.implementations.broadcast
+
CPUBroadcastAddition - Class in neureka.backend.main.implementations.broadcast
 
-
CPUBroadcastAddition() - Constructor for class neureka.backend.main.implementations.broadcast.CPUBroadcastAddition
+
CPUBroadcastAddition() - Constructor for class neureka.backend.main.implementations.broadcast.CPUBroadcastAddition
 
-
CPUBroadcastDivision - Class in neureka.backend.main.implementations.broadcast
+
CPUBroadcastDivision - Class in neureka.backend.main.implementations.broadcast
 
-
CPUBroadcastDivision() - Constructor for class neureka.backend.main.implementations.broadcast.CPUBroadcastDivision
+
CPUBroadcastDivision() - Constructor for class neureka.backend.main.implementations.broadcast.CPUBroadcastDivision
 
-
CPUBroadcastModulo - Class in neureka.backend.main.implementations.broadcast
+
CPUBroadcastModulo - Class in neureka.backend.main.implementations.broadcast
 
-
CPUBroadcastModulo() - Constructor for class neureka.backend.main.implementations.broadcast.CPUBroadcastModulo
+
CPUBroadcastModulo() - Constructor for class neureka.backend.main.implementations.broadcast.CPUBroadcastModulo
 
-
CPUBroadcastMultiplication - Class in neureka.backend.main.implementations.broadcast
+
CPUBroadcastMultiplication - Class in neureka.backend.main.implementations.broadcast
 
-
CPUBroadcastMultiplication() - Constructor for class neureka.backend.main.implementations.broadcast.CPUBroadcastMultiplication
+
CPUBroadcastMultiplication() - Constructor for class neureka.backend.main.implementations.broadcast.CPUBroadcastMultiplication
 
-
CPUBroadcastPower - Class in neureka.backend.main.implementations.broadcast
+
CPUBroadcastPower - Class in neureka.backend.main.implementations.broadcast
 
-
CPUBroadcastPower() - Constructor for class neureka.backend.main.implementations.broadcast.CPUBroadcastPower
+
CPUBroadcastPower() - Constructor for class neureka.backend.main.implementations.broadcast.CPUBroadcastPower
 
-
CPUBroadcastSubtraction - Class in neureka.backend.main.implementations.broadcast
+
CPUBroadcastSubtraction - Class in neureka.backend.main.implementations.broadcast
 
-
CPUBroadcastSubtraction() - Constructor for class neureka.backend.main.implementations.broadcast.CPUBroadcastSubtraction
+
CPUBroadcastSubtraction() - Constructor for class neureka.backend.main.implementations.broadcast.CPUBroadcastSubtraction
 
-
CPUBroadcastSummation - Class in neureka.backend.main.implementations.broadcast
+
CPUBroadcastSummation - Class in neureka.backend.main.implementations.broadcast
 
-
CPUBroadcastSummation() - Constructor for class neureka.backend.main.implementations.broadcast.CPUBroadcastSummation
+
CPUBroadcastSummation() - Constructor for class neureka.backend.main.implementations.broadcast.CPUBroadcastSummation
 
-
CPUConvolution - Class in neureka.backend.main.implementations.convolution
+
CPUConvolution - Class in neureka.backend.main.implementations.convolution
 
-
CPUConvolution() - Constructor for class neureka.backend.main.implementations.convolution.CPUConvolution
+
CPUConvolution() - Constructor for class neureka.backend.main.implementations.convolution.CPUConvolution
 
-
CPUDot - Class in neureka.backend.main.implementations.linear
+
CPUDot - Class in neureka.backend.main.implementations.linear
 
-
CPUDot() - Constructor for class neureka.backend.main.implementations.linear.CPUDot
+
CPUDot() - Constructor for class neureka.backend.main.implementations.linear.CPUDot
 
-
CPUElementwiseAssignFun - Class in neureka.backend.main.implementations.elementwise
+
CPUElementwiseAssignFun - Class in neureka.backend.main.implementations.elementwise
 
-
CPUElementwiseAssignFun() - Constructor for class neureka.backend.main.implementations.elementwise.CPUElementwiseAssignFun
+
CPUElementwiseAssignFun() - Constructor for class neureka.backend.main.implementations.elementwise.CPUElementwiseAssignFun
 
-
CPUElementwiseFunction - Class in neureka.backend.main.implementations.elementwise
+
CPUElementwiseFunction - Class in neureka.backend.main.implementations.elementwise
 
-
CPUElementwiseFunction(ScalarFun) - Constructor for class neureka.backend.main.implementations.elementwise.CPUElementwiseFunction
+
CPUElementwiseFunction(ScalarFun) - Constructor for class neureka.backend.main.implementations.elementwise.CPUElementwiseFunction
 
-
CPUFun - Interface in neureka.backend.main.implementations.fun.api
+
CPUFun - Interface in neureka.backend.main.implementations.fun.api
 
-
CPUImplementation - Class in neureka.backend.main.implementations
+
CPUImplementation - Class in neureka.backend.main.implementations
This class is a wrapper class for the ImplementationFor<CPU> interface which enables a functional style of implementing the backend API!
It is used merely as a simple formality and implementation type specification.
-
CPUImplementation.AndImplementation - Interface in neureka.backend.main.implementations
+
CPUImplementation.AndImplementation - Interface in neureka.backend.main.implementations
This represents the second step in the simple builder API for CPUImplementation instances.
-
CPUMatMul - Class in neureka.backend.main.implementations.matmul
+
CPUMatMul - Class in neureka.backend.main.implementations.matmul
This is a library-internal class; do not depend on it.
-
CPUMatMul() - Constructor for class neureka.backend.main.implementations.matmul.CPUMatMul
+
CPUMatMul() - Constructor for class neureka.backend.main.implementations.matmul.CPUMatMul
 
-
CPURandomization - Class in neureka.backend.main.implementations.elementwise
+
CPURandomization - Class in neureka.backend.main.implementations.elementwise
 
-
CPURandomization() - Constructor for class neureka.backend.main.implementations.elementwise.CPURandomization
+
CPURandomization() - Constructor for class neureka.backend.main.implementations.elementwise.CPURandomization
 
-
CPUReduce - Class in neureka.backend.main.operations.other.internal
+
CPUReduce - Class in neureka.backend.main.operations.other.internal
An implementation of the min and max algorithm running on the CPU.
-
CPUReduce(CPUReduce.Type) - Constructor for class neureka.backend.main.operations.other.internal.CPUReduce
+
CPUReduce(CPUReduce.Type) - Constructor for class neureka.backend.main.operations.other.internal.CPUReduce
 
-
CPUReduce.Type - Enum Class in neureka.backend.main.operations.other.internal
+
CPUReduce.Type - Enum in neureka.backend.main.operations.other.internal
 
-
CPUScalaBroadcastPower - Class in neureka.backend.main.implementations.broadcast
+
CPUScalaBroadcastPower - Class in neureka.backend.main.implementations.broadcast
 
-
CPUScalaBroadcastPower() - Constructor for class neureka.backend.main.implementations.broadcast.CPUScalaBroadcastPower
+
CPUScalaBroadcastPower() - Constructor for class neureka.backend.main.implementations.broadcast.CPUScalaBroadcastPower
 
-
CPUScalarBroadcast - Class in neureka.backend.main.implementations.broadcast
+
CPUScalarBroadcast - Class in neureka.backend.main.implementations.broadcast
 
-
CPUScalarBroadcast() - Constructor for class neureka.backend.main.implementations.broadcast.CPUScalarBroadcast
+
CPUScalarBroadcast() - Constructor for class neureka.backend.main.implementations.broadcast.CPUScalarBroadcast
 
-
CPUScalarBroadcastAddition - Class in neureka.backend.main.implementations.broadcast
+
CPUScalarBroadcastAddition - Class in neureka.backend.main.implementations.broadcast
 
-
CPUScalarBroadcastAddition() - Constructor for class neureka.backend.main.implementations.broadcast.CPUScalarBroadcastAddition
+
CPUScalarBroadcastAddition() - Constructor for class neureka.backend.main.implementations.broadcast.CPUScalarBroadcastAddition
 
-
CPUScalarBroadcastDivision - Class in neureka.backend.main.implementations.broadcast
+
CPUScalarBroadcastDivision - Class in neureka.backend.main.implementations.broadcast
 
-
CPUScalarBroadcastDivision() - Constructor for class neureka.backend.main.implementations.broadcast.CPUScalarBroadcastDivision
+
CPUScalarBroadcastDivision() - Constructor for class neureka.backend.main.implementations.broadcast.CPUScalarBroadcastDivision
 
-
CPUScalarBroadcastFunction - Class in neureka.backend.main.implementations.scalar
+
CPUScalarBroadcastFunction - Class in neureka.backend.main.implementations.scalar
 
-
CPUScalarBroadcastFunction(ScalarFun) - Constructor for class neureka.backend.main.implementations.scalar.CPUScalarBroadcastFunction
+
CPUScalarBroadcastFunction(ScalarFun) - Constructor for class neureka.backend.main.implementations.scalar.CPUScalarBroadcastFunction
 
-
CPUScalarBroadcastIdentity - Class in neureka.backend.main.implementations.broadcast
+
CPUScalarBroadcastIdentity - Class in neureka.backend.main.implementations.broadcast
 
-
CPUScalarBroadcastIdentity() - Constructor for class neureka.backend.main.implementations.broadcast.CPUScalarBroadcastIdentity
+
CPUScalarBroadcastIdentity() - Constructor for class neureka.backend.main.implementations.broadcast.CPUScalarBroadcastIdentity
 
-
CPUScalarBroadcastModulo - Class in neureka.backend.main.implementations.broadcast
+
CPUScalarBroadcastModulo - Class in neureka.backend.main.implementations.broadcast
 
-
CPUScalarBroadcastModulo() - Constructor for class neureka.backend.main.implementations.broadcast.CPUScalarBroadcastModulo
+
CPUScalarBroadcastModulo() - Constructor for class neureka.backend.main.implementations.broadcast.CPUScalarBroadcastModulo
 
-
CPUScalarBroadcastMultiplication - Class in neureka.backend.main.implementations.broadcast
+
CPUScalarBroadcastMultiplication - Class in neureka.backend.main.implementations.broadcast
 
-
CPUScalarBroadcastMultiplication() - Constructor for class neureka.backend.main.implementations.broadcast.CPUScalarBroadcastMultiplication
+
CPUScalarBroadcastMultiplication() - Constructor for class neureka.backend.main.implementations.broadcast.CPUScalarBroadcastMultiplication
 
-
CPUScalarBroadcastSubtraction - Class in neureka.backend.main.implementations.broadcast
+
CPUScalarBroadcastSubtraction - Class in neureka.backend.main.implementations.broadcast
 
-
CPUScalarBroadcastSubtraction() - Constructor for class neureka.backend.main.implementations.broadcast.CPUScalarBroadcastSubtraction
+
CPUScalarBroadcastSubtraction() - Constructor for class neureka.backend.main.implementations.broadcast.CPUScalarBroadcastSubtraction
 
-
CPUScalarFunction - Class in neureka.backend.main.implementations.scalar
+
CPUScalarFunction - Class in neureka.backend.main.implementations.scalar
 
-
CPUScalarFunction(ScalarFun) - Constructor for class neureka.backend.main.implementations.scalar.CPUScalarFunction
+
CPUScalarFunction(ScalarFun) - Constructor for class neureka.backend.main.implementations.scalar.CPUScalarFunction
 
-
CPUSum - Class in neureka.backend.main.operations.other.internal
+
CPUSum - Class in neureka.backend.main.operations.other.internal
An implementation of the sum algorithm running on the CPU.
-
CPUSum() - Constructor for class neureka.backend.main.operations.other.internal.CPUSum
+
CPUSum() - Constructor for class neureka.backend.main.operations.other.internal.CPUSum
 
-
create(Tensor<V>) - Method in class neureka.optimization.implementations.AdaGradFactory
+
create(Tensor<V>) - Method in class neureka.optimization.implementations.AdaGradFactory
 
-
create(Tensor<V>) - Method in class neureka.optimization.implementations.ADAMFactory
+
create(Tensor<V>) - Method in class neureka.optimization.implementations.ADAMFactory
 
-
create(Tensor<V>) - Method in class neureka.optimization.implementations.MomentumFactory
+
create(Tensor<V>, Tensor<V>) - Method in class neureka.optimization.implementations.ADAMFactory
 
-
create(Tensor<V>) - Method in class neureka.optimization.implementations.RMSPropFactory
+
create(Tensor<V>) - Method in class neureka.optimization.implementations.MomentumFactory
 
-
create(Tensor<V>) - Method in class neureka.optimization.implementations.SGDFactory
+
create(Tensor<V>) - Method in class neureka.optimization.implementations.RMSPropFactory
 
-
create(Tensor<V>) - Method in interface neureka.optimization.OptimizerFactory
+
create(Tensor<V>) - Method in class neureka.optimization.implementations.SGDFactory
 
-
create(Tensor<V>, Tensor<V>) - Method in class neureka.optimization.implementations.ADAMFactory
+
create(Tensor<V>) - Method in interface neureka.optimization.OptimizerFactory
 
-
createDeconvolutionFor(String) - Static method in class neureka.backend.main.operations.ConvUtil
+
createDeconvolutionFor(String) - Static method in class neureka.backend.main.operations.ConvUtil
 
-
CSVHandle - Class in neureka.devices.file
+
CSVHandle - Class in neureka.devices.file
This class is one of many extensions of the AbstractFileHandle which is therefore ultimately an implementation of the FileHandle interface.
-
CSVHandle(String, Map<String, Object>) - Constructor for class neureka.devices.file.CSVHandle
-
 
-
currentCount() - Method in class neureka.devices.ReferenceCounter.ChangeEvent
+
CSVHandle(String, Map<String, Object>) - Constructor for class neureka.devices.file.CSVHandle
 
-
CUSTOM - Enum constant in enum class neureka.devices.opencl.OpenCLDevice.Type
+
currentCount() - Method in class neureka.devices.ReferenceCounter.ChangeEvent
 
-
D
-
D1C - Class in neureka.ndim.config.types
+
D
+
D1C - Class in neureka.ndim.config.types
An abstract class for NDConfigurations which are representing tensors of rank 1, meaning the name of this class translates to "Dimension-1-Configuration".
-
D1C() - Constructor for class neureka.ndim.config.types.D1C
+
D1C() - Constructor for class neureka.ndim.config.types.D1C
 
-
D2C - Class in neureka.ndim.config.types
+
D2C - Class in neureka.ndim.config.types
An abstract class for NDConfigurations which are representing tensors of rank 2, meaning the name of this class translates to "Dimension-2-Configuration".
-
D2C() - Constructor for class neureka.ndim.config.types.D2C
+
D2C() - Constructor for class neureka.ndim.config.types.D2C
 
-
D3C - Class in neureka.ndim.config.types
+
D3C - Class in neureka.ndim.config.types
An abstract class for NDConfigurations which are representing tensors of rank 3, meaning the name of this class translates to "Dimension-3-Configuration".
-
D3C() - Constructor for class neureka.ndim.config.types.D3C
+
D3C() - Constructor for class neureka.ndim.config.types.D3C
 
-
Data<V> - Interface in neureka
+
Data<V> - Interface in neureka
A wrapper type for the raw data array of a tensor/nd-array, which is typically provided by implementations of the Device interface.
-
dataArrayType() - Method in class neureka.dtype.DataType
+
dataArrayType() - Method in class neureka.dtype.DataType
 
-
DataConverter - Class in neureka.common.utility
+
DataConverter - Class in neureka.common.utility
This class is a singleton.
-
DataConverter.ForTensor - Class in neureka.common.utility
+
DataConverter.ForTensor - Class in neureka.common.utility
This is a stateful and parallelized converter for converting the internal data array of a tensor to another data array based on a provided lambda.
-
DataConverter.Utility - Class in neureka.common.utility
+
DataConverter.Utility - Class in neureka.common.utility
This is a static utility class containing the actual conversion logic which is usually referenced by the Converter lambdas via method signatures...
-
dataType - Variable in enum class neureka.Tensor.ImageType
+
dataType() - Method in interface neureka.Data
 
-
dataType() - Method in interface neureka.Data
+
dataType() - Method in class neureka.devices.AbstractDeviceData
 
-
dataType() - Method in class neureka.devices.AbstractDeviceData
-
 
-
DataType<T> - Class in neureka.dtype
+
DataType<T> - Class in neureka.dtype
This class is a Multiton implementation for wrapping and representing type classes.
-
debug() - Method in class neureka.Neureka.Settings
+
dataType - Variable in enum neureka.Tensor.ImageType
+
 
+
debug() - Method in class neureka.Neureka.Settings
 
-
debug(Object) - Method in class neureka.Neureka.Settings
+
debug(Object) - Method in class neureka.Neureka.Settings
This allows you to configure Neureka using a Groovy DSL.
-
Debug() - Constructor for class neureka.Neureka.Settings.Debug
+
Debug() - Constructor for class neureka.Neureka.Settings.Debug
 
-
decrement() - Method in class neureka.devices.ReferenceCounter
+
decrement() - Method in class neureka.devices.ReferenceCounter
 
-
decrement() - Method in interface neureka.ndim.iterator.NDIterator
+
decrement(int[], int[]) - Static method in class neureka.ndim.config.NDConfiguration.Utility
 
-
decrement() - Method in class neureka.ndim.iterator.types.permuted.Permuted2DCIterator
-
decrement() - Method in class neureka.ndim.iterator.types.permuted.Permuted3DCIterator
-
decrement() - Method in class neureka.ndim.iterator.types.simple.Simple1DCIterator
-
decrement() - Method in class neureka.ndim.iterator.types.simple.Simple2DCIterator
-
decrement() - Method in class neureka.ndim.iterator.types.simple.Simple3DCIterator
-
decrement() - Method in class neureka.ndim.iterator.types.sliced.Sliced1DCIterator
-
decrement() - Method in class neureka.ndim.iterator.types.sliced.Sliced2DCIterator
-
decrement() - Method in class neureka.ndim.iterator.types.sliced.Sliced3DCIterator
-
decrement() - Method in class neureka.ndim.iterator.types.sliced.SlicedNDIterator
-
decrement() - Method in class neureka.ndim.iterator.types.virtual.VirtualNDIterator
+
decrement() - Method in interface neureka.ndim.iterator.NDIterator
 
-
decrement(int[], int[]) - Static method in class neureka.ndim.config.NDConfiguration.Utility
+
decrement() - Method in class neureka.ndim.iterator.types.permuted.Permuted2DCIterator
+
decrement() - Method in class neureka.ndim.iterator.types.permuted.Permuted3DCIterator
+
decrement() - Method in class neureka.ndim.iterator.types.simple.Simple1DCIterator
+
decrement() - Method in class neureka.ndim.iterator.types.simple.Simple2DCIterator
+
decrement() - Method in class neureka.ndim.iterator.types.simple.Simple3DCIterator
+
decrement() - Method in class neureka.ndim.iterator.types.sliced.Sliced1DCIterator
+
decrement() - Method in class neureka.ndim.iterator.types.sliced.Sliced2DCIterator
+
decrement() - Method in class neureka.ndim.iterator.types.sliced.Sliced3DCIterator
+
decrement() - Method in class neureka.ndim.iterator.types.sliced.SlicedNDIterator
+
decrement() - Method in class neureka.ndim.iterator.types.virtual.VirtualNDIterator
 
-
DECREMENT - Enum constant in enum class neureka.devices.ReferenceCounter.ChangeType
+
decrementUsageCount() - Method in class neureka.devices.AbstractDeviceData
 
-
decrementUsageCount() - Method in class neureka.devices.AbstractDeviceData
+
decrementUsageCount() - Method in interface neureka.devices.DeviceData
 
-
decrementUsageCount() - Method in interface neureka.devices.DeviceData
-
 
-
deepClone() - Method in interface neureka.Tensor
+
deepClone() - Method in interface neureka.Tensor
+
This is almost identical to the Tensor.deepCopy() method except that the returned tensor will have autograd support, meaning that the cloning will be part of the autograd computation graph, and backpropagation will traverse the cloned tensor as well.
-
deepCopy() - Method in interface neureka.Nda
+
deepCopy() - Method in interface neureka.Nda
This method creates and returns a new nd-array instance which is not only a copy of the configuration of this nd-array but also a copy of the underlying data array.
-
deepCopy() - Method in interface neureka.Tensor
+
deepCopy() - Method in interface neureka.Tensor
This method creates and returns a new nd-array instance which is not only a copy of the configuration of this nd-array but also a copy of the underlying data array.
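A hedged usage sketch contrasting deepCopy() with the deepClone() entry above; the Tensor.of(...) factory is an assumption and not taken from this index:

    import neureka.Tensor;

    public class CopySketch {
        public static void main(String[] args) {
            var t = Tensor.of(new double[]{1d, 2d, 3d}); // assumed factory
            var copy  = t.deepCopy();  // new configuration and new data, detached from autograd
            var clone = t.deepClone(); // new data as well, but kept inside the autograd graph
            System.out.println(copy + " | " + clone);
        }
    }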
-
DEFAULT - Enum constant in enum class neureka.devices.opencl.OpenCLDevice.Type
-
 
-
delete() - Method in interface neureka.MutateTensor
+
delete() - Method in interface neureka.MutateTensor
Although tensors will be garbage collected when they are not strongly referenced, there is also the option to manually free up the tensor and its associated data in a native environment.
-
dependsOn(int) - Method in interface neureka.math.Function
+
dependsOn(int) - Method in interface neureka.math.Function
Use this to determine if this function directly or indirectly references an input with the provided index.
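A small sketch of dependsOn(int); Function.of(String) is assumed here as the usual entry point for building a function from an expression:

    import neureka.math.Function;

    public class DependsOnSketch {
        public static void main(String[] args) {
            Function f = Function.of("i0 * i1 + 3"); // assumed factory
            System.out.println(f.dependsOn(0)); // true: input 0 is referenced
            System.out.println(f.dependsOn(2)); // false: there is no input with index 2
        }
    }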
-
dependsOn(int) - Method in class neureka.math.implementations.FunctionConstant
+
dependsOn(int) - Method in class neureka.math.implementations.FunctionConstant
 
-
dependsOn(int) - Method in class neureka.math.implementations.FunctionInput
+
dependsOn(int) - Method in class neureka.math.implementations.FunctionInput
 
-
dependsOn(int) - Method in class neureka.math.implementations.FunctionNode
+
dependsOn(int) - Method in class neureka.math.implementations.FunctionNode
 
-
dependsOn(int) - Method in class neureka.math.implementations.FunctionVariable
+
dependsOn(int) - Method in class neureka.math.implementations.FunctionVariable
 
-
derivationCode() - Method in interface neureka.backend.main.implementations.fun.api.ScalarFun
+
derivationCode() - Method in interface neureka.backend.main.implementations.fun.api.ScalarFun
 
-
derivationCode() - Method in class neureka.backend.main.implementations.fun.ScalarAbsolute
+
derivationCode() - Method in class neureka.backend.main.implementations.fun.ScalarAbsolute
 
-
derivationCode() - Method in class neureka.backend.main.implementations.fun.ScalarCbrt
+
derivationCode() - Method in class neureka.backend.main.implementations.fun.ScalarCbrt
 
-
derivationCode() - Method in class neureka.backend.main.implementations.fun.ScalarCosinus
+
derivationCode() - Method in class neureka.backend.main.implementations.fun.ScalarCosinus
 
-
derivationCode() - Method in class neureka.backend.main.implementations.fun.ScalarExp
+
derivationCode() - Method in class neureka.backend.main.implementations.fun.ScalarExp
 
-
derivationCode() - Method in class neureka.backend.main.implementations.fun.ScalarGaSU
+
derivationCode() - Method in class neureka.backend.main.implementations.fun.ScalarGaSU
 
-
derivationCode() - Method in class neureka.backend.main.implementations.fun.ScalarGaTU
+
derivationCode() - Method in class neureka.backend.main.implementations.fun.ScalarGaTU
 
-
derivationCode() - Method in class neureka.backend.main.implementations.fun.ScalarGaussian
+
derivationCode() - Method in class neureka.backend.main.implementations.fun.ScalarGaussian
 
-
derivationCode() - Method in class neureka.backend.main.implementations.fun.ScalarGaussianFast
+
derivationCode() - Method in class neureka.backend.main.implementations.fun.ScalarGaussianFast
 
-
derivationCode() - Method in class neureka.backend.main.implementations.fun.ScalarGeLU
+
derivationCode() - Method in class neureka.backend.main.implementations.fun.ScalarGeLU
 
-
derivationCode() - Method in class neureka.backend.main.implementations.fun.ScalarIdentity
+
derivationCode() - Method in class neureka.backend.main.implementations.fun.ScalarIdentity
 
-
derivationCode() - Method in class neureka.backend.main.implementations.fun.ScalarLog10
+
derivationCode() - Method in class neureka.backend.main.implementations.fun.ScalarLog10
 
-
derivationCode() - Method in class neureka.backend.main.implementations.fun.ScalarLogarithm
+
derivationCode() - Method in class neureka.backend.main.implementations.fun.ScalarLogarithm
 
-
derivationCode() - Method in class neureka.backend.main.implementations.fun.ScalarQuadratic
+
derivationCode() - Method in class neureka.backend.main.implementations.fun.ScalarQuadratic
 
-
derivationCode() - Method in class neureka.backend.main.implementations.fun.ScalarReLU
+
derivationCode() - Method in class neureka.backend.main.implementations.fun.ScalarReLU
 
-
derivationCode() - Method in class neureka.backend.main.implementations.fun.ScalarSeLU
+
derivationCode() - Method in class neureka.backend.main.implementations.fun.ScalarSeLU
 
-
derivationCode() - Method in class neureka.backend.main.implementations.fun.ScalarSigmoid
+
derivationCode() - Method in class neureka.backend.main.implementations.fun.ScalarSigmoid
 
-
derivationCode() - Method in class neureka.backend.main.implementations.fun.ScalarSiLU
+
derivationCode() - Method in class neureka.backend.main.implementations.fun.ScalarSiLU
 
-
derivationCode() - Method in class neureka.backend.main.implementations.fun.ScalarSinus
+
derivationCode() - Method in class neureka.backend.main.implementations.fun.ScalarSinus
 
-
derivationCode() - Method in class neureka.backend.main.implementations.fun.ScalarSoftplus
+
derivationCode() - Method in class neureka.backend.main.implementations.fun.ScalarSoftplus
 
-
derivationCode() - Method in class neureka.backend.main.implementations.fun.ScalarSoftsign
+
derivationCode() - Method in class neureka.backend.main.implementations.fun.ScalarSoftsign
 
-
derivationCode() - Method in class neureka.backend.main.implementations.fun.ScalarSqrt
+
derivationCode() - Method in class neureka.backend.main.implementations.fun.ScalarSqrt
 
-
derivationCode() - Method in class neureka.backend.main.implementations.fun.ScalarTanh
+
derivationCode() - Method in class neureka.backend.main.implementations.fun.ScalarTanh
 
-
derivationCode() - Method in class neureka.backend.main.implementations.fun.ScalarTanhFast
+
derivationCode() - Method in class neureka.backend.main.implementations.fun.ScalarTanhFast
 
-
derive(double[], int) - Method in interface neureka.math.Function
+
derive(int[], Tensor[], Function<Integer, Tensor<?>>) - Static method in class neureka.backend.main.operations.operator.Multiplication
+
 
+
derive(double[], int, int) - Method in interface neureka.math.Function
+
+
Calculates the derivative of a particular input with respect to the output of this Function based on the provided array of inputs, an index targeting the input to be derived and an index for input dependent indexing.
+
+
derive(double[], int) - Method in interface neureka.math.Function
Calculates the derivative of a particular input with respect to the output of this Function based on the provided array of inputs and an index targeting the input to be derived.
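A sketch of the scalar derive(double[], int) overload described above, again assuming Function.of(String) for construction:

    import neureka.math.Function;

    public class DeriveSketch {
        public static void main(String[] args) {
            Function f = Function.of("i0 * i0 + i1"); // assumed factory
            double[] inputs = {3.0, 2.0};
            // d(i0*i0 + i1)/d(i0) evaluated at i0 = 3 should be 6.
            System.out.println(f.derive(inputs, 0));
        }
    }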
-
derive(double[], int) - Method in class neureka.math.implementations.FunctionConstant
+
derive(Tensor<T>[], int, int) - Method in interface neureka.math.Function
 
-
derive(double[], int) - Method in class neureka.math.implementations.FunctionInput
+
derive(Tensor<T>[], int) - Method in interface neureka.math.Function
 
-
derive(double[], int) - Method in class neureka.math.implementations.FunctionNode
+
derive(List<Tensor<T>>, int, int) - Method in interface neureka.math.Function
 
-
derive(double[], int) - Method in class neureka.math.implementations.FunctionVariable
-
 
-
derive(double[], int, int) - Method in interface neureka.math.Function
-
-
Calculates the derivative of a particular input with respect to the output of this Function - based on the provided array of inputs, an index targeting the input to be derived - and an index for input dependent indexing.
-
-
derive(double[], int, int) - Method in class neureka.math.implementations.FunctionConstant
+
derive(List<Tensor<T>>, int) - Method in interface neureka.math.Function
 
-
derive(double[], int, int) - Method in class neureka.math.implementations.FunctionInput
+
derive(double[], int) - Method in class neureka.math.implementations.FunctionConstant
 
-
derive(double[], int, int) - Method in class neureka.math.implementations.FunctionNode
+
derive(double[], int, int) - Method in class neureka.math.implementations.FunctionConstant
 
-
derive(double[], int, int) - Method in class neureka.math.implementations.FunctionVariable
+
derive(double[], int) - Method in class neureka.math.implementations.FunctionInput
 
-
derive(int[], Tensor[], Function<Integer, Tensor<?>>) - Static method in class neureka.backend.main.operations.operator.Multiplication
+
derive(double[], int, int) - Method in class neureka.math.implementations.FunctionInput
 
-
derive(List<Tensor<T>>, int) - Method in interface neureka.math.Function
+
derive(double[], int, int) - Method in class neureka.math.implementations.FunctionNode
 
-
derive(List<Tensor<T>>, int, int) - Method in interface neureka.math.Function
+
derive(double[], int) - Method in class neureka.math.implementations.FunctionNode
 
-
derive(Tensor<T>[], int) - Method in interface neureka.math.Function
+
derive(double[], int) - Method in class neureka.math.implementations.FunctionVariable
 
-
derive(Tensor<T>[], int, int) - Method in interface neureka.math.Function
+
derive(double[], int, int) - Method in class neureka.math.implementations.FunctionVariable
 
-
detach() - Method in interface neureka.MutateTensor
+
detach() - Method in interface neureka.MutateTensor
This method detaches this tensor from its underlying computation-graph or simply does nothing if no graph is present.
Nodes within a computation graph are instances of the "GraphNode" class which are also simple components of the tensors they represent in the graph.
-
detached() - Method in class neureka.fluent.slicing.AxisSliceBuilder
+
detached() - Method in class neureka.fluent.slicing.AxisSliceBuilder
 
-
detached() - Method in class neureka.fluent.slicing.SliceBuilder
+
detached() - Method in class neureka.fluent.slicing.SliceBuilder
 
-
detached() - Method in interface neureka.fluent.slicing.states.AxisOrGet
+
detached() - Method in interface neureka.fluent.slicing.states.AxisOrGet
This method concludes the slicing API by performing the actual slicing and returning the resulting Tensor instance based on the previously specified slice configuration...
-
detached() - Method in interface neureka.fluent.slicing.states.AxisOrGetTensor
+
detached() - Method in interface neureka.fluent.slicing.states.AxisOrGetTensor
This method concludes the slicing API by performing the actual slicing and returning the resulting Tensor instance based on the previously specified slice configuration...
-
detached() - Method in interface neureka.fluent.slicing.states.StepsOrAxisOrGetTensor
+
detached() - Method in interface neureka.fluent.slicing.states.StepsOrAxisOrGetTensor
This method concludes the slicing API by performing the actual slicing and returning the resulting Tensor instance based on the previously specified slice configuration...
-
detached() - Method in interface neureka.Tensor
+
detached() - Method in interface neureka.Tensor
This method returns a new tensor detached from any underlying computation-graph or simply does nothing if no graph is present.
Nodes within a computation graph are instances of the "GraphNode" class which are also simple components of the tensors they represent in the graph.
-
device() - Method in class neureka.backend.api.BackendExtension.DeviceOption
+
device() - Method in class neureka.backend.api.BackendExtension.DeviceOption
 
-
Device<V> - Interface in neureka.devices
+
Device<V> - Interface in neureka.devices
Implementations of this represent computational devices for storing tensors (instances of the Tensor<V> class), which may also expose a useful API for executing operations on tensors (used in backend operations).
-
Device.Access<V> - Interface in neureka.devices
+
Device.Access<V> - Interface in neureka.devices
Implementations of this represent the access to tensors stored on a device in order to read from or write to said tensor.
-
Device.In - Interface in neureka.devices
+
Device.In - Interface in neureka.devices
The second part of the method chain of the fluent API for executing tensors on this Device temporarily.
-
Device.Writer - Interface in neureka.devices
+
Device.Writer - Interface in neureka.devices
Instances of this complete a request for writing to an accessed tensor stored on a device.
-
DeviceAlgorithm<C extends DeviceAlgorithm<C>> - Interface in neureka.backend.api
+
DeviceAlgorithm<C extends DeviceAlgorithm<C>> - Interface in neureka.backend.api
A DeviceAlgorithm is an advanced form of Algorithm which delegates the execution to implementations of ImplementationFor specific Device types.
-
DeviceCleaner - Interface in neureka.devices
+
DeviceCleaner - Interface in neureka.devices
 
-
DeviceData<V> - Interface in neureka.devices
+
DeviceData<V> - Interface in neureka.devices
A sub-interface of the Data interface providing more device specific methods.
-
DeviceOption(Device<?>, double) - Constructor for class neureka.backend.api.BackendExtension.DeviceOption
+
DeviceOption(Device<?>, double) - Constructor for class neureka.backend.api.BackendExtension.DeviceOption
 
-
DeviceQuery - Class in neureka.devices.opencl.utility
+
DeviceQuery - Class in neureka.devices.opencl.utility
A program that queries and prints information about all available devices.
-
DimFit - Class in neureka.backend.main.operations.other
+
DimFit - Class in neureka.backend.main.operations.other
 
-
DimFit() - Constructor for class neureka.backend.main.operations.other.DimFit
+
DimFit() - Constructor for class neureka.backend.main.operations.other.DimFit
 
-
dimtrim() - Method in interface neureka.Tensor
-
-
This creates a new tensor with the same underlying Data and whose shape is trimmed.
-
-
dimTrim() - Method in class neureka.math.Functions
+
DimTrim - Class in neureka.backend.main.operations.other
 
-
DimTrim - Class in neureka.backend.main.operations.other
+
DimTrim() - Constructor for class neureka.backend.main.operations.other.DimTrim
 
-
DimTrim() - Constructor for class neureka.backend.main.operations.other.DimTrim
+
dimTrim() - Method in class neureka.math.Functions
 
-
dispatch(Function, ExecutionCall<? extends Device<?>>) - Method in class neureka.backend.api.template.algorithms.FallbackAlgorithm
+
dimtrim() - Method in interface neureka.Tensor
+
+
This creates a new tensor with the same underlying Data and whose shape is trimmed.
+
+
dispatch(Function, ExecutionCall<? extends Device<?>>) - Method in class neureka.backend.api.template.algorithms.FallbackAlgorithm
 
-
dispose() - Method in interface neureka.backend.api.BackendExtension
+
dispose() - Method in interface neureka.backend.api.BackendExtension
Tells this extension to dispose itself.
-
dispose() - Method in class neureka.backend.api.template.operations.OperationBuilder
+
dispose() - Method in class neureka.backend.api.template.operations.OperationBuilder
 
-
dispose() - Method in class neureka.backend.cpu.CPUBackend
+
dispose() - Method in class neureka.backend.cpu.CPUBackend
 
-
dispose() - Method in class neureka.backend.ocl.CLBackend
+
dispose() - Method in class neureka.backend.ocl.CLBackend
This method will free all the resources occupied by this context, meaning that all platforms and their devices will be disposed.
-
dispose() - Method in interface neureka.devices.Device
+
dispose() - Method in interface neureka.devices.Device
This method signals the device to get ready for garbage collection.
-
dispose() - Method in class neureka.devices.file.FileDevice
+
dispose() - Method in class neureka.devices.file.FileDevice
 
-
dispose() - Method in class neureka.devices.host.CPU
+
dispose() - Method in class neureka.devices.host.CPU
This method will shut down the internal thread-pool used by this class to execute JVM/CPU based operations in parallel.
-
dispose() - Method in class neureka.devices.opencl.OpenCLDevice
+
dispose() - Method in class neureka.devices.opencl.OpenCLDevice
This method tells the device to restore all tensors stored on it and release all resources.
-
dispose() - Method in class neureka.devices.opencl.OpenCLPlatform
+
dispose() - Method in class neureka.devices.opencl.OpenCLPlatform
 
-
div() - Method in class neureka.math.Functions
+
div() - Method in class neureka.math.Functions
 
-
div(Tensor<V>) - Method in interface neureka.Tensor
+
div(Tensor<V>) - Method in interface neureka.Tensor
This method will produce the quotient of two tensors with the same rank (or two ranks which can be made compatible with padding ones), where the left operand is this Tensor instance and the right operand is the tensor passed to the method.
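An elementwise division sketch using the div(Tensor<V>) entry above; the Tensor.of(...) factory is assumed:

    import neureka.Tensor;

    public class DivSketch {
        public static void main(String[] args) {
            var a = Tensor.of(new double[]{8d, 6d, 4d}); // assumed factory
            var b = Tensor.of(new double[]{2d, 3d, 4d});
            System.out.println(a.div(b)); // elementwise quotient: 4, 2, 1
        }
    }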
-
div(V) - Method in interface neureka.Tensor
+
div(V) - Method in interface neureka.Tensor
 
-
divAssign() - Method in class neureka.math.Functions
+
divAssign() - Method in class neureka.math.Functions
 
-
divAssign(Tensor<T>) - Method in interface neureka.MutateTensor
+
divAssign(Tensor<T>) - Method in interface neureka.MutateTensor
 
-
divide(int, int, CPU.RangeWorkload) - Method in class neureka.devices.host.concurrent.WorkScheduler.Divider
+
divide(int, CPU.RangeWorkload) - Method in class neureka.devices.host.concurrent.WorkScheduler.Divider
 
-
divide(int, CPU.RangeWorkload) - Method in class neureka.devices.host.concurrent.WorkScheduler.Divider
+
divide(int, int, CPU.RangeWorkload) - Method in class neureka.devices.host.concurrent.WorkScheduler.Divider
 
-
Divider(ExecutorService) - Constructor for class neureka.devices.host.concurrent.WorkScheduler.Divider
+
Divider(ExecutorService) - Constructor for class neureka.devices.host.concurrent.WorkScheduler.Divider
 
-
Division - Class in neureka.backend.main.operations.operator
+
Division - Class in neureka.backend.main.operations.operator
 
-
Division() - Constructor for class neureka.backend.main.operations.operator.Division
+
Division() - Constructor for class neureka.backend.main.operations.operator.Division
 
-
doesNotExist() - Method in interface neureka.Nda.Item
+
doesNotExist() - Method in interface neureka.Nda.Item
 
-
dot() - Method in class neureka.math.Functions
-
 
-
dot(Tensor<V>) - Method in interface neureka.Tensor
-
-
Performs a dot product between the last dimension of this tensor and the first dimension of the provided tensor.
-
-
DOT - Class in neureka.backend.main.operations.linear.internal.blas
+
DOT - Class in neureka.backend.main.operations.linear.internal.blas
The ?dot routines perform a vector-vector reduction operation defined as result = sum( x[i] * y[i] ), where x[i] and y[i] are elements of vectors x and y.
-
DOT() - Constructor for class neureka.backend.main.operations.linear.internal.blas.DOT
+
DOT() - Constructor for class neureka.backend.main.operations.linear.internal.blas.DOT
+
 
+
dot() - Method in class neureka.math.Functions
 
-
DotProduct - Class in neureka.backend.main.operations.linear
+
dot(Tensor<V>) - Method in interface neureka.Tensor
+
+
Performs a dot product between the last dimension of this tensor and the first dimension of the provided tensor.
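A sketch of dot(Tensor<V>) on two vectors; tensor creation via Tensor.of(...) is assumed as before:

    import neureka.Tensor;

    public class DotSketch {
        public static void main(String[] args) {
            var a = Tensor.of(new double[]{1d, 2d, 3d}); // assumed factory
            var b = Tensor.of(new double[]{4d, 5d, 6d});
            System.out.println(a.dot(b)); // 1*4 + 2*5 + 3*6 = 32
        }
    }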
+
+
DotProduct - Class in neureka.backend.main.operations.linear
 
-
DotProduct() - Constructor for class neureka.backend.main.operations.linear.DotProduct
+
DotProduct() - Constructor for class neureka.backend.main.operations.linear.DotProduct
 
-
DotProductAlgorithm - Class in neureka.backend.main.algorithms
+
DotProductAlgorithm - Class in neureka.backend.main.algorithms
 
-
DotProductAlgorithm() - Constructor for class neureka.backend.main.algorithms.DotProductAlgorithm
+
DotProductAlgorithm() - Constructor for class neureka.backend.main.algorithms.DotProductAlgorithm
 
-
doubleToBigInteger(double[]) - Static method in class neureka.common.utility.DataConverter.Utility
+
doubleToBigInteger(double[]) - Static method in class neureka.common.utility.DataConverter.Utility
 
-
doubleToBool(double[]) - Static method in class neureka.common.utility.DataConverter.Utility
+
doubleToBool(double[]) - Static method in class neureka.common.utility.DataConverter.Utility
 
-
doubleToByte(double[]) - Static method in class neureka.common.utility.DataConverter.Utility
+
doubleToByte(double[]) - Static method in class neureka.common.utility.DataConverter.Utility
 
-
doubleToFloat(double[]) - Static method in class neureka.common.utility.DataConverter.Utility
+
doubleToFloat(double[]) - Static method in class neureka.common.utility.DataConverter.Utility
 
-
doubleToInt(double[]) - Static method in class neureka.common.utility.DataConverter.Utility
+
doubleToInt(double[]) - Static method in class neureka.common.utility.DataConverter.Utility
 
-
doubleToLong(double[]) - Static method in class neureka.common.utility.DataConverter.Utility
+
doubleToLong(double[]) - Static method in class neureka.common.utility.DataConverter.Utility
 
-
doubleToShort(double[]) - Static method in class neureka.common.utility.DataConverter.Utility
+
doubleToShort(double[]) - Static method in class neureka.common.utility.DataConverter.Utility
 
-
dtype() - Method in class neureka.Neureka.Settings
+
dtype() - Method in class neureka.Neureka.Settings
 
-
dtype(Object) - Method in class neureka.Neureka.Settings
+
dtype(Object) - Method in class neureka.Neureka.Settings
This allows you to configure Neureka using a Groovy DSL.
-
DType() - Constructor for class neureka.Neureka.Settings.DType
+
DType() - Constructor for class neureka.Neureka.Settings.DType
 
-

E

-
-
EIGHT - Enum constant in enum class neureka.devices.host.concurrent.Parallelism
-
-
8
-
-
elements() - Method in interface neureka.Shape

E

+
+
elements() - Method in interface neureka.Shape
 
-
ElementwiseAlgorithm - Class in neureka.backend.main.algorithms
+
ElementwiseAlgorithm - Class in neureka.backend.main.algorithms
This is a lambda based Algorithm implementation providing some basic functionality for implementing custom activation functions.
-
ElementwiseAlgorithm() - Constructor for class neureka.backend.main.algorithms.ElementwiseAlgorithm
+
ElementwiseAlgorithm() - Constructor for class neureka.backend.main.algorithms.ElementwiseAlgorithm
 
-
ElemWiseUtil - Class in neureka.backend.main.operations
+
ElemWiseUtil - Class in neureka.backend.main.operations
Methods inside this utility class execute only some ExecutionCall arguments in groups if their total number exceeds the arity of an operation.
-
ElemWiseUtil() - Constructor for class neureka.backend.main.operations.ElemWiseUtil
+
ElemWiseUtil() - Constructor for class neureka.backend.main.operations.ElemWiseUtil
 
-
endsFrom(int[]) - Static method in class neureka.backend.main.operations.other.DimTrim
+
endsFrom(int[]) - Static method in class neureka.backend.main.operations.other.DimTrim
 
-
ENVIRONMENT - Static variable in class neureka.devices.host.machine.ConcreteMachine
+
ENVIRONMENT - Static variable in class neureka.devices.host.machine.ConcreteMachine
 
-
equals(Object) - Method in class neureka.common.utility.Cache.LazyEntry
+
equals(Object) - Method in class neureka.common.utility.Cache.LazyEntry
 
-
equals(Object) - Method in class neureka.devices.host.machine.BasicMachine
+
equals(Object) - Method in class neureka.devices.host.machine.BasicMachine
 
-
equals(Object) - Method in class neureka.devices.host.machine.CommonMachine
+
equals(Object) - Method in class neureka.devices.host.machine.CommonMachine
 
-
equals(Object) - Method in class neureka.devices.host.machine.ConcreteMachine
+
equals(Object) - Method in class neureka.devices.host.machine.ConcreteMachine
 
-
equals(Object) - Method in class neureka.devices.host.machine.Hardware
+
equals(Object) - Method in class neureka.devices.host.machine.Hardware
 
-
equals(Object) - Method in class neureka.devices.opencl.KernelCode
+
equals(Object) - Method in class neureka.devices.opencl.KernelCode
 
-
equals(Object) - Method in class neureka.dtype.DataType
+
equals(Object) - Method in class neureka.dtype.DataType
 
-
equals(Object) - Method in class neureka.ndim.config.AbstractNDC
+
equals(Object) - Method in class neureka.ndim.config.AbstractNDC
 
-
equals(NDConfiguration) - Method in class neureka.ndim.config.AbstractNDC
+
equals(NDConfiguration) - Method in class neureka.ndim.config.AbstractNDC
 
-
equals(NDConfiguration) - Method in interface neureka.ndim.config.NDConfiguration
+
equals(NDConfiguration) - Method in interface neureka.ndim.config.NDConfiguration
 
-
error() - Method in class neureka.autograd.ADTarget
+
error() - Method in class neureka.autograd.ADTarget
 
-
errorCorrectionSupport() - Method in class neureka.devices.opencl.OpenCLDevice
+
errorCorrectionSupport() - Method in class neureka.devices.opencl.OpenCLDevice
 
-
Estimator(boolean) - Constructor for class neureka.backend.api.Call.Validator.Estimator
+
Estimator(boolean) - Constructor for class neureka.backend.api.Call.Validator.Estimator
 
-
every(Predicate<Integer>) - Method in interface neureka.Shape
-
 
-
every(Predicate<V>) - Method in interface neureka.Nda
+
every(Predicate<V>) - Method in interface neureka.Nda
Iterates over every element of this nd-array, and checks whether all elements are true according to the provided lambda.
-
EXCELLENT - Static variable in interface neureka.backend.api.fun.SuitabilityPredicate
+
every(Predicate<Integer>) - Method in interface neureka.Shape
+
 
+
EXCELLENT - Static variable in interface neureka.backend.api.fun.SuitabilityPredicate
 
-
execute() - Method in class neureka.autograd.JITProp
+
execute() - Method in class neureka.autograd.JITProp
This method triggers the continuation of the back-propagation which has been put on hold by saving the pending graph nodes inside this class.
-
execute(boolean, double[], double[], double[], int, int, int) - Static method in class neureka.backend.main.implementations.matmul.CPUMatMul
+
execute(Function, ExecutionCall<? extends Device<?>>) - Method in interface neureka.backend.api.fun.Execution
 
-
execute(boolean, float[], float[], float[], int, int, int) - Static method in class neureka.backend.main.implementations.matmul.CPUMatMul
+
execute(Function, ExecutionCall<?>) - Method in interface neureka.backend.api.Operation
 
-
execute(boolean, int[], int[], int[], int, int, int) - Static method in class neureka.backend.main.implementations.matmul.CPUMatMul
-
 
-
execute(boolean, long[], long[], long[], int, int, int) - Static method in class neureka.backend.main.implementations.matmul.CPUMatMul
-
 
-
execute(int) - Method in interface neureka.devices.host.CPU.IndexedWorkload
-
 
-
execute(int, int) - Method in interface neureka.devices.host.CPU.RangeWorkload
-
 
-
execute(Call<?>) - Method in interface neureka.math.Function
-
-
Warning: Tensors returned by this method are eligible for deletion when consumed by other functions. Use this to pass more context information for execution of input tensors.
-
-
execute(ExecutionCall<? extends Device<?>>) - Method in interface neureka.backend.main.internal.FinalExecutor
+
execute(Function, ExecutionCall<? extends Device<?>>) - Method in class neureka.backend.api.template.algorithms.AbstractFunAlgorithm
 
-
execute(Args, Tensor<?>...) - Method in interface neureka.math.Function
-
-
Warning: Tensors returned by this method are eligible for deletion when consumed by other functions. Use this to call this Function along with some additional meta-arguments which will be passed to the underlying Operation(s).
-
-
execute(Args, Tensor<?>...) - Method in class neureka.math.implementations.FunctionConstant
+
execute(Function, ExecutionCall<? extends Device<?>>) - Method in class neureka.backend.api.template.algorithms.AbstractFunDeviceAlgorithm
 
-
execute(Args, Tensor<?>...) - Method in class neureka.math.implementations.FunctionInput
+
execute(Function, ExecutionCall<? extends Device<?>>) - Method in class neureka.backend.api.template.algorithms.FallbackAlgorithm
 
-
execute(Args, Tensor<?>...) - Method in class neureka.math.implementations.FunctionNode
+
execute(boolean, double[], double[], double[], int, int, int) - Static method in class neureka.backend.main.implementations.matmul.CPUMatMul
 
-
execute(Args, Tensor<?>...) - Method in class neureka.math.implementations.FunctionVariable
+
execute(boolean, float[], float[], float[], int, int, int) - Static method in class neureka.backend.main.implementations.matmul.CPUMatMul
 
-
execute(Function, ExecutionCall<?>) - Method in interface neureka.backend.api.Operation
+
execute(boolean, long[], long[], long[], int, int, int) - Static method in class neureka.backend.main.implementations.matmul.CPUMatMul
 
-
execute(Function, ExecutionCall<?>) - Method in class neureka.backend.main.operations.functions.SiLU
+
execute(boolean, int[], int[], int[], int, int, int) - Static method in class neureka.backend.main.implementations.matmul.CPUMatMul
 
-
execute(Function, ExecutionCall<?>) - Method in class neureka.backend.main.operations.indexer.Product
+
execute(ExecutionCall<? extends Device<?>>) - Method in interface neureka.backend.main.internal.FinalExecutor
 
-
execute(Function, ExecutionCall<?>) - Method in class neureka.backend.main.operations.indexer.Summation
+
execute(Function, ExecutionCall<?>) - Method in class neureka.backend.main.operations.indexer.Product
 
-
execute(Function, ExecutionCall<?>) - Method in class neureka.backend.main.operations.linear.Convolution
+
execute(Function, ExecutionCall<?>) - Method in class neureka.backend.main.operations.indexer.Summation
 
-
execute(Function, ExecutionCall<?>) - Method in class neureka.backend.main.operations.operator.Addition
+
execute(Function, ExecutionCall<?>) - Method in class neureka.backend.main.operations.linear.Convolution
 
-
execute(Function, ExecutionCall<?>) - Method in class neureka.backend.main.operations.operator.Division
+
execute(Function, ExecutionCall<?>) - Method in class neureka.backend.main.operations.linear.MatMul
 
-
execute(Function, ExecutionCall<?>) - Method in class neureka.backend.main.operations.operator.Modulo
+
execute(Function, ExecutionCall<?>) - Method in class neureka.backend.main.operations.operator.Addition
 
-
execute(Function, ExecutionCall<?>) - Method in class neureka.backend.main.operations.operator.Multiplication
+
execute(Function, ExecutionCall<?>) - Method in class neureka.backend.main.operations.operator.Division
 
-
execute(Function, ExecutionCall<?>) - Method in class neureka.backend.main.operations.operator.Power
+
execute(Function, ExecutionCall<?>) - Method in class neureka.backend.main.operations.operator.Modulo
 
-
execute(Function, ExecutionCall<?>) - Method in class neureka.backend.main.operations.operator.Subtraction
+
execute(Function, ExecutionCall<?>) - Method in class neureka.backend.main.operations.operator.Multiplication
 
-
execute(Function, ExecutionCall<?>) - Method in class neureka.backend.main.operations.other.AssignLeft
+
execute(Function, ExecutionCall<?>) - Method in class neureka.backend.main.operations.operator.Power
 
-
execute(Function, ExecutionCall<?>) - Method in class neureka.backend.main.operations.other.Cat
+
execute(Function, ExecutionCall<?>) - Method in class neureka.backend.main.operations.operator.Subtraction
 
-
execute(Function, ExecutionCall<? extends Device<?>>) - Method in interface neureka.backend.api.fun.Execution
+
execute(Function, ExecutionCall<?>) - Method in class neureka.backend.main.operations.other.AssignLeft
 
-
execute(Function, ExecutionCall<? extends Device<?>>) - Method in class neureka.backend.api.template.algorithms.AbstractFunAlgorithm
+
execute(Function, ExecutionCall<?>) - Method in class neureka.backend.main.operations.other.Cat
 
-
execute(Function, ExecutionCall<? extends Device<?>>) - Method in class neureka.backend.api.template.algorithms.AbstractFunDeviceAlgorithm
+
execute(int) - Method in interface neureka.devices.host.CPU.IndexedWorkload
 
-
execute(Function, ExecutionCall<? extends Device<?>>) - Method in class neureka.backend.api.template.algorithms.FallbackAlgorithm
+
execute(int, int) - Method in interface neureka.devices.host.CPU.RangeWorkload
 
-
execute(Tensor<?>...) - Method in interface neureka.math.Function.Callable
+
execute(Tensor<?>...) - Method in interface neureka.math.Function.Callable
Warning: Tensors returned by this method are eligible for deletion when consumed by other functions.
-
execute(Tensor<?>...) - Method in interface neureka.math.Function
+
execute(Call<?>) - Method in interface neureka.math.Function
+
+
Warning: Tensors returned by this method are eligible for deletion when consumed by other functions. Use this to pass more context information for execution of input tensors.
+
+
execute(Args, Tensor<?>...) - Method in interface neureka.math.Function
+
+
Warning: Tensors returned by this method are eligible for deletion when consumed by other functions. Use this to call this Function along with some additional meta-arguments which will be passed to the underlying Operation(s).
+
+
execute(Tensor<?>...) - Method in interface neureka.math.Function
Warning: Tensors returned by this method are eligible for deletion when consumed by other functions.
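A sketch of execute(Tensor<?>...); Function.of(String) and Tensor.of(...) are assumed factories, and, as the warning above states, the returned tensor should not be fed to further functions if it must be kept:

    import neureka.Tensor;
    import neureka.math.Function;

    public class ExecuteSketch {
        public static void main(String[] args) {
            Function sum = Function.of("i0 + i1");   // assumed factory
            var a = Tensor.of(new double[]{1d, 2d}); // assumed factory
            var b = Tensor.of(new double[]{3d, 4d});
            var result = sum.execute(a, b);          // execute(Tensor<?>...) as listed above
            System.out.println(result);
        }
    }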
-
execute(Tensor<?>[], int) - Method in interface neureka.math.Function
+
execute(Tensor<?>[], int) - Method in interface neureka.math.Function
Warning: Tensors returned by this method are eligible for deletion when consumed by other functions.
-
executeChange() - Method in interface neureka.common.composition.Component.OwnerChangeRequest
+
execute(Args, Tensor<?>...) - Method in class neureka.math.implementations.FunctionConstant
+
 
+
execute(Args, Tensor<?>...) - Method in class neureka.math.implementations.FunctionInput
+
 
+
execute(Args, Tensor<?>...) - Method in class neureka.math.implementations.FunctionNode
+
 
+
execute(Args, Tensor<?>...) - Method in class neureka.math.implementations.FunctionVariable
+
 
+
executeChange() - Method in interface neureka.common.composition.Component.OwnerChangeRequest
-
This method will trigger the actual state change identified by the Component.IsBeing constant returned by the Component.OwnerChangeRequest.type() method.
+
This method will trigger the actual state change identified by the Component.IsBeing constant returned by the Component.OwnerChangeRequest.type() method.
-
executeDerive(Tensor<?>[], int) - Method in interface neureka.math.Function
+
executeDerive(Tensor<?>[], int, int) - Method in interface neureka.math.Function
Warning: Tensors returned by this method are eligible for deletion when consumed by other functions.
-
executeDerive(Tensor<?>[], int, int) - Method in interface neureka.math.Function
+
executeDerive(Tensor<?>[], int) - Method in interface neureka.math.Function
Warning: Tensors returned by this method are eligible for deletion when consumed by other functions.
-
executeDeviceAlgorithm(ExecutionCall<? extends Device<?>>) - Static method in class neureka.backend.api.template.algorithms.AbstractDeviceAlgorithm
+
executeDeviceAlgorithm(ExecutionCall<? extends Device<?>>) - Static method in class neureka.backend.api.template.algorithms.AbstractDeviceAlgorithm
 
-
executeFor(Function, ExecutionCall<? extends Device<?>>, FinalExecutor) - Static method in class neureka.backend.api.template.algorithms.AbstractDeviceAlgorithm
+
executeFor(Function, ExecutionCall<? extends Device<?>>, FinalExecutor) - Static method in class neureka.backend.api.template.algorithms.AbstractDeviceAlgorithm
 
-
executeOnCommonDevice(ExecutionCall<?>, Supplier<R>) - Static method in class neureka.backend.api.template.algorithms.AbstractDeviceAlgorithm
+
executeOnCommonDevice(ExecutionCall<?>, Supplier<R>) - Static method in class neureka.backend.api.template.algorithms.AbstractDeviceAlgorithm
 
-
executeRecursively(String, ExecutionCall<? extends Device<?>>) - Static method in class neureka.backend.main.operations.ConvUtil
+
executeRecursively(String, ExecutionCall<? extends Device<?>>) - Static method in class neureka.backend.main.operations.ConvUtil
 
-
Execution - Interface in neureka.backend.api.fun
+
Execution - Interface in neureka.backend.api.fun
Implementations of this functional interface is supposed to be the final execution procedure responsible for dispatching the execution further into the backend.
-
ExecutionCall<D extends Device<?>> - Class in neureka.backend.api
+
ExecutionCall<D extends Device<?>> - Class in neureka.backend.api
This class is a simple container holding references to a targeted Device, Operation and maybe some case specific meta Args needed to execute an array of input tensors which are also wrapped by this.
-
ExecutionCall.Builder<D extends Device<?>> - Class in neureka.backend.api
+
ExecutionCall.Builder<D extends Device<?>> - Class in neureka.backend.api
 
-
ExecutionPreparation - Interface in neureka.backend.api.fun
+
ExecutionPreparation - Interface in neureka.backend.api.fun
An Algorithm will typically produce a result when executing an ExecutionCall.
-
exists() - Method in interface neureka.Nda.Item
+
exists() - Method in interface neureka.Nda.Item
 
-
exp() - Method in class neureka.math.Functions
-
 
-
exp() - Method in interface neureka.Tensor
-
-
This method is functionally identical to the following alternatives:
-
-
Exp - Class in neureka.backend.main.operations.functions
+
EXP - Static variable in interface neureka.backend.main.implementations.fun.api.ScalarFun
 
-
Exp() - Constructor for class neureka.backend.main.operations.functions.Exp
+
Exp - Class in neureka.backend.main.operations.functions
 
-
EXP - Static variable in interface neureka.backend.main.implementations.fun.api.ScalarFun
+
Exp() - Constructor for class neureka.backend.main.operations.functions.Exp
 
-
extension() - Method in class neureka.devices.file.IDXHandle
+
exp() - Method in class neureka.math.Functions
 
-
extension() - Method in interface neureka.devices.file.FileHandle
+
exp() - Method in interface neureka.Tensor
+
+
This method is functionally identical to the following alternatives:
+
+
extension() - Method in interface neureka.devices.file.FileHandle
The file ending which comes after the '.' character...
-
Extensions - Class in neureka.backend.api
+
Extensions - Class in neureka.backend.api
This is an internal class for managing the extension of any given BackendContext class.
-
Extensions() - Constructor for class neureka.backend.api.Extensions
+
Extensions() - Constructor for class neureka.backend.api.Extensions
 
-

F

-
-
F32 - Class in neureka.dtype.custom
-
 
-
F32() - Constructor for class neureka.dtype.custom.F32

F

+
+
F32 - Class in neureka.dtype.custom
 
-
F64 - Class in neureka.dtype.custom
+
F32() - Constructor for class neureka.dtype.custom.F32
 
-
F64() - Constructor for class neureka.dtype.custom.F64
+
F64 - Class in neureka.dtype.custom
 
-
FACTORY - Static variable in interface neureka.devices.file.FileHandle
+
F64() - Constructor for class neureka.dtype.custom.F64
 
-
FallbackAlgorithm - Class in neureka.backend.api.template.algorithms
+
FACTORY - Static variable in interface neureka.devices.file.FileHandle
 
-
FallbackAlgorithm(String, int, Operation) - Constructor for class neureka.backend.api.template.algorithms.FallbackAlgorithm
+
FallbackAlgorithm - Class in neureka.backend.api.template.algorithms
 
-
FALSE - Enum constant in enum class neureka.ndim.iterator.NDIterator.NonVirtual
+
FallbackAlgorithm(String, int, Operation) - Constructor for class neureka.backend.api.template.algorithms.FallbackAlgorithm
 
-
FANCY - Enum constant in enum class neureka.autograd.GraphNode.Print
+
fastGaus() - Method in class neureka.math.Functions
 
-
fastGaus() - Method in class neureka.math.Functions
+
fastTanh() - Method in class neureka.math.Functions
 
-
fastTanh() - Method in class neureka.math.Functions
-
 
-
FEDORA - Enum constant in enum class neureka.devices.opencl.utility.Messages.Tips
-
 
-
FileDevice - Class in neureka.devices.file
+
FileDevice - Class in neureka.devices.file
The FileDevice is a Device implementation responsible for reading tensors from and/or writing them to a given directory.
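A hedged sketch of persisting a tensor with a FileDevice; FileDevice.at(String), Tensor.of(...) and store(...) are assumptions here, while fileHandleOf(Tensor<?>) is an entry listed in this index:

    import neureka.Tensor;
    import neureka.devices.file.FileDevice;

    public class FileDeviceSketch {
        public static void main(String[] args) {
            var device = FileDevice.at("data/tensors");  // assumed factory
            var t = Tensor.of(new double[]{1d, 2d, 3d}); // assumed factory
            device.store(t);                             // assumed Device method
            System.out.println(device.fileHandleOf(t));  // fileHandleOf(Tensor<?>) as listed below
        }
    }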
-
FileHandle<FinalType,ValType> - Interface in neureka.devices.file
+
FileHandle<FinalType,ValType> - Interface in neureka.devices.file
 
-
fileHandleOf(Tensor<?>) - Method in class neureka.devices.file.FileDevice
+
fileHandleOf(Tensor<?>) - Method in class neureka.devices.file.FileDevice
 
-
Filler<T> - Interface in neureka.ndim
+
Filler<T> - Interface in neureka.ndim
Implementations of this ought to map the index of a tensor entry to a value which should be placed at that entry position.
-
fillRandomly(T, long) - Static method in class neureka.backend.main.implementations.elementwise.CPURandomization
+
fillRandomly(T, Arg.Seed) - Static method in class neureka.backend.main.implementations.elementwise.CPURandomization
 
-
fillRandomly(T, String) - Static method in class neureka.backend.main.implementations.elementwise.CPURandomization
+
fillRandomly(T, String) - Static method in class neureka.backend.main.implementations.elementwise.CPURandomization
 
-
fillRandomly(T, Arg.Seed) - Static method in class neureka.backend.main.implementations.elementwise.CPURandomization
+
fillRandomly(T, long) - Static method in class neureka.backend.main.implementations.elementwise.CPURandomization
 
-
filter(Predicate<Integer>) - Method in interface neureka.Shape
-
 
-
filter(Predicate<V>) - Method in interface neureka.Nda
+
filter(Predicate<V>) - Method in interface neureka.Nda
A convenience method for stream().filter( predicate ).
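Since filter(Predicate<V>) behaves like stream().filter(predicate), a short sketch (tensor creation assumed):

    import neureka.Tensor;

    public class FilterSketch {
        public static void main(String[] args) {
            var t = Tensor.of(new double[]{1d, 5d, 2d, 8d}); // assumed factory
            long bigOnes = t.filter(v -> v > 2d).count();    // delegates to stream().filter(...)
            System.out.println(bigOnes); // 2
        }
    }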
-
FinalExecutor - Interface in neureka.backend.main.internal
+
filter(Predicate<Integer>) - Method in interface neureka.Shape
+
 
+
FinalExecutor - Interface in neureka.backend.main.internal
 
-
find(Class<D>, String...) - Static method in interface neureka.devices.Device
+
find(Class<E>) - Method in class neureka.backend.api.BackendContext
-
This method returns Device instances matching the given search parameters.
+
Returns an Optional instance of the provided BackendExtension type or an empty Optional if no extension of that type was found.
-
find(Class<E>) - Method in class neureka.backend.api.BackendContext
+
find(String) - Method in interface neureka.backend.api.BackendExtension
-
Returns an Optional instance of the provided BackendExtension type or an empty Optional if no extension of that type was found.
+
The BackendContext does not handle Device instances directly.
-
find(Class<T>) - Method in class neureka.common.composition.AbstractComponentOwner
+
find(String) - Method in class neureka.backend.cpu.CPUBackend
+
 
+
find(String) - Method in class neureka.backend.ocl.CLBackend
+
 
+
find(Class<T>) - Method in class neureka.common.composition.AbstractComponentOwner
This method finds a component of the given type/class and returns it as an Optional which may or may not be empty.
-
find(Class<T>) - Method in interface neureka.common.composition.ComponentOwner
+
find(Class<T>) - Method in interface neureka.common.composition.ComponentOwner
This method finds a component of the given type/class and returns it as an Optional which may or may not be empty.
-
find(String) - Method in interface neureka.backend.api.BackendExtension
+
find(String...) - Static method in interface neureka.devices.Device
-
The BackendContext does not handle Device instances directly.
+
This method returns Device instances matching the given search parameter.
-
find(String) - Method in class neureka.backend.cpu.CPUBackend
-
 
-
find(String) - Method in class neureka.backend.ocl.CLBackend
-
 
-
find(String...) - Static method in interface neureka.devices.Device
+
find(Class<D>, String...) - Static method in interface neureka.devices.Device
This method returns Device instances matching the given search parameters.
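A lookup sketch for the static Device.find(...) variants described above; the search strings are illustrative, and the exact return type (plain reference vs. Optional) is deliberately not asserted here:

    import neureka.devices.Device;

    public class DeviceLookupSketch {
        public static void main(String[] args) {
            // find(String...) and find(Class<D>, String...) are the static lookups listed above.
            System.out.println(Device.find("cpu"));
            System.out.println(Device.find("opencl", "gpu"));
        }
    }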
-
findAdHocKernel(String) - Method in class neureka.devices.opencl.OpenCLDevice
+
findAdHocKernel(String) - Method in class neureka.devices.opencl.OpenCLDevice
 
-
findCaptured() - Method in interface neureka.autograd.ADAction
+
findCaptured() - Method in interface neureka.autograd.ADAction
Finds captured Tensor instances in this current action using reflection (This is usually a partial derivative).
-
findComponentIn(String, int) - Static method in class neureka.math.parsing.ParseUtil
+
findComponentIn(String, int) - Static method in class neureka.math.parsing.ParseUtil
 
-
findOrCompileAdHocKernel(String, Supplier<String>) - Method in class neureka.devices.opencl.OpenCLDevice
+
findOrCompileAdHocKernel(String, Supplier<String>) - Method in class neureka.devices.opencl.OpenCLDevice
 
-
findParametersIn(String, int) - Static method in class neureka.math.parsing.ParseUtil
+
findParametersIn(String, int) - Static method in class neureka.math.parsing.ParseUtil
 
-
findRootTensor() - Method in class neureka.framing.Relation
+
findRootTensor() - Method in class neureka.framing.Relation
This method tries to find the root data ancestor of this tensor.
-
findTip() - Static method in class neureka.devices.opencl.utility.Messages
+
findTip() - Static method in class neureka.devices.opencl.utility.Messages
 
-
finishedCount() - Method in class neureka.autograd.JITProp
+
finishedCount() - Method in class neureka.autograd.JITProp
 
-
first(Call.TensorCondition) - Method in class neureka.backend.api.Call.Validator
+
first(Call.TensorCondition) - Method in class neureka.backend.api.Call.Validator
 
-
flatMap(Function<V, Stream<R>>) - Method in interface neureka.Nda
+
flatMap(Function<V, Stream<R>>) - Method in interface neureka.Nda
A convenience method for nda.stream().flatMap( mapper ), which turns this Nda into a Stream of its items.
-
flatten(Function, ExecutionCall<D>) - Static method in class neureka.backend.api.template.algorithms.AbstractDeviceAlgorithm
+
flatten(Function, ExecutionCall<D>) - Static method in class neureka.backend.api.template.algorithms.AbstractDeviceAlgorithm
 
-
flattenForIndexer(Function, ExecutionCall<D>) - Static method in class neureka.backend.api.template.algorithms.AbstractDeviceAlgorithm
+
flattenForIndexer(Function, ExecutionCall<D>) - Static method in class neureka.backend.api.template.algorithms.AbstractDeviceAlgorithm
 
-
floatToBigInteger(float[]) - Static method in class neureka.common.utility.DataConverter.Utility
+
floatToBigInteger(float[]) - Static method in class neureka.common.utility.DataConverter.Utility
 
-
floatToByte(float[]) - Static method in class neureka.common.utility.DataConverter.Utility
+
floatToByte(float[]) - Static method in class neureka.common.utility.DataConverter.Utility
 
-
floatToDouble(float[]) - Static method in class neureka.common.utility.DataConverter.Utility
+
floatToDouble(float[]) - Static method in class neureka.common.utility.DataConverter.Utility
 
-
floatToInt(float[]) - Static method in class neureka.common.utility.DataConverter.Utility
+
floatToInt(float[]) - Static method in class neureka.common.utility.DataConverter.Utility
 
-
floatToLong(float[]) - Static method in class neureka.common.utility.DataConverter.Utility
+
floatToLong(float[]) - Static method in class neureka.common.utility.DataConverter.Utility
 
-
floatToShort(float[]) - Static method in class neureka.common.utility.DataConverter.Utility
+
floatToShort(float[]) - Static method in class neureka.common.utility.DataConverter.Utility
 
-
forDevice(Class<? extends D>) - Method in class neureka.backend.api.ini.BackendRegistry
+
forDevice(Class<? extends D>) - Method in class neureka.backend.api.ini.BackendRegistry
 
-
forEachDerivative(BiConsumer<GraphNode<V>, ADAction>) - Method in class neureka.autograd.GraphNode
+
forEachDerivative(BiConsumer<GraphNode<V>, ADAction>) - Method in class neureka.autograd.GraphNode
 
-
forEachTarget(Consumer<GraphNode<V>>) - Method in class neureka.autograd.GraphNode
+
forEachTarget(Consumer<GraphNode<V>>) - Method in class neureka.autograd.GraphNode
 
-
foreignHolderBytesToTarget(byte[]) - Method in class neureka.dtype.custom.F32
+
foreignHolderBytesToTarget(byte[]) - Method in class neureka.dtype.custom.F32
 
-
foreignHolderBytesToTarget(byte[]) - Method in class neureka.dtype.custom.F64
+
foreignHolderBytesToTarget(byte[]) - Method in class neureka.dtype.custom.F64
 
-
foreignHolderBytesToTarget(byte[]) - Method in class neureka.dtype.custom.I16
+
foreignHolderBytesToTarget(byte[]) - Method in class neureka.dtype.custom.I16
 
-
foreignHolderBytesToTarget(byte[]) - Method in class neureka.dtype.custom.I32
+
foreignHolderBytesToTarget(byte[]) - Method in class neureka.dtype.custom.I32
 
-
foreignHolderBytesToTarget(byte[]) - Method in class neureka.dtype.custom.I64
+
foreignHolderBytesToTarget(byte[]) - Method in class neureka.dtype.custom.I64
 
-
foreignHolderBytesToTarget(byte[]) - Method in class neureka.dtype.custom.I8
+
foreignHolderBytesToTarget(byte[]) - Method in class neureka.dtype.custom.I8
 
-
foreignHolderBytesToTarget(byte[]) - Method in class neureka.dtype.custom.UI16
+
foreignHolderBytesToTarget(byte[]) - Method in class neureka.dtype.custom.UI16
 
-
foreignHolderBytesToTarget(byte[]) - Method in class neureka.dtype.custom.UI32
+
foreignHolderBytesToTarget(byte[]) - Method in class neureka.dtype.custom.UI32
 
-
foreignHolderBytesToTarget(byte[]) - Method in class neureka.dtype.custom.UI64
+
foreignHolderBytesToTarget(byte[]) - Method in class neureka.dtype.custom.UI64
 
-
foreignHolderBytesToTarget(byte[]) - Method in class neureka.dtype.custom.UI8
+
foreignHolderBytesToTarget(byte[]) - Method in class neureka.dtype.custom.UI8
 
-
foreignHolderBytesToTarget(byte[]) - Method in interface neureka.dtype.NumericType
+
foreignHolderBytesToTarget(byte[]) - Method in interface neureka.dtype.NumericType
 
-
forInputs(Tensor<?>[], Supplier<Result>) - Static method in class neureka.backend.main.memory.MemValidator
+
forInputs(Tensor<?>[], Supplier<Result>) - Static method in class neureka.backend.main.memory.MemValidator
 
-
format(String, Object...) - Static method in class neureka.common.utility.LogUtil
+
format(String, Object...) - Static method in class neureka.common.utility.LogUtil
 
-
forOperation(Class<? extends Operation>) - Method in interface neureka.backend.api.ini.ReceiveForDevice
+
forOperation(Class<? extends Operation>) - Method in interface neureka.backend.api.ini.ReceiveForDevice
 
-
ForTensor(Tensor<?>) - Constructor for class neureka.common.utility.DataConverter.ForTensor
+
ForTensor(Tensor<?>) - Constructor for class neureka.common.utility.DataConverter.ForTensor
 
-
FORWARD_AND_BACKWARD - Enum constant in enum class neureka.backend.api.AutoDiffMode
-
 
-
FORWARD_ONLY - Enum constant in enum class neureka.backend.api.AutoDiffMode
-
 
-
FOUR - Enum constant in enum class neureka.devices.host.concurrent.Parallelism
+
frame() - Method in interface neureka.Tensor
-
4
+
This is a functionally identical alternative to Tensor.getFrame().
-
frame() - Method in interface neureka.Tensor
+
free(Tensor<T>) - Method in interface neureka.devices.Device
-
This is a functionally identical alternative to Tensor.getFrame().
+
Use this to remove the provided tensor from this Device!

-
free() - Method in class neureka.devices.file.IDXHandle
+
free(Tensor<T>) - Method in class neureka.devices.file.FileDevice
 
-
free() - Method in interface neureka.devices.file.FileHandle
+
free() - Method in interface neureka.devices.file.FileHandle
An implementation of this method ought to "free" up the memory used to store a tensor.
-
free(Tensor<T>) - Method in interface neureka.devices.Device
-
-
Use this to remove the provided tensor from this Device!

-
-
free(Tensor<T>) - Method in class neureka.devices.file.FileDevice
+
free(Tensor<T>) - Method in class neureka.devices.host.CPU
 
-
free(Tensor<T>) - Method in class neureka.devices.host.CPU
+
free(Tensor<T>) - Method in class neureka.devices.opencl.OpenCLDevice
 
-
free(Tensor<T>) - Method in class neureka.devices.opencl.OpenCLDevice
-
 
-
from(int) - Method in class neureka.fluent.slicing.AxisSliceBuilder
+
from(int) - Method in class neureka.fluent.slicing.AxisSliceBuilder
This method returns an instance of this very AxisSliceBuilder instance disguised by the To interface.
-
from(int) - Method in interface neureka.fluent.slicing.states.FromOrAt
+
from(int) - Method in interface neureka.fluent.slicing.states.FromOrAt
This is the starting point for defining the slice range of a specified axis within the method chain/graph exposed by the slice builder API.
-
from(int) - Method in interface neureka.fluent.slicing.states.FromOrAtTensor
+
from(int) - Method in interface neureka.fluent.slicing.states.FromOrAtTensor
This is the starting point for defining the slice range of a specified axis within the method chain/graph exposed by the slice builder API.
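A slicing sketch built around the from(int) and detached() entries above; the slice()/axis()/to() steps and the tensor creation are assumptions about the fluent API, not taken from this index:

    import neureka.Tensor;

    public class SliceSketch {
        public static void main(String[] args) {
            var t = Tensor.of(new double[]{1d, 2d, 3d, 4d, 5d, 6d}); // assumed factory
            var part = t.slice()       // assumed entry point into the slice builder
                        .axis(0)       // assumed axis selection
                        .from(1).to(4) // from(int) as listed above; to(int) assumed
                        .detached();   // detached() as listed above: slice without autograd linkage
            System.out.println(part);
        }
    }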
-
FromOrAt<V> - Interface in neureka.fluent.slicing.states
+
FromOrAt<V> - Interface in neureka.fluent.slicing.states
This is the starting point for defining the slice range of a specified axis within the call transition graph exposed by the slice builder API.
-
FromOrAtTensor<V> - Interface in neureka.fluent.slicing.states
-
 
-
FULL_DELETE - Enum constant in enum class neureka.devices.ReferenceCounter.ChangeType
+
FromOrAtTensor<V> - Interface in neureka.fluent.slicing.states
 
-
fullDelete() - Method in class neureka.devices.ReferenceCounter
+
fullDelete() - Method in class neureka.devices.ReferenceCounter
 
-
fully() - Method in interface neureka.devices.Device.Writer
+
fully() - Method in interface neureka.devices.Device.Writer
A convenience method for specifying that the entire data array of the accessed tensor should be written to.
-
FunAlgorithm - Class in neureka.backend.api.template.algorithms
+
FunAlgorithm - Class in neureka.backend.api.template.algorithms
 
-
FunAlgorithm(String) - Constructor for class neureka.backend.api.template.algorithms.FunAlgorithm
+
FunAlgorithm(String) - Constructor for class neureka.backend.api.template.algorithms.FunAlgorithm
 
-
Function - Interface in neureka.math
+
Function - Interface in neureka.math
Besides the Tensor class, which is the core class of Neureka, this interface and its implementations represent the second most important feature of this library.
-
Function.Callable - Interface in neureka.math
+
Function.Callable - Interface in neureka.math
An API for calling a Function after having specified a set of Arg instances through the Function.with(Args) method.
-
FunctionCache - Class in neureka.math
+
FunctionCache - Class in neureka.math
This class is part of a given BackendContext instance responsible for caching Function references based on their String representation generated by Object.toString() as well as caching of results for active functions.
-
FunctionCache() - Constructor for class neureka.math.FunctionCache
+
FunctionCache() - Constructor for class neureka.math.FunctionCache
 
-
FunctionConstant - Class in neureka.math.implementations
+
FunctionConstant - Class in neureka.math.implementations
Instances of this implementation of the Function interface are leave nodes within the abstract syntax tree of a function, representing constant numeric values to a function.
-
FunctionConstant(String) - Constructor for class neureka.math.implementations.FunctionConstant
+
FunctionConstant(String) - Constructor for class neureka.math.implementations.FunctionConstant
 
-
FunctionInput - Class in neureka.math.implementations
+
FunctionInput - Class in neureka.math.implementations
Instances of this implementation of the Function interface are leave nodes within the abstract syntax tree of a function, representing inputs to a function.
-
FunctionNode - Class in neureka.math.implementations
+
FunctionNode - Class in neureka.math.implementations
The most common type of Function which references other Functions to form an abstract syntax tree.
-
FunctionNode(Operation, List<Function>, boolean) - Constructor for class neureka.math.implementations.FunctionNode
+
FunctionNode(Operation, List<Function>, boolean) - Constructor for class neureka.math.implementations.FunctionNode
 
-
FunctionParser - Class in neureka.math.parsing
+
FunctionParser - Class in neureka.math.parsing
The FunctionParser takes a BackendContext instance based on which it builds Function implementation instances, usually by parsing Strings.
-
FunctionParser(BackendContext) - Constructor for class neureka.math.parsing.FunctionParser
+
FunctionParser(BackendContext) - Constructor for class neureka.math.parsing.FunctionParser
 
-
Functions - Class in neureka.math
+
Functions - Class in neureka.math
 
-
Functions(boolean) - Constructor for class neureka.math.Functions
+
Functions(boolean) - Constructor for class neureka.math.Functions
 
-
FunctionVariable - Class in neureka.math.implementations
+
FunctionVariable - Class in neureka.math.implementations
Instances of this implementation of the Function interface are leaf nodes within the abstract syntax tree of a function, representing indexed inputs to a function.
-
FunctionVariable(String) - Constructor for class neureka.math.implementations.FunctionVariable
+
FunctionVariable(String) - Constructor for class neureka.math.implementations.FunctionVariable
 
-
FunDeviceAlgorithm - Class in neureka.backend.api.template.algorithms
+
FunDeviceAlgorithm - Class in neureka.backend.api.template.algorithms
 
-
FunDeviceAlgorithm(String) - Constructor for class neureka.backend.api.template.algorithms.FunDeviceAlgorithm
+
FunDeviceAlgorithm(String) - Constructor for class neureka.backend.api.template.algorithms.FunDeviceAlgorithm
 
-

G

-
-
GaSU - Class in neureka.backend.main.operations.functions
+
+
+
+

G

+
+
GASU - Static variable in interface neureka.backend.main.implementations.fun.api.ScalarFun
+
 
+
GaSU - Class in neureka.backend.main.operations.functions
The Self Gated Softsign Unit is based on the Softsign function (a computationally cheap, non-exponential quasi Tanh), making it a polynomially based version of the GaTU function, which is itself based on the Tanh function.
-
GaSU() - Constructor for class neureka.backend.main.operations.functions.GaSU
+
GaSU() - Constructor for class neureka.backend.main.operations.functions.GaSU
 
-
GASU - Static variable in interface neureka.backend.main.implementations.fun.api.ScalarFun
+
GATU - Static variable in interface neureka.backend.main.implementations.fun.api.ScalarFun
 
-
GaTU - Class in neureka.backend.main.operations.functions
+
GaTU - Class in neureka.backend.main.operations.functions
The Self Gated Tanh Unit is based on the Tanh function, making it an exponentiation based version of the GaSU function, which is itself based on the Softsign function (a computationally cheap, non-exponential quasi Tanh).
-
GaTU() - Constructor for class neureka.backend.main.operations.functions.GaTU
-
 
-
GATU - Static variable in interface neureka.backend.main.implementations.fun.api.ScalarFun
+
GaTU() - Constructor for class neureka.backend.main.operations.functions.GaTU
 
-
gaus() - Method in class neureka.math.Functions
+
gaus() - Method in class neureka.math.Functions
 
-
Gaussian - Class in neureka.backend.main.operations.functions
+
GAUSSIAN - Static variable in interface neureka.backend.main.implementations.fun.api.ScalarFun
 
-
Gaussian() - Constructor for class neureka.backend.main.operations.functions.Gaussian
+
Gaussian - Class in neureka.backend.main.operations.functions
 
-
GAUSSIAN - Static variable in interface neureka.backend.main.implementations.fun.api.ScalarFun
+
Gaussian() - Constructor for class neureka.backend.main.operations.functions.Gaussian
 
-
GAUSSIAN_FAST - Static variable in interface neureka.backend.main.implementations.fun.api.ScalarFun
+
GAUSSIAN_FAST - Static variable in interface neureka.backend.main.implementations.fun.api.ScalarFun
 
-
GaussianFast - Class in neureka.backend.main.operations.functions
+
GaussianFast - Class in neureka.backend.main.operations.functions
 
-
GaussianFast() - Constructor for class neureka.backend.main.operations.functions.GaussianFast
+
GaussianFast() - Constructor for class neureka.backend.main.operations.functions.GaussianFast
 
-
gaussianFrom(long, double[]) - Static method in class neureka.backend.main.implementations.elementwise.CPURandomization
+
gaussianFrom(long, double[]) - Static method in class neureka.backend.main.implementations.elementwise.CPURandomization
 
-
gelu() - Method in class neureka.math.Functions
+
GELU - Static variable in interface neureka.backend.main.implementations.fun.api.ScalarFun
 
-
gelu(double) - Static method in class neureka.backend.main.implementations.fun.ScalarGeLU
+
gelu(double) - Static method in class neureka.backend.main.implementations.fun.ScalarGeLU
 
-
GeLU - Class in neureka.backend.main.operations.functions
+
GeLU - Class in neureka.backend.main.operations.functions
The GELU activation function is based on the standard Gaussian cumulative distribution function Φ; it is defined as x · Φ(x) and implemented as x * sigmoid(x * 1.702).
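To make the quoted definition concrete, here is a small self-contained sketch of the sigmoid-based approximation described above; this is the formula restated in plain Java, not the library's own ScalarGeLU source.

    public final class GeLUSketch {
        // sigmoid(z) = 1 / (1 + e^(-z))
        static double sigmoid(double z) { return 1.0 / (1.0 + Math.exp(-z)); }

        // GELU approximated as x * sigmoid(1.702 * x), as stated in the entry above.
        static double gelu(double x) { return x * sigmoid(1.702 * x); }

        public static void main(String[] args) {
            System.out.println(gelu(1.0)); // prints roughly 0.846
        }
    }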
-
GeLU() - Constructor for class neureka.backend.main.operations.functions.GeLU
+
GeLU() - Constructor for class neureka.backend.main.operations.functions.GeLU
 
-
GELU - Static variable in interface neureka.backend.main.implementations.fun.api.ScalarFun
+
gelu() - Method in class neureka.math.Functions
 
-
GEMM - Class in neureka.backend.main.operations.linear.internal.blas
+
GEMM - Class in neureka.backend.main.operations.linear.internal.blas
A collection of primitive sub-routines for matrix multiplication performed on contiguous arrays, which are designed so that they can be vectorized by the JVM's JIT compiler (AVX instructions).
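The following sketch is not the library's GEMM code; it only illustrates the kind of plain, branch-free loop over contiguous arrays that the entry refers to, the shape of code that the JVM's JIT compiler can auto-vectorize.

    // Row-major C[m x n] += A[m x k] * B[k x n], written as simple nested loops
    // over contiguous double[] arrays so the JIT can emit SIMD (e.g. AVX) code.
    static void matmul(double[] a, double[] b, double[] c, int m, int k, int n) {
        for (int i = 0; i < m; i++) {
            for (int p = 0; p < k; p++) {
                double aip = a[i * k + p];
                int bRow = p * n, cRow = i * n;
                for (int j = 0; j < n; j++) {
                    c[cRow + j] += aip * b[bRow + j]; // contiguous, vectorizable inner loop
                }
            }
        }
    }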
-
GEMM() - Constructor for class neureka.backend.main.operations.linear.internal.blas.GEMM
+
GEMM() - Constructor for class neureka.backend.main.operations.linear.internal.blas.GEMM
 
-
GEMM.VectorOperationF32 - Interface in neureka.backend.main.operations.linear.internal.blas
+
GEMM.VectorOperationF32 - Interface in neureka.backend.main.operations.linear.internal.blas
 
-
GEMM.VectorOperationF64 - Interface in neureka.backend.main.operations.linear.internal.blas
+
GEMM.VectorOperationF64 - Interface in neureka.backend.main.operations.linear.internal.blas
 
-
get() - Method in class neureka.backend.api.Call.Builder
+
get() - Method in class neureka.backend.api.Call.Builder
 
-
get() - Method in class neureka.backend.api.LazyRef
+
get(Class<T>) - Method in class neureka.backend.api.Call
 
-
get() - Method in class neureka.backend.api.Result
+
get() - Method in class neureka.backend.api.LazyRef
 
-
get() - Static method in class neureka.common.utility.DataConverter
+
get() - Method in class neureka.backend.api.Result
+
 
+
get(Class<T>) - Method in class neureka.common.composition.AbstractComponentOwner
+
+
This method tries to find a component inside the internal component array whose class matches the one provided.
+
+
get(Class<T>) - Method in interface neureka.common.composition.ComponentOwner
+
+
Use this to get the component of the specified component type class.
+
+
get() - Static method in class neureka.common.utility.DataConverter
This method returns the singleton.
-
get() - Method in interface neureka.Data
+
get() - Method in interface neureka.Data
This returns the underlying raw data object of a nd-array or tensor of a backend specific type (e.g.
-
get() - Static method in class neureka.devices.host.CPU
+
get(String...) - Static method in interface neureka.devices.Device
+
+
This method returns Device instances matching the given search parameter.
+
+
get(Class<D>, String...) - Static method in interface neureka.devices.Device
+
+
This method returns Device instances matching the given search parameters.
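A short lookup sketch based on the two static Device.get overloads listed here; the search phrases and the exact generic return types are assumptions, and a lookup may yield nothing on machines without a matching device.

    import neureka.devices.Device;
    import neureka.devices.opencl.OpenCLDevice;

    public class DeviceLookup {
        public static void main(String[] args) {
            // Free-form search over all known devices (search phrase is an assumption):
            Device<?> any = Device.get("first gpu");
            // Search restricted to a concrete device type (may be absent on this machine):
            OpenCLDevice gpu = Device.get(OpenCLDevice.class, "nvidia");
            System.out.println(any + " / " + gpu);
        }
    }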
+
+
get() - Static method in class neureka.devices.host.CPU
Use this method to access the singleton instance of this CPU class, which is a Device type and default location for freshly instantiated Tensor instances.
-
get() - Method in class neureka.fluent.slicing.AxisSliceBuilder
+
get(String) - Method in class neureka.devices.opencl.KernelCache
+
 
+
get(cl_device_id) - Method in class neureka.devices.opencl.OpenCLPlatform
+
 
+
get() - Method in class neureka.fluent.slicing.AxisSliceBuilder
 
-
get() - Method in class neureka.fluent.slicing.SliceBuilder
+
get() - Method in class neureka.fluent.slicing.SliceBuilder
This method will create and return a new slice tensor based on the provided configuration through methods like AxisSliceBuilder.from(int), AxisSliceBuilder.to(int) and AxisSliceBuilder.at(int)...
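A fluent-slicing sketch matching the from/to/at/get transitions listed in this index; it assumes the builder is entered through a slice() call on a tensor and that the tensor-building factory chain shown below exists, neither of which is confirmed by this excerpt.

    import neureka.Tensor;

    public class SlicingExample {
        public static void main(String[] args) {
            // Assumed builder-style factory; any 4x4 tensor would do here.
            Tensor<Double> t = Tensor.of(Double.class).withShape(4, 4).andFill(1d, 2d, 3d, 4d);
            Tensor<Double> slice = t.slice()     // assumed entry point into the SliceBuilder API
                                    .axis(0).from(1).to(2)  // rows 1..2
                                    .axis(1).at(3)          // column 3
                                    .get();                 // concludes the slicing API
            System.out.println(slice);
        }
    }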
-
get() - Method in interface neureka.fluent.slicing.states.AxisOrGet
+
get() - Method in interface neureka.fluent.slicing.states.AxisOrGet
This method concludes the slicing API by performing the actual slicing and returning the resulting Tensor instance based on the previously specified slice configuration...
-
get() - Method in interface neureka.fluent.slicing.states.AxisOrGetTensor
+
get() - Method in interface neureka.fluent.slicing.states.AxisOrGetTensor
This method concludes the slicing API by performing the actual slicing and returning the resulting Tensor instance based on the previously specified slice configuration...
-
get() - Method in interface neureka.fluent.slicing.states.StepsOrAxisOrGetTensor
+
get() - Method in interface neureka.fluent.slicing.states.StepsOrAxisOrGetTensor
This method concludes the slicing API by performing the actual slicing and returning the resulting Tensor instance based on the previously specified slice configuration...
-
get() - Method in interface neureka.framing.fluent.Get
+
Get<ValueType> - Interface in neureka.framing.fluent
 
-
get() - Method in class neureka.math.args.Arg
+
get() - Method in interface neureka.framing.fluent.Get
 
-
get() - Method in interface neureka.Nda.Item
-
-
Get the value at the targeted position or throw an exception if the item does not exist.
-
-
get() - Method in interface neureka.ndim.iterator.NDIterator
+
get(List<Object>) - Method in class neureka.framing.NDFrame
+
 
+
get(Object...) - Method in class neureka.framing.NDFrame
 
-
get() - Method in class neureka.ndim.iterator.types.permuted.Permuted2DCIterator
-
get() - Method in class neureka.ndim.iterator.types.permuted.Permuted3DCIterator
-
get() - Method in class neureka.ndim.iterator.types.simple.Simple1DCIterator
-
get() - Method in class neureka.ndim.iterator.types.simple.Simple2DCIterator
-
get() - Method in class neureka.ndim.iterator.types.simple.Simple3DCIterator
-
get() - Method in class neureka.ndim.iterator.types.sliced.Sliced1DCIterator
-
get() - Method in class neureka.ndim.iterator.types.sliced.Sliced2DCIterator
-
get() - Method in class neureka.ndim.iterator.types.sliced.Sliced3DCIterator
-
get() - Method in class neureka.ndim.iterator.types.sliced.SlicedNDIterator
+
get() - Method in class neureka.math.args.Arg
 
-
get() - Method in class neureka.ndim.iterator.types.virtual.VirtualNDIterator
+
get(String, boolean) - Method in class neureka.math.FunctionCache
 
-
get() - Static method in class neureka.Neureka
+
get(int...) - Method in interface neureka.Nda
-
The Neureka class represents the configuration of this library.
+
The following method enables access to specific scalar elements within the nd-array.
-
get(int) - Method in interface neureka.Nda
+
get(Object...) - Method in interface neureka.Nda
-
This getter method creates and returns a slice of the original nd-array.
+
The following method enables the creation of nd-array slices which access the same underlying data (possibly from a different view).
-
get(int) - Method in interface neureka.ndim.iterator.NDIterator
-
 
-
get(int) - Method in class neureka.ndim.iterator.types.permuted.Permuted2DCIterator
-
get(int) - Method in class neureka.ndim.iterator.types.permuted.Permuted3DCIterator
-
get(int) - Method in class neureka.ndim.iterator.types.simple.Simple1DCIterator
-
get(int) - Method in class neureka.ndim.iterator.types.simple.Simple2DCIterator
-
get(int) - Method in class neureka.ndim.iterator.types.simple.Simple3DCIterator
-
get(int) - Method in class neureka.ndim.iterator.types.sliced.Sliced1DCIterator
-
get(int) - Method in class neureka.ndim.iterator.types.sliced.Sliced2DCIterator
-
get(int) - Method in class neureka.ndim.iterator.types.sliced.Sliced3DCIterator
-
get(int) - Method in class neureka.ndim.iterator.types.sliced.SlicedNDIterator
-
get(int) - Method in class neureka.ndim.iterator.types.virtual.VirtualNDIterator
-
 
-
get(int) - Method in interface neureka.Shape
-
 
-
get(int) - Method in interface neureka.Tensor
+
get(int) - Method in interface neureka.Nda
This getter method creates and returns a slice of the original nd-array.
-
get(int...) - Method in interface neureka.Nda
+
get(Number) - Method in interface neureka.Nda
-
The following method enables access to specific scalar elements within the nd-array.
+
This getter method creates and returns a slice of the original nd-array.
-
get(int...) - Method in interface neureka.Tensor
+
get(Object) - Method in interface neureka.Nda
-
The following method enables access to specific scalar elements within the nd-array.
+
This method enables nd-array slicing! It takes a key of various types and configures a slice nd-array which shares the same underlying data as the original nd-array.
-
get(Class<D>, String...) - Static method in interface neureka.devices.Device
+
get() - Method in interface neureka.Nda.Item
-
This method returns Device instances matching the given search parameters.
+
Get the value at the targeted position or throw an exception if the item does not exist.
-
get(Class<T>) - Method in class neureka.backend.api.Call
+
get(int) - Method in interface neureka.ndim.iterator.NDIterator
 
-
get(Class<T>) - Method in class neureka.common.composition.AbstractComponentOwner
-
-
This method tries to find a component inside the internal component array whose class matches the one provided.
+
get() - Method in interface neureka.ndim.iterator.NDIterator
+
 
+
get(int) - Method in class neureka.ndim.iterator.types.permuted.Permuted2DCIterator
+
get() - Method in class neureka.ndim.iterator.types.permuted.Permuted2DCIterator
+
get(int) - Method in class neureka.ndim.iterator.types.permuted.Permuted3DCIterator
+
get() - Method in class neureka.ndim.iterator.types.permuted.Permuted3DCIterator
+
get(int) - Method in class neureka.ndim.iterator.types.simple.Simple1DCIterator
+
get() - Method in class neureka.ndim.iterator.types.simple.Simple1DCIterator
+
get(int) - Method in class neureka.ndim.iterator.types.simple.Simple2DCIterator
+
get() - Method in class neureka.ndim.iterator.types.simple.Simple2DCIterator
+
get(int) - Method in class neureka.ndim.iterator.types.simple.Simple3DCIterator
+
get() - Method in class neureka.ndim.iterator.types.simple.Simple3DCIterator
+
get(int) - Method in class neureka.ndim.iterator.types.sliced.Sliced1DCIterator
+
get() - Method in class neureka.ndim.iterator.types.sliced.Sliced1DCIterator
+
get(int) - Method in class neureka.ndim.iterator.types.sliced.Sliced2DCIterator
+
get() - Method in class neureka.ndim.iterator.types.sliced.Sliced2DCIterator
+
get(int) - Method in class neureka.ndim.iterator.types.sliced.Sliced3DCIterator
+
get() - Method in class neureka.ndim.iterator.types.sliced.Sliced3DCIterator
+
get(int) - Method in class neureka.ndim.iterator.types.sliced.SlicedNDIterator
+
get() - Method in class neureka.ndim.iterator.types.sliced.SlicedNDIterator
+
 
+
get(int) - Method in class neureka.ndim.iterator.types.virtual.VirtualNDIterator
+
 
+
get() - Method in class neureka.ndim.iterator.types.virtual.VirtualNDIterator
+
 
+
get() - Static method in class neureka.Neureka
+
+
The Neureka class represents the configuration of this library.
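A tiny sketch of the singleton access described here, combined with the getBackend() accessor that also appears in this index; nothing beyond those two calls is implied.

    import neureka.Neureka;
    import neureka.backend.api.BackendContext;

    public class LibraryConfig {
        public static void main(String[] args) {
            Neureka library = Neureka.get();               // the singleton configuration object
            BackendContext backend = library.getBackend(); // currently active backend context
            System.out.println(backend);
        }
    }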
-
get(Class<T>) - Method in interface neureka.common.composition.ComponentOwner
+
get(int) - Method in interface neureka.Shape
+
 
+
get(int...) - Method in interface neureka.Tensor
-
Use this to get the component of the specified component type class.
+
The following method enables access to specific scalar elements within the nd-array.
-
get(Number) - Method in interface neureka.Nda
+
get(Object...) - Method in interface neureka.Tensor
-
This getter method creates and returns a slice of the original nd-array.
+
The following method enables the creation of nd-array slices which access the same underlying data (possibly from a different view).
-
get(Number) - Method in interface neureka.Tensor
+
get(int) - Method in interface neureka.Tensor
This getter method creates and returns a slice of the original nd-array.
-
get(Object) - Method in interface neureka.Nda
+
get(Number) - Method in interface neureka.Tensor
-
This method enables nd-array slicing! It takes a key of various types and configures a slice nd-array which shares the same underlying data as the original nd-array.
+
This getter method creates and returns a slice of the original nd-array.
-
get(Object) - Method in interface neureka.Tensor
+
get(Object) - Method in interface neureka.Tensor
This method enables nd-array slicing! It takes a key of various types and configures a slice nd-array which shares the same underlying data as the original nd-array.
-
get(Object...) - Method in class neureka.framing.NDFrame
-
 
-
get(Object...) - Method in interface neureka.Nda
-
-
The following method enables the creation of nd-array slices which access the same underlying data (possibly from a different view).
-
-
get(Object...) - Method in interface neureka.Tensor
-
-
The following method enables the creation of nd-array slices which access the same underlying data (possibly from a different view).
-
-
get(String) - Method in class neureka.devices.opencl.KernelCache
-
 
-
get(String...) - Static method in interface neureka.devices.Device
-
-
This method returns Device instances matching the given search parameter.
-
-
get(String, boolean) - Method in class neureka.math.FunctionCache
-
 
-
get(List<Object>) - Method in class neureka.framing.NDFrame
-
 
-
get(cl_device_id) - Method in class neureka.devices.opencl.OpenCLPlatform
+
getAbs() - Method in class neureka.math.Functions
 
-
Get<ValueType> - Interface in neureka.framing.fluent
+
getActivation() - Method in interface neureka.backend.main.implementations.fun.api.ScalarFun
 
-
getAbs() - Method in class neureka.math.Functions
+
getActivation() - Method in class neureka.backend.main.implementations.fun.ScalarAbsolute
 
-
getActivation() - Method in interface neureka.backend.main.implementations.fun.api.ScalarFun
+
getActivation() - Method in class neureka.backend.main.implementations.fun.ScalarCbrt
 
-
getActivation() - Method in class neureka.backend.main.implementations.fun.ScalarAbsolute
+
getActivation() - Method in class neureka.backend.main.implementations.fun.ScalarCosinus
 
-
getActivation() - Method in class neureka.backend.main.implementations.fun.ScalarCbrt
+
getActivation() - Method in class neureka.backend.main.implementations.fun.ScalarExp
 
-
getActivation() - Method in class neureka.backend.main.implementations.fun.ScalarCosinus
+
getActivation() - Method in class neureka.backend.main.implementations.fun.ScalarGaSU
 
-
getActivation() - Method in class neureka.backend.main.implementations.fun.ScalarExp
+
getActivation() - Method in class neureka.backend.main.implementations.fun.ScalarGaTU
 
-
getActivation() - Method in class neureka.backend.main.implementations.fun.ScalarGaSU
+
getActivation() - Method in class neureka.backend.main.implementations.fun.ScalarGaussian
 
-
getActivation() - Method in class neureka.backend.main.implementations.fun.ScalarGaTU
+
getActivation() - Method in class neureka.backend.main.implementations.fun.ScalarGaussianFast
 
-
getActivation() - Method in class neureka.backend.main.implementations.fun.ScalarGaussian
+
getActivation() - Method in class neureka.backend.main.implementations.fun.ScalarGeLU
 
-
getActivation() - Method in class neureka.backend.main.implementations.fun.ScalarGaussianFast
+
getActivation() - Method in class neureka.backend.main.implementations.fun.ScalarIdentity
 
-
getActivation() - Method in class neureka.backend.main.implementations.fun.ScalarGeLU
+
getActivation() - Method in class neureka.backend.main.implementations.fun.ScalarLog10
 
-
getActivation() - Method in class neureka.backend.main.implementations.fun.ScalarIdentity
+
getActivation() - Method in class neureka.backend.main.implementations.fun.ScalarLogarithm
 
-
getActivation() - Method in class neureka.backend.main.implementations.fun.ScalarLog10
+
getActivation() - Method in class neureka.backend.main.implementations.fun.ScalarQuadratic
 
-
getActivation() - Method in class neureka.backend.main.implementations.fun.ScalarLogarithm
+
getActivation() - Method in class neureka.backend.main.implementations.fun.ScalarReLU
 
-
getActivation() - Method in class neureka.backend.main.implementations.fun.ScalarQuadratic
+
getActivation() - Method in class neureka.backend.main.implementations.fun.ScalarSeLU
 
-
getActivation() - Method in class neureka.backend.main.implementations.fun.ScalarReLU
+
getActivation() - Method in class neureka.backend.main.implementations.fun.ScalarSigmoid
 
-
getActivation() - Method in class neureka.backend.main.implementations.fun.ScalarSeLU
+
getActivation() - Method in class neureka.backend.main.implementations.fun.ScalarSiLU
 
-
getActivation() - Method in class neureka.backend.main.implementations.fun.ScalarSigmoid
+
getActivation() - Method in class neureka.backend.main.implementations.fun.ScalarSinus
 
-
getActivation() - Method in class neureka.backend.main.implementations.fun.ScalarSiLU
+
getActivation() - Method in class neureka.backend.main.implementations.fun.ScalarSoftplus
 
-
getActivation() - Method in class neureka.backend.main.implementations.fun.ScalarSinus
+
getActivation() - Method in class neureka.backend.main.implementations.fun.ScalarSoftsign
 
-
getActivation() - Method in class neureka.backend.main.implementations.fun.ScalarSoftplus
+
getActivation() - Method in class neureka.backend.main.implementations.fun.ScalarSqrt
 
-
getActivation() - Method in class neureka.backend.main.implementations.fun.ScalarSoftsign
+
getActivation() - Method in class neureka.backend.main.implementations.fun.ScalarTanh
 
-
getActivation() - Method in class neureka.backend.main.implementations.fun.ScalarSqrt
+
getActivation() - Method in class neureka.backend.main.implementations.fun.ScalarTanhFast
 
-
getActivation() - Method in class neureka.backend.main.implementations.fun.ScalarTanh
-
 
-
getActivation() - Method in class neureka.backend.main.implementations.fun.ScalarTanhFast
-
 
-
getActiveThreadCount() - Method in class neureka.devices.host.CPU.JVMExecutor
+
getActiveThreadCount() - Method in class neureka.devices.host.CPU.JVMExecutor
Returns the approximate number of threads that are actively executing tasks.
-
getAdd() - Method in class neureka.math.Functions
+
getAdd() - Method in class neureka.math.Functions
 
-
getAddAssign() - Method in class neureka.math.Functions
+
getAddAssign() - Method in class neureka.math.Functions
 
-
getAdHocKernel(String) - Method in class neureka.devices.opencl.OpenCLDevice
+
getAdHocKernel(String) - Method in class neureka.devices.opencl.OpenCLDevice
 
-
getAgentSupplier() - Method in class neureka.backend.api.Result
+
getAgentSupplier() - Method in class neureka.backend.api.Result
 
-
getAlgorithm() - Method in class neureka.backend.api.ExecutionCall
+
getAlgorithm() - Method in class neureka.backend.api.ExecutionCall
An ExecutionCall will either already have a targeted Algorithm defined at instantiation, or it will query the associated Operation for an Algorithm best suited for the state of this ExecutionCall.
-
getAlgorithm(Class<T>) - Method in interface neureka.backend.api.Operation
+
getAlgorithm(Class<T>) - Method in interface neureka.backend.api.Operation
Operation implementations embody a component system hosting unique Algorithm instances.
-
getAlgorithm(Class<T>) - Method in class neureka.backend.api.template.operations.AbstractOperation
+
getAlgorithm(Class<T>) - Method in class neureka.backend.api.template.operations.AbstractOperation
Operation implementations embody a component system hosting unique Algorithm instances.
-
getAlgorithmFor(ExecutionCall<?>) - Method in interface neureka.backend.api.Operation
+
getAlgorithmFor(ExecutionCall<?>) - Method in interface neureka.backend.api.Operation
Alongside a component system made up of Algorithm instances, implementations of this interface also ought to express a routing mechanism which finds the best Algorithm for a given ExecutionCall instance.
-
getAlgorithmFor(ExecutionCall<?>) - Method in class neureka.backend.api.template.operations.AbstractOperation
+
getAlgorithmFor(ExecutionCall<?>) - Method in class neureka.backend.api.template.operations.AbstractOperation
 
-
getAlgorithmName() - Method in interface neureka.backend.api.ini.LoadingContext
+
getAlgorithmName() - Method in interface neureka.backend.api.ini.LoadingContext
 
-
getAll(Class<T>) - Method in class neureka.common.composition.AbstractComponentOwner
+
getAll(Class<T>) - Method in class neureka.common.composition.AbstractComponentOwner
This method tries to find all components inside the internal component array whose classes are sub types of the one provided.
-
getAll(Class<T>) - Method in interface neureka.common.composition.ComponentOwner
+
getAll(Class<T>) - Method in interface neureka.common.composition.ComponentOwner
Use this to get all components of the specified component type class.
-
getAllAlgorithms() - Method in interface neureka.backend.api.Operation
+
getAllAlgorithms() - Method in interface neureka.backend.api.Operation
 
-
getAllAlgorithms() - Method in class neureka.backend.api.template.operations.AbstractOperation
+
getAllAlgorithms() - Method in class neureka.backend.api.template.operations.AbstractOperation
 
-
getAllAliases() - Method in class neureka.framing.fluent.AxisFrame
+
getAllAliases() - Method in class neureka.framing.fluent.AxisFrame
 
-
getAllAliasesForIndex(int) - Method in class neureka.framing.fluent.AxisFrame
+
getAllAliasesForIndex(int) - Method in class neureka.framing.fluent.AxisFrame
 
-
getAllFunctions() - Method in interface neureka.math.Function
+
getAllFunctions() - Method in interface neureka.math.Function
 
-
getAndRemovePendingError() - Method in class neureka.autograd.GraphNode
+
getAndRemovePendingError() - Method in class neureka.autograd.GraphNode
This method is called by the JITProp component.
-
getArchitecture() - Static method in class neureka.devices.host.machine.ConcreteMachine
+
getArchitecture() - Static method in class neureka.devices.host.machine.ConcreteMachine
 
-
getArity() - Method in interface neureka.backend.api.Operation
+
getArity() - Method in interface neureka.backend.api.Operation
Arity is the number of arguments or operands that this function or operation takes.
-
getArity() - Method in class neureka.backend.api.template.operations.AbstractOperation
+
getArity() - Method in class neureka.backend.api.template.operations.AbstractOperation
 
-
getArity() - Method in class neureka.backend.api.template.operations.OperationBuilder
+
getArity() - Method in class neureka.backend.api.template.operations.OperationBuilder
 
-
getAsInt() - Method in enum class neureka.devices.host.concurrent.Parallelism
+
getAsInt() - Method in enum neureka.devices.host.concurrent.Parallelism
 
-
getAt(int) - Method in interface neureka.Nda
+
getAt(int...) - Method in interface neureka.Nda
-
This getter method creates and returns a slice of the original nd-array.
+
The following method enables access to specific scalar elements within the nd-array.
-
getAt(int) - Method in interface neureka.Tensor
+
getAt(Number) - Method in interface neureka.Nda
This getter method creates and returns a slice of the original nd-array.
-
getAt(int...) - Method in interface neureka.Nda
+
getAt(Object...) - Method in interface neureka.Nda
-
The following method enables access to specific scalar elements within the nd-array.
+
The following method enables the creation of nd-array slices which access the same underlying data (possibly from a different view).
-
getAt(int...) - Method in interface neureka.Tensor
+
getAt(int) - Method in interface neureka.Nda
-
The following method enables access to specific scalar elements within the nd-array.
+
This getter method creates and returns a slice of the original nd-array.
-
getAt(Number) - Method in interface neureka.Nda
+
getAt(Map<?, Integer>) - Method in interface neureka.Nda
-
This getter method creates and returns a slice of the original nd-array.
+
This method is most useful when used in Groovy where defining maps is done through square brackets, making it possible to slice nd-arrays like so:
-
getAt(Number) - Method in interface neureka.Tensor
+
getAt(List<?>) - Method in interface neureka.Nda
-
This getter method creates and returns a slice of the original nd-array.
+
This method enables nd-array slicing! It takes a key of various types and configures a slice nd-array which shares the same underlying data as the original nd-array.
-
getAt(Object...) - Method in interface neureka.Nda
+
getAt(int...) - Method in interface neureka.Tensor
-
The following method enables the creation of nd-array slices which access the same underlying data (possibly from a different view).
+
The following method enables access to specific scalar elements within the nd-array.
-
getAt(Object...) - Method in interface neureka.Tensor
+
getAt(Number) - Method in interface neureka.Tensor
-
The following method enables the creation of nd-array slices which access the same underlying data (possibly from a different view).
+
This getter method creates and returns a slice of the original nd-array.
-
getAt(List<?>) - Method in interface neureka.Nda
+
getAt(Object...) - Method in interface neureka.Tensor
-
This method enables nd-array slicing! It takes a key of various types and configures a slice nd-array which shares the same underlying data as the original nd-array.
+
The following method enables the creation of nd-array slices which access the same underlying data (possibly from a different view).
-
getAt(List<?>) - Method in interface neureka.Tensor
+
getAt(int) - Method in interface neureka.Tensor
-
This method enables nd-array slicing! It takes a key of various types and configures a slice nd-array which shares the same underlying data as the original nd-array.
+
This getter method creates and returns a slice of the original nd-array.
-
getAt(Map<?, Integer>) - Method in interface neureka.Nda
+
getAt(Map<?, Integer>) - Method in interface neureka.Tensor
This method is most useful when used in Groovy where defining maps is done through square brackets, making it possible to slice nd-arrays like so:
-
getAt(Map<?, Integer>) - Method in interface neureka.Tensor
+
getAt(List<?>) - Method in interface neureka.Tensor
-
This method is most useful when used in Groovy where defining maps is done through square brackets, making it possible to slice nd-arrays like so:
+
This method enables nd-array slicing! It takes a key of various types and configures a slice nd-array which shares the same underlying data as the original nd-array.
-
getAutogradFunction() - Method in class neureka.backend.api.BackendContext
+
getAutogradFunction() - Method in class neureka.backend.api.BackendContext
This method returns a Functions instance which wraps pre-instantiated Function instances which are configured to track their computational history.
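Assuming the BackendContext is obtained through Neureka.get().getBackend() as elsewhere in this index, the two pre-instantiated function sets described here and under getFunction() could be reached like this (a sketch, not a verified recipe):

    import neureka.Neureka;
    import neureka.math.Functions;

    public class FunctionAccess {
        public static void main(String[] args) {
            // Pre-instantiated functions that track their computational history (autograd):
            Functions autograd = Neureka.get().getBackend().getAutogradFunction();
            // Pre-instantiated functions without history tracking:
            Functions plain = Neureka.get().getBackend().getFunction();
            System.out.println(autograd.getGelu() + " / " + plain.getGelu());
        }
    }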
-
getBackend() - Method in class neureka.Neureka
+
getBackend() - Method in class neureka.Neureka
 
-
getCbrt() - Method in class neureka.math.Functions
+
getCbrt() - Method in class neureka.math.Functions
 
-
getCellSize() - Method in class neureka.view.NDPrintSettings
+
getCellSize() - Method in class neureka.view.NDPrintSettings
A cell size refers to the number of characters reserved to the String representation of a single element.
-
getChildren() - Method in class neureka.autograd.GraphNode
+
getChildren() - Method in class neureka.autograd.GraphNode
The children are GraphNode instances which represent computations involving the payload of this very GraphNode instance.
-
getChildren() - Method in class neureka.framing.Relation
+
getChildren() - Method in class neureka.framing.Relation
 
-
getCode() - Method in class neureka.devices.opencl.KernelCode
+
getCode() - Method in class neureka.devices.opencl.KernelCode
 
-
getColLabels() - Method in class neureka.devices.file.CSVHandle
+
getColLabels() - Method in class neureka.devices.file.CSVHandle
 
-
getCompletedTaskCount() - Method in class neureka.devices.host.CPU.JVMExecutor
+
getCompletedTaskCount() - Method in class neureka.devices.host.CPU.JVMExecutor
Returns the approximate total number of tasks that have completed execution.
-
getConcat() - Method in class neureka.math.Functions
+
getConcat() - Method in class neureka.math.Functions
 
-
getContext() - Method in class neureka.devices.opencl.OpenCLPlatform
+
getContext() - Method in class neureka.devices.opencl.OpenCLPlatform
 
-
getConv() - Method in class neureka.math.Functions
+
getConv() - Method in class neureka.math.Functions
 
-
getCoreCount() - Method in class neureka.devices.host.CPU
+
getCoreCount() - Method in class neureka.devices.host.CPU
Returns the number of CPU cores available to the Java virtual machine.
-
getCorePoolSize() - Method in class neureka.devices.host.CPU.JVMExecutor
+
getCorePoolSize() - Method in class neureka.devices.host.CPU.JVMExecutor
Returns the core number of threads.
-
getCos() - Method in class neureka.math.Functions
+
getCos() - Method in class neureka.math.Functions
 
-
getData() - Method in class neureka.common.utility.ListReader.Result
+
getData() - Method in class neureka.common.utility.ListReader.Result
 
-
getData() - Method in interface neureka.MutateNda
+
getData() - Method in interface neureka.MutateNda
At the heart of every tensor is the Data object, which holds the actual data array, a sequence of values of the same type.
-
getDataAs(Class<A>) - Method in interface neureka.MutateNda
+
getDataAs(Class<A>) - Method in interface neureka.MutateNda
This method returns the data of this nd-array as a Java array of the specified type.
-
getDataAs(Class<A>) - Method in interface neureka.Nda
+
getDataAs(Class<A>) - Method in interface neureka.Nda
Use this to get the items of the underlying Data buffer of this nd-array as a primitive array of the specified type.
-
getDataAt(int) - Method in interface neureka.Nda
+
getDataAt(int) - Method in interface neureka.Nda
Use this to access elements of the underlying data array without any index transformation applied to it.
-
getDataForWriting(Class<A>) - Method in interface neureka.MutateTensor
+
getDataForWriting(Class<A>) - Method in interface neureka.MutateTensor
Use this to access the underlying writable data of this tensor if you want to modify it.
-
getDataSize() - Method in interface neureka.devices.Device.Access
+
getDataSize() - Method in interface neureka.devices.Device.Access
 
-
getDataSize() - Method in class neureka.devices.file.CSVHandle
+
getDataSize() - Method in class neureka.devices.file.CSVHandle
 
-
getDataSize() - Method in interface neureka.devices.file.FileHandle
+
getDataSize() - Method in interface neureka.devices.file.FileHandle
This method returns the byte size of the data which is stored in the tensor of the file which is managed by this FileHandle.
-
getDataSize() - Method in class neureka.devices.file.IDXHandle
+
getDataSize() - Method in class neureka.devices.file.IDXHandle
 
-
getDataType() - Method in class neureka.devices.file.CSVHandle
+
getDataType() - Method in class neureka.devices.file.CSVHandle
 
-
getDataType() - Method in interface neureka.devices.file.FileHandle
+
getDataType() - Method in interface neureka.devices.file.FileHandle
 
-
getDataType() - Method in class neureka.devices.file.IDXHandle
+
getDataType() - Method in class neureka.devices.file.IDXHandle
 
-
getDataType() - Method in class neureka.devices.opencl.KernelCode
+
getDataType() - Method in class neureka.devices.opencl.KernelCode
 
-
getDataType() - Method in interface neureka.Tensor
+
getDataType() - Method in interface neureka.Tensor
This method returns the DataType instance of this Tensor, which is a wrapper object for the actual type class representing the value items stored inside the underlying data array of this tensor.
-
getDefaultAlgorithm() - Method in class neureka.backend.api.template.operations.AbstractOperation
+
getDefaultAlgorithm() - Method in class neureka.backend.api.template.operations.AbstractOperation
 
-
getDefaultDataType() - Method in class neureka.Neureka.Settings.DType
+
getDefaultDataType() - Method in class neureka.Neureka.Settings.DType
 
-
getDefaultDataTypeClass() - Method in class neureka.Neureka.Settings.DType
+
getDefaultDataTypeClass() - Method in class neureka.Neureka.Settings.DType
The default data type is not relevant most of the time.
-
getDelimiter() - Method in class neureka.devices.file.CSVHandle
+
getDelimiter() - Method in class neureka.devices.file.CSVHandle
 
-
getDerivative() - Method in interface neureka.backend.main.implementations.fun.api.ScalarFun
+
getDerivative() - Method in interface neureka.backend.main.implementations.fun.api.ScalarFun
 
-
getDerivative() - Method in class neureka.backend.main.implementations.fun.ScalarAbsolute
+
getDerivative() - Method in class neureka.backend.main.implementations.fun.ScalarAbsolute
 
-
getDerivative() - Method in class neureka.backend.main.implementations.fun.ScalarCbrt
+
getDerivative() - Method in class neureka.backend.main.implementations.fun.ScalarCbrt
 
-
getDerivative() - Method in class neureka.backend.main.implementations.fun.ScalarCosinus
+
getDerivative() - Method in class neureka.backend.main.implementations.fun.ScalarCosinus
 
-
getDerivative() - Method in class neureka.backend.main.implementations.fun.ScalarExp
+
getDerivative() - Method in class neureka.backend.main.implementations.fun.ScalarExp
 
-
getDerivative() - Method in class neureka.backend.main.implementations.fun.ScalarGaSU
+
getDerivative() - Method in class neureka.backend.main.implementations.fun.ScalarGaSU
 
-
getDerivative() - Method in class neureka.backend.main.implementations.fun.ScalarGaTU
+
getDerivative() - Method in class neureka.backend.main.implementations.fun.ScalarGaTU
 
-
getDerivative() - Method in class neureka.backend.main.implementations.fun.ScalarGaussian
+
getDerivative() - Method in class neureka.backend.main.implementations.fun.ScalarGaussian
 
-
getDerivative() - Method in class neureka.backend.main.implementations.fun.ScalarGaussianFast
+
getDerivative() - Method in class neureka.backend.main.implementations.fun.ScalarGaussianFast
 
-
getDerivative() - Method in class neureka.backend.main.implementations.fun.ScalarGeLU
+
getDerivative() - Method in class neureka.backend.main.implementations.fun.ScalarGeLU
 
-
getDerivative() - Method in class neureka.backend.main.implementations.fun.ScalarIdentity
+
getDerivative() - Method in class neureka.backend.main.implementations.fun.ScalarIdentity
 
-
getDerivative() - Method in class neureka.backend.main.implementations.fun.ScalarLog10
+
getDerivative() - Method in class neureka.backend.main.implementations.fun.ScalarLog10
 
-
getDerivative() - Method in class neureka.backend.main.implementations.fun.ScalarLogarithm
+
getDerivative() - Method in class neureka.backend.main.implementations.fun.ScalarLogarithm
 
-
getDerivative() - Method in class neureka.backend.main.implementations.fun.ScalarQuadratic
+
getDerivative() - Method in class neureka.backend.main.implementations.fun.ScalarQuadratic
 
-
getDerivative() - Method in class neureka.backend.main.implementations.fun.ScalarReLU
+
getDerivative() - Method in class neureka.backend.main.implementations.fun.ScalarReLU
 
-
getDerivative() - Method in class neureka.backend.main.implementations.fun.ScalarSeLU
+
getDerivative() - Method in class neureka.backend.main.implementations.fun.ScalarSeLU
 
-
getDerivative() - Method in class neureka.backend.main.implementations.fun.ScalarSigmoid
+
getDerivative() - Method in class neureka.backend.main.implementations.fun.ScalarSigmoid
 
-
getDerivative() - Method in class neureka.backend.main.implementations.fun.ScalarSiLU
+
getDerivative() - Method in class neureka.backend.main.implementations.fun.ScalarSiLU
 
-
getDerivative() - Method in class neureka.backend.main.implementations.fun.ScalarSinus
+
getDerivative() - Method in class neureka.backend.main.implementations.fun.ScalarSinus
 
-
getDerivative() - Method in class neureka.backend.main.implementations.fun.ScalarSoftplus
+
getDerivative() - Method in class neureka.backend.main.implementations.fun.ScalarSoftplus
 
-
getDerivative() - Method in class neureka.backend.main.implementations.fun.ScalarSoftsign
+
getDerivative() - Method in class neureka.backend.main.implementations.fun.ScalarSoftsign
 
-
getDerivative() - Method in class neureka.backend.main.implementations.fun.ScalarSqrt
+
getDerivative() - Method in class neureka.backend.main.implementations.fun.ScalarSqrt
 
-
getDerivative() - Method in class neureka.backend.main.implementations.fun.ScalarTanh
+
getDerivative() - Method in class neureka.backend.main.implementations.fun.ScalarTanh
 
-
getDerivative() - Method in class neureka.backend.main.implementations.fun.ScalarTanhFast
+
getDerivative() - Method in class neureka.backend.main.implementations.fun.ScalarTanhFast
 
-
getDerivative(int) - Method in interface neureka.math.Function
+
getDerivative(int) - Method in interface neureka.math.Function
This method builds a new Function which is the derivative of this Function with respect to the provided input index.
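Building on the Function.of sketch given earlier under the Function entry (the factory name and parser syntax are still assumptions), deriving with respect to an input index might look like this:

    import neureka.math.Function;

    public class DerivativeExample {
        public static void main(String[] args) {
            Function f = Function.of("i0 ^ 2"); // assumed parser syntax for "input 0 squared"
            Function df = f.getDerivative(0);   // derivative of f with respect to input 0
            System.out.println(df);             // may print a symbolic form such as 2 * i0
        }
    }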
-
getDerivative(int) - Method in class neureka.math.implementations.FunctionConstant
+
getDerivative(int) - Method in class neureka.math.implementations.FunctionConstant
 
-
getDerivative(int) - Method in class neureka.math.implementations.FunctionInput
+
getDerivative(int) - Method in class neureka.math.implementations.FunctionInput
 
-
getDerivative(int) - Method in class neureka.math.implementations.FunctionNode
+
getDerivative(int) - Method in class neureka.math.implementations.FunctionNode
 
-
getDerivative(int) - Method in class neureka.math.implementations.FunctionVariable
+
getDerivative(int) - Method in class neureka.math.implementations.FunctionVariable
 
-
getDerivativeIndex() - Method in class neureka.backend.api.Call
+
getDerivativeIndex() - Method in class neureka.backend.api.Call
 
-
getDerivator() - Method in class neureka.backend.api.template.operations.OperationBuilder
+
getDerivator() - Method in class neureka.backend.api.template.operations.OperationBuilder
 
-
getDevice() - Method in class neureka.backend.api.Call
+
getDevice() - Method in class neureka.backend.api.Call
 
-
getDevice() - Method in interface neureka.Tensor
+
getDevice() - Method in interface neureka.Tensor
 
-
getDeviceFor(Class<T>) - Method in class neureka.backend.api.Call
+
getDeviceFor(Class<T>) - Method in class neureka.backend.api.Call
 
-
getDevices() - Method in class neureka.devices.opencl.OpenCLPlatform
+
getDevices() - Method in class neureka.devices.opencl.OpenCLPlatform
 
-
getDimTrim() - Method in class neureka.math.Functions
+
getDimTrim() - Method in class neureka.math.Functions
 
-
getDirectory() - Method in class neureka.devices.file.FileDevice
+
getDirectory() - Method in class neureka.devices.file.FileDevice
 
-
getDiv() - Method in class neureka.math.Functions
+
getDiv() - Method in class neureka.math.Functions
 
-
getDivAssign() - Method in class neureka.math.Functions
+
getDivAssign() - Method in class neureka.math.Functions
 
-
getDot() - Method in class neureka.math.Functions
+
getDot() - Method in class neureka.math.Functions
 
-
getEstimation() - Method in class neureka.backend.api.Call.Validator.Estimator
+
getEstimation() - Method in class neureka.backend.api.Call.Validator.Estimator
 
-
getEstimator() - Method in class neureka.backend.api.Call.Validator
+
getEstimator() - Method in class neureka.backend.api.Call.Validator
 
-
getExecutor() - Method in class neureka.devices.host.CPU
+
getExecutor() - Method in class neureka.devices.host.CPU
The CPU.JVMExecutor offers functionality similar to the parallel stream API; however, it differs in that the CPU.JVMExecutor processes CPU.RangeWorkload lambdas instead of simply exposing a single index or concrete elements for a given workload size.
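Because this entry is the most detailed description of the executor in this excerpt, a sketch may help. Only CPU.get() and getExecutor() are confirmed by the index; the name and signature of the submission method (threaded(...)) and the shape of the CPU.RangeWorkload lambda are assumptions.

    import neureka.devices.host.CPU;

    public class ParallelFill {
        public static void main(String[] args) {
            double[] data = new double[1_000_000];
            // Assumed submission method: each worker receives a [start, end) index range.
            CPU.get().getExecutor().threaded(0, data.length, (start, end) -> {
                for (int i = start; i < end; i++) data[i] = Math.sqrt(i);
            });
            System.out.println(data[42]);
        }
    }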
-
getExp() - Method in class neureka.math.Functions
+
getExp() - Method in class neureka.math.Functions
 
-
getExtensions() - Method in class neureka.backend.api.BackendContext
+
getExtensions() - Method in class neureka.backend.api.BackendContext
 
-
getFastGaus() - Method in class neureka.math.Functions
+
getFastGaus() - Method in class neureka.math.Functions
 
-
getFastTanh() - Method in class neureka.math.Functions
+
getFastTanh() - Method in class neureka.math.Functions
 
-
getFileName() - Method in class neureka.devices.file.IDXHandle
+
getFileName() - Method in interface neureka.devices.file.FileHandle
 
-
getFileName() - Method in interface neureka.devices.file.FileHandle
+
getFrame() - Method in interface neureka.Tensor
 
-
getFrame() - Method in interface neureka.Tensor
-
 
-
getFunction() - Method in class neureka.autograd.GraphNode
+
getFunction() - Method in class neureka.autograd.GraphNode
Recorded Function which produced this GraphNode.
-
getFunction() - Method in class neureka.backend.api.BackendContext
+
getFunction() - Method in class neureka.backend.api.BackendContext
This method returns a Functions instance which wraps pre-instantiated Function instances which are configured to not track their computational history.
-
getFunctionCache() - Method in class neureka.backend.api.BackendContext
+
getFunctionCache() - Method in class neureka.backend.api.BackendContext
 
-
getGaus() - Method in class neureka.math.Functions
+
getGaus() - Method in class neureka.math.Functions
 
-
getGelu() - Method in class neureka.math.Functions
+
getGelu() - Method in class neureka.math.Functions
 
-
getGradient() - Method in interface neureka.Tensor
+
getGradient() - Method in interface neureka.Tensor
 
-
getGraphNode() - Method in interface neureka.Tensor
+
getGraphNode() - Method in interface neureka.Tensor
 
-
getHasDerivatives() - Method in class neureka.view.NDPrintSettings
+
getHasDerivatives() - Method in class neureka.view.NDPrintSettings
 
-
getHasGradient() - Method in class neureka.view.NDPrintSettings
+
getHasGradient() - Method in class neureka.view.NDPrintSettings
 
-
getHasRecursiveGraph() - Method in class neureka.view.NDPrintSettings
+
getHasRecursiveGraph() - Method in class neureka.view.NDPrintSettings
 
-
getHasShape() - Method in class neureka.view.NDPrintSettings
+
getHasShape() - Method in class neureka.view.NDPrintSettings
 
-
getHasSlimNumbers() - Method in class neureka.view.NDPrintSettings
+
getHasSlimNumbers() - Method in class neureka.view.NDPrintSettings
 
-
getHasValue() - Method in class neureka.view.NDPrintSettings
+
getHasValue() - Method in class neureka.view.NDPrintSettings
 
-
getId() - Method in class neureka.devices.opencl.OpenCLDevice
+
getId() - Method in class neureka.devices.opencl.OpenCLDevice
 
-
getId() - Method in class neureka.devices.opencl.OpenCLPlatform
+
getId() - Method in class neureka.devices.opencl.OpenCLPlatform
 
-
getIdentifier() - Method in interface neureka.backend.api.Operation
+
getIdentifier() - Method in interface neureka.backend.api.Operation
Concrete Operation types ought to be representable by a function name.
-
getIdentifier() - Method in class neureka.backend.api.template.operations.AbstractOperation
+
getIdentifier() - Method in class neureka.backend.api.template.operations.AbstractOperation
 
-
getIdentifier() - Method in class neureka.backend.api.template.operations.OperationBuilder
+
getIdentifier() - Method in class neureka.backend.api.template.operations.OperationBuilder
 
-
getIdy() - Method in class neureka.math.Functions
+
getIdy() - Method in class neureka.math.Functions
 
-
getImplementationFor(D) - Method in interface neureka.backend.api.DeviceAlgorithm
+
getImplementationFor(Class<D>) - Method in interface neureka.backend.api.DeviceAlgorithm
-
An ImplementationFor a specific Device can be accessed by passing the class of the Device for which an implementation should be returned.
-
getImplementationFor(Class<D>) - Method in interface neureka.backend.api.DeviceAlgorithm
+
getImplementationFor(D) - Method in interface neureka.backend.api.DeviceAlgorithm
-
An ImplementationFor a specific Device can be accessed by passing the Device for which an implementation should be returned.
-
getImplementationFor(Class<D>) - Method in class neureka.backend.api.template.algorithms.AbstractDeviceAlgorithm
+
getImplementationFor(Class<D>) - Method in class neureka.backend.api.template.algorithms.AbstractDeviceAlgorithm
 
-
getIndent() - Method in class neureka.view.NDPrintSettings
+
getIndent() - Method in class neureka.view.NDPrintSettings
 
-
getIndexAndIncrement() - Method in interface neureka.ndim.iterator.NDIterator
+
getIndexAndIncrement() - Method in interface neureka.ndim.iterator.NDIterator
 
-
getIndexAtAlias(Object) - Method in class neureka.framing.fluent.AxisFrame
+
getIndexAtAlias(Object) - Method in class neureka.framing.fluent.AxisFrame
 
-
getIndexToIndexAccessPattern() - Method in interface neureka.ndim.config.NDConfiguration
+
getIndexToIndexAccessPattern() - Method in interface neureka.ndim.config.NDConfiguration
 
-
getInt(cl_device_id, int) - Static method in class neureka.devices.opencl.OpenCLDevice.Query
+
getInt(cl_device_id, int) - Static method in class neureka.devices.opencl.OpenCLDevice.Query
Returns the value of the device info parameter with the given name
-
getInts(cl_device_id, int, int) - Static method in class neureka.devices.opencl.OpenCLDevice.Query
+
getInts(cl_device_id, int, int) - Static method in class neureka.devices.opencl.OpenCLDevice.Query
Returns the values of the device info parameter with the given name
-
getIsAutoConvertingExternalDataToJVMTypes() - Method in class neureka.Neureka.Settings.DType
+
getIsAutoConvertingExternalDataToJVMTypes() - Method in class neureka.Neureka.Settings.DType
This flag will determine if foreign data types will be converted into the next best fit (in terms of bits) or if they should be converted into something that does not mess with the representation of the data.
-
getIsCellBound() - Method in class neureka.view.NDPrintSettings
+
getIsCellBound() - Method in class neureka.view.NDPrintSettings
 
-
getIsDifferentiable() - Method in class neureka.backend.api.template.operations.OperationBuilder
+
getIsDifferentiable() - Method in class neureka.backend.api.template.operations.OperationBuilder
 
-
getIsIndexer() - Method in class neureka.backend.api.template.operations.OperationBuilder
+
getIsIndexer() - Method in class neureka.backend.api.template.operations.OperationBuilder
 
-
getIsInline() - Method in class neureka.backend.api.template.operations.OperationBuilder
+
getIsInline() - Method in class neureka.backend.api.template.operations.OperationBuilder
 
-
getIsLegacy() - Method in class neureka.view.NDPrintSettings
+
getIsLegacy() - Method in class neureka.view.NDPrintSettings
This flag determines the usage of bracket types, where "[1x3]:(1, 2, 3)" would be the legacy version of "(1x3):[1, 2, 3]".
-
getIsMultiline() - Method in class neureka.view.NDPrintSettings
+
getIsMultiline() - Method in class neureka.view.NDPrintSettings
 
-
getIsOperator() - Method in class neureka.backend.api.template.operations.OperationBuilder
+
getIsOperator() - Method in class neureka.backend.api.template.operations.OperationBuilder
 
-
getIsScientific() - Method in class neureka.view.NDPrintSettings
+
getIsScientific() - Method in class neureka.view.NDPrintSettings
 
-
getItem() - Method in interface neureka.Nda
+
getItem() - Method in interface neureka.Nda
-
Equivalent to the #item(0) and Nda.item().
+
Equivalent to the #item(0) and Nda.item().
-
getItems() - Method in interface neureka.Nda
+
getItems() - Method in interface neureka.Nda
-
A more verbose version of the Nda.items() method (best used by JVM languages with property support).
+
A more verbose version of the Nda.items() method (best used by JVM languages with property support).
-
getItemsAs(Class<A>) - Method in interface neureka.Nda
+
getItemsAs(Class<A>) - Method in interface neureka.Nda
Use this to get the items of this nd-array as a primitive array of the specified type.
-
getItemType() - Method in interface neureka.Nda
+
getItemType() - Method in interface neureka.Nda
 
-
getItemTypeClass() - Method in class neureka.dtype.DataType
+
getItemTypeClass() - Method in class neureka.dtype.DataType
 
-
getKernel(String) - Method in class neureka.devices.opencl.OpenCLDevice
+
getKernel(ExecutionCall<OpenCLDevice>) - Method in class neureka.devices.opencl.OpenCLDevice
 
-
getKernel(String) - Method in class neureka.devices.opencl.OpenCLPlatform
+
getKernel(String) - Method in class neureka.devices.opencl.OpenCLDevice
 
-
getKernel(ExecutionCall<OpenCLDevice>) - Method in class neureka.devices.opencl.OpenCLDevice
+
getKernel(String) - Method in class neureka.devices.opencl.OpenCLPlatform
 
-
getKernelCode() - Method in class neureka.backend.main.implementations.ParsedCLImplementation
+
getKernelCode() - Method in class neureka.backend.main.implementations.ParsedCLImplementation
 
-
getKernelCode() - Method in class neureka.backend.main.implementations.SimpleCLImplementation
+
getKernelCode() - Method in class neureka.backend.main.implementations.SimpleCLImplementation
 
-
getKernelCode() - Method in interface neureka.devices.opencl.StaticKernelSource
+
getKernelCode() - Method in interface neureka.devices.opencl.StaticKernelSource
 
-
getKernelFor(ExecutionCall<OpenCLDevice>) - Method in class neureka.backend.main.implementations.ParsedCLImplementation
+
getKernelFor(ExecutionCall<OpenCLDevice>) - Method in class neureka.backend.main.implementations.ParsedCLImplementation
 
-
getKernelFor(ExecutionCall<OpenCLDevice>) - Method in class neureka.backend.main.implementations.SimpleCLImplementation
+
getKernelFor(ExecutionCall<OpenCLDevice>) - Method in class neureka.backend.main.implementations.SimpleCLImplementation
 
-
getKernelFor(ExecutionCall<OpenCLDevice>) - Method in interface neureka.devices.opencl.KernelSource
+
getKernelFor(ExecutionCall<OpenCLDevice>) - Method in interface neureka.devices.opencl.KernelSource
 
-
getLabel() - Method in class neureka.framing.NDFrame
+
getLabel() - Method in class neureka.framing.NDFrame
 
-
getLabel() - Method in interface neureka.Nda
+
getLabel() - Method in interface neureka.Nda
A nd-array can have a label.
-
getLayout() - Method in interface neureka.ndim.config.NDConfiguration
+
getLayout() - Method in interface neureka.ndim.config.NDConfiguration
The layout of most tensors is either row major or column major.
-
getLearningRate() - Method in class neureka.optimization.implementations.ADAM
-
 
-
getLn() - Method in class neureka.math.Functions
+
getLearningRate() - Method in class neureka.optimization.implementations.ADAM
 
-
getLoadable() - Method in class neureka.devices.file.FileDevice
+
getLn() - Method in class neureka.math.Functions
 
-
getLoaded() - Method in class neureka.devices.file.FileDevice
+
getLoadable() - Method in class neureka.devices.file.FileDevice
 
-
getLoader() - Method in interface neureka.backend.api.BackendExtension
+
getLoaded() - Method in class neureka.devices.file.FileDevice
 
-
getLoader() - Method in class neureka.backend.cpu.CPUBackend
+
getLoader() - Method in interface neureka.backend.api.BackendExtension
 
-
getLoader() - Method in class neureka.backend.ocl.CLBackend
+
getLoader() - Method in class neureka.backend.cpu.CPUBackend
 
-
getLocation() - Method in class neureka.devices.file.IDXHandle
+
getLoader() - Method in class neureka.backend.ocl.CLBackend
 
-
getLocation() - Method in interface neureka.devices.file.FileHandle
+
getLocation() - Method in interface neureka.devices.file.FileHandle
 
-
getLog10() - Method in class neureka.math.Functions
+
getLog10() - Method in class neureka.math.Functions
 
-
getLong(cl_device_id, int) - Static method in class neureka.devices.opencl.OpenCLDevice.Query
+
getLong(cl_device_id, int) - Static method in class neureka.devices.opencl.OpenCLDevice.Query
Returns the value of the device info parameter with the given name
-
getLongs(int, ByteBuffer, long[]) - Static method in class neureka.devices.opencl.OpenCLDevice.Query
-
 
-
getLongs(cl_device_id, int, int) - Static method in class neureka.devices.opencl.OpenCLDevice.Query
+
getLongs(cl_device_id, int, int) - Static method in class neureka.devices.opencl.OpenCLDevice.Query
Returns the values of the device info parameter with the given name
-
getMatMul() - Method in class neureka.math.Functions
+
getLongs(int, ByteBuffer, long[]) - Static method in class neureka.devices.opencl.OpenCLDevice.Query
+
 
+
getMatMul() - Method in class neureka.math.Functions
 
-
getMax() - Method in class neureka.math.Functions
+
getMax() - Method in class neureka.math.Functions
 
-
getMemory() - Static method in class neureka.devices.host.machine.ConcreteMachine
+
getMemory() - Static method in class neureka.devices.host.machine.ConcreteMachine
 
-
getMin() - Method in class neureka.math.Functions
+
getMin() - Method in class neureka.math.Functions
 
-
getMinus() - Method in class neureka.math.Functions
+
getMinus() - Method in class neureka.math.Functions
 
-
getMinusAssign() - Method in class neureka.math.Functions
+
getMinusAssign() - Method in class neureka.math.Functions
 
-
getMod() - Method in class neureka.math.Functions
+
getMod() - Method in class neureka.math.Functions
 
-
getModAssign() - Method in class neureka.math.Functions
+
getModAssign() - Method in class neureka.math.Functions
 
-
getMode() - Method in class neureka.autograd.GraphNode
+
getMode() - Method in class neureka.autograd.GraphNode
This is the getter for an important GraphNode property which holds the auto-differentiation mode used by this instance to decide if a given error should be forward propagated, backward propagated, or not propagated at all.
-
getMomentum() - Method in class neureka.optimization.implementations.ADAM
+
getMomentum() - Method in class neureka.optimization.implementations.ADAM
 
-
getMul() - Method in class neureka.math.Functions
+
getMul() - Method in class neureka.math.Functions
 
-
getMulAssign() - Method in class neureka.math.Functions
+
getMulAssign() - Method in class neureka.math.Functions
 
-
getMut() - Method in interface neureka.Nda
+
getMut() - Method in interface neureka.Nda
This method exposes an API for mutating the state of this tensor.
-
getMut() - Method in interface neureka.Tensor
+
getMut() - Method in interface neureka.Tensor
This method exposes an API for mutating the state of this tensor.
-
getName() - Method in interface neureka.backend.api.Algorithm
+
getName() - Method in interface neureka.backend.api.Algorithm
The name of an Algorithm may be used for OpenCL kernel compilation or simply for debugging purposes to identify which type of algorithm is being executed at any given time...
-
getName() - Method in class neureka.backend.main.algorithms.DotProductAlgorithm
-
-
This method returns the name of this Algorithm - which may be used as variable names in OpenCL kernels or other backends.
-
-
getName() - Method in class neureka.devices.opencl.KernelCode
+
getName() - Method in class neureka.devices.opencl.KernelCode
 
-
getNDConf() - Method in interface neureka.ndim.NDimensional
+
getNDConf() - Method in interface neureka.ndim.NDimensional
 
-
getNDPrintSettings() - Method in class neureka.Neureka.Settings.View
+
getNDPrintSettings() - Method in class neureka.Neureka.Settings.View
-
Settings for configuring how tensors should be converted to String representations.
+
Settings for configuring how tensors should be converted to String representations.
-
getNeg() - Method in class neureka.math.Functions
+
getNeg() - Method in class neureka.math.Functions
 
-
getNewInstance() - Static method in interface neureka.devices.DeviceCleaner
+
getNewInstance() - Static method in interface neureka.devices.DeviceCleaner
 
-
getNewOwner() - Method in interface neureka.common.composition.Component.OwnerChangeRequest
+
getNewOwner() - Method in interface neureka.common.composition.Component.OwnerChangeRequest
 
-
getNumberOfColumns() - Method in class neureka.devices.file.CSVHandle
+
getNumberOfColumns() - Method in class neureka.devices.file.CSVHandle
 
-
getNumberOfRows() - Method in class neureka.devices.file.CSVHandle
+
getNumberOfRows() - Method in class neureka.devices.file.CSVHandle
 
-
getNumericTypeTarget() - Method in class neureka.dtype.custom.I8
-
 
-
getNumericTypeTarget() - Method in interface neureka.dtype.NumericType
+
getNumericTypeTarget() - Method in interface neureka.dtype.NumericType
This method returns the NumericType representation of the target type of this class.
-
getOldOwner() - Method in interface neureka.common.composition.Component.OwnerChangeRequest
-
 
-
getOperation() - Method in class neureka.backend.api.ExecutionCall
-
-
This returns the operation which will ultimately process this execution call.
-
-
getOperation() - Method in interface neureka.math.Function
-
 
-
getOperation() - Method in class neureka.math.implementations.FunctionConstant
-
 
-
getOperation() - Method in class neureka.math.implementations.FunctionInput
-
 
-
getOperation() - Method in class neureka.math.implementations.FunctionNode
+
getOldOwner() - Method in interface neureka.common.composition.Component.OwnerChangeRequest
 
-
getOperation() - Method in class neureka.math.implementations.FunctionVariable
-
 
-
getOperation(int) - Method in class neureka.backend.api.BackendContext
+
getOperation(int) - Method in class neureka.backend.api.BackendContext
This method queries the operations in this BackendContext by a provided index integer targeting an entry in the list of Operation implementation instances sitting in this execution context.
-
getOperation(String) - Method in class neureka.backend.api.BackendContext
+
getOperation(String) - Method in class neureka.backend.api.BackendContext
This method queries the operations in this BackendContext by a provided identifier which has to match the name of an existing operation.
-
getOperationIdentidier() - Method in interface neureka.backend.api.ini.LoadingContext
+
getOperation() - Method in class neureka.backend.api.ExecutionCall
+
+
This returns the operation which will ultimately process this execution call.
+
+
getOperation() - Method in interface neureka.math.Function
+
 
+
getOperation() - Method in class neureka.math.implementations.FunctionConstant
+
 
+
getOperation() - Method in class neureka.math.implementations.FunctionInput
+
 
+
getOperation() - Method in class neureka.math.implementations.FunctionNode
+
 
+
getOperation() - Method in class neureka.math.implementations.FunctionVariable
 
-
getOperationLookupMap() - Method in class neureka.backend.api.BackendContext
+
getOperationIdentidier() - Method in interface neureka.backend.api.ini.LoadingContext
+
 
+
getOperationLookupMap() - Method in class neureka.backend.api.BackendContext
-
This method returns an unmodifiable view of the mapping between the Operation.getIdentifier() / Operation.getOperator() properties and the Operation implementation instances to which they belong.
+
This method returns an unmodifiable view of the mapping between the Operation.getIdentifier() / Operation.getOperator() properties and the Operation implementation instances to which they belong.
-
getOperations() - Method in class neureka.backend.api.BackendContext
+
getOperations() - Method in class neureka.backend.api.BackendContext
This method returns an unmodifiable view of the list of Operation implementation instances managed by this context.
-
getOperator() - Method in interface neureka.backend.api.Operation
+
getOperator() - Method in interface neureka.backend.api.Operation
 
-
getOperator() - Method in class neureka.backend.api.template.operations.AbstractOperation
+
getOperator() - Method in class neureka.backend.api.template.operations.AbstractOperation
 
-
getOperator() - Method in class neureka.backend.api.template.operations.OperationBuilder
+
getOperator() - Method in class neureka.backend.api.template.operations.OperationBuilder
 
-
getOrNull() - Method in interface neureka.Data
+
getOrNull() - Method in interface neureka.Data
This returns the underlying raw data object of a nd-array or tensor of a backend specific type (e.g.
-
getOrNull() - Method in class neureka.devices.AbstractDeviceData
+
getOrNull() - Method in class neureka.devices.AbstractDeviceData
 
-
getParent() - Method in class neureka.framing.Relation
+
getParent() - Method in class neureka.framing.Relation
 
-
getParents() - Method in class neureka.autograd.GraphNode
+
getParents() - Method in class neureka.autograd.GraphNode
 
-
getPayload() - Method in class neureka.autograd.GraphNode
+
getPayload() - Method in class neureka.autograd.GraphNode
The value of a graph node is the tensor to which it belongs (is a component of).
-
getPayloadDataType() - Method in class neureka.autograd.GraphNode
+
getPayloadDataType() - Method in class neureka.autograd.GraphNode
 
-
getPayloadReferenceVersion() - Method in class neureka.autograd.GraphNode
+
getPayloadReferenceVersion() - Method in class neureka.autograd.GraphNode
This variable holds a copy of the version of the payload tensor recorded when this GraphNode instance is instantiated.
-
getPayloadShape() - Method in class neureka.autograd.GraphNode
+
getPayloadShape() - Method in class neureka.autograd.GraphNode
Note: This method will never return null even if the actual payload tensor was garbage collected.
-
getPendingError() - Method in class neureka.autograd.GraphNode
+
getPendingError() - Method in class neureka.autograd.GraphNode
Used by the Just-In-Time back-prop component.
-
getPermute() - Method in class neureka.math.Functions
+
getPermute() - Method in class neureka.math.Functions
 
-
getPermuteRelationFor(Tensor<V>) - Method in class neureka.framing.Relation
+
getPermuteRelationFor(Tensor<V>) - Method in class neureka.framing.Relation
When creating permuted versions of slices, there must be a translation between the shape configuration of such a new slice and that of the original parent tensor from which both slices have been derived.
-
getPlatform() - Method in class neureka.devices.opencl.OpenCLDevice
+
getPlatform() - Method in class neureka.devices.opencl.OpenCLDevice
 
-
getPlatforms() - Method in class neureka.backend.ocl.CLBackend
+
getPlatforms() - Method in class neureka.backend.ocl.CLBackend
 
-
getPlus() - Method in class neureka.math.Functions
+
getPlus() - Method in class neureka.math.Functions
 
-
getPlusAssign() - Method in class neureka.math.Functions
+
getPlusAssign() - Method in class neureka.math.Functions
 
-
getPostfix() - Method in class neureka.view.NDPrintSettings
+
getPostfix() - Method in class neureka.view.NDPrintSettings
 
-
getPow() - Method in class neureka.math.Functions
+
getPow() - Method in class neureka.math.Functions
 
-
getPowAssign() - Method in class neureka.math.Functions
+
getPowAssign() - Method in class neureka.math.Functions
 
-
getPrefix() - Method in class neureka.view.NDPrintSettings
+
getPrefix() - Method in class neureka.view.NDPrintSettings
 
-
getQuad() - Method in class neureka.math.Functions
+
getQuad() - Method in class neureka.math.Functions
 
-
getRandom() - Method in class neureka.math.Functions
+
getRandom() - Method in class neureka.math.Functions
 
-
getRank() - Method in interface neureka.ndim.NDimensional
+
getRank() - Method in interface neureka.ndim.NDimensional
 
-
getRawData() - Method in interface neureka.Nda
+
getRawData() - Method in interface neureka.Nda
This returns an unprocessed version of the underlying data of this nd-array.
-
getRawItems() - Method in interface neureka.Nda
+
getRawItems() - Method in interface neureka.Nda
 
-
getRelayout() - Method in class neureka.math.Functions
+
getRelayout() - Method in class neureka.math.Functions
 
-
getRelu() - Method in class neureka.math.Functions
+
getRelu() - Method in class neureka.math.Functions
 
-
getRepresentativeItemClass() - Method in interface neureka.Tensor
+
getRepresentativeItemClass() - Method in interface neureka.Tensor
-
The Class returned by this method is the representative Class of the value items of a concrete AbstractNda but not necessarily the actual Class of a given value item, this is especially true for numeric types, which are represented by implementations of the NumericType interface.
+
The Class returned by this method is the representative Class of the value items of a concrete AbstractNda but not necessarily the actual Class of a given value item, this is especially true for numeric types, which are represented by implementations of the NumericType interface.
-
getRepresentativeType() - Method in class neureka.dtype.DataType
+
getRepresentativeType() - Method in class neureka.dtype.DataType
 
-
getReshape() - Method in class neureka.math.Functions
+
getReshape() - Method in class neureka.math.Functions
 
-
getResult() - Method in class neureka.backend.main.memory.MemValidator
+
getResult() - Method in class neureka.backend.main.memory.MemValidator
 
-
getRowLabels() - Method in class neureka.devices.file.CSVHandle
+
getRowLabels() - Method in class neureka.devices.file.CSVHandle
 
-
getRowLimit() - Method in class neureka.view.NDPrintSettings
+
getRowLimit() - Method in class neureka.view.NDPrintSettings
Very large tensors with a rank larger than 1 might take a lot of vertical space when converted to a String.
-
getSelu() - Method in class neureka.math.Functions
+
getSelu() - Method in class neureka.math.Functions
The Scaled Exponential Linear Unit, or SELU, is an activation function that induces self-normalizing properties.
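As a rough standalone sketch of the formula behind this entry (not Neureka's kernel code; lambda and alpha below are the commonly cited self-normalizing constants):

    static double selu(double x) {
        final double lambda = 1.0507009873554805; // output scale
        final double alpha  = 1.6732632423543772; // negative-branch saturation
        return x > 0 ? lambda * x : lambda * alpha * Math.expm1(x); // expm1(x) = e^x - 1
    }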
-
getSettings() - Method in class neureka.backend.ocl.CLBackend
+
getSettings() - Method in class neureka.backend.ocl.CLBackend
 
-
getShape() - Method in class neureka.common.utility.ListReader.Result
+
getShape() - Method in class neureka.common.utility.ListReader.Result
 
-
getShape() - Method in class neureka.devices.file.CSVHandle
+
getShape() - Method in class neureka.devices.file.CSVHandle
 
-
getShape() - Method in interface neureka.devices.file.FileHandle
+
getShape() - Method in interface neureka.devices.file.FileHandle
 
-
getShape() - Method in class neureka.devices.file.IDXHandle
+
getShape() - Method in class neureka.devices.file.IDXHandle
 
-
getShape() - Method in interface neureka.ndim.NDConstructor
+
getShape() - Method in interface neureka.ndim.NDConstructor
 
-
getShape() - Method in interface neureka.ndim.NDimensional
+
getShape() - Method in interface neureka.ndim.NDimensional
 
-
getSigmoid() - Method in class neureka.math.Functions
+
getSigmoid() - Method in class neureka.math.Functions
 
-
getSilu() - Method in class neureka.math.Functions
+
getSilu() - Method in class neureka.math.Functions
The SiLu activation function, also known as the swish function, is defined as x * sigmoid(x).
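A plain-Java sketch of that definition (illustrative only, not the library's implementation):

    static double silu(double x) {
        double sigmoid = 1.0 / (1.0 + Math.exp(-x)); // logistic sigmoid
        return x * sigmoid;                          // swish: x * sigmoid(x)
    }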
-
getSin() - Method in class neureka.math.Functions
-
 
-
getSize() - Method in interface neureka.ndim.NDConstructor
+
getSin() - Method in class neureka.math.Functions
 
-
getSize() - Method in interface neureka.ndim.NDimensional
-
 
-
getSize(cl_device_id, int) - Static method in class neureka.devices.opencl.OpenCLDevice.Query
+
getSize(cl_device_id, int) - Static method in class neureka.devices.opencl.OpenCLDevice.Query
Returns the value of the device info parameter with the given name
-
getSizes(cl_device_id, int, int) - Static method in class neureka.devices.opencl.OpenCLDevice.Query
+
getSize() - Method in interface neureka.ndim.NDConstructor
+
 
+
getSize() - Method in interface neureka.ndim.NDimensional
+
 
+
getSizes(cl_device_id, int, int) - Static method in class neureka.devices.opencl.OpenCLDevice.Query
Returns the values of the device info parameter with the given name
-
getSoftplus() - Method in class neureka.math.Functions
+
getSoftplus() - Method in class neureka.math.Functions
SoftPlus is a smooth approximation to the ReLU function and can be used to constrain the output of a machine to always be positive.
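The usual softplus formula behind this description is ln(1 + e^x); a minimal sketch (not library code):

    static double softplus(double x) {
        return Math.log1p(Math.exp(x)); // smooth, strictly positive, approaches max(0, x) for large |x|
    }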
-
getSoftsign() - Method in class neureka.math.Functions
+
getSoftsign() - Method in class neureka.math.Functions
The softsign function, defined as x / ( 1 + Math.abs( x ) ), is a computationally cheap 0 centered activation function which rescales the inputs between -1 and 1, very much like the Tanh function.
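Again as a standalone illustration of the stated formula:

    static double softsign(double x) {
        return x / (1.0 + Math.abs(x)); // zero-centered, output strictly between -1 and 1
    }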
-
getSqrt() - Method in class neureka.math.Functions
+
getSqrt() - Method in class neureka.math.Functions
 
-
getState() - Method in class neureka.framing.NDFrame
+
getState() - Method in class neureka.framing.NDFrame
 
-
getString(cl_device_id, int) - Static method in class neureka.devices.opencl.OpenCLDevice.Query
+
getString(cl_device_id, int) - Static method in class neureka.devices.opencl.OpenCLDevice.Query
Returns the value of the device info parameter with the given name
-
getString(cl_platform_id, int) - Static method in class neureka.devices.opencl.OpenCLDevice.Query
+
getString(cl_platform_id, int) - Static method in class neureka.devices.opencl.OpenCLDevice.Query
Returns the value of the platform info parameter with the given name
-
getStringifier() - Method in class neureka.backend.api.template.operations.OperationBuilder
+
getStringifier() - Method in class neureka.backend.api.template.operations.OperationBuilder
 
-
getSubFunctions() - Method in interface neureka.math.Function
+
getSubFunctions() - Method in interface neureka.math.Function
 
-
getSubFunctions() - Method in class neureka.math.implementations.FunctionConstant
+
getSubFunctions() - Method in class neureka.math.implementations.FunctionConstant
 
-
getSubFunctions() - Method in class neureka.math.implementations.FunctionInput
+
getSubFunctions() - Method in class neureka.math.implementations.FunctionInput
 
-
getSubFunctions() - Method in class neureka.math.implementations.FunctionNode
+
getSubFunctions() - Method in class neureka.math.implementations.FunctionNode
 
-
getSubFunctions() - Method in class neureka.math.implementations.FunctionVariable
+
getSubFunctions() - Method in class neureka.math.implementations.FunctionVariable
 
-
getSum() - Method in class neureka.math.Functions
+
getSum() - Method in class neureka.math.Functions
 
-
getT() - Method in interface neureka.Tensor
+
getT() - Method in interface neureka.Tensor
A method which returns a new Tensor instance which is a transposed twin of this instance.
This is an alternative to the functionally identical Tensor.T() method.
-
getTanh() - Method in class neureka.math.Functions
+
getTanh() - Method in class neureka.math.Functions
 
-
getter(At<Object, Get<GetType>>) - Method in class neureka.framing.fluent.AxisFrame.Builder
+
getter(At<Object, Get<GetType>>) - Method in class neureka.framing.fluent.AxisFrame.Builder
 
-
getThreads() - Static method in class neureka.devices.host.machine.ConcreteMachine
+
getThreads() - Static method in class neureka.devices.host.machine.ConcreteMachine
 
-
getTime() - Method in class neureka.optimization.implementations.ADAM
+
getTime() - Method in class neureka.optimization.implementations.ADAM
 
-
getTotalNumberOfDevices() - Method in class neureka.backend.ocl.CLBackend
+
getTotalNumberOfDevices() - Method in class neureka.backend.ocl.CLBackend
 
-
getTotalSize() - Method in class neureka.devices.file.CSVHandle
+
getTotalSize() - Method in class neureka.devices.file.CSVHandle
 
-
getTotalSize() - Method in interface neureka.devices.file.FileHandle
+
getTotalSize() - Method in interface neureka.devices.file.FileHandle
This method returns the number of bytes which are used to store the tensor in the file whose access is being managed by an implementation of the FileHandle interface.
-
getTotalSize() - Method in class neureka.devices.file.IDXHandle
+
getTotalSize() - Method in class neureka.devices.file.IDXHandle
 
-
getTraits() - Method in interface neureka.ndim.config.NDConfiguration
+
getTraits() - Method in interface neureka.ndim.config.NDConfiguration
 
-
getTranspose2D() - Method in class neureka.math.Functions
+
getTranspose2D() - Method in class neureka.math.Functions
 
-
getType() - Method in class neureka.common.utility.ListReader.Result
+
getType() - Method in class neureka.common.utility.ListReader.Result
 
-
getTypeClassInstance(Class<T>) - Method in class neureka.dtype.DataType
+
getTypeClassInstance(Class<T>) - Method in class neureka.dtype.DataType
 
-
getValOf(Class<T>) - Method in class neureka.backend.api.Call
+
getValOf(Class<T>) - Method in class neureka.backend.api.Call
 
-
getValue() - Method in class neureka.common.utility.Cache.LazyEntry
+
getValue() - Method in class neureka.common.utility.Cache.LazyEntry
 
-
getValueSize() - Method in class neureka.devices.file.CSVHandle
+
getValueSize() - Method in class neureka.devices.file.CSVHandle
 
-
getValueSize() - Method in interface neureka.devices.file.FileHandle
+
getValueSize() - Method in interface neureka.devices.file.FileHandle
This method returns the size of the value which is stored in the tensor of the file which is managed by this FileHandle.
-
getValueSize() - Method in class neureka.devices.file.IDXHandle
+
getValueSize() - Method in class neureka.devices.file.IDXHandle
 
-
getVelocity() - Method in class neureka.optimization.implementations.ADAM
+
getVelocity() - Method in class neureka.optimization.implementations.ADAM
 
-
getVersion() - Method in interface neureka.Tensor
+
getVersion() - Method in interface neureka.Tensor
The version number is tracking how often this tensor has been mutated.
-
globalMemSize() - Method in class neureka.devices.opencl.OpenCLDevice
-
 
-
GOOD - Static variable in interface neureka.backend.api.fun.SuitabilityPredicate
+
globalMemSize() - Method in class neureka.devices.opencl.OpenCLDevice
 
-
goodIfAll(Call.TensorCompare) - Method in class neureka.backend.api.Call.Validator.Estimator
+
GOOD - Static variable in interface neureka.backend.api.fun.SuitabilityPredicate
 
-
goodIfAll(Call.TensorCondition) - Method in class neureka.backend.api.Call.Validator.Estimator
+
goodIfAll(Call.TensorCondition) - Method in class neureka.backend.api.Call.Validator.Estimator
 
-
goodIfAny(Call.TensorCondition) - Method in class neureka.backend.api.Call.Validator.Estimator
+
goodIfAll(Call.TensorCompare) - Method in class neureka.backend.api.Call.Validator.Estimator
 
-
goodIfAnyNonNull(Call.TensorCondition) - Method in class neureka.backend.api.Call.Validator.Estimator
+
goodIfAny(Call.TensorCondition) - Method in class neureka.backend.api.Call.Validator.Estimator
 
-
GPU - Enum constant in enum class neureka.devices.opencl.OpenCLDevice.Type
+
goodIfAnyNonNull(Call.TensorCondition) - Method in class neureka.backend.api.Call.Validator.Estimator
 
-
gradient() - Method in interface neureka.Tensor
+
gradient() - Method in interface neureka.Tensor
-
This is a functionally identical alternative to the Tensor.getGradient() method.
+
This is a functionally identical alternative to the Tensor.getGradient() method.
-
gradientApplyRequested() - Method in interface neureka.Tensor
+
gradientApplyRequested() - Method in interface neureka.Tensor
This flag works alongside two autograd features which can be enabled inside the library settings.
-
graphNode() - Method in interface neureka.Tensor
-
-
This is a functionally identical alternative to Tensor.getGraphNode().
-
-
GraphNode<V> - Class in neureka.autograd
+
GraphNode<V> - Class in neureka.autograd
Instances of the GraphNode class are components of tensors (Tensor instances) which model and record computations / operations between them.
-
GraphNode(Function, ExecutionCall<Device<?>>, Supplier<Result>) - Constructor for class neureka.autograd.GraphNode
+
GraphNode(Function, ExecutionCall<Device<?>>, Supplier<Result>) - Constructor for class neureka.autograd.GraphNode
 
-
GraphNode.Print - Enum Class in neureka.autograd
-
 
-
GRAY_BYTE - Enum constant in enum class neureka.Tensor.ImageType
-
 
-
GRAY_USHORT - Enum constant in enum class neureka.Tensor.ImageType
+
graphNode() - Method in interface neureka.Tensor
+
+
This is a functionally identical alternative to Tensor.getGraphNode().
+
+
GraphNode.Print - Enum in neureka.autograd
 
-
groupBy(String, String, String, String) - Static method in class neureka.math.parsing.ParseUtil
+
groupBy(String, String, String, String) - Static method in class neureka.math.parsing.ParseUtil
 

H

-
Hardware - Class in neureka.devices.host.machine
+
Hardware - Class in neureka.devices.host.machine
This models the cache levels and threads of a CPU using an array of BasicMachine instances, where each entry represents a memory level.
-
Hardware(String, BasicMachine[]) - Constructor for class neureka.devices.host.machine.Hardware
+
Hardware(String, BasicMachine[]) - Constructor for class neureka.devices.host.machine.Hardware
new BasicMachine[] { SYSTEM, L3, L2, L1 } or new BasicMachine[] { SYSTEM, L2, L1 } or in worst case new BasicMachine[] { SYSTEM, L1 }
-
has(Class<E>) - Method in class neureka.backend.api.BackendContext
+
has(GraphNode<V>) - Method in class neureka.autograd.GraphNode
+
+
This method checks if a given graph node is an AD target of this node.
+
+
has(Class<E>) - Method in class neureka.backend.api.BackendContext
Checks if this context has an instance of the provided BackendExtension type.
-
has(Class<T>) - Method in class neureka.common.composition.AbstractComponentOwner
+
has(Class<T>) - Method in class neureka.common.composition.AbstractComponentOwner
-
This method checks if a component identified by the passed Class instance is present inside the stored component collection.
+
This method checks if a component identified by the passed Class instance is present inside the stored component collection.
-
has(Class<T>) - Method in interface neureka.common.composition.ComponentOwner
+
has(Class<T>) - Method in interface neureka.common.composition.ComponentOwner
Use this to check if a component of the specified component type class is present.
-
has(String) - Method in class neureka.devices.opencl.KernelCache
-
 
-
has(String, boolean) - Method in class neureka.math.FunctionCache
-
 
-
has(GraphNode<V>) - Method in class neureka.autograd.GraphNode
-
-
This method checks if a given graph node is an AD target of this node.
-
-
has(NDTrait) - Method in interface neureka.ndim.config.NDConfiguration
+
has(O) - Method in class neureka.common.utility.Cache
 
-
has(Tensor<T>) - Method in class neureka.devices.AbstractBaseDevice
+
has(Tensor<T>) - Method in class neureka.devices.AbstractBaseDevice
This method checks if the passed tensor is stored on this Device instance.
-
has(Tensor<T>) - Method in interface neureka.devices.Device
+
has(Tensor<T>) - Method in interface neureka.devices.Device
Use this to check if a tensor is stored on this Device!

-
has(Tensor<T>) - Method in class neureka.devices.file.FileDevice
+
has(Tensor<T>) - Method in class neureka.devices.file.FileDevice
 
-
has(O) - Method in class neureka.common.utility.Cache
+
has(String) - Method in class neureka.devices.opencl.KernelCache
 
-
has(cl_device_id) - Method in class neureka.devices.opencl.OpenCLPlatform
+
has(cl_device_id) - Method in class neureka.devices.opencl.OpenCLPlatform
 
-
hasAdHocKernel(String) - Method in class neureka.devices.opencl.OpenCLDevice
+
has(String, boolean) - Method in class neureka.math.FunctionCache
 
-
hasChildren() - Method in class neureka.framing.Relation
+
has(NDTrait) - Method in interface neureka.ndim.config.NDConfiguration
 
-
hasDerivatives() - Method in class neureka.autograd.GraphNode
+
hasAdHocKernel(String) - Method in class neureka.devices.opencl.OpenCLDevice
 
-
hasGradient() - Method in interface neureka.Tensor
+
hasChildren() - Method in class neureka.framing.Relation
+
 
+
hasDerivatives() - Method in class neureka.autograd.GraphNode
+
 
+
hasGradient() - Method in interface neureka.Tensor
Tensors can be components of other tensors, which makes them implicitly their gradients.
-
hashCode() - Method in class neureka.common.utility.Cache.LazyEntry
+
hashCode() - Method in class neureka.common.utility.Cache.LazyEntry
 
-
hashCode() - Method in class neureka.devices.host.machine.BasicMachine
+
hashCode() - Method in class neureka.devices.host.machine.BasicMachine
 
-
hashCode() - Method in class neureka.devices.host.machine.CommonMachine
+
hashCode() - Method in class neureka.devices.host.machine.CommonMachine
 
-
hashCode() - Method in class neureka.devices.host.machine.ConcreteMachine
+
hashCode() - Method in class neureka.devices.host.machine.ConcreteMachine
 
-
hashCode() - Method in class neureka.devices.host.machine.Hardware
+
hashCode() - Method in class neureka.devices.host.machine.Hardware
 
-
hashCode() - Method in class neureka.devices.opencl.KernelCode
+
hashCode() - Method in class neureka.devices.opencl.KernelCode
 
-
hashCode() - Method in class neureka.dtype.DataType
+
hashCode() - Method in class neureka.dtype.DataType
 
-
hashCode() - Method in class neureka.ndim.config.AbstractNDC
+
hashCode() - Method in class neureka.ndim.config.AbstractNDC
 
-
hashCode() - Method in interface neureka.ndim.config.NDConfiguration
+
hashCode() - Method in interface neureka.ndim.config.NDConfiguration
 
-
hasImplementationFor(D) - Method in interface neureka.backend.api.DeviceAlgorithm
+
hasImplementationFor(D) - Method in interface neureka.backend.api.DeviceAlgorithm
 
-
hasKernel(String) - Method in class neureka.devices.opencl.OpenCLPlatform
+
hasKernel(String) - Method in class neureka.devices.opencl.OpenCLPlatform
 
-
hasLabelsForAxis(Object) - Method in class neureka.framing.NDFrame
+
hasLabelsForAxis(Object) - Method in class neureka.framing.NDFrame
 
-
hasOperation(String) - Method in class neureka.backend.api.BackendContext
+
hasOperation(Operation) - Method in class neureka.backend.api.BackendContext
 
-
hasOperation(Operation) - Method in class neureka.backend.api.BackendContext
+
hasOperation(String) - Method in class neureka.backend.api.BackendContext
 
-
hasParent() - Method in class neureka.framing.Relation
+
hasParent() - Method in class neureka.framing.Relation
 
-
holderArrayType() - Method in class neureka.dtype.custom.F32
+
holderArrayType() - Method in class neureka.dtype.custom.F32
 
-
holderArrayType() - Method in class neureka.dtype.custom.F64
+
holderArrayType() - Method in class neureka.dtype.custom.F64
 
-
holderArrayType() - Method in class neureka.dtype.custom.I16
+
holderArrayType() - Method in class neureka.dtype.custom.I16
 
-
holderArrayType() - Method in class neureka.dtype.custom.I32
+
holderArrayType() - Method in class neureka.dtype.custom.I32
 
-
holderArrayType() - Method in class neureka.dtype.custom.I64
+
holderArrayType() - Method in class neureka.dtype.custom.I64
 
-
holderArrayType() - Method in class neureka.dtype.custom.I8
+
holderArrayType() - Method in class neureka.dtype.custom.I8
 
-
holderArrayType() - Method in class neureka.dtype.custom.UI16
+
holderArrayType() - Method in class neureka.dtype.custom.UI16
 
-
holderArrayType() - Method in class neureka.dtype.custom.UI32
+
holderArrayType() - Method in class neureka.dtype.custom.UI32
 
-
holderArrayType() - Method in class neureka.dtype.custom.UI64
+
holderArrayType() - Method in class neureka.dtype.custom.UI64
 
-
holderArrayType() - Method in class neureka.dtype.custom.UI8
+
holderArrayType() - Method in class neureka.dtype.custom.UI8
 
-
holderArrayType() - Method in interface neureka.dtype.NumericType
+
holderArrayType() - Method in interface neureka.dtype.NumericType
The holder array type is the JVM type which can hold the data but not necessarily represent it (int[] can't represent uint[]).
-
holderType() - Method in class neureka.dtype.custom.F32
+
holderType() - Method in class neureka.dtype.custom.F32
 
-
holderType() - Method in class neureka.dtype.custom.F64
+
holderType() - Method in class neureka.dtype.custom.F64
 
-
holderType() - Method in class neureka.dtype.custom.I16
+
holderType() - Method in class neureka.dtype.custom.I16
 
-
holderType() - Method in class neureka.dtype.custom.I32
+
holderType() - Method in class neureka.dtype.custom.I32
 
-
holderType() - Method in class neureka.dtype.custom.I64
+
holderType() - Method in class neureka.dtype.custom.I64
 
-
holderType() - Method in class neureka.dtype.custom.I8
+
holderType() - Method in class neureka.dtype.custom.I8
 
-
holderType() - Method in class neureka.dtype.custom.UI16
+
holderType() - Method in class neureka.dtype.custom.UI16
 
-
holderType() - Method in class neureka.dtype.custom.UI32
+
holderType() - Method in class neureka.dtype.custom.UI32
 
-
holderType() - Method in class neureka.dtype.custom.UI64
+
holderType() - Method in class neureka.dtype.custom.UI64
 
-
holderType() - Method in class neureka.dtype.custom.UI8
+
holderType() - Method in class neureka.dtype.custom.UI8
 
-
holderType() - Method in interface neureka.dtype.NumericType
+
holderType() - Method in interface neureka.dtype.NumericType
The holder type is the JVM type which can hold the data but not necessarily represent it (int can't represent uint).
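A small standalone example of the hold-versus-represent distinction (the class name is hypothetical and not part of the library), using an unsigned 32-bit value stored in a signed int:

    public class UnsignedHolderDemo {
        public static void main(String[] args) {
            int raw = -1;                         // holds the bit pattern 0xFFFFFFFF of uint 4294967295
            long represented = raw & 0xFFFFFFFFL; // widening without sign extension recovers the magnitude
            System.out.println(represented);      // prints 4294967295
        }
    }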
-
HOW_TO_INSTALL_OPENCL - Variable in enum class neureka.devices.opencl.utility.Messages.Tips
+
HOW_TO_INSTALL_OPENCL - Variable in enum neureka.devices.opencl.utility.Messages.Tips
 
-
HOW_TO_INSTALL_OPENCL_DRIVERS - Variable in enum class neureka.devices.opencl.utility.Messages.Tips
+
HOW_TO_INSTALL_OPENCL_DRIVERS - Variable in enum neureka.devices.opencl.utility.Messages.Tips
 

I

-
i() - Method in interface neureka.ndim.iterator.NDIterator
+
i() - Method in interface neureka.ndim.iterator.NDIterator
 
-
i() - Method in class neureka.ndim.iterator.types.permuted.Permuted2DCIterator
-
i() - Method in class neureka.ndim.iterator.types.permuted.Permuted3DCIterator
-
i() - Method in class neureka.ndim.iterator.types.simple.Simple1DCIterator
-
i() - Method in class neureka.ndim.iterator.types.simple.Simple2DCIterator
-
i() - Method in class neureka.ndim.iterator.types.simple.Simple3DCIterator
-
i() - Method in class neureka.ndim.iterator.types.sliced.Sliced1DCIterator
-
i() - Method in class neureka.ndim.iterator.types.sliced.Sliced2DCIterator
-
i() - Method in class neureka.ndim.iterator.types.sliced.Sliced3DCIterator
-
i() - Method in class neureka.ndim.iterator.types.sliced.SlicedNDIterator
-
i() - Method in class neureka.ndim.iterator.types.virtual.VirtualNDIterator
+
i() - Method in class neureka.ndim.iterator.types.permuted.Permuted2DCIterator
+
i() - Method in class neureka.ndim.iterator.types.permuted.Permuted3DCIterator
+
i() - Method in class neureka.ndim.iterator.types.simple.Simple1DCIterator
+
i() - Method in class neureka.ndim.iterator.types.simple.Simple2DCIterator
+
i() - Method in class neureka.ndim.iterator.types.simple.Simple3DCIterator
+
i() - Method in class neureka.ndim.iterator.types.sliced.Sliced1DCIterator
+
i() - Method in class neureka.ndim.iterator.types.sliced.Sliced2DCIterator
+
i() - Method in class neureka.ndim.iterator.types.sliced.Sliced3DCIterator
+
i() - Method in class neureka.ndim.iterator.types.sliced.SlicedNDIterator
+
i() - Method in class neureka.ndim.iterator.types.virtual.VirtualNDIterator
 
-
I16 - Class in neureka.dtype.custom
+
I16 - Class in neureka.dtype.custom
 
-
I16() - Constructor for class neureka.dtype.custom.I16
+
I16() - Constructor for class neureka.dtype.custom.I16
 
-
I32 - Class in neureka.dtype.custom
+
I32 - Class in neureka.dtype.custom
 
-
I32() - Constructor for class neureka.dtype.custom.I32
+
I32() - Constructor for class neureka.dtype.custom.I32
 
-
I64 - Class in neureka.dtype.custom
+
I64 - Class in neureka.dtype.custom
 
-
I64() - Constructor for class neureka.dtype.custom.I64
+
I64() - Constructor for class neureka.dtype.custom.I64
 
-
I8 - Class in neureka.dtype.custom
+
I8 - Class in neureka.dtype.custom
The following abstract class implements some basic logic which is applicable across all final concrete classes extending this abstract one.
-
I8() - Constructor for class neureka.dtype.custom.I8
+
I8() - Constructor for class neureka.dtype.custom.I8
 
-
IAXPY - Class in neureka.backend.main.operations.linear.internal.blas
+
IAXPY - Class in neureka.backend.main.operations.linear.internal.blas
The ?axpy routines perform a vector-vector operation defined as y := a*x + y, where a is a scalar and x and y are vectors, each with a number of elements that equals n.
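A naive loop form of that update, as a sketch only (the class above provides the actual tuned routines):

    // y := a*x + y over n elements, here for plain int arrays.
    static void axpy(int n, int a, int[] x, int[] y) {
        for (int i = 0; i < n; i++)
            y[i] += a * x[i];
    }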
-
IAXPY() - Constructor for class neureka.backend.main.operations.linear.internal.blas.IAXPY
+
IAXPY() - Constructor for class neureka.backend.main.operations.linear.internal.blas.IAXPY
 
-
id() - Method in interface neureka.backend.main.implementations.fun.api.ScalarFun
+
id() - Method in interface neureka.backend.main.implementations.fun.api.ScalarFun
 
-
id() - Method in class neureka.backend.main.implementations.fun.ScalarAbsolute
+
id() - Method in class neureka.backend.main.implementations.fun.ScalarAbsolute
 
-
id() - Method in class neureka.backend.main.implementations.fun.ScalarCbrt
+
id() - Method in class neureka.backend.main.implementations.fun.ScalarCbrt
 
-
id() - Method in class neureka.backend.main.implementations.fun.ScalarCosinus
+
id() - Method in class neureka.backend.main.implementations.fun.ScalarCosinus
 
-
id() - Method in class neureka.backend.main.implementations.fun.ScalarExp
+
id() - Method in class neureka.backend.main.implementations.fun.ScalarExp
 
-
id() - Method in class neureka.backend.main.implementations.fun.ScalarGaSU
+
id() - Method in class neureka.backend.main.implementations.fun.ScalarGaSU
 
-
id() - Method in class neureka.backend.main.implementations.fun.ScalarGaTU
+
id() - Method in class neureka.backend.main.implementations.fun.ScalarGaTU
 
-
id() - Method in class neureka.backend.main.implementations.fun.ScalarGaussian
+
id() - Method in class neureka.backend.main.implementations.fun.ScalarGaussian
 
-
id() - Method in class neureka.backend.main.implementations.fun.ScalarGaussianFast
+
id() - Method in class neureka.backend.main.implementations.fun.ScalarGaussianFast
 
-
id() - Method in class neureka.backend.main.implementations.fun.ScalarGeLU
+
id() - Method in class neureka.backend.main.implementations.fun.ScalarGeLU
 
-
id() - Method in class neureka.backend.main.implementations.fun.ScalarIdentity
+
id() - Method in class neureka.backend.main.implementations.fun.ScalarIdentity
 
-
id() - Method in class neureka.backend.main.implementations.fun.ScalarLog10
+
id() - Method in class neureka.backend.main.implementations.fun.ScalarLog10
 
-
id() - Method in class neureka.backend.main.implementations.fun.ScalarLogarithm
+
id() - Method in class neureka.backend.main.implementations.fun.ScalarLogarithm
 
-
id() - Method in class neureka.backend.main.implementations.fun.ScalarQuadratic
+
id() - Method in class neureka.backend.main.implementations.fun.ScalarQuadratic
 
-
id() - Method in class neureka.backend.main.implementations.fun.ScalarReLU
+
id() - Method in class neureka.backend.main.implementations.fun.ScalarReLU
 
-
id() - Method in class neureka.backend.main.implementations.fun.ScalarSeLU
+
id() - Method in class neureka.backend.main.implementations.fun.ScalarSeLU
 
-
id() - Method in class neureka.backend.main.implementations.fun.ScalarSigmoid
+
id() - Method in class neureka.backend.main.implementations.fun.ScalarSigmoid
 
-
id() - Method in class neureka.backend.main.implementations.fun.ScalarSiLU
+
id() - Method in class neureka.backend.main.implementations.fun.ScalarSiLU
 
-
id() - Method in class neureka.backend.main.implementations.fun.ScalarSinus
+
id() - Method in class neureka.backend.main.implementations.fun.ScalarSinus
 
-
id() - Method in class neureka.backend.main.implementations.fun.ScalarSoftplus
+
id() - Method in class neureka.backend.main.implementations.fun.ScalarSoftplus
 
-
id() - Method in class neureka.backend.main.implementations.fun.ScalarSoftsign
+
id() - Method in class neureka.backend.main.implementations.fun.ScalarSoftsign
 
-
id() - Method in class neureka.backend.main.implementations.fun.ScalarSqrt
+
id() - Method in class neureka.backend.main.implementations.fun.ScalarSqrt
 
-
id() - Method in class neureka.backend.main.implementations.fun.ScalarTanh
+
id() - Method in class neureka.backend.main.implementations.fun.ScalarTanh
 
-
id() - Method in class neureka.backend.main.implementations.fun.ScalarTanhFast
+
id() - Method in class neureka.backend.main.implementations.fun.ScalarTanhFast
 
-
identifier(String) - Method in class neureka.backend.api.template.operations.OperationBuilder
+
identifier(String) - Method in class neureka.backend.api.template.operations.OperationBuilder
 
-
Identity - Class in neureka.backend.main.operations.functions
+
IDENTITY - Static variable in interface neureka.backend.main.implementations.fun.api.ScalarFun
 
-
Identity() - Constructor for class neureka.backend.main.operations.functions.Identity
+
Identity - Class in neureka.backend.main.operations.functions
 
-
IDENTITY - Static variable in interface neureka.backend.main.implementations.fun.api.ScalarFun
+
Identity() - Constructor for class neureka.backend.main.operations.functions.Identity
 
-
IDOT - Class in neureka.backend.main.operations.linear.internal.blas
+
IDOT - Class in neureka.backend.main.operations.linear.internal.blas
The ?dot routines perform a vector-vector reduction operation defined as the sum over i of xi*yi, where xi and yi are elements of vectors x and y.
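That reduction is the ordinary dot product; a reference loop (illustrative only) would be:

    // result = x[0]*y[0] + x[1]*y[1] + ... + x[n-1]*y[n-1]
    static long dot(int n, int[] x, int[] y) {
        long sum = 0;
        for (int i = 0; i < n; i++)
            sum += (long) x[i] * y[i];
        return sum;
    }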
-
IDOT() - Constructor for class neureka.backend.main.operations.linear.internal.blas.IDOT
+
IDOT() - Constructor for class neureka.backend.main.operations.linear.internal.blas.IDOT
 
-
IDXHandle - Class in neureka.devices.file
+
IDXHandle - Class in neureka.devices.file
This class is one of many extensions of the AbstractFileHandle which is therefore ultimately an implementation of the FileHandle interface.
-
IDXHandle(String) - Constructor for class neureka.devices.file.IDXHandle
+
IDXHandle(String) - Constructor for class neureka.devices.file.IDXHandle
 
-
IDXHandle(Tensor<Number>, String) - Constructor for class neureka.devices.file.IDXHandle
+
IDXHandle(Tensor<Number>, String) - Constructor for class neureka.devices.file.IDXHandle
 
-
idy() - Method in class neureka.math.Functions
+
idy() - Method in class neureka.math.Functions
 
-
ifValid(T) - Method in class neureka.backend.api.Call.Validator
+
ifValid(T) - Method in class neureka.backend.api.Call.Validator
 
-
IGEMM - Class in neureka.backend.main.operations.linear.internal.blas
+
IGEMM - Class in neureka.backend.main.operations.linear.internal.blas
A collection of primitive sub-routines for matrix multiplication performed on contiguous arrays, which are designed so that they can be vectorized by the JVM's JIT compiler (AVX instructions).
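For reference, what such routines compute (not how IGEMM is written internally) is a matrix product over flat arrays; the row-major layout and the accumulation into c below are assumptions made for the sketch:

    // c[m x n] += a[m x k] * b[k x n], all stored row-major in flat int arrays.
    static void matmul(int[] a, int[] b, int[] c, int m, int k, int n) {
        for (int i = 0; i < m; i++)
            for (int p = 0; p < k; p++) {
                int aip = a[i * k + p];
                for (int j = 0; j < n; j++)
                    c[i * n + j] += aip * b[p * n + j];
            }
    }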
-
IGEMM() - Constructor for class neureka.backend.main.operations.linear.internal.blas.IGEMM
+
IGEMM() - Constructor for class neureka.backend.main.operations.linear.internal.blas.IGEMM
 
-
IGEMM.VectorOperationI32 - Interface in neureka.backend.main.operations.linear.internal.blas
+
IGEMM.VectorOperationI32 - Interface in neureka.backend.main.operations.linear.internal.blas
 
-
IGEMM.VectorOperationI64 - Interface in neureka.backend.main.operations.linear.internal.blas
+
IGEMM.VectorOperationI64 - Interface in neureka.backend.main.operations.linear.internal.blas
 
-
image2DMaxHeight() - Method in class neureka.devices.opencl.OpenCLDevice
+
image2DMaxHeight() - Method in class neureka.devices.opencl.OpenCLDevice
 
-
image2DMaxWidth() - Method in class neureka.devices.opencl.OpenCLDevice
+
image2DMaxWidth() - Method in class neureka.devices.opencl.OpenCLDevice
 
-
image3DMaxDepth() - Method in class neureka.devices.opencl.OpenCLDevice
+
image3DMaxDepth() - Method in class neureka.devices.opencl.OpenCLDevice
 
-
image3DMaxHeight() - Method in class neureka.devices.opencl.OpenCLDevice
+
image3DMaxHeight() - Method in class neureka.devices.opencl.OpenCLDevice
 
-
image3DMaxWidth() - Method in class neureka.devices.opencl.OpenCLDevice
+
image3DMaxWidth() - Method in class neureka.devices.opencl.OpenCLDevice
 
-
imageSupport() - Method in class neureka.devices.opencl.OpenCLDevice
+
imageSupport() - Method in class neureka.devices.opencl.OpenCLDevice
 
-
ImplementationFor<D extends Device<?>> - Interface in neureka.backend.api
+
ImplementationFor<D extends Device<?>> - Interface in neureka.backend.api
Generally speaking, this interface describes the functionality of an implementation of an execution procedure tailored to a specific Device (interface) instance
@@ -4112,879 +4068,875 @@

I

of instances of implementations of the Algorithm interface, which themselves are components of Operation implementation instances.
-
ImplementationReceiver - Interface in neureka.backend.api.ini
+
ImplementationReceiver - Interface in neureka.backend.api.ini
 
-
in(Supplier<R>) - Method in interface neureka.devices.Device.In
+
in(Supplier<R>) - Method in interface neureka.devices.Device.In
 
-
increment() - Method in class neureka.devices.ReferenceCounter
+
increment() - Method in class neureka.devices.ReferenceCounter
 
-
increment() - Method in interface neureka.ndim.iterator.NDIterator
+
increment(int[], int[]) - Static method in class neureka.ndim.config.NDConfiguration.Utility
 
-
increment() - Method in class neureka.ndim.iterator.types.permuted.Permuted2DCIterator
-
increment() - Method in class neureka.ndim.iterator.types.permuted.Permuted3DCIterator
-
increment() - Method in class neureka.ndim.iterator.types.simple.Simple1DCIterator
-
increment() - Method in class neureka.ndim.iterator.types.simple.Simple2DCIterator
-
increment() - Method in class neureka.ndim.iterator.types.simple.Simple3DCIterator
-
increment() - Method in class neureka.ndim.iterator.types.sliced.Sliced1DCIterator
-
increment() - Method in class neureka.ndim.iterator.types.sliced.Sliced2DCIterator
-
increment() - Method in class neureka.ndim.iterator.types.sliced.Sliced3DCIterator
-
increment() - Method in class neureka.ndim.iterator.types.sliced.SlicedNDIterator
-
increment() - Method in class neureka.ndim.iterator.types.virtual.VirtualNDIterator
+
increment() - Method in interface neureka.ndim.iterator.NDIterator
 
-
increment(int[], int[]) - Static method in class neureka.ndim.config.NDConfiguration.Utility
+
increment() - Method in class neureka.ndim.iterator.types.permuted.Permuted2DCIterator
+
increment() - Method in class neureka.ndim.iterator.types.permuted.Permuted3DCIterator
+
increment() - Method in class neureka.ndim.iterator.types.simple.Simple1DCIterator
+
increment() - Method in class neureka.ndim.iterator.types.simple.Simple2DCIterator
+
increment() - Method in class neureka.ndim.iterator.types.simple.Simple3DCIterator
+
increment() - Method in class neureka.ndim.iterator.types.sliced.Sliced1DCIterator
+
increment() - Method in class neureka.ndim.iterator.types.sliced.Sliced2DCIterator
+
increment() - Method in class neureka.ndim.iterator.types.sliced.Sliced3DCIterator
+
increment() - Method in class neureka.ndim.iterator.types.sliced.SlicedNDIterator
+
increment() - Method in class neureka.ndim.iterator.types.virtual.VirtualNDIterator
 
-
INCREMENT - Enum constant in enum class neureka.devices.ReferenceCounter.ChangeType
+
incrementUsageCount() - Method in class neureka.devices.AbstractDeviceData
 
-
incrementUsageCount() - Method in class neureka.devices.AbstractDeviceData
+
incrementUsageCount() - Method in interface neureka.devices.DeviceData
 
-
incrementUsageCount() - Method in interface neureka.devices.DeviceData
-
 
-
incrementVersion(ExecutionCall<?>) - Method in interface neureka.MutateTensor
+
incrementVersion(ExecutionCall<?>) - Method in interface neureka.MutateTensor
This method is responsible for incrementing the "_version" field variable which represents the version of the data of this tensor.
-
indent(int) - Static method in class neureka.view.NdaAsString.Util
+
indent(int) - Static method in class neureka.view.NdaAsString.Util
 
-
index() - Method in class neureka.math.implementations.FunctionInput
+
index() - Method in class neureka.math.implementations.FunctionInput
 
-
indexOfIndex(int) - Method in interface neureka.ndim.config.NDConfiguration
+
indexOfIndex(int) - Method in interface neureka.ndim.config.NDConfiguration
Use this to calculate the true index for an element in the data array (data array index) based on a provided "virtual index", or "value array index".
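A generic sketch of that mapping for a strided layout (not the code of any particular NDConfiguration implementation; shape, strides and per-axis offsets are assumed inputs):

    static int indexOfIndex(int valueIndex, int[] shape, int[] strides, int[] offsets) {
        int dataIndex = 0;
        for (int d = shape.length - 1; d >= 0; d--) {            // decompose the flat value index, last axis first
            int coordinate = valueIndex % shape[d];
            valueIndex /= shape[d];
            dataIndex += (coordinate + offsets[d]) * strides[d];  // project onto the data array
        }
        return dataIndex;
    }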
-
indexOfIndex(int) - Method in class neureka.ndim.config.types.permuted.Permuted1DConfiguration
+
indexOfIndex(int) - Method in class neureka.ndim.config.types.permuted.Permuted1DConfiguration
Use this to calculate the true index for an element in the data array (data array index) based on a provided "virtual index", or "value array index".
-
indexOfIndex(int) - Method in class neureka.ndim.config.types.permuted.Permuted2DConfiguration
+
indexOfIndex(int) - Method in class neureka.ndim.config.types.permuted.Permuted2DConfiguration
Use this to calculate the true index for an element in the data array (data array index) based on a provided "virtual index", or "value array index".
-
indexOfIndex(int) - Method in class neureka.ndim.config.types.permuted.Permuted3DConfiguration
+
indexOfIndex(int) - Method in class neureka.ndim.config.types.permuted.Permuted3DConfiguration
Use this to calculate the true index for an element in the data array (data array index) based on a provided "virtual index", or "value array index".
-
indexOfIndex(int) - Method in class neureka.ndim.config.types.permuted.PermutedNDConfiguration
+
indexOfIndex(int) - Method in class neureka.ndim.config.types.permuted.PermutedNDConfiguration
Use this to calculate the true index for an element in the data array (data array index) based on a provided "virtual index", or "value array index".
-
indexOfIndex(int) - Method in class neureka.ndim.config.types.simple.Simple0DConfiguration
+
indexOfIndex(int) - Method in class neureka.ndim.config.types.simple.Simple0DConfiguration
Use this to calculate the true index for an element in the data array (data array index) based on a provided "virtual index", or "value array index".
-
indexOfIndex(int) - Method in class neureka.ndim.config.types.simple.Simple1DConfiguration
+
indexOfIndex(int) - Method in class neureka.ndim.config.types.simple.Simple1DConfiguration
Use this to calculate the true index for an element in the data array (data array index) based on a provided "virtual index", or "value array index".
-
indexOfIndex(int) - Method in class neureka.ndim.config.types.simple.Simple2DConfiguration
+
indexOfIndex(int) - Method in class neureka.ndim.config.types.simple.Simple2DConfiguration
Use this to calculate the true index for an element in the data array (data array index) based on a provided "virtual index", or "value array index".
-
indexOfIndex(int) - Method in class neureka.ndim.config.types.simple.Simple3DConfiguration
+
indexOfIndex(int) - Method in class neureka.ndim.config.types.simple.Simple3DConfiguration
Use this to calculate the true index for an element in the data array (data array index) based on a provided "virtual index", or "value array index".
-
indexOfIndex(int) - Method in class neureka.ndim.config.types.simple.SimpleNDConfiguration
+
indexOfIndex(int) - Method in class neureka.ndim.config.types.simple.SimpleNDConfiguration
Use this to calculate the true index for an element in the data array (data array index) based on a provided "virtual index", or "value array index".
-
indexOfIndex(int) - Method in class neureka.ndim.config.types.sliced.Sliced0DConfiguration
+
indexOfIndex(int) - Method in class neureka.ndim.config.types.sliced.Sliced0DConfiguration
Use this to calculate the true index for an element in the data array (data array index) based on a provided "virtual index", or "value array index".
-
indexOfIndex(int) - Method in class neureka.ndim.config.types.sliced.Sliced1DConfiguration
+
indexOfIndex(int) - Method in class neureka.ndim.config.types.sliced.Sliced1DConfiguration
Use this to calculate the true index for an element in the data array (data array index) based on a provided "virtual index", or "value array index".
-
indexOfIndex(int) - Method in class neureka.ndim.config.types.sliced.Sliced2DConfiguration
+
indexOfIndex(int) - Method in class neureka.ndim.config.types.sliced.Sliced2DConfiguration
Use this to calculate the true index for an element in the data array (data array index) based on a provided "virtual index", or "value array index".
-
indexOfIndex(int) - Method in class neureka.ndim.config.types.sliced.Sliced3DConfiguration
+
indexOfIndex(int) - Method in class neureka.ndim.config.types.sliced.Sliced3DConfiguration
Use this to calculate the true index for an element in the data array (data array index) based on a provided "virtual index", or "value array index".
-
indexOfIndex(int) - Method in class neureka.ndim.config.types.sliced.SlicedNDConfiguration
+
indexOfIndex(int) - Method in class neureka.ndim.config.types.sliced.SlicedNDConfiguration
Use this to calculate the true index for an element in the data array (data array index) based on a provided "virtual index", or "value array index".
-
indexOfIndex(int) - Method in class neureka.ndim.config.types.views.SimpleReshapeView
+
indexOfIndex(int) - Method in class neureka.ndim.config.types.views.SimpleReshapeView
Use this to calculate the true index for an element in the data array (data array index) based on a provided "virtual index", or "value array index".
-
indexOfIndex(int) - Method in class neureka.ndim.config.types.views.virtual.VirtualNDConfiguration
+
indexOfIndex(int) - Method in class neureka.ndim.config.types.views.virtual.VirtualNDConfiguration
Use this to calculate the true index for an element in the data array (data array index) based on a provided "virtual index", or "value array index".
-
indexOfIndex(int) - Method in interface neureka.ndim.NDimensional
+
indexOfIndex(int) - Method in interface neureka.ndim.NDimensional
This is a convenience method identical to ndArray.getNDConf().indexOfIndex(i).
-
indexOfIndices(int) - Method in class neureka.ndim.config.types.D1C
-
 
-
indexOfIndices(int) - Method in class neureka.ndim.config.types.permuted.Permuted1DConfiguration
-
indexOfIndices(int) - Method in class neureka.ndim.config.types.simple.Simple1DConfiguration
-
indexOfIndices(int) - Method in class neureka.ndim.config.types.sliced.Sliced1DConfiguration
-
indexOfIndices(int[]) - Method in interface neureka.ndim.config.NDConfiguration
+
indexOfIndices(int[]) - Method in interface neureka.ndim.config.NDConfiguration
The following method calculates the true index for an element in the data array based on a provided index array.
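With an explicit index array the same idea reduces to a strided sum; again a generic illustration rather than the library's internals:

    static int indexOfIndices(int[] indices, int[] strides, int[] offsets) {
        int dataIndex = 0;
        for (int d = 0; d < indices.length; d++)
            dataIndex += (indices[d] + offsets[d]) * strides[d];
        return dataIndex;
    }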
-
indexOfIndices(int[]) - Method in class neureka.ndim.config.types.permuted.Permuted1DConfiguration
+
indexOfIndices(int) - Method in class neureka.ndim.config.types.D1C
+
 
+
indexOfIndices(int, int) - Method in class neureka.ndim.config.types.D2C
+
 
+
indexOfIndices(int, int, int) - Method in class neureka.ndim.config.types.D3C
+
 
+
indexOfIndices(int[]) - Method in class neureka.ndim.config.types.permuted.Permuted1DConfiguration
The following method calculates the true index for an element in the data array based on a provided index array.
-
indexOfIndices(int[]) - Method in class neureka.ndim.config.types.permuted.Permuted2DConfiguration
+
indexOfIndices(int) - Method in class neureka.ndim.config.types.permuted.Permuted1DConfiguration
+
indexOfIndices(int[]) - Method in class neureka.ndim.config.types.permuted.Permuted2DConfiguration
The following method calculates the true index for an element in the data array based on a provided index array.
-
indexOfIndices(int[]) - Method in class neureka.ndim.config.types.permuted.Permuted3DConfiguration
+
indexOfIndices(int, int) - Method in class neureka.ndim.config.types.permuted.Permuted2DConfiguration
+
indexOfIndices(int[]) - Method in class neureka.ndim.config.types.permuted.Permuted3DConfiguration
The following method calculates the true index for an element in the data array based on a provided index array.
-
indexOfIndices(int[]) - Method in class neureka.ndim.config.types.permuted.PermutedNDConfiguration
+
indexOfIndices(int, int, int) - Method in class neureka.ndim.config.types.permuted.Permuted3DConfiguration
+
indexOfIndices(int[]) - Method in class neureka.ndim.config.types.permuted.PermutedNDConfiguration
The following method calculates the true index for an element in the data array based on a provided index array.
-
indexOfIndices(int[]) - Method in class neureka.ndim.config.types.simple.Simple0DConfiguration
+
indexOfIndices(int[]) - Method in class neureka.ndim.config.types.simple.Simple0DConfiguration
The following method calculates the true index for an element in the data array based on a provided index array.
-
indexOfIndices(int[]) - Method in class neureka.ndim.config.types.simple.Simple1DConfiguration
+
indexOfIndices(int[]) - Method in class neureka.ndim.config.types.simple.Simple1DConfiguration
The following method calculates the true index for an element in the data array based on a provided index array.
-
indexOfIndices(int[]) - Method in class neureka.ndim.config.types.simple.Simple2DConfiguration
+
indexOfIndices(int) - Method in class neureka.ndim.config.types.simple.Simple1DConfiguration
+
indexOfIndices(int[]) - Method in class neureka.ndim.config.types.simple.Simple2DConfiguration
The following method calculates the true index for an element in the data array based on a provided index array.
-
indexOfIndices(int[]) - Method in class neureka.ndim.config.types.simple.Simple3DConfiguration
+
indexOfIndices(int, int) - Method in class neureka.ndim.config.types.simple.Simple2DConfiguration
+
indexOfIndices(int[]) - Method in class neureka.ndim.config.types.simple.Simple3DConfiguration
The following method calculates the true index for an element in the data array based on a provided index array.
-
indexOfIndices(int[]) - Method in class neureka.ndim.config.types.simple.SimpleNDConfiguration
+
indexOfIndices(int, int, int) - Method in class neureka.ndim.config.types.simple.Simple3DConfiguration
+
indexOfIndices(int[]) - Method in class neureka.ndim.config.types.simple.SimpleNDConfiguration
The following method calculates the true index for an element in the data array based on a provided index array.
-
indexOfIndices(int[]) - Method in class neureka.ndim.config.types.sliced.Sliced0DConfiguration
+
indexOfIndices(int[]) - Method in class neureka.ndim.config.types.sliced.Sliced0DConfiguration
The following method calculates the true index for an element in the data array based on a provided index array.
-
indexOfIndices(int[]) - Method in class neureka.ndim.config.types.sliced.Sliced1DConfiguration
+
indexOfIndices(int[]) - Method in class neureka.ndim.config.types.sliced.Sliced1DConfiguration
The following method calculates the true index for an element in the data array based on a provided index array.
-
indexOfIndices(int[]) - Method in class neureka.ndim.config.types.sliced.Sliced2DConfiguration
+
indexOfIndices(int) - Method in class neureka.ndim.config.types.sliced.Sliced1DConfiguration
+
indexOfIndices(int[]) - Method in class neureka.ndim.config.types.sliced.Sliced2DConfiguration
The following method calculates the true index for an element in the data array based on a provided index array.
-
indexOfIndices(int[]) - Method in class neureka.ndim.config.types.sliced.Sliced3DConfiguration
+
indexOfIndices(int, int) - Method in class neureka.ndim.config.types.sliced.Sliced2DConfiguration
+
indexOfIndices(int[]) - Method in class neureka.ndim.config.types.sliced.Sliced3DConfiguration
The following method calculates the true index for an element in the data array based on a provided index array.
-
indexOfIndices(int[]) - Method in class neureka.ndim.config.types.sliced.SlicedNDConfiguration
+
indexOfIndices(int, int, int) - Method in class neureka.ndim.config.types.sliced.Sliced3DConfiguration
+
indexOfIndices(int[]) - Method in class neureka.ndim.config.types.sliced.SlicedNDConfiguration
The following method calculates the true index for an element in the data array based on a provided index array.
-
indexOfIndices(int[]) - Method in class neureka.ndim.config.types.views.SimpleReshapeView
+
indexOfIndices(int[]) - Method in class neureka.ndim.config.types.views.SimpleReshapeView
The following method calculates the true index for an element in the data array based on a provided index array.
-
indexOfIndices(int[]) - Method in class neureka.ndim.config.types.views.virtual.VirtualNDConfiguration
+
indexOfIndices(int[]) - Method in class neureka.ndim.config.types.views.virtual.VirtualNDConfiguration
The following method calculates the true index for an element in the data array based on a provided index array.
-
indexOfIndices(int[]) - Method in interface neureka.ndim.NDimensional
+
indexOfIndices(int[]) - Method in interface neureka.ndim.NDimensional
This is a convenience method identical to ndArray.getNDConf().indexOfIndices(indices).
-
indexOfIndices(int, int) - Method in class neureka.ndim.config.types.D2C
-
 
-
indexOfIndices(int, int) - Method in class neureka.ndim.config.types.permuted.Permuted2DConfiguration
-
indexOfIndices(int, int) - Method in class neureka.ndim.config.types.simple.Simple2DConfiguration
-
indexOfIndices(int, int) - Method in class neureka.ndim.config.types.sliced.Sliced2DConfiguration
-
indexOfIndices(int, int, int) - Method in class neureka.ndim.config.types.D3C
+
INDICES_MAPPER_ID - Static variable in class neureka.backend.main.operations.linear.internal.opencl.CLReduce
 
-
indexOfIndices(int, int, int) - Method in class neureka.ndim.config.types.permuted.Permuted3DConfiguration
-
indexOfIndices(int, int, int) - Method in class neureka.ndim.config.types.simple.Simple3DConfiguration
-
indexOfIndices(int, int, int) - Method in class neureka.ndim.config.types.sliced.Sliced3DConfiguration
-
INDICES_MAPPER_ID - Static variable in class neureka.backend.main.operations.linear.internal.opencl.CLReduce
-
 
-
indicesMap() - Method in interface neureka.ndim.config.NDConfiguration
+
indicesMap() - Method in interface neureka.ndim.config.NDConfiguration
If one wants to for example access the fourth last item of all items within a tensor based on a scalar index x then the NDConfiguration.indicesMap() is needed as a basis for translating said scalar index x to an array of indices for every axis of the tensor represented by this NDConfiguration.
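As a rough illustration of what such an indices map typically contains, the following library-independent sketch derives the per-axis step sizes of a row-major layout; the helper name is hypothetical:

```java
// Hypothetical sketch: for a row-major layout, the indices map holds, per axis,
// how many items a single step along that axis skips in the flattened enumeration.
static int[] indicesMapOf(int[] shape) {
    int[] map = new int[shape.length];
    int step = 1;
    for (int axis = shape.length - 1; axis >= 0; axis--) {
        map[axis] = step;
        step *= shape[axis];
    }
    return map;
}
// indicesMapOf(new int[]{2, 3, 4}) == {12, 4, 1}
```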
-
indicesMap() - Method in class neureka.ndim.config.types.permuted.Permuted1DConfiguration
+
indicesMap(int) - Method in interface neureka.ndim.config.NDConfiguration
-
If one wants to for example access the fourth last item of all items within a tensor based on a scalar index x then the NDConfiguration.indicesMap() is needed as a basis for translating said scalar index x to an array of indices for every axis of the tensor represented by this NDConfiguration.
+
This method receives an axis index and returns the indices mapping value of said axis to enable readable access to the indices map of this configuration.
-
indicesMap() - Method in class neureka.ndim.config.types.permuted.Permuted2DConfiguration
+
indicesMap() - Method in class neureka.ndim.config.types.permuted.Permuted1DConfiguration
If one wants to for example access the fourth last item of all items within a tensor based on a scalar index x then the NDConfiguration.indicesMap() is needed as a basis for translating said scalar index x to an array of indices for every axis of the tensor represented by this NDConfiguration.
-
indicesMap() - Method in class neureka.ndim.config.types.permuted.Permuted3DConfiguration
+
indicesMap(int) - Method in class neureka.ndim.config.types.permuted.Permuted1DConfiguration
-
If one wants to for example access the fourth last item of all items within a tensor based on a scalar index x then the NDConfiguration.indicesMap() is needed as a basis for translating said scalar index x to an array of indices for every axis of the tensor represented by this NDConfiguration.
+
This method receives an axis index and returns the indices mapping value of said axis to enable readable access to the indices map of this configuration.
-
indicesMap() - Method in class neureka.ndim.config.types.permuted.PermutedNDConfiguration
+
indicesMap() - Method in class neureka.ndim.config.types.permuted.Permuted2DConfiguration
If one wants to for example access the fourth last item of all items within a tensor based on a scalar index x then the NDConfiguration.indicesMap() is needed as a basis for translating said scalar index x to an array of indices for every axis of the tensor represented by this NDConfiguration.
-
indicesMap() - Method in class neureka.ndim.config.types.simple.Simple0DConfiguration
+
indicesMap(int) - Method in class neureka.ndim.config.types.permuted.Permuted2DConfiguration
-
If one wants to for example access the fourth last item of all items within a tensor based on a scalar index x then the NDConfiguration.indicesMap() is needed as a basis for translating said scalar index x to an array of indices for every axis of the tensor represented by this NDConfiguration.
+
This method receives an axis index and returns the indices mapping value of said axis to enable readable access to the indices map of this configuration.
-
indicesMap() - Method in class neureka.ndim.config.types.simple.Simple1DConfiguration
+
indicesMap() - Method in class neureka.ndim.config.types.permuted.Permuted3DConfiguration
If one wants to for example access the fourth last item of all items within a tensor based on a scalar index x then the NDConfiguration.indicesMap() is needed as a basis for translating said scalar index x to an array of indices for every axis of the tensor represented by this NDConfiguration.
-
indicesMap() - Method in class neureka.ndim.config.types.simple.Simple2DConfiguration
+
indicesMap(int) - Method in class neureka.ndim.config.types.permuted.Permuted3DConfiguration
-
If one wants to for example access the fourth last item of all items within a tensor based on a scalar index x then the NDConfiguration.indicesMap() is needed as a basis for translating said scalar index x to an array of indices for every axis of the tensor represented by this NDConfiguration.
+
This method receives an axis index and returns the indices mapping value of said axis to enable readable access to the indices map of this configuration.
-
indicesMap() - Method in class neureka.ndim.config.types.simple.Simple3DConfiguration
+
indicesMap() - Method in class neureka.ndim.config.types.permuted.PermutedNDConfiguration
If one wants to for example access the fourth last item of all items within a tensor based on a scalar index x then the NDConfiguration.indicesMap() is needed as a basis for translating said scalar index x to an array of indices for every axis of the tensor represented by this NDConfiguration.
-
indicesMap() - Method in class neureka.ndim.config.types.simple.SimpleNDConfiguration
+
indicesMap(int) - Method in class neureka.ndim.config.types.permuted.PermutedNDConfiguration
-
If one wants to for example access the fourth last item of all items within a tensor based on a scalar index x then the NDConfiguration.indicesMap() is needed as a basis for translating said scalar index x to an array of indices for every axis of the tensor represented by this NDConfiguration.
+
This method receives an axis index and returns the indices mapping value of said axis to enable readable access to the indices map of this configuration.
-
indicesMap() - Method in class neureka.ndim.config.types.sliced.Sliced0DConfiguration
+
indicesMap() - Method in class neureka.ndim.config.types.simple.Simple0DConfiguration
If one wants to for example access the fourth last item of all items within a tensor based on a scalar index x then the NDConfiguration.indicesMap() is needed as a basis for translating said scalar index x to an array of indices for every axis of the tensor represented by this NDConfiguration.
-
indicesMap() - Method in class neureka.ndim.config.types.sliced.Sliced1DConfiguration
+
indicesMap(int) - Method in class neureka.ndim.config.types.simple.Simple0DConfiguration
-
If one wants to for example access the fourth last item of all items within a tensor based on a scalar index x then the NDConfiguration.indicesMap() is needed as a basis for translating said scalar index x to an array of indices for every axis of the tensor represented by this NDConfiguration.
+
This method receives an axis index and returns the indices mapping value of said axis to enable readable access to the indices map of this configuration.
-
indicesMap() - Method in class neureka.ndim.config.types.sliced.Sliced2DConfiguration
+
indicesMap() - Method in class neureka.ndim.config.types.simple.Simple1DConfiguration
If one wants to for example access the fourth last item of all items within a tensor based on a scalar index x then the NDConfiguration.indicesMap() is needed as a basis for translating said scalar index x to an array of indices for every axis of the tensor represented by this NDConfiguration.
-
indicesMap() - Method in class neureka.ndim.config.types.sliced.Sliced3DConfiguration
+
indicesMap(int) - Method in class neureka.ndim.config.types.simple.Simple1DConfiguration
-
If one wants to for example access the fourth last item of all items within a tensor based on a scalar index x then the NDConfiguration.indicesMap() is needed as a basis for translating said scalar index x to an array of indices for every axis of the tensor represented by this NDConfiguration.
+
This method receives an axis index and returns the indices mapping value of said axis to enable readable access to the indices map of this configuration.
-
indicesMap() - Method in class neureka.ndim.config.types.sliced.SlicedNDConfiguration
+
indicesMap() - Method in class neureka.ndim.config.types.simple.Simple2DConfiguration
If one wants to for example access the fourth last item of all items within a tensor based on a scalar index x then the NDConfiguration.indicesMap() is needed as a basis for translating said scalar index x to an array of indices for every axis of the tensor represented by this NDConfiguration.
-
indicesMap() - Method in class neureka.ndim.config.types.views.SimpleReshapeView
+
indicesMap(int) - Method in class neureka.ndim.config.types.simple.Simple2DConfiguration
-
If one wants to for example access the fourth last item of all items within a tensor based on a scalar index x then the NDConfiguration.indicesMap() is needed as a basis for translating said scalar index x to an array of indices for every axis of the tensor represented by this NDConfiguration.
+
This method receives an axis index and returns the indices mapping value of said axis to enable readable access to the indices map of this configuration.
-
indicesMap() - Method in class neureka.ndim.config.types.views.virtual.VirtualNDConfiguration
+
indicesMap() - Method in class neureka.ndim.config.types.simple.Simple3DConfiguration
If one wants to for example access the fourth last item of all items within a tensor based on a scalar index x then the NDConfiguration.indicesMap() is needed as a basis for translating said scalar index x to an array of indices for every axis of the tensor represented by this NDConfiguration.
-
indicesMap() - Method in interface neureka.ndim.NDimensional
-
 
-
indicesMap(int) - Method in interface neureka.ndim.config.NDConfiguration
+
indicesMap(int) - Method in class neureka.ndim.config.types.simple.Simple3DConfiguration
This method receives an axis index and returns the indices mapping value of said axis to enable readable access to the indices map of this configuration.
-
indicesMap(int) - Method in class neureka.ndim.config.types.permuted.Permuted1DConfiguration
+
indicesMap() - Method in class neureka.ndim.config.types.simple.SimpleNDConfiguration
-
This method receives an axis index and returns the indices mapping value of said axis to enable readable access to the indices map of this configuration.
+
If one wants to for example access the fourth last item of all items within a tensor based on a scalar index x then the NDConfiguration.indicesMap() is needed as a basis for translating said scalar index x to an array of indices for every axis of the tensor represented by this NDConfiguration.
-
indicesMap(int) - Method in class neureka.ndim.config.types.permuted.Permuted2DConfiguration
+
indicesMap(int) - Method in class neureka.ndim.config.types.simple.SimpleNDConfiguration
This method receives an axis index and returns the indices mapping value of said axis to enable readable access to the indices map of this configuration.
-
indicesMap(int) - Method in class neureka.ndim.config.types.permuted.Permuted3DConfiguration
+
indicesMap() - Method in class neureka.ndim.config.types.sliced.Sliced0DConfiguration
-
This method receives an axis index and returns the indices mapping value of said axis to enable readable access to the indices map of this configuration.
+
If one wants to for example access the fourth last item of all items within a tensor based on a scalar index x then the NDConfiguration.indicesMap() is needed as a basis for translating said scalar index x to an array of indices for every axis of the tensor represented by this NDConfiguration.
-
indicesMap(int) - Method in class neureka.ndim.config.types.permuted.PermutedNDConfiguration
+
indicesMap(int) - Method in class neureka.ndim.config.types.sliced.Sliced0DConfiguration
+
 
+
indicesMap() - Method in class neureka.ndim.config.types.sliced.Sliced1DConfiguration
-
This method receives an axis index and returns the indices mapping value of said axis to enable readable access to the indices map of this configuration.
+
If one wants to for example access the fourth last item of all items within a tensor based on a scalar index x then the NDConfiguration.indicesMap() is needed as a basis for translating said scalar index x to an array of indices for every axis of the tensor represented by this NDConfiguration.
-
indicesMap(int) - Method in class neureka.ndim.config.types.simple.Simple0DConfiguration
+
indicesMap(int) - Method in class neureka.ndim.config.types.sliced.Sliced1DConfiguration
This method receives an axis index and returns the indices mapping value of said axis to enable readable access to the indices map of this configuration.
-
indicesMap(int) - Method in class neureka.ndim.config.types.simple.Simple1DConfiguration
+
indicesMap() - Method in class neureka.ndim.config.types.sliced.Sliced2DConfiguration
-
This method receives an axis index and returns the indices mapping value of said axis to enable readable access to the indices map of this configuration.
+
If one wants to for example access the fourth last item of all items within a tensor based on a scalar index x then the NDConfiguration.indicesMap() is needed as a basis for translating said scalar index x to an array of indices for every axis of the tensor represented by this NDConfiguration.
-
indicesMap(int) - Method in class neureka.ndim.config.types.simple.Simple2DConfiguration
+
indicesMap(int) - Method in class neureka.ndim.config.types.sliced.Sliced2DConfiguration
This method receives an axis index and returns the indices mapping value of said axis to enable readable access to the indices map of this configuration.
-
indicesMap(int) - Method in class neureka.ndim.config.types.simple.Simple3DConfiguration
+
indicesMap() - Method in class neureka.ndim.config.types.sliced.Sliced3DConfiguration
-
This method receives an axis index and returns the indices mapping value of said axis to enable readable access to the indices map of this configuration.
+
If one wants to for example access the fourth last item of all items within a tensor based on a scalar index x then the NDConfiguration.indicesMap() is needed as a basis for translating said scalar index x to an array of indices for every axis of the tensor represented by this NDConfiguration.
-
indicesMap(int) - Method in class neureka.ndim.config.types.simple.SimpleNDConfiguration
+
indicesMap(int) - Method in class neureka.ndim.config.types.sliced.Sliced3DConfiguration
This method receives an axis index and returns the indices mapping value of said axis to enable readable access to the indices map of this configuration.
-
indicesMap(int) - Method in class neureka.ndim.config.types.sliced.Sliced0DConfiguration
-
 
-
indicesMap(int) - Method in class neureka.ndim.config.types.sliced.Sliced1DConfiguration
+
indicesMap() - Method in class neureka.ndim.config.types.sliced.SlicedNDConfiguration
-
This method receives an axis index and returns the indices mapping value of said axis to enable readable access to the indices map of this configuration.
+
If one wants to for example access the fourth last item of all items within a tensor based on a scalar index x then the NDConfiguration.indicesMap() is needed as a basis for translating said scalar index x to an array of indices for every axis of the tensor represented by this NDConfiguration.
-
indicesMap(int) - Method in class neureka.ndim.config.types.sliced.Sliced2DConfiguration
+
indicesMap(int) - Method in class neureka.ndim.config.types.sliced.SlicedNDConfiguration
This method receives an axis index and returns the indices mapping value of said axis to enable readable access to the indices map of this configuration.
-
indicesMap(int) - Method in class neureka.ndim.config.types.sliced.Sliced3DConfiguration
+
indicesMap() - Method in class neureka.ndim.config.types.views.SimpleReshapeView
-
This method receives an axis index and returns the indices mapping value of said axis to enable readable access to the indices map of this configuration.
+
If one wants to for example access the fourth last item of all items within a tensor based on a scalar index x then the NDConfiguration.indicesMap() is needed as a basis for translating said scalar index x to an array of indices for every axis of the tensor represented by this NDConfiguration.
-
indicesMap(int) - Method in class neureka.ndim.config.types.sliced.SlicedNDConfiguration
+
indicesMap(int) - Method in class neureka.ndim.config.types.views.SimpleReshapeView
This method receives an axis index and returns the indices mapping value of said axis to enable readable access to the indices map of this configuration.
-
indicesMap(int) - Method in class neureka.ndim.config.types.views.SimpleReshapeView
+
indicesMap() - Method in class neureka.ndim.config.types.views.virtual.VirtualNDConfiguration
-
This method receives an axis index and returns the indices mapping value of said axis to enable readable access to the indices map of this configuration.
+
If one wants to for example access the fourth last item of all items within a tensor based on a scalar index x then the NDConfiguration.indicesMap() is needed as a basis for translating said scalar index x to an array of indices for every axis of the tensor represented by this NDConfiguration.
-
indicesMap(int) - Method in class neureka.ndim.config.types.views.virtual.VirtualNDConfiguration
+
indicesMap(int) - Method in class neureka.ndim.config.types.views.virtual.VirtualNDConfiguration
This method receives an axis index and returns the indices mapping value of said axis to enable readable access to the indices map of this configuration.
-
indicesOfIndex(int) - Method in interface neureka.ndim.config.NDConfiguration
+
indicesMap() - Method in interface neureka.ndim.NDimensional
+
 
+
indicesOfIndex(int) - Method in interface neureka.ndim.config.NDConfiguration
The following method calculates the axis indices for an element in the nd-array array based on a provided "virtual index".
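The inverse of the flat-index calculation shown earlier can be sketched in a similarly library-independent way; the helper name and the plain indices-map array are hypothetical and only illustrate the described translation:

```java
// Hypothetical sketch: translating a scalar "virtual index" into per-axis indices
// using an indices map (here the row-major step sizes of the shape).
static int[] indicesOfIndex(int virtualIndex, int[] indicesMap) {
    int[] indices = new int[indicesMap.length];
    for (int axis = 0; axis < indicesMap.length; axis++) {
        indices[axis] = virtualIndex / indicesMap[axis]; // how many full steps fit
        virtualIndex %= indicesMap[axis];                // remainder for the next axis
    }
    return indices;
}
// For shape [2, 3] the indices map is [3, 1], so indicesOfIndex(4, new int[]{3, 1}) == {1, 1}
```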
-
indicesOfIndex(int) - Method in class neureka.ndim.config.types.permuted.Permuted1DConfiguration
+
indicesOfIndex(int) - Method in class neureka.ndim.config.types.permuted.Permuted1DConfiguration
The following method calculates the axis indices for an element in the nd-array array based on a provided "virtual index".
-
indicesOfIndex(int) - Method in class neureka.ndim.config.types.permuted.Permuted2DConfiguration
+
indicesOfIndex(int) - Method in class neureka.ndim.config.types.permuted.Permuted2DConfiguration
The following method calculates the axis indices for an element in the nd-array array based on a provided "virtual index".
-
indicesOfIndex(int) - Method in class neureka.ndim.config.types.permuted.Permuted3DConfiguration
+
indicesOfIndex(int) - Method in class neureka.ndim.config.types.permuted.Permuted3DConfiguration
The following method calculates the axis indices for an element in the nd-array array based on a provided "virtual index".
-
indicesOfIndex(int) - Method in class neureka.ndim.config.types.permuted.PermutedNDConfiguration
+
indicesOfIndex(int) - Method in class neureka.ndim.config.types.permuted.PermutedNDConfiguration
The following method calculates the axis indices for an element in the nd-array array based on a provided "virtual index".
-
indicesOfIndex(int) - Method in class neureka.ndim.config.types.simple.Simple0DConfiguration
+
indicesOfIndex(int) - Method in class neureka.ndim.config.types.simple.Simple0DConfiguration
The following method calculates the axis indices for an element in the nd-array array based on a provided "virtual index".
-
indicesOfIndex(int) - Method in class neureka.ndim.config.types.simple.Simple1DConfiguration
+
indicesOfIndex(int) - Method in class neureka.ndim.config.types.simple.Simple1DConfiguration
The following method calculates the axis indices for an element in the nd-array array based on a provided "virtual index".
-
indicesOfIndex(int) - Method in class neureka.ndim.config.types.simple.Simple2DConfiguration
+
indicesOfIndex(int) - Method in class neureka.ndim.config.types.simple.Simple2DConfiguration
The following method calculates the axis indices for an element in the nd-array array based on a provided "virtual index".
-
indicesOfIndex(int) - Method in class neureka.ndim.config.types.simple.Simple3DConfiguration
+
indicesOfIndex(int) - Method in class neureka.ndim.config.types.simple.Simple3DConfiguration
The following method calculates the axis indices for an element in the nd-array array based on a provided "virtual index".
-
indicesOfIndex(int) - Method in class neureka.ndim.config.types.simple.SimpleNDConfiguration
+
indicesOfIndex(int) - Method in class neureka.ndim.config.types.simple.SimpleNDConfiguration
The following method calculates the axis indices for an element in the nd-array array based on a provided "virtual index".
-
indicesOfIndex(int) - Method in class neureka.ndim.config.types.sliced.Sliced0DConfiguration
+
indicesOfIndex(int) - Method in class neureka.ndim.config.types.sliced.Sliced0DConfiguration
The following method calculates the axis indices for an element in the nd-array array based on a provided "virtual index".
-
indicesOfIndex(int) - Method in class neureka.ndim.config.types.sliced.Sliced1DConfiguration
+
indicesOfIndex(int) - Method in class neureka.ndim.config.types.sliced.Sliced1DConfiguration
The following method calculates the axis indices for an element in the nd-array array based on a provided "virtual index".
-
indicesOfIndex(int) - Method in class neureka.ndim.config.types.sliced.Sliced2DConfiguration
+
indicesOfIndex(int) - Method in class neureka.ndim.config.types.sliced.Sliced2DConfiguration
The following method calculates the axis indices for an element in the nd-array array based on a provided "virtual index".
-
indicesOfIndex(int) - Method in class neureka.ndim.config.types.sliced.Sliced3DConfiguration
+
indicesOfIndex(int) - Method in class neureka.ndim.config.types.sliced.Sliced3DConfiguration
The following method calculates the axis indices for an element in the nd-array array based on a provided "virtual index".
-
indicesOfIndex(int) - Method in class neureka.ndim.config.types.sliced.SlicedNDConfiguration
+
indicesOfIndex(int) - Method in class neureka.ndim.config.types.sliced.SlicedNDConfiguration
The following method calculates the axis indices for an element in the nd-array array based on a provided "virtual index".
-
indicesOfIndex(int) - Method in class neureka.ndim.config.types.views.SimpleReshapeView
+
indicesOfIndex(int) - Method in class neureka.ndim.config.types.views.SimpleReshapeView
The following method calculates the axis indices for an element in the nd-array array based on a provided "virtual index".
-
indicesOfIndex(int) - Method in class neureka.ndim.config.types.views.virtual.VirtualNDConfiguration
+
indicesOfIndex(int) - Method in class neureka.ndim.config.types.views.virtual.VirtualNDConfiguration
The following method calculates the axis indices for an element in the nd-array array based on a provided "virtual index".
-
indicesOfIndex(int) - Method in interface neureka.ndim.NDimensional
+
indicesOfIndex(int) - Method in interface neureka.ndim.NDimensional
This is a convenience method identical to ndArray.getNDConf().indicesOfIndex(i).
-
init(int, int[]) - Method in interface neureka.ndim.Filler
+
init(int, int[]) - Method in interface neureka.ndim.Filler
 
-
initialScramble(long) - Static method in class neureka.backend.main.implementations.elementwise.CPURandomization
+
initialScramble(long) - Static method in class neureka.backend.main.implementations.elementwise.CPURandomization
 
-
input(int) - Method in class neureka.backend.api.Call
+
input(int) - Method in class neureka.backend.api.Call
 
-
input(Class<V>, int) - Method in class neureka.backend.api.Call
+
input(Class<V>, int) - Method in class neureka.backend.api.Call
 
-
inputIndex() - Method in class neureka.autograd.ADTarget
+
inputIndex() - Method in class neureka.autograd.ADTarget
 
-
inputs() - Method in class neureka.backend.api.Call
+
inputs() - Method in class neureka.backend.api.Call
 
-
INSTANCE - Static variable in interface neureka.devices.DeviceCleaner
+
INSTANCE - Static variable in interface neureka.devices.DeviceCleaner
 
-
intoRange(int, int) - Method in interface neureka.devices.Device.Writer
+
intoRange(int, int) - Method in interface neureka.devices.Device.Writer
Writes whatever kind of data was previously specified, to the tensors' data into the range targeted by the provided start and limit.
-
intStream(int, int) - Static method in class neureka.common.utility.DataConverter.Utility
+
intStream(int, int) - Static method in class neureka.common.utility.DataConverter.Utility
Use this to create a range based IntStream which is only parallel if the provided threshold is smaller than the provided workload size.
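A minimal sketch of such a helper, assuming the threshold simply gates IntStream.parallel(); the method name is hypothetical:

```java
import java.util.stream.IntStream;

// Hypothetical stand-in for such a helper: the stream is only made parallel
// when the workload is large enough to outweigh the parallelization overhead.
static IntStream rangeStream(int threshold, int workloadSize) {
    IntStream range = IntStream.range(0, workloadSize);
    return (threshold < workloadSize) ? range.parallel() : range;
}
```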
-
intToBigInteger(int[]) - Static method in class neureka.common.utility.DataConverter.Utility
+
intToBigInteger(int[]) - Static method in class neureka.common.utility.DataConverter.Utility
 
-
intToByte(int[]) - Static method in class neureka.common.utility.DataConverter.Utility
+
intToByte(int[]) - Static method in class neureka.common.utility.DataConverter.Utility
 
-
intToDouble(int[]) - Static method in class neureka.common.utility.DataConverter.Utility
+
intToDouble(int[]) - Static method in class neureka.common.utility.DataConverter.Utility
 
-
intToFloat(int[]) - Static method in class neureka.common.utility.DataConverter.Utility
+
intToFloat(int[]) - Static method in class neureka.common.utility.DataConverter.Utility
 
-
intToLong(int[]) - Static method in class neureka.common.utility.DataConverter.Utility
+
intToLong(int[]) - Static method in class neureka.common.utility.DataConverter.Utility
 
-
intToShort(int[]) - Static method in class neureka.common.utility.DataConverter.Utility
+
intToShort(int[]) - Static method in class neureka.common.utility.DataConverter.Utility
 
-
invert(int[]) - Static method in class neureka.backend.main.operations.other.Permute
+
invert(int[]) - Static method in class neureka.backend.main.operations.other.Permute
 
-
invoke(boolean) - Method in interface neureka.backend.main.implementations.fun.api.CPUFun
+
invoke(Supplier<T>) - Method in class neureka.backend.api.BackendContext.Runner
+
+
Use this method to supply a lambda which will be executed in the BackendContext which produced this very BackendContext.Runner instance.
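A hedged usage sketch; obtaining the BackendContext via Neureka.get().backend(), the runner() accessor and the Tensor.of(...) factory are assumptions made for illustration only:

```java
// Hedged sketch: run a lambda inside the backend context that produced the Runner
// and return its result. The surrounding setup calls are assumptions.
BackendContext context = Neureka.get().backend();
Tensor<Double> result = context.runner().invoke(() ->
    Tensor.of(1.0, 2.0, 3.0) // created while 'context' is the active backend
);
```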
+
+
invoke(double, double) - Method in interface neureka.backend.main.implementations.fun.api.CPUBiFun
 
-
invoke(boolean, boolean) - Method in interface neureka.backend.main.implementations.fun.api.CPUBiFun
+
invoke(float, float) - Method in interface neureka.backend.main.implementations.fun.api.CPUBiFun
 
-
invoke(byte) - Method in interface neureka.backend.main.implementations.fun.api.CPUFun
+
invoke(int, int) - Method in interface neureka.backend.main.implementations.fun.api.CPUBiFun
 
-
invoke(byte, byte) - Method in interface neureka.backend.main.implementations.fun.api.CPUBiFun
+
invoke(long, long) - Method in interface neureka.backend.main.implementations.fun.api.CPUBiFun
 
-
invoke(char) - Method in interface neureka.backend.main.implementations.fun.api.CPUFun
+
invoke(byte, byte) - Method in interface neureka.backend.main.implementations.fun.api.CPUBiFun
 
-
invoke(char, char) - Method in interface neureka.backend.main.implementations.fun.api.CPUBiFun
+
invoke(short, short) - Method in interface neureka.backend.main.implementations.fun.api.CPUBiFun
 
-
invoke(double) - Method in interface neureka.backend.main.implementations.fun.api.CPUFun
+
invoke(boolean, boolean) - Method in interface neureka.backend.main.implementations.fun.api.CPUBiFun
 
-
invoke(double) - Method in interface neureka.math.Function
-
-
Invokes this Function with the provided scalar as a single input and returns the scalar result.
-
-
invoke(double...) - Method in interface neureka.math.Function
-
-
Invokes this Function with the provided array of inputs.
-
-
invoke(double[], double[], int, double[]) - Method in interface neureka.backend.main.operations.linear.internal.blas.GEMM.VectorOperationF64
+
invoke(char, char) - Method in interface neureka.backend.main.implementations.fun.api.CPUBiFun
 
-
invoke(double[], int) - Method in interface neureka.math.Function
-
-
Invokes this Function with the provided array of inputs and an index for input dependent indexing.
-
-
invoke(double[], int, double[], int, int, int) - Static method in class neureka.backend.main.operations.linear.internal.blas.DOT
+
invoke(Object, Object) - Method in interface neureka.backend.main.implementations.fun.api.CPUBiFun
 
-
invoke(double[], int, double, double[], int, int, int) - Static method in class neureka.backend.main.operations.linear.internal.blas.AXPY
+
invoke(double) - Method in interface neureka.backend.main.implementations.fun.api.CPUFun
 
-
invoke(double, double) - Method in interface neureka.backend.main.implementations.fun.api.CPUBiFun
+
invoke(float) - Method in interface neureka.backend.main.implementations.fun.api.CPUFun
 
-
invoke(float) - Method in interface neureka.backend.main.implementations.fun.api.CPUFun
+
invoke(int) - Method in interface neureka.backend.main.implementations.fun.api.CPUFun
 
-
invoke(float[], float[], int, float[]) - Method in interface neureka.backend.main.operations.linear.internal.blas.GEMM.VectorOperationF32
+
invoke(long) - Method in interface neureka.backend.main.implementations.fun.api.CPUFun
 
-
invoke(float[], int, float[], int, int, int) - Static method in class neureka.backend.main.operations.linear.internal.blas.DOT
+
invoke(byte) - Method in interface neureka.backend.main.implementations.fun.api.CPUFun
 
-
invoke(float[], int, float, float[], int, int, int) - Static method in class neureka.backend.main.operations.linear.internal.blas.AXPY
+
invoke(short) - Method in interface neureka.backend.main.implementations.fun.api.CPUFun
 
-
invoke(float, float) - Method in interface neureka.backend.main.implementations.fun.api.CPUBiFun
+
invoke(boolean) - Method in interface neureka.backend.main.implementations.fun.api.CPUFun
 
-
invoke(int) - Method in interface neureka.backend.main.implementations.fun.api.CPUFun
+
invoke(char) - Method in interface neureka.backend.main.implementations.fun.api.CPUFun
 
-
invoke(int[], int[], int, int[]) - Method in interface neureka.backend.main.operations.linear.internal.blas.IGEMM.VectorOperationI32
+
invoke(Object) - Method in interface neureka.backend.main.implementations.fun.api.CPUFun
 
-
invoke(int[], int, int[], int, int, int) - Static method in class neureka.backend.main.operations.linear.internal.blas.DOT
+
invoke(double[], int, double, double[], int, int, int) - Static method in class neureka.backend.main.operations.linear.internal.blas.AXPY
 
-
invoke(int[], int, int[], int, int, int) - Static method in class neureka.backend.main.operations.linear.internal.blas.IDOT
+
invoke(float[], int, float, float[], int, int, int) - Static method in class neureka.backend.main.operations.linear.internal.blas.AXPY
 
-
invoke(int[], int, int, int[], int, int, int) - Static method in class neureka.backend.main.operations.linear.internal.blas.IAXPY
+
invoke(double[], int, double[], int, int, int) - Static method in class neureka.backend.main.operations.linear.internal.blas.DOT
 
-
invoke(int, int) - Method in interface neureka.backend.main.implementations.fun.api.CPUBiFun
+
invoke(float[], int, float[], int, int, int) - Static method in class neureka.backend.main.operations.linear.internal.blas.DOT
 
-
invoke(long) - Method in interface neureka.backend.main.implementations.fun.api.CPUFun
+
invoke(long[], int, long[], int, int, int) - Static method in class neureka.backend.main.operations.linear.internal.blas.DOT
 
-
invoke(long[], int, long[], int, int, int) - Static method in class neureka.backend.main.operations.linear.internal.blas.DOT
+
invoke(int[], int, int[], int, int, int) - Static method in class neureka.backend.main.operations.linear.internal.blas.DOT
 
-
invoke(long[], int, long[], int, int, int) - Static method in class neureka.backend.main.operations.linear.internal.blas.IDOT
+
invoke(float[], float[], int, float[]) - Method in interface neureka.backend.main.operations.linear.internal.blas.GEMM.VectorOperationF32
 
-
invoke(long[], int, long, long[], int, int, int) - Static method in class neureka.backend.main.operations.linear.internal.blas.IAXPY
+
invoke(double[], double[], int, double[]) - Method in interface neureka.backend.main.operations.linear.internal.blas.GEMM.VectorOperationF64
 
-
invoke(long[], long[], int, long[]) - Method in interface neureka.backend.main.operations.linear.internal.blas.IGEMM.VectorOperationI64
+
invoke(long[], int, long, long[], int, int, int) - Static method in class neureka.backend.main.operations.linear.internal.blas.IAXPY
 
-
invoke(long, long) - Method in interface neureka.backend.main.implementations.fun.api.CPUBiFun
+
invoke(int[], int, int, int[], int, int, int) - Static method in class neureka.backend.main.operations.linear.internal.blas.IAXPY
 
-
invoke(short) - Method in interface neureka.backend.main.implementations.fun.api.CPUFun
+
invoke(long[], int, long[], int, int, int) - Static method in class neureka.backend.main.operations.linear.internal.blas.IDOT
 
-
invoke(short, short) - Method in interface neureka.backend.main.implementations.fun.api.CPUBiFun
+
invoke(int[], int, int[], int, int, int) - Static method in class neureka.backend.main.operations.linear.internal.blas.IDOT
 
-
invoke(Object) - Method in interface neureka.backend.main.implementations.fun.api.CPUFun
+
invoke(int[], int[], int, int[]) - Method in interface neureka.backend.main.operations.linear.internal.blas.IGEMM.VectorOperationI32
 
-
invoke(Object, Object) - Method in interface neureka.backend.main.implementations.fun.api.CPUBiFun
+
invoke(long[], long[], int, long[]) - Method in interface neureka.backend.main.operations.linear.internal.blas.IGEMM.VectorOperationI64
 
-
invoke(ExecutorService, int, int, int) - Method in class neureka.devices.host.concurrent.WorkScheduler
+
invoke(ExecutorService, int, int, int) - Method in class neureka.devices.host.concurrent.WorkScheduler
Synchronous execution - wait until it's finished.
-
invoke(Supplier<T>) - Method in class neureka.backend.api.BackendContext.Runner
+
invoke(Tensor<T>...) - Method in interface neureka.math.Function.Callable
-
Use this method to supply a lambda which will be executed in the BackendContext which produced this very BackendContext.Runner instance.
+
This method is functionally identical to Function.Callable.call(Tensor[]), however it is best used in Kotlin, where one can omit the function name entirely and call this Function directly!
-
invoke(List<Tensor<T>>) - Method in interface neureka.math.Function
+
invoke(double) - Method in interface neureka.math.Function
-
This method is functionally identical to Function.call(List), however it is best used in Kotlin, where one can omit the function name entirely and call this Function directly!
+
Invokes this Function with the provided scalar as a single input and returns the scalar result.
+
+
invoke(double[], int) - Method in interface neureka.math.Function
+
+
Invokes this Function with the provided array of inputs and an index for input dependent indexing.
+
+
invoke(double...) - Method in interface neureka.math.Function
+
+
Invokes this Function with the provided array of inputs.
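A hedged example of the scalar and array overloads listed above; the Function.of(String) factory and the "i0"/"i1" placeholder syntax are assumptions made for illustration:

```java
// Hedged sketch: invoking a Function with a single scalar and with an array of inputs.
// 'Function.of(...)' and the expression syntax are assumptions, not confirmed API.
Function f = Function.of("i0 * 2 + i1");
double single   = Function.of("i0 + 1").invoke(2.0);   // scalar overload -> 3.0
double combined = f.invoke(new double[]{ 3.0, 4.0 });  // array overload  -> 10.0
```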
-
invoke(Call.Builder<T, D>) - Method in interface neureka.math.Function
+
invoke(Call.Builder<T, D>) - Method in interface neureka.math.Function
Use this to pass more context information for execution of input tensors.
-
invoke(Args, Tensor<T>...) - Method in interface neureka.math.Function
+
invoke(Args, Tensor<T>...) - Method in interface neureka.math.Function
Use this to call this Function alongside with some additional meta-arguments which will be passed to the underlying Operation(s).
-
invoke(Tensor<T>) - Method in interface neureka.math.Function
+
invoke(Tensor<T>) - Method in interface neureka.math.Function
This method is functionally identical to Function.call(Tensor), however it is best used in Kotlin, where one can omit the function name entirely and call this Function directly!
-
invoke(Tensor<T>...) - Method in interface neureka.math.Function.Callable
+
invoke(List<Tensor<T>>) - Method in interface neureka.math.Function
This method is functionally identical to Function.call(List), however it is best used in Kotlin, where one can omit the function name entirely and call this Function directly!
-
invoke(Tensor<T>...) - Method in interface neureka.math.Function
+
invoke(Tensor<T>[], int) - Method in interface neureka.math.Function
This method is functionally identical to Function.call(Tensor[], int), however it is best used in Kotlin, where one can omit the function name entirely and call this Function directly!
-
invoke(Tensor<T>[], int) - Method in interface neureka.math.Function
+
invoke(Tensor<T>...) - Method in interface neureka.math.Function
This method is functionally identical to Function.call(Tensor[]), however it is best used in Kotlin, where one can omit the function name entirely and call this Function directly!
-
is(Class<?>) - Method in interface neureka.Tensor
+
is(Class<?>) - Method in interface neureka.Tensor
This method compares the passed class with the underlying data-type of this NDArray.
-
isAnOperation(String) - Static method in class neureka.math.parsing.ParseUtil
+
isAnOperation(String) - Static method in class neureka.math.parsing.ParseUtil
 
-
isApplyingGradientWhenRequested() - Method in class neureka.Neureka.Settings.AutoGrad
+
isApplyingGradientWhenRequested() - Method in class neureka.Neureka.Settings.AutoGrad
Gradients will only be applied if requested.
-
isApplyingGradientWhenTensorIsUsed() - Method in class neureka.Neureka.Settings.AutoGrad
+
isApplyingGradientWhenTensorIsUsed() - Method in class neureka.Neureka.Settings.AutoGrad
Gradients will automatically be applied (or JITed) to tensors as soon as they are being used for calculation (GraphNode instantiation).
-
isAutoConvertToFloat() - Method in class neureka.backend.ocl.CLSettings
+
isAutoConvertToFloat() - Method in class neureka.backend.ocl.CLSettings
 
-
isBranch() - Method in interface neureka.Tensor
+
isBranch() - Method in interface neureka.Tensor
Tensors which are used or produced by the autograd system will have a GraphNode component attached to them.
-
isCase(Tensor<V>) - Method in interface neureka.Tensor
+
isCase(Tensor<V>) - Method in interface neureka.Tensor
This method name translates to the "in" keyword in Groovy! The same is true for the "contains" method in Kotlin.
-
isCompact() - Method in interface neureka.ndim.config.NDConfiguration
+
isCompact() - Method in interface neureka.ndim.config.NDConfiguration
NDConfiguration instances where this flag is true will most likely not be slices because they have no offset (all 0) and a compact spread / step array (all 1).
-
isCompatible(NDConfiguration.Layout) - Method in enum class neureka.ndim.config.NDConfiguration.Layout
+
isCompatible(NDConfiguration.Layout) - Method in enum neureka.ndim.config.NDConfiguration.Layout
 
-
isDeleted() - Method in interface neureka.Tensor
+
isDeleted() - Method in interface neureka.Tensor
-
This will check if the MutateTensor.delete() method was previously called on this tensor.
+
This will check if the MutateTensor.delete() method was previously called on this tensor.
-
isDeletingIntermediateTensors() - Method in class neureka.Neureka.Settings.Debug
+
isDeletingIntermediateTensors() - Method in class neureka.Neureka.Settings.Debug
Function instances will produce hidden intermediate results when executing an array of inputs.
-
isDifferentiable() - Method in interface neureka.backend.api.Operation
+
isDifferentiable() - Method in interface neureka.backend.api.Operation
-
Deprecated.
+
Deprecated.
-
isDifferentiable() - Method in class neureka.backend.api.template.operations.AbstractOperation
+
isDifferentiable() - Method in class neureka.backend.api.template.operations.AbstractOperation
 
-
isDifferentiable(boolean) - Method in class neureka.backend.api.template.operations.OperationBuilder
+
isDifferentiable(boolean) - Method in class neureka.backend.api.template.operations.OperationBuilder
 
-
isDoingAD() - Method in interface neureka.math.Function
+
isDoingAD() - Method in interface neureka.math.Function
Only branch Functions can do autograd / 'Auto-Differentiation', meaning functions whose Function.isFlat() flag is set to false!
-
isDoingAD() - Method in class neureka.math.implementations.FunctionConstant
+
isDoingAD() - Method in class neureka.math.implementations.FunctionConstant
 
-
isDoingAD() - Method in class neureka.math.implementations.FunctionInput
+
isDoingAD() - Method in class neureka.math.implementations.FunctionInput
 
-
isDoingAD() - Method in class neureka.math.implementations.FunctionNode
+
isDoingAD() - Method in class neureka.math.implementations.FunctionNode
 
-
isDoingAD() - Method in class neureka.math.implementations.FunctionVariable
+
isDoingAD() - Method in class neureka.math.implementations.FunctionVariable
 
-
isDone() - Method in class neureka.autograd.JITProp
+
isDone() - Method in class neureka.autograd.JITProp
 
-
isEmpty() - Method in class neureka.devices.AbstractBaseDevice
+
isEmpty() - Method in class neureka.devices.AbstractBaseDevice
A device is empty if there are no tensors stored on it.
-
isEmpty() - Method in class neureka.devices.file.IDXHandle
-
 
-
isEmpty() - Method in interface neureka.devices.Storage
+
isEmpty() - Method in interface neureka.devices.Storage
 
-
isEmpty() - Method in interface neureka.Tensor
+
isEmpty() - Method in interface neureka.Tensor
A tensor is empty if its Data storage is null.
-
isFirstColIsIndex() - Method in class neureka.devices.file.CSVHandle
+
isFirstColIsIndex() - Method in class neureka.devices.file.CSVHandle
 
-
isFirstRowIsLabels() - Method in class neureka.devices.file.CSVHandle
+
isFirstRowIsLabels() - Method in class neureka.devices.file.CSVHandle
 
-
isFlat() - Method in interface neureka.math.Function
+
isFlat() - Method in interface neureka.math.Function
 
-
isFlat() - Method in class neureka.math.implementations.FunctionConstant
+
isFlat() - Method in class neureka.math.implementations.FunctionConstant
 
-
isFlat() - Method in class neureka.math.implementations.FunctionInput
+
isFlat() - Method in class neureka.math.implementations.FunctionInput
 
-
isFlat() - Method in class neureka.math.implementations.FunctionNode
+
isFlat() - Method in class neureka.math.implementations.FunctionNode
 
-
isFlat() - Method in class neureka.math.implementations.FunctionVariable
+
isFlat() - Method in class neureka.math.implementations.FunctionVariable
 
-
isFullSlice() - Method in interface neureka.Nda
+
isFullSlice() - Method in interface neureka.Nda
If this nd-array is a full slice of a parent nd-array then this method will yield true.
-
isGraphLeave() - Method in class neureka.autograd.GraphNode
+
isGraphLeave() - Method in class neureka.autograd.GraphNode
 
-
isIndexer() - Method in interface neureka.backend.api.Operation
+
isIndexer() - Method in interface neureka.backend.api.Operation
This boolean property tells the Function implementations that this Operation ought to be viewed as something to be indexed.
-
isIndexer() - Method in class neureka.backend.api.template.operations.AbstractOperation
+
isIndexer() - Method in class neureka.backend.api.template.operations.AbstractOperation
 
-
isIndexer(boolean) - Method in class neureka.backend.api.template.operations.OperationBuilder
+
isIndexer(boolean) - Method in class neureka.backend.api.template.operations.OperationBuilder
 
-
isInline() - Method in interface neureka.backend.api.Operation
+
isInline() - Method in interface neureka.backend.api.Operation
This flag indicates that the implementation of this Operation performs an operation which modifies the inputs to that operation.
-
isInline() - Method in class neureka.backend.api.template.operations.AbstractOperation
+
isInline() - Method in class neureka.backend.api.template.operations.AbstractOperation
 
-
isInline(boolean) - Method in class neureka.backend.api.template.operations.OperationBuilder
+
isInline(boolean) - Method in class neureka.backend.api.template.operations.OperationBuilder
 
-
isIntermediate() - Method in interface neureka.Tensor
+
isIntermediate() - Method in interface neureka.Tensor
Intermediate tensors are internal non-user tensors which may be eligible for deletion when further consumed by a Function.
-
isKeepingDerivativeTargetPayloads() - Method in class neureka.Neureka.Settings.Debug
+
isKeepingDerivativeTargetPayloads() - Method in class neureka.Neureka.Settings.Debug
Every derivative is calculated with respect to some graph node.
-
isL2Specified() - Method in class neureka.devices.host.machine.Hardware
+
isL2Specified() - Method in class neureka.devices.host.machine.Hardware
 
-
isL3Specified() - Method in class neureka.devices.host.machine.Hardware
+
isL3Specified() - Method in class neureka.devices.host.machine.Hardware
 
-
isLeave() - Method in class neureka.autograd.GraphNode
+
isLeave() - Method in class neureka.autograd.GraphNode
This node (and the corresponding tensor) was not created by a function! (it's a leave tensor)
-
isLeave() - Method in interface neureka.Tensor
+
isLeave() - Method in interface neureka.Tensor
Tensors which are used or produced by the autograd system will have a GraphNode component attached to them.
-
isLocked() - Method in class neureka.Neureka.Settings
+
isLocked() - Method in class neureka.Neureka.Settings
Locked settings can only be read but not written to.
-
isOnlyUsingDefaultNDConfiguration() - Method in class neureka.Neureka.Settings.NDim
+
isOnlyUsingDefaultNDConfiguration() - Method in class neureka.Neureka.Settings.NDim
This flag determines which NDConfiguration implementations should be used for nd-arrays/tensors.
-
isOperator() - Method in interface neureka.backend.api.Operation
+
isOperator() - Method in interface neureka.backend.api.Operation
An operator is an alternative to a function like "sum()" or "prod()".
-
isOperator() - Method in class neureka.backend.api.template.operations.AbstractOperation
+
isOperator() - Method in class neureka.backend.api.template.operations.AbstractOperation
 
-
isOperator(boolean) - Method in class neureka.backend.api.template.operations.OperationBuilder
+
isOperator(boolean) - Method in class neureka.backend.api.template.operations.OperationBuilder
 
-
isOutsourced() - Method in interface neureka.Tensor
+
isOutsourced() - Method in interface neureka.Tensor
Outsourced means that the tensor is stored on a Device implementation instance which is not the CPU.
-
isPartialSlice() - Method in interface neureka.Nda
+
isPartialSlice() - Method in interface neureka.Nda
If this nd-array is a partial slice of a parent nd-array then this method will yield true.
-
isPartialSlice() - Method in interface neureka.Tensor
+
isPartialSlice() - Method in interface neureka.Tensor
If this nd-array is a partial slice of a parent nd-array then this method will yield true.
-
isPreventingInlineOperations() - Method in class neureka.Neureka.Settings.AutoGrad
+
isPreventingInlineOperations() - Method in class neureka.Neureka.Settings.AutoGrad
Inline operations are operations where the data of a tensor passed into an operation is being modified.
-
isReliesOnJustInTimeProp() - Method in class neureka.autograd.GraphNode
+
isReliesOnJustInTimeProp() - Method in class neureka.autograd.GraphNode
This flag is used for a performance optimization feature namely 'Just In Time Propagation'.
-
isRetainingPendingErrorForJITProp() - Method in class neureka.Neureka.Settings.AutoGrad
+
isRetainingPendingErrorForJITProp() - Method in class neureka.Neureka.Settings.AutoGrad
This flag enables an optimization technique which only propagates error values to gradients if needed by a tensor (the tensor is used again) and otherwise accumulates them. If the flag is set to true then error values will accumulate at such junction nodes.
-
isShallowCopy() - Method in interface neureka.Nda
+
isShallowCopy() - Method in interface neureka.Nda
If this nd-array is a shallow copy of a parent nd-array then this method will yield true.
-
isShallowCopy() - Method in interface neureka.Tensor
+
isShallowCopy() - Method in interface neureka.Tensor
If this nd-array is a shallow copy of a parent nd-array then this method will yield true.
-
isSimple() - Method in interface neureka.ndim.config.NDConfiguration
+
isSimple() - Method in interface neureka.ndim.config.NDConfiguration
The boolean returned by this method simply reports if this configuration is the most basic form of configuration possible for the given shape represented by this instance.
-
isSlice() - Method in interface neureka.Nda
+
isSlice() - Method in interface neureka.Nda
If this nd-array is a slice of a parent nd-array then this method will yield true.
-
isSlice() - Method in interface neureka.Tensor
+
isSlice() - Method in interface neureka.Tensor
If this nd-array is a slice of a parent nd-array then this method will yield true.
-
isSliceParent() - Method in interface neureka.Nda
+
isSliceParent() - Method in interface neureka.Nda
If slices have been derived from this nd-array then it is a "slice parent".
-
isSliceParent() - Method in interface neureka.Tensor
+
isSliceParent() - Method in interface neureka.Tensor
If slices have been derived from this nd-array then it is a "slice parent".
-
isSuitableFor(ExecutionCall<? extends Device<?>>) - Method in interface neureka.backend.api.fun.SuitabilityPredicate
+
isSuitableFor(ExecutionCall<? extends Device<?>>) - Method in interface neureka.backend.api.fun.SuitabilityPredicate
When an ExecutionCall instance has been formed then it will be routed by the given Operation instance to their components, namely: ...
The ability to decide which algorithm is suitable for a given ExecutionCall instance is being granted by implementations of the following method.
-
isSuitableFor(ExecutionCall<? extends Device<?>>) - Method in class neureka.backend.api.template.algorithms.AbstractFunAlgorithm
+
isSuitableFor(ExecutionCall<? extends Device<?>>) - Method in class neureka.backend.api.template.algorithms.AbstractFunAlgorithm
The SuitabilityPredicate checks if a given instance of an ExecutionCall is suitable to be executed in ImplementationFor residing in this Algorithm as components.
-
isSuitableFor(ExecutionCall<? extends Device<?>>) - Method in class neureka.backend.api.template.algorithms.AbstractFunDeviceAlgorithm
+
isSuitableFor(ExecutionCall<? extends Device<?>>) - Method in class neureka.backend.api.template.algorithms.AbstractFunDeviceAlgorithm
The SuitabilityPredicate checks if a given instance of an ExecutionCall is suitable to be executed in ImplementationFor residing in this Algorithm as components.
-
isSuitableFor(ExecutionCall<? extends Device<?>>) - Method in class neureka.backend.api.template.algorithms.FallbackAlgorithm
+
isSuitableFor(ExecutionCall<? extends Device<?>>) - Method in class neureka.backend.api.template.algorithms.FallbackAlgorithm
 
-
isUndefined() - Method in interface neureka.Tensor
+
isUndefined() - Method in interface neureka.Tensor
A tensor is "undefined" if it has either no NDConfiguration implementation instance or this instance does not have a shape set for this Tensor which is needed for a tensor to also have a rank and dimensionality...
-
isUsedAsDerivative() - Method in class neureka.autograd.GraphNode
+
isUsedAsDerivative() - Method in class neureka.autograd.GraphNode
The chain-rule states that the derivative of f(x) = h(g(x)) with respect to x is: g'(x) * h'(g(x)). An example would be: f'(x) = (1*y) * (1*z) = z*y. The values z, y or z*y must not be deleted as they are needed for back-propagation!
-
isValid() - Method in class neureka.backend.api.Call.Validator
+
isValid() - Method in class neureka.backend.api.Call.Validator
 
-
isVirtual() - Method in interface neureka.ndim.config.NDConfiguration
+
isVirtual() - Method in interface neureka.ndim.config.NDConfiguration
 
-
isVirtual() - Method in class neureka.ndim.config.types.views.virtual.VirtualNDConfiguration
-
isVirtual() - Method in interface neureka.Tensor
+
isVirtual() - Method in class neureka.ndim.config.types.views.virtual.VirtualNDConfiguration
+
isVirtual() - Method in interface neureka.Tensor
A Virtual tensor is a tensor whose underlying data array is of size 1, holding only a single value.
-
isWronglyIntermediate() - Method in class neureka.backend.main.memory.MemValidator
+
isWronglyIntermediate() - Method in class neureka.backend.main.memory.MemValidator
 
-
isWronglyNonIntermediate() - Method in class neureka.backend.main.memory.MemValidator
+
isWronglyNonIntermediate() - Method in class neureka.backend.main.memory.MemValidator
 
-
item() - Method in interface neureka.Nda
-
-
Equivalent to the #item(0) and Nda.getItem().
-
-
item(int) - Method in interface neureka.Nda
+
item(int) - Method in interface neureka.Nda
The following method returns a single item within this nd-array targeted by the provided integer index.
-
item(int...) - Method in interface neureka.Nda
+
item(int...) - Method in interface neureka.Nda
This method returns a raw value item within this nd-array targeted by an index array which is expected to hold an index for every dimension of the shape of this nd-array.
-
items() - Method in interface neureka.Nda
+
item() - Method in interface neureka.Nda
-
A more concise version of the Nda.getItems() method.
+
Equivalent to the #item(0) and Nda.getItem().
-
itemType() - Method in interface neureka.Nda
+
items() - Method in interface neureka.Nda
+
+
A more concise version of the Nda.getItems() method.
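As a small illustrative sketch (not part of the generated index), the item accessors listed above can be combined as follows; the overloads used are the ones documented in this index, while the inferred generic item types are an assumption:

    import neureka.Nda;
    import neureka.Shape;

    public class ItemAccessSketch {
        public static void main(String[] args) {
            // 2x3 nd-array built from a shape plus int values (Nda.of(Shape, int...))
            Nda<Integer> nda = Nda.of(Shape.of(2, 3), 1, 2, 3, 4, 5, 6);
            Integer flat  = nda.item(4);     // single item targeted by a flat index
            Integer byIdx = nda.item(1, 1);  // item targeted by one index per dimension
            System.out.println(flat + " " + byIdx + " " + nda.items());
        }
    }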
+
+
itemType() - Method in interface neureka.Nda
 
-
iterator() - Method in interface neureka.Shape
+
iterator() - Method in interface neureka.Shape
 
-
IterByOrIterFromOrAll<V> - Interface in neureka.fluent.building.states
+
IterByOrIterFromOrAll<V> - Interface in neureka.fluent.building.states
 
-
IterByOrIterFromOrAllTensor<V> - Interface in neureka.fluent.building.states
+
IterByOrIterFromOrAllTensor<V> - Interface in neureka.fluent.building.states
 

J

JITProp<V> - Class in neureka.autograd
This class keeps track of graph nodes which require back-propagation in order to be able to continue the process at a later point in time (based on some configurable conditions).
-
JITProp(Set<GraphNode<V>>) - Constructor for class neureka.autograd.JITProp
+
JITProp(Set<GraphNode<V>>) - Constructor for class neureka.autograd.JITProp
 
-
JVMExecutor() - Constructor for class neureka.devices.host.CPU.JVMExecutor
+
JVMExecutor() - Constructor for class neureka.devices.host.CPU.JVMExecutor
 

K

K - Static variable in class neureka.devices.host.machine.CommonMachine
 
-
keep(Tensor<?>[], Supplier<T>) - Static method in class neureka.backend.main.memory.MemUtil
+
keep(Tensor<?>[], Supplier<T>) - Static method in class neureka.backend.main.memory.MemUtil
This method makes sure that the provided tensors do not get deleted by setting the Tensor.isIntermediate() flag to off during the execution of the provided Supplier lambda! In said lambda the supplied thing will ultimately be returned by this method...
-
keep(Tensor<?>, Tensor<?>, Supplier<T>) - Static method in class neureka.backend.main.memory.MemUtil
+
keep(Tensor<?>, Tensor<?>, Supplier<T>) - Static method in class neureka.backend.main.memory.MemUtil
This method makes sure that the provided tensors do not get deleted by setting the Tensor.isIntermediate() flag to off during the execution of the provided Supplier lambda! In said lambda the supplied thing will ultimately be returned by this method...
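A hypothetical usage sketch follows; the Tensor.of(float...) vector factory and the Tensor.times(Tensor) product referenced elsewhere in this index are assumed to behave as plain element-wise operations:

    import neureka.Tensor;
    import neureka.backend.main.memory.MemUtil;

    public class KeepSketch {
        public static void main(String[] args) {
            Tensor<Float> a = Tensor.of(1f, 2f, 3f);
            Tensor<Float> b = Tensor.of(4f, 5f, 6f);
            // Protect 'a' and 'b' from being treated as deletable intermediates
            // while the lambda runs; keep(..) returns whatever the lambda supplies.
            Tensor<Float> product = MemUtil.keep(a, b, () -> a.times(b));
            System.out.println(product);
        }
    }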
-
KernelCache - Class in neureka.devices.opencl
+
KernelCache - Class in neureka.devices.opencl
A fixed sized cache for ad-hoc (just in time compiled) OpenCLDevice kernels.
-
KernelCache() - Constructor for class neureka.devices.opencl.KernelCache
+
KernelCache() - Constructor for class neureka.devices.opencl.KernelCache
 
-
KernelCaller - Class in neureka.devices.opencl
+
KernelCaller - Class in neureka.devices.opencl
Instances of this class are utility factories provided by OpenCLDevice instances.
-
KernelCaller(cl_kernel, cl_command_queue) - Constructor for class neureka.devices.opencl.KernelCaller
+
KernelCaller(cl_kernel, cl_command_queue) - Constructor for class neureka.devices.opencl.KernelCaller
 
-
KernelCode - Class in neureka.devices.opencl
+
KernelCode - Class in neureka.devices.opencl
 
-
KernelCode(String, String) - Constructor for class neureka.devices.opencl.KernelCode
+
KernelCode(String, String) - Constructor for class neureka.devices.opencl.KernelCode
 
-
KernelCode(String, String, DataType<?>) - Constructor for class neureka.devices.opencl.KernelCode
+
KernelCode(String, String, DataType<?>) - Constructor for class neureka.devices.opencl.KernelCode
 
-
KernelSource - Interface in neureka.devices.opencl
+
KernelSource - Interface in neureka.devices.opencl
Provides kernel source code for a provided ExecutionCall.

L

label(String) - Method in interface neureka.MutateNda
-
A nd-array can have a label.
+
Sets the label of this nd-array.
-
label(String) - Method in interface neureka.MutateNda
+
label(String) - Method in interface neureka.MutateTensor
Sets the label of this nd-array.
-
label(String) - Method in interface neureka.MutateTensor
+
label() - Method in interface neureka.Nda
-
Sets the label of this nd-array.
-
-
labelAxes(String[]...) - Method in interface neureka.MutateNda
-
-
This method receives a nested String array which ought to contain a label for the index of this tensor.
-
-
labelAxes(String[]...) - Method in interface neureka.MutateTensor
-
-
This method receives a label for this tensor and a nested String array which ought to contain a label for the index of this tensor.
+
A nd-array can have a label.
-
labelAxes(List<List<Object>>) - Method in interface neureka.MutateNda
+
labelAxes(String[]...) - Method in interface neureka.MutateNda
-
This method receives a nested String array which ought to contain a label for the index of this tensor.
-
labelAxes(List<List<Object>>) - Method in interface neureka.MutateTensor
+
labelAxes(List<List<Object>>) - Method in interface neureka.MutateNda
-
This method receives a nested String list which ought to contain a label for the index of this tensor.
-
labelAxes(Map<Object, List<Object>>) - Method in interface neureka.MutateNda
+
labelAxes(Map<Object, List<Object>>) - Method in interface neureka.MutateNda
This method provides the ability to label not only the indices of the shape of this tensor, but also the dimension of the shape.
-
labelAxes(Map<Object, List<Object>>) - Method in interface neureka.MutateTensor
+
labelAxes(String[]...) - Method in interface neureka.MutateTensor
+
+
This method receives a label for this tensor and a nested String array which ought to contain a label for the index of this tensor.
+
+
labelAxes(List<List<Object>>) - Method in interface neureka.MutateTensor
+
+
This method receives a nested String list which ought to contain a label for the index of this tensor.
+
+
labelAxes(Map<Object, List<Object>>) - Method in interface neureka.MutateTensor
This method provides the ability to label not only the indices of the shape of this tensor, but also the dimension of the shape.
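An informal sketch of axis labeling follows; it assumes the label arrays are matched to the axes of the shape in order and that their lengths correspond to the dimension sizes:

    import neureka.Shape;
    import neureka.Tensor;

    public class LabelAxesSketch {
        public static void main(String[] args) {
            // 2x3 tensor of doubles, filled with 0.0 (Tensor.of(Class, Shape, Number))
            Tensor<Double> t = Tensor.of(Double.class, Shape.of(2, 3), 0d);
            // One label array per axis: 2 row labels and 3 column labels.
            t.mut().labelAxes(
                new String[]{"row-a", "row-b"},
                new String[]{"col-1", "col-2", "col-3"}
            );
            System.out.println(t);
        }
    }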
-
last(Call.TensorCondition) - Method in class neureka.backend.api.Call.Validator
+
last(Call.TensorCondition) - Method in class neureka.backend.api.Call.Validator
 
-
LazyEntry(K, Function<K, V>) - Constructor for class neureka.common.utility.Cache.LazyEntry
+
LazyEntry(K, Function<K, V>) - Constructor for class neureka.common.utility.Cache.LazyEntry
 
-
LazyRef<V> - Class in neureka.backend.api
+
LazyRef<V> - Class in neureka.backend.api
This will simply fetch a variable from a lambda once and then continuously return this one value.
-
learningRate() - Method in class neureka.optimization.implementations.SGD
+
learningRate() - Method in class neureka.optimization.implementations.SGD
 
-
like(Tensor<V>) - Static method in interface neureka.Tensor
+
like(Tensor<V>) - Static method in interface neureka.Tensor
Use this factory method to instantiate a new tensor with the same data type, shape and memory location (Device instance) as the provided template tensor.
-
ListReader - Class in neureka.common.utility
+
ListReader - Class in neureka.common.utility
This is a simple utility class which traverses nested data structures and converts them into information which can be used to instantiate a tensor, namely: A flat data array, a shape array and a type class.
-
ListReader.Result - Class in neureka.common.utility
+
ListReader.Result - Class in neureka.common.utility
 
-
ln() - Method in class neureka.math.Functions
+
ln() - Method in class neureka.math.Functions
 
-
ln() - Method in interface neureka.Tensor
+
ln() - Method in interface neureka.Tensor
This method is functionally identical to the following alternatives:
-
load() - Method in class neureka.devices.file.CSVHandle
+
load(BackendRegistry) - Method in interface neureka.backend.api.ini.BackendLoader
+
 
+
load() - Method in class neureka.devices.file.CSVHandle
+
 
+
load(String) - Method in class neureka.devices.file.FileDevice
 
-
load() - Method in interface neureka.devices.file.FileHandle
+
load(String, Map<String, Object>) - Method in class neureka.devices.file.FileDevice
+
 
+
load() - Method in interface neureka.devices.file.FileHandle
An implementation of this method ought to create a new tensor instance containing the data which is stored in the file whose access this FileHandle manages.
-
load() - Method in class neureka.devices.file.IDXHandle
+
load() - Method in class neureka.devices.file.IDXHandle
 
-
load(String) - Method in class neureka.devices.file.FileDevice
+
LoadingContext - Interface in neureka.backend.api.ini
 
-
load(String, Map<String, Object>) - Method in class neureka.devices.file.FileDevice
+
loadProperties(Neureka) - Static method in class neureka.common.utility.SettingsLoader
 
-
load(BackendRegistry) - Method in interface neureka.backend.api.ini.BackendLoader
+
localMemSize() - Method in class neureka.devices.opencl.OpenCLDevice
 
-
LoadingContext - Interface in neureka.backend.api.ini
+
localMemType() - Method in class neureka.devices.opencl.OpenCLDevice
 
-
loadProperties(Neureka) - Static method in class neureka.common.utility.SettingsLoader
+
LOG10 - Static variable in interface neureka.backend.main.implementations.fun.api.ScalarFun
 
-
localMemSize() - Method in class neureka.devices.opencl.OpenCLDevice
+
Log10 - Class in neureka.backend.main.operations.functions
 
-
localMemType() - Method in class neureka.devices.opencl.OpenCLDevice
+
Log10() - Constructor for class neureka.backend.main.operations.functions.Log10
 
-
log10() - Method in class neureka.math.Functions
+
log10() - Method in class neureka.math.Functions
 
-
log10() - Method in interface neureka.Tensor
+
log10() - Method in interface neureka.Tensor
This method is functionally identical to the following alternatives:
-
Log10 - Class in neureka.backend.main.operations.functions
-
 
-
Log10() - Constructor for class neureka.backend.main.operations.functions.Log10
-
 
-
LOG10 - Static variable in interface neureka.backend.main.implementations.fun.api.ScalarFun
+
LOGARITHM - Static variable in interface neureka.backend.main.implementations.fun.api.ScalarFun
 
-
Logarithm - Class in neureka.backend.main.operations.functions
+
Logarithm - Class in neureka.backend.main.operations.functions
 
-
Logarithm() - Constructor for class neureka.backend.main.operations.functions.Logarithm
+
Logarithm() - Constructor for class neureka.backend.main.operations.functions.Logarithm
 
-
LOGARITHM - Static variable in interface neureka.backend.main.implementations.fun.api.ScalarFun
-
 
-
LogUtil - Class in neureka.common.utility
+
LogUtil - Class in neureka.common.utility
A utility class for message formatting.
-
LogUtil() - Constructor for class neureka.common.utility.LogUtil
+
LogUtil() - Constructor for class neureka.common.utility.LogUtil
 
-
longToBigInteger(long[]) - Static method in class neureka.common.utility.DataConverter.Utility
+
longToBigInteger(long[]) - Static method in class neureka.common.utility.DataConverter.Utility
 
-
longToByte(long[]) - Static method in class neureka.common.utility.DataConverter.Utility
+
longToByte(long[]) - Static method in class neureka.common.utility.DataConverter.Utility
 
-
longToDouble(long[]) - Static method in class neureka.common.utility.DataConverter.Utility
+
longToDouble(long[]) - Static method in class neureka.common.utility.DataConverter.Utility
 
-
longToFloat(long[]) - Static method in class neureka.common.utility.DataConverter.Utility
+
longToFloat(long[]) - Static method in class neureka.common.utility.DataConverter.Utility
 
-
longToInt(long[]) - Static method in class neureka.common.utility.DataConverter.Utility
+
longToInt(long[]) - Static method in class neureka.common.utility.DataConverter.Utility
 
-
longToShort(long[]) - Static method in class neureka.common.utility.DataConverter.Utility
+
longToShort(long[]) - Static method in class neureka.common.utility.DataConverter.Utility
 

M

makeFit(Tensor<?>[], boolean) - Static method in class neureka.backend.main.operations.other.Permute
 
-
makeSimple(String, long, int) - Static method in class neureka.devices.host.machine.Hardware
+
makeSimple() - Static method in class neureka.devices.host.machine.Hardware
 
-
map(int) - Method in interface neureka.ndim.config.NDConfiguration.IndexToIndexFunction
+
makeSimple(String, long, int) - Static method in class neureka.devices.host.machine.Hardware
 
-
map(Function<Integer, Integer>) - Method in interface neureka.Shape
-
-
This method is used to transform a Shape into another Shape by applying a function to it.
-
-
map(Function<V, V>) - Method in interface neureka.Nda.Item
+
map(Function<V, V>) - Method in interface neureka.Nda.Item
Maps this item to an optional value based on the provided lambda.
-
map(Function<V, V>) - Method in interface neureka.Nda
+
map(Function<V, V>) - Method in interface neureka.Nda
This method is a convenience method for mapping the items of this nd-array to another nd-array of the same type based on the provided lambda function, which will be applied to all items of this nd-array individually (element-wise).
-
map(Function<V, V>) - Method in interface neureka.Tensor
+
map(int) - Method in interface neureka.ndim.config.NDConfiguration.IndexToIndexFunction
+
 
+
map(Function<Integer, Integer>) - Method in interface neureka.Shape
+
+
This method is used to transform a Shape into another Shape by applying a function to it.
+
+
map(Function<V, V>) - Method in interface neureka.Tensor
This method is a convenience method for mapping the items of this nd-array to another nd-array of the same type based on the provided lambda function, which will be applied to all items of this nd-array individually (element-wise).
-
mapTo(Class<T>, Function<V, T>) - Method in interface neureka.Nda
+
mapTo(Class<T>, Function<V, T>) - Method in interface neureka.Nda
This is a convenience method for mapping a nd-array to a nd-array of new type based on a provided target item type and mapping lambda.
-
mapTo(Class<T>, Function<V, T>) - Method in interface neureka.Tensor
+
mapTo(Class<T>, Function<V, T>) - Method in interface neureka.Tensor
This is a convenience method for mapping a nd-array to a nd-array of new type based on a provided target item type and mapping lambda.
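A minimal sketch of element-wise mapping; the vector factory and the inferred generic types are assumptions based on the overloads listed in this index:

    import neureka.Nda;

    public class MapSketch {
        public static void main(String[] args) {
            Nda<Integer> xs = Nda.of(1, 2, 3, 4);                        // int vector (Nda.of(int...))
            Nda<Integer> doubled = xs.map(i -> i * 2);                   // same item type, element-wise
            Nda<String>  asText  = xs.mapTo(String.class, i -> "#" + i); // new item type via mapping lambda
            System.out.println(doubled + " " + asText);
        }
    }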
-
matMul() - Method in class neureka.math.Functions
+
MatMul - Class in neureka.backend.main.operations.linear
+
 
+
MatMul() - Constructor for class neureka.backend.main.operations.linear.MatMul
 
-
matMul(Tensor<V>) - Method in interface neureka.Tensor
+
matMul() - Method in class neureka.math.Functions
+
 
+
matMul(Tensor<V>) - Method in interface neureka.Tensor
This will produce the matrix product of two tensors with rank 2 (matrices), where the left operand is this Tensor instance and the right operand is the argument passed to the method.
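A small sketch of the matrix product, assuming Tensor.of(Class, Shape, Number) fills the new tensor homogeneously with the given number:

    import neureka.Shape;
    import neureka.Tensor;

    public class MatMulSketch {
        public static void main(String[] args) {
            Tensor<Double> a = Tensor.of(Double.class, Shape.of(2, 3), 1d); // 2x3 filled with 1.0
            Tensor<Double> b = Tensor.of(Double.class, Shape.of(3, 4), 2d); // 3x4 filled with 2.0
            Tensor<Double> c = a.matMul(b);                                 // 2x4 matrix product
            System.out.println(c);
        }
    }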
-
MatMul - Class in neureka.backend.main.operations.linear
+
MatMulAlgorithm - Class in neureka.backend.main.algorithms
 
-
MatMul() - Constructor for class neureka.backend.main.operations.linear.MatMul
+
MatMulAlgorithm() - Constructor for class neureka.backend.main.algorithms.MatMulAlgorithm
 
-
MatMulAlgorithm - Class in neureka.backend.main.algorithms
+
Max - Class in neureka.backend.main.operations.other
 
-
MatMulAlgorithm() - Constructor for class neureka.backend.main.algorithms.MatMulAlgorithm
+
Max() - Constructor for class neureka.backend.main.operations.other.Max
 
-
max() - Method in class neureka.math.Functions
+
max() - Method in class neureka.math.Functions
 
-
max() - Method in interface neureka.Tensor
+
max() - Method in interface neureka.Tensor
Calculates the max value of all values within this tensor and returns it in the form of a scalar tensor.
-
Max - Class in neureka.backend.main.operations.other
-
 
-
Max() - Constructor for class neureka.backend.main.operations.other.Max
+
maxAddressBits() - Method in class neureka.devices.opencl.OpenCLDevice
 
-
MAX - Enum constant in enum class neureka.backend.main.operations.linear.internal.opencl.CLReduce.Type
+
maxClockFrequenzy() - Method in class neureka.devices.opencl.OpenCLDevice
 
-
MAX - Enum constant in enum class neureka.backend.main.operations.other.internal.CPUReduce.Type
+
maxComputeUnits() - Method in class neureka.devices.opencl.OpenCLDevice
 
-
maxAddressBits() - Method in class neureka.devices.opencl.OpenCLDevice
+
maxConstantBufferSize() - Method in class neureka.devices.opencl.OpenCLDevice
 
-
maxClockFrequenzy() - Method in class neureka.devices.opencl.OpenCLDevice
+
maxConstantBufferSizeKB() - Method in class neureka.devices.opencl.OpenCLDevice
 
-
maxComputeUnits() - Method in class neureka.devices.opencl.OpenCLDevice
-
 
-
maxConstantBufferSize() - Method in class neureka.devices.opencl.OpenCLDevice
-
 
-
maxConstantBufferSizeKB() - Method in class neureka.devices.opencl.OpenCLDevice
-
 
-
maxItem(Comparator<V>) - Method in interface neureka.Nda
+
maxItem(Comparator<V>) - Method in interface neureka.Nda
Returns the maximum item of this nd-array according to the provided Comparator.
-
maxMemAllocSize() - Method in class neureka.devices.opencl.OpenCLDevice
+
maxMemAllocSize() - Method in class neureka.devices.opencl.OpenCLDevice
 
-
maxReadImageArgs() - Method in class neureka.devices.opencl.OpenCLDevice
+
maxReadImageArgs() - Method in class neureka.devices.opencl.OpenCLDevice
 
-
maxWorkGroupSize() - Method in class neureka.devices.opencl.OpenCLDevice
+
maxWorkGroupSize() - Method in class neureka.devices.opencl.OpenCLDevice
 
-
maxWorkItemSimensions() - Method in class neureka.devices.opencl.OpenCLDevice
+
maxWorkItemSimensions() - Method in class neureka.devices.opencl.OpenCLDevice
 
-
maxWorkItemSizes() - Method in class neureka.devices.opencl.OpenCLDevice
+
maxWorkItemSizes() - Method in class neureka.devices.opencl.OpenCLDevice
 
-
maxWriteImageArgs() - Method in class neureka.devices.opencl.OpenCLDevice
+
maxWriteImageArgs() - Method in class neureka.devices.opencl.OpenCLDevice
 
-
mean() - Method in interface neureka.Tensor
+
mean() - Method in interface neureka.Tensor
Calculates the mean value of all values within this tensor and returns it in the form of a scalar tensor.
-
memory - Variable in class neureka.devices.host.machine.BasicMachine
+
memory - Variable in class neureka.devices.host.machine.BasicMachine
 
-
MemUtil - Class in neureka.backend.main.memory
+
MemUtil - Class in neureka.backend.main.memory
Utility methods for deleting tensors or preventing thereof.
-
MemValidator - Class in neureka.backend.main.memory
+
MemValidator - Class in neureka.backend.main.memory
This class validates the states of tensors with respect to memory management before and after a lambda executes a function or some kind of algorithm on said tensors.
-
Messages - Class in neureka.devices.opencl.utility
+
Messages - Class in neureka.devices.opencl.utility
+
 
+
Messages.Tips - Enum in neureka.devices.opencl.utility
+
 
+
Min - Class in neureka.backend.main.operations.other
 
-
Messages.Tips - Enum Class in neureka.devices.opencl.utility
+
Min() - Constructor for class neureka.backend.main.operations.other.Min
 
-
min() - Method in class neureka.math.Functions
+
min() - Method in class neureka.math.Functions
 
-
min() - Method in interface neureka.Tensor
+
min() - Method in interface neureka.Tensor
Calculates the min value of all values within this tensor and returns it in the form of a scalar tensor.
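For orientation, a compact sketch of these scalar reductions; min(), max() and mean() each return a scalar tensor whose value is read back via item(), and the exact return types are assumed:

    import neureka.Tensor;

    public class ReduceSketch {
        public static void main(String[] args) {
            Tensor<Float> t = Tensor.of(3f, 1f, 4f, 1f, 5f, 9f);
            Tensor<Float> min  = t.min();   // scalar tensor holding the smallest value
            Tensor<Float> max  = t.max();   // scalar tensor holding the largest value
            Tensor<Float> mean = t.mean();  // scalar tensor holding the average
            System.out.println(min.item() + " " + max.item() + " " + mean.item());
        }
    }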
-
Min - Class in neureka.backend.main.operations.other
-
 
-
Min() - Constructor for class neureka.backend.main.operations.other.Min
-
 
-
MIN - Enum constant in enum class neureka.backend.main.operations.linear.internal.opencl.CLReduce.Type
-
 
-
MIN - Enum constant in enum class neureka.backend.main.operations.other.internal.CPUReduce.Type
-
 
-
minItem(Comparator<V>) - Method in interface neureka.Nda
+
minItem(Comparator<V>) - Method in interface neureka.Nda
Returns the minimum item of this nd-array according to the provided Comparator.
-
minus() - Method in class neureka.math.Functions
+
minus() - Method in class neureka.math.Functions
 
-
minus(Tensor<V>) - Method in interface neureka.Tensor
+
minus(Tensor<V>) - Method in interface neureka.Tensor
Performs subtraction on two tensors with the same rank (or two ranks which can be made compatible with padding ones), where the left operand is this Tensor instance and the right operand is the tensor passed to the method.
-
minus(V) - Method in interface neureka.Tensor
+
minus(V) - Method in interface neureka.Tensor
This method will create a new Tensor with the provided item subtracted from all elements of this Tensor.
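A hedged sketch combining the two subtraction overloads with the in-place variant exposed through mut() (see the minusAssign entries below); vector construction via Tensor.of(float...) is taken from this index:

    import neureka.Tensor;

    public class MinusSketch {
        public static void main(String[] args) {
            Tensor<Float> a = Tensor.of(5f, 6f, 7f);
            Tensor<Float> b = Tensor.of(1f, 2f, 3f);
            Tensor<Float> diff    = a.minus(b);  // element-wise subtraction, new tensor
            Tensor<Float> shifted = a.minus(1f); // subtract one item from every element
            a.mut().minusAssign(b);              // in-place variant through the mutation API
            System.out.println(diff + " " + shifted + " " + a);
        }
    }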
-
minusAssign() - Method in class neureka.math.Functions
+
minusAssign() - Method in class neureka.math.Functions
 
-
minusAssign(Tensor<T>) - Method in interface neureka.MutateTensor
+
minusAssign(Tensor<T>) - Method in interface neureka.MutateTensor
 
-
minusAssign(T) - Method in interface neureka.MutateTensor
+
minusAssign(T) - Method in interface neureka.MutateTensor
 
-
mod() - Method in class neureka.math.Functions
+
mod() - Method in class neureka.math.Functions
 
-
mod(int) - Method in interface neureka.Tensor
-
 
-
mod(Tensor<V>) - Method in interface neureka.Tensor
+
mod(Tensor<V>) - Method in interface neureka.Tensor
Produces the modulus of two tensors with the same rank (or two ranks which can be made compatible with padding ones), where the left operand is this Tensor instance and the right operand is the tensor passed to the method.
-
modAssign() - Method in class neureka.math.Functions
+
mod(int) - Method in interface neureka.Tensor
 
-
modAssign(Tensor<T>) - Method in interface neureka.MutateTensor
+
modAssign() - Method in class neureka.math.Functions
 
-
Modulo - Class in neureka.backend.main.operations.operator
+
modAssign(Tensor<T>) - Method in interface neureka.MutateTensor
 
-
Modulo() - Constructor for class neureka.backend.main.operations.operator.Modulo
+
Modulo - Class in neureka.backend.main.operations.operator
 
-
Momentum<V extends Number> - Class in neureka.optimization.implementations
+
Modulo() - Constructor for class neureka.backend.main.operations.operator.Modulo
 
-
Momentum - Static variable in interface neureka.optimization.Optimizer
+
Momentum<V extends java.lang.Number> - Class in neureka.optimization.implementations
 
-
MomentumFactory - Class in neureka.optimization.implementations
+
Momentum - Static variable in interface neureka.optimization.Optimizer
 
-
MomentumFactory() - Constructor for class neureka.optimization.implementations.MomentumFactory
+
MomentumFactory - Class in neureka.optimization.implementations
 
-
mul() - Method in class neureka.math.Functions
+
MomentumFactory() - Constructor for class neureka.optimization.implementations.MomentumFactory
 
-
mulAssign() - Method in class neureka.math.Functions
+
mul() - Method in class neureka.math.Functions
 
-
Multiplication - Class in neureka.backend.main.operations.operator
+
mulAssign() - Method in class neureka.math.Functions
 
-
Multiplication() - Constructor for class neureka.backend.main.operations.operator.Multiplication
+
Multiplication - Class in neureka.backend.main.operations.operator
 
-
multiply(double) - Method in interface neureka.Tensor
+
Multiplication() - Constructor for class neureka.backend.main.operations.operator.Multiplication
 
-
multiply(Tensor<V>) - Method in interface neureka.Tensor
+
multiply(Tensor<V>) - Method in interface neureka.Tensor
-
This method is synonymous to the Tensor.times(Tensor) method.
+
This method is synonymous to the Tensor.times(Tensor) method.
-
multiply(V) - Method in interface neureka.Tensor
+
multiply(V) - Method in interface neureka.Tensor
+
 
+
multiply(double) - Method in interface neureka.Tensor
 
-
mut() - Method in interface neureka.Nda
+
mut() - Method in interface neureka.Nda
This method exposes an API for mutating the state of this tensor.
-
mut() - Method in interface neureka.Tensor
+
mut() - Method in interface neureka.Tensor
This method exposes an API for mutating the state of this tensor.
-
MutateNda<T> - Interface in neureka
+
MutateNda<T> - Interface in neureka
Nd-arrays should be used as immutable data structures mostly, however sometimes it is important to mutate their state for performance reasons.
-
MutateNda.Item<V> - Interface in neureka
+
MutateNda.Item<V> - Interface in neureka
-
Instances of this are being returned by the Nda.at(int...) method, and they allow you to get or set individual nd-array items
-
MutateTensor<T> - Interface in neureka
+
MutateTensor<T> - Interface in neureka
Tensors should be considered immutable, however sometimes it is important to mutate their state for performance reasons.
-

N

-
-
name() - Method in class neureka.devices.opencl.OpenCLDevice

N

+
+
name() - Method in class neureka.devices.opencl.OpenCLDevice
 
-
Nda<V> - Interface in neureka
+
Nda<V> - Interface in neureka
Nda, which is an abbreviation of 'N-Dimensional-Array', represents a multidimensional, homogeneously filled fixed-size array of items.
-
Nda.Item<V> - Interface in neureka
+
Nda.Item<V> - Interface in neureka
-
Instances of this are being returned by the Nda.at(int...) method, and they allow you to get individual nd-array items
-
NdaAsString - Class in neureka.view
+
NdaAsString - Class in neureka.view
This class is in essence a simple wrapper class for a tensor and a StringBuilder Methods in this class use the builder in order to construct a String representation for said tensor.
-
NdaAsString.Builder - Interface in neureka.view
+
NdaAsString.Builder - Interface in neureka.view
A builder interface providing multiple different options for building a NdaAsString instance in a fluent way.
-
NdaAsString.Util - Class in neureka.view
+
NdaAsString.Util - Class in neureka.view
This class is a simple utility class which contains a collection of static and stateless methods containing useful functionalities for tensor stringification.
-
NdaBuilder<V> - Class in neureka.fluent.building
+
NdaBuilder<V> - Class in neureka.fluent.building
This is the implementation of the fluent builder API for creating Nda/Tensor instances.
-
NdaBuilder(Class<V>) - Constructor for class neureka.fluent.building.NdaBuilder
+
NdaBuilder(Class<V>) - Constructor for class neureka.fluent.building.NdaBuilder
 
-
ndArrays(Consumer<NDPrintSettings>) - Method in class neureka.Neureka.Settings.View
+
ndArrays(Consumer<NDPrintSettings>) - Method in class neureka.Neureka.Settings.View
This allows you to provide a lambda to configure how tensors should be converted to String instances.
-
NDConfiguration - Interface in neureka.ndim.config
+
NDConfiguration - Interface in neureka.ndim.config
This interface represents the access pattern configuration for the data array of a tensor.
-
NDConfiguration.IndexToIndexFunction - Interface in neureka.ndim.config
+
NDConfiguration.IndexToIndexFunction - Interface in neureka.ndim.config
-
Implementations of this are produced and returned by the NDConfiguration.getIndexToIndexAccessPattern() and their purpose is to translate the item index of a tensor to the index of the item within the underlying data array of said tensor.
-
NDConfiguration.Layout - Enum Class in neureka.ndim.config
+
NDConfiguration.Layout - Enum in neureka.ndim.config
Types of common data layouts:
ROW_MAJOR
-
NDConfiguration.Utility - Class in neureka.ndim.config
+
NDConfiguration.Utility - Class in neureka.ndim.config
This utility class provides static methods which are helpful for nd-configuration related operations like reshaping, incrementing or decrementing index arrays...
-
NDConstructor - Interface in neureka.ndim
+
NDConstructor - Interface in neureka.ndim
 
-
NDConvolution - Class in neureka.backend.main.algorithms
+
NDConvolution - Class in neureka.backend.main.algorithms
 
-
NDConvolution() - Constructor for class neureka.backend.main.algorithms.NDConvolution
+
NDConvolution() - Constructor for class neureka.backend.main.algorithms.NDConvolution
 
-
NDFrame<V> - Class in neureka.framing
+
NDFrame<V> - Class in neureka.framing
Instances of this class are components of tensors, which store aliases for the indices of the tensor.
-
NDFrame(List<List<Object>>, Tensor<V>, String) - Constructor for class neureka.framing.NDFrame
+
NDFrame(List<List<Object>>, Tensor<V>, String) - Constructor for class neureka.framing.NDFrame
 
-
NDFrame(Map<Object, List<Object>>, Tensor<V>, String) - Constructor for class neureka.framing.NDFrame
+
NDFrame(Tensor<V>, String) - Constructor for class neureka.framing.NDFrame
 
-
NDFrame(Tensor<V>, String) - Constructor for class neureka.framing.NDFrame
+
NDFrame(Map<Object, List<Object>>, Tensor<V>, String) - Constructor for class neureka.framing.NDFrame
 
-
ndim() - Method in class neureka.Neureka.Settings
+
ndim() - Method in class neureka.Neureka.Settings
 
-
ndim(Object) - Method in class neureka.Neureka.Settings
+
ndim(Object) - Method in class neureka.Neureka.Settings
This allows you to configure Neureka using a Groovy DSL.
-
NDim() - Constructor for class neureka.Neureka.Settings.NDim
+
NDim() - Constructor for class neureka.Neureka.Settings.NDim
 
-
NDimensional - Interface in neureka.ndim
+
NDimensional - Interface in neureka.ndim
This interface defines the most essential methods of the nd-array/tensor API, which describe them with respect to their dimensionality.
-
NDIterator - Interface in neureka.ndim.iterator
+
NDIterator - Interface in neureka.ndim.iterator
An NDIterator is used to iterate over n-dimensional arrays.
-
NDIterator.NonVirtual - Enum Class in neureka.ndim.iterator
+
NDIterator.NonVirtual - Enum in neureka.ndim.iterator
Defines if a new NDIterator is allowed to be a VirtualNDIterator.
-
NDPrintSettings - Class in neureka.view
+
NDPrintSettings - Class in neureka.view
This is simply a mutable container for configuring how Tensor instances ought to be converted to Strings.
-
NDPrintSettings(Supplier<Boolean>) - Constructor for class neureka.view.NDPrintSettings
+
NDPrintSettings(Supplier<Boolean>) - Constructor for class neureka.view.NDPrintSettings
 
-
NDTrait - Enum Class in neureka.ndim.config
+
NDTrait - Enum in neureka.ndim.config
 
-
NDUtil - Class in neureka.ndim
+
NDUtil - Class in neureka.ndim
Static utility methods for the NDArray.
-
NDUtil() - Constructor for class neureka.ndim.NDUtil
+
NDUtil() - Constructor for class neureka.ndim.NDUtil
 
-
neg() - Method in class neureka.math.Functions
+
neg() - Method in class neureka.math.Functions
 
-
neg() - Method in interface neureka.Tensor
+
neg() - Method in interface neureka.Tensor
This method is functionally identical to the following alternatives:
-
negative() - Method in interface neureka.Tensor
+
negative() - Method in interface neureka.Tensor
 
neureka - package neureka
 
-
Neureka - Class in neureka
+
Neureka - Class in neureka
Neureka is the key access point for thread local / global library settings (see Neureka.Settings) as well as execution contexts (see BackendContext)
 
neureka.optimization.implementations - package neureka.optimization.implementations
 
-
Neureka.Settings - Class in neureka
+
Neureka.Settings - Class in neureka
This class hosts the settings of the Neureka instance which will be used throughout the library.
-
Neureka.Settings.AutoGrad - Class in neureka
+
Neureka.Settings.AutoGrad - Class in neureka
This class contains settings which are related to the automatic differentiation of tensors.
-
Neureka.Settings.Debug - Class in neureka
+
Neureka.Settings.Debug - Class in neureka
 
-
Neureka.Settings.DType - Class in neureka
+
Neureka.Settings.DType - Class in neureka
 
-
Neureka.Settings.NDim - Class in neureka
+
Neureka.Settings.NDim - Class in neureka
Settings for configuring the access pattern of nd-arrays/tensors.
-
Neureka.Settings.View - Class in neureka
+
Neureka.Settings.View - Class in neureka
-
Settings for configuring how objects should be converted to String representations.
+
Settings for configuring how objects should be converted to String representations.
-
Neureka.Utility - Class in neureka
+
Neureka.Utility - Class in neureka
 
neureka.view - package neureka.view
 
-
newChildToParent(Tensor<T>) - Static method in class neureka.framing.Relation
+
newChildToParent(Tensor<T>) - Static method in class neureka.framing.Relation
 
-
newInstance() - Static method in interface neureka.Tensor
+
newInstance() - Static method in interface neureka.Tensor
This static factory method creates and return a completely empty and undefined tensor which is void of any contents and meaning.
-
newParentToChildren() - Static method in class neureka.framing.Relation
+
newParentToChildren() - Static method in class neureka.framing.Relation
 
-
newReshaped(int[]) - Method in class neureka.ndim.config.AbstractNDC
+
newReshaped(int[]) - Method in class neureka.ndim.config.AbstractNDC
 
-
newReshaped(int[]) - Method in interface neureka.ndim.config.NDConfiguration
+
newReshaped(int[]) - Method in interface neureka.ndim.config.NDConfiguration
This method enables reshaping for NDConfiguration implementation instances.
-
newStridesFor(int[]) - Method in enum class neureka.ndim.config.NDConfiguration.Layout
+
newStridesFor(int[]) - Method in enum neureka.ndim.config.NDConfiguration.Layout
 
-
newTensorLike(Class<V>, Shape, boolean, Device<Object>, double) - Static method in class neureka.backend.main.operations.ElemWiseUtil
+
newTensorLike(Tensor<V>, double) - Static method in class neureka.backend.main.operations.ElemWiseUtil
 
-
newTensorLike(Tensor<V>, double) - Static method in class neureka.backend.main.operations.ElemWiseUtil
+
newTensorLike(Class<V>, Shape, boolean, Device<Object>, double) - Static method in class neureka.backend.main.operations.ElemWiseUtil
 
-
node() - Method in class neureka.autograd.ADTarget
+
node() - Method in class neureka.autograd.ADTarget
 
-
none(Predicate<V>) - Method in interface neureka.Nda
+
none() - Static method in interface neureka.Data
+
+
This is a static factory method which returns a Data object which does not contain any data.
+
+
none(Predicate<V>) - Method in interface neureka.Nda
Iterates over every element of this nd-array, and checks whether none of the elements match the provided lambda.
-
NOT_GOOD - Static variable in interface neureka.backend.api.fun.SuitabilityPredicate
+
none() - Static method in interface neureka.ndim.config.NDConfiguration
 
-
NOT_SUPPORTED - Enum constant in enum class neureka.backend.api.AutoDiffMode
+
NOT_GOOD - Static variable in interface neureka.backend.api.fun.SuitabilityPredicate
 
-
noteFinished(GraphNode<V>) - Method in class neureka.autograd.JITProp
+
noteFinished(GraphNode<V>) - Method in class neureka.autograd.JITProp
 
-
nullArgCheck(T, String, Class<?>, String...) - Static method in class neureka.common.utility.LogUtil
+
nullArgCheck(T, String, Class<?>, String...) - Static method in class neureka.common.utility.LogUtil
 
-
numberOfArgs() - Method in interface neureka.math.Function
+
numberOfArgs() - Method in interface neureka.math.Function
 
-
numberOfBytes() - Method in class neureka.dtype.custom.F32
+
numberOfBytes() - Method in class neureka.dtype.custom.F32
 
-
numberOfBytes() - Method in class neureka.dtype.custom.F64
+
numberOfBytes() - Method in class neureka.dtype.custom.F64
 
-
numberOfBytes() - Method in class neureka.dtype.custom.I16
+
numberOfBytes() - Method in class neureka.dtype.custom.I16
 
-
numberOfBytes() - Method in class neureka.dtype.custom.I32
+
numberOfBytes() - Method in class neureka.dtype.custom.I32
 
-
numberOfBytes() - Method in class neureka.dtype.custom.I64
+
numberOfBytes() - Method in class neureka.dtype.custom.I64
 
-
numberOfBytes() - Method in class neureka.dtype.custom.I8
+
numberOfBytes() - Method in class neureka.dtype.custom.I8
 
-
numberOfBytes() - Method in class neureka.dtype.custom.UI16
+
numberOfBytes() - Method in class neureka.dtype.custom.UI16
 
-
numberOfBytes() - Method in class neureka.dtype.custom.UI32
+
numberOfBytes() - Method in class neureka.dtype.custom.UI32
 
-
numberOfBytes() - Method in class neureka.dtype.custom.UI64
+
numberOfBytes() - Method in class neureka.dtype.custom.UI64
 
-
numberOfBytes() - Method in class neureka.dtype.custom.UI8
+
numberOfBytes() - Method in class neureka.dtype.custom.UI8
 
-
numberOfBytes() - Method in interface neureka.dtype.NumericType
+
numberOfBytes() - Method in interface neureka.dtype.NumericType
 
-
numberOfChannels - Variable in enum class neureka.Tensor.ImageType
+
numberOfChannels - Variable in enum neureka.Tensor.ImageType
 
-
numberOfDataObjects() - Method in class neureka.devices.AbstractBaseDevice
+
numberOfDataObjects() - Method in class neureka.devices.AbstractBaseDevice
 
-
numberOfDataObjects() - Method in interface neureka.devices.Device
+
numberOfDataObjects() - Method in interface neureka.devices.Device
-
Note that this is not necessarily equal to Storage.numberOfStored(), because multiple tensors may share a single Data object.
-
numberOfOperationsWithin(List<String>) - Static method in class neureka.math.parsing.ParseUtil
+
numberOfOperationsWithin(List<String>) - Static method in class neureka.math.parsing.ParseUtil
 
-
numberOfStored() - Method in class neureka.devices.AbstractBaseDevice
+
numberOfStored() - Method in class neureka.devices.AbstractBaseDevice
 
-
numberOfStored() - Method in class neureka.devices.file.IDXHandle
+
numberOfStored() - Method in interface neureka.devices.Storage
 
-
numberOfStored() - Method in interface neureka.devices.Storage
-
 
-
NumericType<TargetType,TargetArrayType,HolderType,HolderArrayType> - Interface in neureka.dtype
+
NumericType<TargetType,TargetArrayType,HolderType,HolderArrayType> - Interface in neureka.dtype
This interface enables "Polymorphic" utility by defining common functionalities used for handling various numeric types.
-

O

-
-
objBooleansToPrimBooleans(Boolean[]) - Static method in class neureka.common.utility.DataConverter.Utility

O

+
+
objBooleansToPrimBooleans(Boolean[]) - Static method in class neureka.common.utility.DataConverter.Utility
 
-
objBytesToPrimBytes(Byte[]) - Static method in class neureka.common.utility.DataConverter.Utility
+
objBytesToPrimBytes(Byte[]) - Static method in class neureka.common.utility.DataConverter.Utility
 
-
objCharsToPrimChars(Character[]) - Static method in class neureka.common.utility.DataConverter.Utility
+
objCharsToPrimChars(Character[]) - Static method in class neureka.common.utility.DataConverter.Utility
 
-
objDoublesToPrimDoubles(Double[]) - Static method in class neureka.common.utility.DataConverter.Utility
+
objDoublesToPrimDoubles(Double[]) - Static method in class neureka.common.utility.DataConverter.Utility
 
-
objectsToBytes(Object[], int) - Static method in class neureka.common.utility.DataConverter.Utility
+
objectsToBytes(Object[], int) - Static method in class neureka.common.utility.DataConverter.Utility
 
-
objectsToDoubles(Object[], int) - Static method in class neureka.common.utility.DataConverter.Utility
+
objectsToDoubles(Object[], int) - Static method in class neureka.common.utility.DataConverter.Utility
 
-
objectsToFloats(Object[], int) - Static method in class neureka.common.utility.DataConverter.Utility
+
objectsToFloats(Object[], int) - Static method in class neureka.common.utility.DataConverter.Utility
 
-
objectsToInts(Object[], int) - Static method in class neureka.common.utility.DataConverter.Utility
+
objectsToInts(Object[], int) - Static method in class neureka.common.utility.DataConverter.Utility
 
-
objectsToLongs(Object[], int) - Static method in class neureka.common.utility.DataConverter.Utility
+
objectsToLongs(Object[], int) - Static method in class neureka.common.utility.DataConverter.Utility
 
-
objectsToShorts(Object[], int) - Static method in class neureka.common.utility.DataConverter.Utility
+
objectsToShorts(Object[], int) - Static method in class neureka.common.utility.DataConverter.Utility
 
-
objFloatsToPrimFloats(Float[]) - Static method in class neureka.common.utility.DataConverter.Utility
+
objFloatsToPrimFloats(Float[]) - Static method in class neureka.common.utility.DataConverter.Utility
 
-
objIntsToPrimInts(Integer[]) - Static method in class neureka.common.utility.DataConverter.Utility
+
objIntsToPrimInts(Integer[]) - Static method in class neureka.common.utility.DataConverter.Utility
 
-
objLongsToPrimLongs(Long[]) - Static method in class neureka.common.utility.DataConverter.Utility
+
objLongsToPrimLongs(Long[]) - Static method in class neureka.common.utility.DataConverter.Utility
 
-
objShortsToPrimShorts(Short[]) - Static method in class neureka.common.utility.DataConverter.Utility
+
objShortsToPrimShorts(Short[]) - Static method in class neureka.common.utility.DataConverter.Utility
 
-
of(boolean...) - Static method in interface neureka.Data
+
of(ADAction) - Static method in interface neureka.autograd.ADAction
 
-
of(boolean...) - Static method in interface neureka.Nda
-
-
Constructs a vector of booleans based on the provided array.
-
-
of(boolean...) - Static method in interface neureka.Tensor
+
of(Tensor<?>, ADAction) - Static method in interface neureka.autograd.ADAction
+
 
+
of(Tensor<?>...) - Static method in class neureka.backend.api.ExecutionCall
-
Constructs a vector of booleans based on the provided array.
+
Use this factory method to build ExecutionCall instances in a readable fashion.
-
of(byte) - Static method in interface neureka.Tensor
+
of(ImplementationReceiver) - Static method in class neureka.backend.api.ini.BackendRegistry
 
-
of(byte...) - Static method in interface neureka.Data
+
of(Supplier<V>) - Static method in class neureka.backend.api.LazyRef
 
-
of(byte...) - Static method in interface neureka.Nda
-
-
Constructs a vector of bytes based on the provided array.
-
-
of(byte...) - Static method in interface neureka.Tensor
-
-
Constructs a vector of bytes based on the provided array.
-
-
of(char...) - Static method in interface neureka.Data
+
of(Tensor<?>) - Static method in class neureka.backend.api.Result
 
-
of(double) - Static method in interface neureka.Nda
+
of(Class<V>, V...) - Static method in interface neureka.Data
 
-
of(double) - Static method in interface neureka.Tensor
+
of(float...) - Static method in interface neureka.Data
 
-
of(double...) - Static method in interface neureka.Data
+
of(double...) - Static method in interface neureka.Data
 
-
of(double...) - Static method in interface neureka.Nda
-
-
Constructs a vector of doubles based on the provided array.
-
-
of(double...) - Static method in interface neureka.Tensor
-
-
Constructs a vector of doubles based on the provided array.
-
-
of(float) - Static method in interface neureka.Tensor
+
of(int...) - Static method in interface neureka.Data
 
-
of(float...) - Static method in interface neureka.Data
+
of(long...) - Static method in interface neureka.Data
 
-
of(float...) - Static method in interface neureka.Nda
-
-
Constructs a vector of floats based on the provided array.
-
-
of(float...) - Static method in interface neureka.Tensor
-
-
Constructs a vector of floats based on the provided array.
-
-
of(int) - Static method in class neureka.math.args.Arg.Axis
+
of(byte...) - Static method in interface neureka.Data
 
-
of(int) - Static method in class neureka.math.args.Arg.DerivIdx
+
of(short...) - Static method in interface neureka.Data
 
-
of(int) - Static method in class neureka.math.args.Arg.MinRank
+
of(boolean...) - Static method in interface neureka.Data
 
-
of(int) - Static method in class neureka.math.args.Arg.VarIdx
+
of(char...) - Static method in interface neureka.Data
 
-
of(int) - Static method in interface neureka.Tensor
+
of(String...) - Static method in interface neureka.Data
 
-
of(int...) - Static method in interface neureka.Data
+
of(Class<T>) - Static method in class neureka.dtype.DataType
 
-
of(int[]) - Static method in class neureka.math.args.Arg.Ends
+
of(int) - Static method in class neureka.math.args.Arg.Axis
 
-
of(int...) - Static method in class neureka.math.args.Arg.Indices
+
of(Tensor<V>) - Static method in class neureka.math.args.Arg.Derivative
 
-
of(int...) - Static method in class neureka.math.args.Arg.Offset
+
of(int) - Static method in class neureka.math.args.Arg.DerivIdx
 
-
of(int...) - Static method in class neureka.math.args.Arg.Shape
+
of(int[]) - Static method in class neureka.math.args.Arg.Ends
 
-
of(int...) - Static method in class neureka.math.args.Arg.Stride
+
of(int...) - Static method in class neureka.math.args.Arg.Indices
 
-
of(int...) - Static method in interface neureka.Nda
-
-
Constructs a vector of ints based on the provided array.
-
-
of(int...) - Static method in interface neureka.ndim.NDConstructor
+
of(NDConfiguration.Layout) - Static method in class neureka.math.args.Arg.Layout
 
-
of(int...) - Static method in interface neureka.Shape
-
-
This method is used to create a Shape instance from an array of integers.
-
-
of(int...) - Static method in interface neureka.Tensor
-
-
Constructs a vector of ints based on the provided array.
-
-
of(int[], int[], int[], int[], int[]) - Static method in interface neureka.ndim.config.NDConfiguration
+
of(int) - Static method in class neureka.math.args.Arg.MinRank
+
 
+
of(int...) - Static method in class neureka.math.args.Arg.Offset
+
 
+
of(String) - Static method in class neureka.math.args.Arg.Seed
+
 
+
of(long) - Static method in class neureka.math.args.Arg.Seed
+
 
+
of(int...) - Static method in class neureka.math.args.Arg.Shape
 
-
of(int[], int[], int[], int[], int[]) - Static method in interface neureka.ndim.NDConstructor
+
of(int...) - Static method in class neureka.math.args.Arg.Stride
 
-
of(long) - Static method in class neureka.math.args.Arg.Seed
+
of(Device<?>) - Static method in class neureka.math.args.Arg.TargetDevice
 
-
of(long) - Static method in interface neureka.Tensor
+
of(int) - Static method in class neureka.math.args.Arg.VarIdx
 
-
of(long...) - Static method in interface neureka.Data
+
of(Arg<?>...) - Static method in class neureka.math.args.Args
 
-
of(long...) - Static method in interface neureka.Nda
+
of(String) - Static method in interface neureka.math.Function
-
Constructs a vector of longs based on the provided array.
+
This static factory method will return Function instances based on a provided mathematical String expression describing the function using 'I[0]', 'I[1]', 'I[2]'...
-
of(long...) - Static method in interface neureka.Tensor
+
of(String, boolean) - Static method in interface neureka.math.Function
-
Constructs a vector of longs based on the provided array.
+
This static factory method will return Function instances based on a provided mathematical String expression describing the function using 'I[0]', 'I[1]', 'I[2]'...
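An illustrative sketch of expression-based usage follows; the interpretation of the boolean flag in Tensor.of(String, boolean, Tensor...) as an autograd toggle, and the scalar fill semantics of Tensor.of(double), are assumptions:

    import neureka.Tensor;
    import neureka.math.Function;

    public class ExpressionSketch {
        public static void main(String[] args) {
            // A Function parsed from a String expression over inputs I[0], I[1], ...
            Function mul = Function.of("I[0] * I[1]");
            System.out.println(mul);

            // The Tensor API can apply such an expression directly to argument tensors.
            Tensor<Double> x = Tensor.of(3d);
            Tensor<Double> y = Tensor.of(4d);
            Tensor<Double> z = Tensor.of("I[0] * I[1]", true, x, y);
            System.out.println(z);
        }
    }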
-
of(short) - Static method in interface neureka.Tensor
+
of(String, boolean) - Static method in class neureka.math.implementations.FunctionInput
 
-
of(short...) - Static method in interface neureka.Data
+
of(Class<V>) - Static method in interface neureka.Nda
+
+
This is the entry point to the fluent nd-array builder API for building Nda instances in a readable and type safe fashion.
+
+
of(double) - Static method in interface neureka.Nda
 
-
of(short...) - Static method in interface neureka.Nda
+
of(float...) - Static method in interface neureka.Nda
-
Constructs a vector of shorts based on the provided array.
+
Constructs a vector of floats based on the provided array.
-
of(short...) - Static method in interface neureka.Tensor
+
of(double...) - Static method in interface neureka.Nda
-
Constructs a vector of shorts based on the provided array.
+
Constructs a vector of doubles based on the provided array.
-
of(Class<T>) - Static method in class neureka.dtype.DataType
-
 
-
of(Class<T>, List<Object>) - Static method in interface neureka.Tensor
+
of(byte...) - Static method in interface neureka.Nda
-
This factory method will turn a list of values or nested lists of values into a Tensor instance with the corresponding rank and shape and whose values are of the provided type.
+
Constructs a vector of bytes based on the provided array.
-
of(Class<T>, Shape, Filler<T>) - Static method in interface neureka.Tensor
+
of(int...) - Static method in interface neureka.Nda
-
This factory method allows the creation of tensors with an additional initialization lambda for filling the underlying data array with desired values.
+
Constructs a vector of ints based on the provided array.
-
of(Class<V>) - Static method in interface neureka.Nda
+
of(long...) - Static method in interface neureka.Nda
-
This is the entry point to the fluent nd-array builder API for building Nda instances in a readable and type safe fashion.
+
Constructs a vector of longs based on the provided array.
-
of(Class<V>) - Static method in interface neureka.Tensor
+
of(short...) - Static method in interface neureka.Nda
-
This is the entry point to the fluent tensor builder API for building Tensor instances in a readable and type safe fashion.
+
Constructs a vector of shorts based on the provided array.
-
of(Class<V>, List<Integer>, Object) - Static method in interface neureka.Tensor
+
of(boolean...) - Static method in interface neureka.Nda
-
Use this to construct and return a tensor of the specified type, shape and data object.
+
Constructs a vector of booleans based on the provided array.
-
of(Class<V>, List<Integer>, List<V>) - Static method in interface neureka.Tensor
+
of(T...) - Static method in interface neureka.Nda
-
Use this to construct and return a tensor of the specified type, shape and data object.
+
Constructs a vector of objects based on the provided array.
-
of(Class<V>, Shape, Number) - Static method in interface neureka.Tensor
+
of(Shape, double...) - Static method in interface neureka.Nda
-
Use this to construct and return a tensor of the specified type, shape and number.
+
Use this to construct and return a double based nd-array of the specified shape and initial values.
-
of(Class<V>, Shape, Object) - Static method in interface neureka.Tensor
+
of(Shape, float...) - Static method in interface neureka.Nda
-
Use this to construct and return a tensor of the specified type, shape and data object.
+
Use this to construct and return a float based nd-array of the specified shape and initial values.
-
of(Class<V>, Shape, List<V>) - Static method in interface neureka.Tensor
+
of(Shape, byte...) - Static method in interface neureka.Nda
-
Use this to construct and return a tensor of the specified type, shape and list of items.
+
Use this to construct and return a byte based nd-array of the specified shape and initial values.
-
of(Class<V>, Shape, Arg.Seed) - Static method in interface neureka.Tensor
+
of(Shape, int...) - Static method in interface neureka.Nda
-
Use this to construct and return a seeded tensor of the specified type.
+
Use this to construct and return an int based nd-array of the specified shape and initial values.
-
of(Class<V>, V...) - Static method in interface neureka.Data
-
 
-
of(Iterable<? extends Number>) - Static method in interface neureka.Shape
+
of(Shape, long...) - Static method in interface neureka.Nda
-
This method is used to create a Shape instance from an iterable of numbers whose integer values are used to describe the shape of a nd-array.
+
Use this to construct and return a long based nd-array of the specified shape and initial values.
-
of(Iterable<T>) - Static method in interface neureka.Nda
+
of(Shape, short...) - Static method in interface neureka.Nda
-
Constructs a vector of objects based on the provided iterable.
+
Use this to construct and return a short based nd-array of the specified shape and initial values.
+
+
of(Shape, boolean...) - Static method in interface neureka.Nda
+
+
Use this to construct and return a boolean based nd-array of the specified shape and initial values.
-
of(Iterable<T>) - Static method in interface neureka.Tensor
+
of(Shape, T...) - Static method in interface neureka.Nda
+
+
Use this to construct and return an object based nd-array of the specified shape and initial values.
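A short sketch of the shape-plus-values factories listed above; the inferred Nda item types are assumptions:

    import neureka.Nda;
    import neureka.Shape;

    public class ShapedNdaSketch {
        public static void main(String[] args) {
            // A 2x2 double nd-array from a shape plus initial values.
            Nda<Double> grid  = Nda.of(Shape.of(2, 2), 1d, 2d, 3d, 4d);
            // The generic object-based overload works for any item type.
            Nda<String> words = Nda.of(Shape.of(1, 3), "a", "b", "c");
            System.out.println(grid + " " + words);
        }
    }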
+
+
of(Iterable<T>) - Static method in interface neureka.Nda
Constructs a vector of objects based on the provided iterable.
-
of(Object...) - Static method in interface neureka.Tensor
+
of(List<T>) - Static method in interface neureka.Nda
-
This static Tensor factory method tries to interpret the provided arguments to create the instance the user might want.
+
Constructs a vector of objects based on the provided list.
-
of(String) - Static method in class neureka.math.args.Arg.Seed
+
of(int[], int[], int[], int[], int[]) - Static method in interface neureka.ndim.config.NDConfiguration
 
-
of(String) - Static method in interface neureka.math.Function
+
of(Tensor<?>) - Static method in interface neureka.ndim.iterator.NDIterator
-
This static factory method will return Function instances based on a provided mathematical String expression describing the function using 'I[0]', 'I[1]', 'I[2]'...
+
Use this to instantiate NDIterators optimized for the provided tensor.
-
of(String...) - Static method in interface neureka.Data
-
 
-
of(String, boolean) - Static method in interface neureka.math.Function
+
of(Tensor<?>, NDIterator.NonVirtual) - Static method in interface neureka.ndim.iterator.NDIterator
-
This static factory method will return Function instances based on a provided mathematical String expression describing the function using 'I[0]', 'I[1]', 'I[2]'...
+
Use this to instantiate NDIterators optimized for the provided tensor which may not be allowed to be a VirtualNDIterator instance.
+
+
of(NDConfiguration, NDIterator.NonVirtual) - Static method in interface neureka.ndim.iterator.NDIterator
+
+
Use this to instantiate NDIterators optimized for the provided NDConfiguration + which may not be allowed to be a VirtualNDIterator instance.
-
of(String, boolean) - Static method in class neureka.math.implementations.FunctionInput
+
of(int[], int[], int[], int[], int[]) - Static method in interface neureka.ndim.NDConstructor
+
 
+
of(NDConfiguration) - Static method in interface neureka.ndim.NDConstructor
 
-
of(String, boolean, List<Tensor<V>>) - Static method in interface neureka.Tensor
+
of(Shape) - Static method in interface neureka.ndim.NDConstructor
+
 
+
of(int...) - Static method in interface neureka.ndim.NDConstructor
+
 
+
of(Optimization<T>) - Static method in interface neureka.optimization.Optimizer
+
 
+
of(List<? extends Number>) - Static method in interface neureka.Shape
-
This method takes a list of tensors and a String expression describing - operations which ought to be applied to the tensors in said list.
+
This method is used to create a Shape instance from a list of numbers + whose integer values are used to describe the shape of a nd-array.
-
of(String, boolean, Tensor<V>...) - Static method in interface neureka.Tensor
+
of(Stream<? extends Number>) - Static method in interface neureka.Shape
-
This method takes an array of tensors and a String expression describing - operations which ought to be applied to the tensors in said array.
+
This method is used to create a Shape instance from a stream of numbers + whose integer values are used to describe the shape of a nd-array.
-
of(String, List<Tensor<V>>) - Static method in interface neureka.Tensor
+
of(Iterable<? extends Number>) - Static method in interface neureka.Shape
-
This factory method allows for the creation and execution of Function instances - without actually instantiating them manually, - where the result will then be returned by this factory method.
+
This method is used to create a Shape instance from an iterable of numbers + whose integer values are used to describe the shape of a nd-array.
+
+
of(int...) - Static method in interface neureka.Shape
+
+
This method is used to create a Shape instance from an array of integers.
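The Shape.of(...) overloads listed above only differ in how the axis sizes are supplied (int varargs, list, stream or iterable); the resulting Shape describes the same nd-array layout. A brief sketch:

    import neureka.Shape;
    import java.util.List;
    import java.util.stream.Stream;

    public class ShapeExample {
        public static void main(String[] args) {
            Shape a = Shape.of(2, 3, 5);            // from an array of integers
            Shape b = Shape.of(List.of(2, 3, 5));   // from a list of numbers
            Shape c = Shape.of(Stream.of(2, 3, 5)); // from a stream of numbers
            System.out.println(a + " " + b + " " + c); // all describe a 2x3x5 nd-array
        }
    }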
-
of(String, Tensor<T>, char, Tensor<T>, String) - Static method in interface neureka.Tensor
+
of(Tensor<T>, char, Tensor<T>) - Static method in interface neureka.Tensor
Use this to conveniently operate on 2 tensors.
-
of(String, Tensor<T>, String) - Static method in interface neureka.Tensor
+
of(Tensor<T>, char, Tensor<T>, char, Tensor<T>) - Static method in interface neureka.Tensor
+
+
Use this to conveniently operate on 3 tensors.
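The two operator-character entries above let you combine tensors with a single char instead of a full expression string. A hedged sketch, assuming '+' and '*' are among the supported operator characters:

    import neureka.Shape;
    import neureka.Tensor;

    public class OperatorCharExample {
        public static void main(String[] args) {
            Tensor<Double> a = Tensor.of(Shape.of(2, 2), new double[]{1, 2, 3, 4});
            Tensor<Double> b = Tensor.of(Shape.of(2, 2), new double[]{5, 6, 7, 8});
            Tensor<Double> c = Tensor.of(Shape.of(2, 2), new double[]{2, 2, 2, 2});
            Tensor<Double> sum      = Tensor.of(a, '+', b);         // operate on 2 tensors
            Tensor<Double> combined = Tensor.of(a, '*', b, '+', c); // operate on 3 tensors
            System.out.println(sum);
            System.out.println(combined);
        }
    }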
+
+
of(String, Tensor<T>, String) - Static method in interface neureka.Tensor
Use this to conveniently operate on a tensor.
-
of(String, Tensor<T>, String, Tensor<T>, String, Tensor<T>, String) - Static method in interface neureka.Tensor
+
of(String, Tensor<T>, char, Tensor<T>, String) - Static method in interface neureka.Tensor
+
+
Use this to conveniently operate on 2 tensors.
+
+
of(String, Tensor<T>, String, Tensor<T>, String, Tensor<T>, String) - Static method in interface neureka.Tensor
Use this to conveniently operate on 3 tensors.
-
of(String, Tensor<V>) - Static method in interface neureka.Tensor
+
of(Object...) - Static method in interface neureka.Tensor
-
This method takes a tensor and a String expression describing - operations which ought to be applied to said tensor.
+
This static Tensor factory method tries to interpret the provided arguments to create the instance the user might want.
-
of(String, Tensor<V>...) - Static method in interface neureka.Tensor
+
of(Iterable<T>) - Static method in interface neureka.Tensor
-
This method takes an array of tensors and a String expression describing - operations which ought to be applied to the tensors in said array.
+
Constructs a vector of objects based on the provided iterable.
-
of(String, V...) - Static method in interface neureka.Tensor
+
of(List<Integer>, T) - Static method in interface neureka.Tensor
-
This factory method allows for the creation and execution of Function instances - without actually instantiating them manually, - where the result will then be returned by this factory method.
+
This is a convenient factory method for creating Tensor instances for + values of type T based on a list of integers + defining a shape made up of axes sizes as well as a scalar value of type T + which will fill out the data array spanned by the provided shape information.
-
of(Supplier<V>) - Static method in class neureka.backend.api.LazyRef
-
 
-
of(List<? extends Number>) - Static method in interface neureka.Shape
+
of(Shape, T) - Static method in interface neureka.Tensor
-
This method is used to create a Shape instance from a list of numbers - whose integer values are used to describe the shape of a nd-array.
+
This is a convenient factory method for creating Tensor instances for + representing items of type T.
-
of(List<? extends Number>, String) - Static method in interface neureka.Tensor
+
of(List<? extends Number>, String) - Static method in interface neureka.Tensor
This factory method will create and return a Tensor instance based on a list of Number instances whose rounded values will be interpreted as the shape of this new Tensor instance and a seed which will serve as a source of pseudo randomness to generate the values for the new instance.
-
of(List<? extends Number>, List<V>) - Static method in interface neureka.Tensor
+
of(List<? extends Number>, List<V>) - Static method in interface neureka.Tensor
Creates a new Tensor instance based on a list of numbers representing the shape, and a list of values representing the value of the resulting tensor.
-
of(List<Integer>, T) - Static method in interface neureka.Tensor
+
of(Shape, List<V>) - Static method in interface neureka.Tensor
-
This is a convenient factory method for creating Tensor instances for - values of type Tensor based on a list of integers - defining a shape made up of axes sizes as well as a scalar value of type Tensor - which will fill out the data array spanned by the provided shape information.
+
Creates a new Tensor instance based on a shape tuple of numbers representing the nd-array shape, + and a list of items representing the value of the resulting tensor.
-
of(List<Object>) - Static method in interface neureka.Tensor
+
of(List<Object>) - Static method in interface neureka.Tensor
This factory method will turn a list of values or nested lists of values into a Tensor instance with the corresponding rank and shape.
-
of(List<T>) - Static method in interface neureka.Nda
+
of(Class<T>, List<Object>) - Static method in interface neureka.Tensor
-
Constructs a vector of objects based on the provided list.
+
This factory method will turn a list of values or nested lists of values into a Tensor + instance with the corresponding rank and shape and whose values + are of the provided type.
-
of(Stream<? extends Number>) - Static method in interface neureka.Shape
+
of(Class<V>) - Static method in interface neureka.Tensor
-
This method is used to create a Shape instance from a stream of numbers - whose integer values are used to describe the shape of a nd-array.
+
This is the entry point to the fluent tensor builder API for building + Tensor instances in a readable and type safe fashion.
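For the fluent builder entry point above, here is a rough sketch of what such a chain might look like; the withShape and andFill steps are assumptions about the builder API and are not taken from this index:

    import neureka.Tensor;

    public class FluentBuilderExample {
        public static void main(String[] args) {
            // assumed builder chain: item type first, then shape, then initial values
            Tensor<Double> t = Tensor.of(Double.class)
                                     .withShape(2, 3)
                                     .andFill(1d, 2d, 3d, 4d, 5d, 6d);
            System.out.println(t);
        }
    }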
+
+
of(double...) - Static method in interface neureka.Tensor
+
+
Constructs a vector of doubles based on the provided array.
-
of(ADAction) - Static method in interface neureka.autograd.ADAction
+
of(double) - Static method in interface neureka.Tensor
 
-
of(ImplementationReceiver) - Static method in class neureka.backend.api.ini.BackendRegistry
+
of(float...) - Static method in interface neureka.Tensor
+
+
Constructs a vector of floats based on the provided array.
+
+
of(float) - Static method in interface neureka.Tensor
 
-
of(Device<?>) - Static method in class neureka.math.args.Arg.TargetDevice
+
of(byte...) - Static method in interface neureka.Tensor
+
+
Constructs a vector of bytes based on the provided array.
+
+
of(byte) - Static method in interface neureka.Tensor
 
-
of(DataType<T>, List<Integer>, Filler<T>) - Static method in interface neureka.Tensor
+
of(int...) - Static method in interface neureka.Tensor
-
This factory method allows the creation of tensors with an additional initialization - lambda for filling the underlying data array with desired values.
+
Constructs a vector of ints based on the provided array.
-
of(DataType<T>, Shape, Filler<T>) - Static method in interface neureka.Tensor
+
of(int) - Static method in interface neureka.Tensor
+
 
+
of(long...) - Static method in interface neureka.Tensor
-
This factory method allows the creation of tensors with an additional initialization - lambda for filling the underlying data array with desired values.
+
Constructs a vector of longs based on the provided array.
-
of(DataType<V>, List<Integer>, List<V>) - Static method in interface neureka.Tensor
+
of(long) - Static method in interface neureka.Tensor
+
 
+
of(short...) - Static method in interface neureka.Tensor
-
Use this to construct and return a tensor of the specified type, shape and data object.
+
Constructs a vector of shorts based on the provided array.
-
of(DataType<V>, Device<N>, Shape, Object) - Static method in interface neureka.Tensor
+
of(short) - Static method in interface neureka.Tensor
+
 
+
of(boolean...) - Static method in interface neureka.Tensor
-
This factory method is among the most flexible and forgiving ways to create a Tensor instance.
+
Constructs a vector of booleans based on the provided array.
-
of(DataType<V>, NDConstructor, Data<V>) - Static method in interface neureka.Tensor
+
of(Class<V>, Shape, Arg.Seed) - Static method in interface neureka.Tensor
-
This factory method is a raw tensor constructor which will not perform any type checking or data conversion on the data provided to it.
+
Use this to construct and return a seeded tensor of the specified type.
-
of(DataType<V>, Shape) - Static method in interface neureka.Tensor
+
of(Shape, double) - Static method in interface neureka.Tensor
-
Use this to construct and return a tensor of the specified type and shape.
+
Use this to construct and return a homogeneously populated double tensor of the specified shape.
-
of(DataType<V>, Shape, Object) - Static method in interface neureka.Tensor
+
of(Shape, double[]) - Static method in interface neureka.Tensor
-
This factory method is among the most flexible and forgiving ways to create a Tensor instance.
+
Use this to construct and return a double tensor of the specified shape and initial values.
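The two entries above cover the homogeneous case (one double repeated over the whole shape) and the explicit case (one value per item). A brief sketch, assuming values are laid out in row-major order:

    import neureka.Shape;
    import neureka.Tensor;

    public class DoubleTensorExample {
        public static void main(String[] args) {
            // every one of the 2*3 items is initialized to 0.5:
            Tensor<Double> homogeneous = Tensor.of(Shape.of(2, 3), 0.5);
            // one value per item:
            Tensor<Double> explicit = Tensor.of(Shape.of(2, 3), new double[]{1, 2, 3, 4, 5, 6});
            System.out.println(homogeneous);
            System.out.println(explicit);
        }
    }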
-
of(DataType<V>, Shape, List<V>) - Static method in interface neureka.Tensor
+
of(Shape, int[]) - Static method in interface neureka.Tensor
-
Use this to construct and return a tensor of the specified type, shape and a list of items.
+
Use this to construct and return an int tensor of the specified shape and initial values.
-
of(Arg<?>...) - Static method in class neureka.math.args.Args
-
 
-
of(NDConfiguration) - Static method in interface neureka.ndim.NDConstructor
-
 
-
of(NDConfiguration.Layout) - Static method in class neureka.math.args.Arg.Layout
-
 
-
of(NDConfiguration, NDIterator.NonVirtual) - Static method in interface neureka.ndim.iterator.NDIterator
+
of(Shape, byte[]) - Static method in interface neureka.Tensor
-
Use this to instantiate NDIterators optimized for the provided NDConfiguration - which may not be allowed to be a VirtualNDIterator instance.
+
Use this to construct and return a byte tensor of the specified shape and initial values.
-
of(Optimization<T>) - Static method in interface neureka.optimization.Optimizer
-
 
-
of(Shape) - Static method in interface neureka.ndim.NDConstructor
-
 
-
of(Shape, boolean...) - Static method in interface neureka.Nda
+
of(Shape, long[]) - Static method in interface neureka.Tensor
-
Use this to construct and return a boolean based nd-array of the specified shape and initial values.
+
Use this to construct and return a long tensor of the specified shape and initial values.
-
of(Shape, boolean[]) - Static method in interface neureka.Tensor
+
of(Shape, short[]) - Static method in interface neureka.Tensor
-
Use this to construct and return a boolean tensor of the specified shape and initial values.
+
Use this to construct and return a short tensor of the specified shape and initial values.
-
of(Shape, byte...) - Static method in interface neureka.Nda
+
of(Shape, float[]) - Static method in interface neureka.Tensor
-
Use this to construct and return a byte based nd-array of the specified shape and initial values.
+
Use this to construct and return a float tensor of the specified shape and initial values.
-
of(Shape, byte[]) - Static method in interface neureka.Tensor
+
of(Shape, float) - Static method in interface neureka.Tensor
-
Use this to construct and return a byte tensor of the specified shape and initial values.
+
Use this to construct and return a homogeneously populated float tensor of the specified shape.
-
of(Shape, double) - Static method in interface neureka.Tensor
+
of(Shape, boolean[]) - Static method in interface neureka.Tensor
-
Use this to construct and return a homogeneously populated double tensor of the specified shape.
+
Use this to construct and return a boolean tensor of the specified shape and initial values.
-
of(Shape, double...) - Static method in interface neureka.Nda
+
of(Shape, Data<V>) - Static method in interface neureka.Tensor
-
Use this to construct and return a double based nd-array of the specified shape and initial values.
+
Use this to construct and return a tensor of the specified shape and data object.
+ This method is typically used like this:
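The sentence above ends with "typically used like this:" because the original Javadoc carries a code sample that did not survive into this index diff. As a stand-in, here is a hedged sketch that pairs the method with the Data.of(Class, values...) factory also listed in this index:

    import neureka.Data;
    import neureka.Shape;
    import neureka.Tensor;

    public class DataBackedTensorExample {
        public static void main(String[] args) {
            Data<Double> data = Data.of(Double.class, 1d, 2d, 3d, 4d);
            // build a 2x2 tensor from the existing data object:
            Tensor<Double> t = Tensor.of(Shape.of(2, 2), data);
            System.out.println(t);
        }
    }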
-
of(Shape, double[]) - Static method in interface neureka.Tensor
+
of(DataType<V>, Shape) - Static method in interface neureka.Tensor
-
Use this to construct and return a double tensor of the specified shape and initial values.
+
Use this to construct and return a tensor of the specified type and shape.
-
of(Shape, float) - Static method in interface neureka.Tensor
+
of(Class<V>, Shape, Object) - Static method in interface neureka.Tensor
-
Use this to construct and return a homogeneously populated float tensor of the specified shape.
+
Use this to construct and return a tensor of the specified type, shape and data object.
-
of(Shape, float...) - Static method in interface neureka.Nda
+
of(Class<V>, List<Integer>, Object) - Static method in interface neureka.Tensor
-
Use this to construct and return a float based nd-array of the specified shape and initial values.
+
Use this to construct and return a tensor of the specified type, shape and data object.
-
of(Shape, float[]) - Static method in interface neureka.Tensor
+
of(Class<V>, Shape, Number) - Static method in interface neureka.Tensor
-
Use this to construct and return a float tensor of the specified shape and initial values.
+
Use this to construct and return a tensor of the specified type, shape and number.
-
of(Shape, int...) - Static method in interface neureka.Nda
+
of(Class<V>, List<Integer>, List<V>) - Static method in interface neureka.Tensor
-
Use this to construct and return an int based nd-array of the specified shape and initial values.
+
Use this to construct and return a tensor of the specified type, shape and data object.
-
of(Shape, int[]) - Static method in interface neureka.Tensor
+
of(Class<V>, Shape, List<V>) - Static method in interface neureka.Tensor
-
Use this to construct and return an int tensor of the specified shape and initial values.
+
Use this to construct and return a tensor of the specified type, shape and list of items.
-
of(Shape, long...) - Static method in interface neureka.Nda
+
of(DataType<V>, List<Integer>, List<V>) - Static method in interface neureka.Tensor
-
Use this to construct and return a long based nd-array of the specified shape and initial values.
+
Use this to construct and return a tensor of the specified type, shape and data object.
-
of(Shape, long[]) - Static method in interface neureka.Tensor
+
of(DataType<V>, Shape, List<V>) - Static method in interface neureka.Tensor
-
Use this to construct and return a long tensor of the specified shape and initial values.
+
Use this to construct and return a tensor of the specified type, shape and a list of items.
-
of(Shape, short...) - Static method in interface neureka.Nda
+
of(DataType<V>, Shape, Object) - Static method in interface neureka.Tensor
-
Use this to construct and return a short based nd-array of the specified shape and initial values.
+
This factory method is among the most flexible and forgiving ways to create a Tensor instance.
-
of(Shape, short[]) - Static method in interface neureka.Tensor
+
of(DataType<V>, Device<N>, Shape, Object) - Static method in interface neureka.Tensor
-
Use this to construct and return a short tensor of the specified shape and initial values.
+
This factory method is among the most flexible and forgiving ways to create a Tensor instance.
-
of(Shape, List<V>) - Static method in interface neureka.Tensor
+
of(DataType<V>, NDConstructor, Data<V>) - Static method in interface neureka.Tensor
-
Creates a new Tensor instance based on a shape tuple of numbers representing the nd-array shape, - and a list of items representing the value of the resulting tensor.
+
This factory method is a raw tensor constructor which will not perform any type checking or data conversion on the data provided to it.
-
of(Shape, Data<V>) - Static method in interface neureka.Tensor
+
of(DataType<T>, List<Integer>, Filler<T>) - Static method in interface neureka.Tensor
-
Use this to construct and return a tensor of the specified shape and data object.
- This method is typically used like this:
+
This factory method allows the creation of tensors with an additional initialization + lambda for filling the underlying data array with desired values.
-
of(Shape, T) - Static method in interface neureka.Tensor
+
of(DataType<T>, Shape, Filler<T>) - Static method in interface neureka.Tensor
-
This is a convenient factory method for creating Tensor instances for - representing items of type Tensor.
+
This factory method allows the creation of tensors with an additional initialization + lambda for filling the underlying data array with desired values.
-
of(Shape, T...) - Static method in interface neureka.Nda
+
of(Class<T>, Shape, Filler<T>) - Static method in interface neureka.Tensor
-
Use this to construct and return an object based nd-array of the specified shape and initial values.
+
This factory method allows the creation of tensors with an additional initialization + lambda for filling the underlying data array with desired values.
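The Filler based entries above all describe the same idea: instead of passing concrete values, you pass a lambda that computes the value for each position. The exact Filler signature used below (flat index plus nd-index) is an assumption, so treat this as a sketch:

    import neureka.Shape;
    import neureka.Tensor;
    import neureka.dtype.DataType;

    public class FillerExample {
        public static void main(String[] args) {
            // assumed lambda shape: (flat index, nd-index) -> item value
            Tensor<Double> t = Tensor.of(
                DataType.of(Double.class),
                Shape.of(2, 3),
                (i, indices) -> (double) (i * i)
            );
            System.out.println(t);
        }
    }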
-
of(Tensor<?>) - Static method in class neureka.backend.api.Result
-
 
-
of(Tensor<?>) - Static method in interface neureka.ndim.iterator.NDIterator
+
of(String, V...) - Static method in interface neureka.Tensor
-
Use this to instantiate NDIterators optimized for the provided tensor.
+
This factory method allows for the creation and execution of Function instances + without actually instantiating them manually, + where the result will then be returned by this factory method.
-
of(Tensor<?>...) - Static method in class neureka.backend.api.ExecutionCall
+
of(String, List<Tensor<V>>) - Static method in interface neureka.Tensor
-
Use this factory method to build ExecutionCall instances in a readable fashion.
+
This factory method allows for the creation and execution of Function instances + without actually instantiating them manually, + where the result will then be returned by this factory method.
-
of(Tensor<?>, ADAction) - Static method in interface neureka.autograd.ADAction
-
 
-
of(Tensor<?>, NDIterator.NonVirtual) - Static method in interface neureka.ndim.iterator.NDIterator
+
of(String, boolean, List<Tensor<V>>) - Static method in interface neureka.Tensor
-
Use this to instantiate NDIterators optimized for the provided tensor - which may not be allowed to be a VirtualNDIterator instance.
+
This method takes a list of tensors and a String expression describing + operations which ought to be applied to the tensors in said list.
-
of(Tensor<T>, char, Tensor<T>) - Static method in interface neureka.Tensor
+
of(String, Tensor<V>) - Static method in interface neureka.Tensor
-
Use this to conveniently operate on 2 tensors.
+
This method takes a tensor and a String expression describing + operations which ought to be applied to said tensor.
-
of(Tensor<T>, char, Tensor<T>, char, Tensor<T>) - Static method in interface neureka.Tensor
+
of(String, Tensor<V>...) - Static method in interface neureka.Tensor
-
Use this to conveniently operate on 3 tensors.
+
This method takes an array of tensors and a String expression describing + operations which ought to be applied to the tensors in said array.
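The expression based entries above (of(String, Tensor...), of(String, List<Tensor>), and the boolean variants, which presumably toggle autograd) all parse a math expression in which 'I[0]', 'I[1]', ... refer to the supplied tensors, execute it, and return the result. A small sketch:

    import neureka.Shape;
    import neureka.Tensor;

    public class ExpressionExample {
        public static void main(String[] args) {
            Tensor<Double> a = Tensor.of(Shape.of(3), new double[]{1, 2, 3});
            Tensor<Double> b = Tensor.of(Shape.of(3), new double[]{4, 5, 6});
            // 'I[0]' and 'I[1]' refer to the first and second tensor argument:
            Tensor<Double> result = Tensor.of("I[0] * I[1] + I[0]", a, b);
            System.out.println(result);
        }
    }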
-
of(Tensor<V>) - Static method in class neureka.math.args.Arg.Derivative
-
 
-
of(T...) - Static method in interface neureka.Nda
+
of(String, boolean, Tensor<V>...) - Static method in interface neureka.Tensor
-
Constructs a vector of objects based on the provided array.
+
This method takes an array of tensors and a String expression describing + operations which ought to be applied to the tensors in said array.
-
ofAny(Class<V>, Shape, Object) - Static method in interface neureka.Tensor
+
ofAny(Class<V>, Shape, Object) - Static method in interface neureka.Tensor
Use this to construct and return a tensor of the specified type, shape and data object.
-
ofBigDecimals() - Static method in interface neureka.Nda
+
ofBigDecimals() - Static method in interface neureka.Nda
This is a shortcut method for Nda.of(BigDecimal.class) - used to build Ndas storing BigDecimals.
+ used to build Ndas storing BigDecimals.
-
ofBooleans() - Static method in interface neureka.Nda
+
ofBooleans() - Static method in interface neureka.Nda
This is a shortcut method for Nda.of(Boolean.class) - used to build Ndas storing Booleans.
+ used to build Ndas storing Booleans.
-
ofBytes() - Static method in interface neureka.Nda
+
ofBytes() - Static method in interface neureka.Nda
This is a shortcut method for Nda.of(Byte.class) - used to build Ndas storing Bytes.
+ used to build Ndas storing Bytes.
-
ofBytes() - Static method in interface neureka.Tensor
+
ofBytes() - Static method in interface neureka.Tensor
-
This is a simple convenience method which is simply calling the Tensor.of(Class) +
This is a simple convenience method which is simply calling the Tensor.of(Class) method like so: of(Byte.class).
-
ofChars() - Static method in interface neureka.Nda
+
ofChars() - Static method in interface neureka.Nda
This is a shortcut method for Nda.of(Character.class) - used to build Ndas storing Characters.
+ used to build Ndas storing Characters.
-
ofDoubles() - Static method in interface neureka.Nda
+
ofDoubles() - Static method in interface neureka.Nda
This is a shortcut method for Nda.of(Double.class) - used to build Ndas storing Doubles.
+ used to build Ndas storing Doubles.
-
ofDoubles() - Static method in interface neureka.Tensor
+
ofDoubles() - Static method in interface neureka.Tensor
-
This is a simple convenience method which is simply calling the Tensor.of(Class) +
This is a simple convenience method which is simply calling the Tensor.of(Class) method like so: of(Double.class).
-
ofFloats() - Static method in interface neureka.Nda
+
ofFloats() - Static method in interface neureka.Nda
This is a shortcut method for Nda.of(Float.class) - used to build Ndas storing Floats.
+ used to build Ndas storing Floats.
-
ofFloats() - Static method in interface neureka.Tensor
+
ofFloats() - Static method in interface neureka.Tensor
-
This is a simple convenience method which is simply calling the Tensor.of(Class) +
This is a simple convenience method which is simply calling the Tensor.of(Class) method like so: of(Float.class).
-
offset() - Method in interface neureka.ndim.config.NDConfiguration
+
offset() - Method in interface neureka.ndim.config.NDConfiguration
The offset is the position of a slice within the n-dimensional data array of its parent tensor.
-
offset() - Method in class neureka.ndim.config.types.permuted.Permuted1DConfiguration
+
offset(int) - Method in interface neureka.ndim.config.NDConfiguration
The offset is the position of a slice within the n-dimensional data array of its parent tensor.
-
offset() - Method in class neureka.ndim.config.types.permuted.Permuted2DConfiguration
+
offset() - Method in class neureka.ndim.config.types.permuted.Permuted1DConfiguration
The offset is the position of a slice within the n-dimensional data array of its parent tensor.
-
offset() - Method in class neureka.ndim.config.types.permuted.Permuted3DConfiguration
+
offset(int) - Method in class neureka.ndim.config.types.permuted.Permuted1DConfiguration
The offset is the position of a slice within the n-dimensional data array of its parent tensor.
-
offset() - Method in class neureka.ndim.config.types.permuted.PermutedNDConfiguration
+
offset() - Method in class neureka.ndim.config.types.permuted.Permuted2DConfiguration
The offset is the position of a slice within the n-dimensional data array of its parent tensor.
-
offset() - Method in class neureka.ndim.config.types.simple.Simple0DConfiguration
+
offset(int) - Method in class neureka.ndim.config.types.permuted.Permuted2DConfiguration
The offset is the position of a slice within the n-dimensional data array of its parent tensor.
-
offset() - Method in class neureka.ndim.config.types.simple.Simple1DConfiguration
+
offset() - Method in class neureka.ndim.config.types.permuted.Permuted3DConfiguration
The offset is the position of a slice within the n-dimensional data array of its parent tensor.
-
offset() - Method in class neureka.ndim.config.types.simple.Simple2DConfiguration
+
offset(int) - Method in class neureka.ndim.config.types.permuted.Permuted3DConfiguration
The offset is the position of a slice within the n-dimensional data array of its parent tensor.
-
offset() - Method in class neureka.ndim.config.types.simple.Simple3DConfiguration
+
offset() - Method in class neureka.ndim.config.types.permuted.PermutedNDConfiguration
The offset is the position of a slice within the n-dimensional data array of its parent tensor.
-
offset() - Method in class neureka.ndim.config.types.simple.SimpleNDConfiguration
+
offset(int) - Method in class neureka.ndim.config.types.permuted.PermutedNDConfiguration
The offset is the position of a slice within the n-dimensional data array of its parent tensor.
-
offset() - Method in class neureka.ndim.config.types.sliced.Sliced0DConfiguration
+
offset() - Method in class neureka.ndim.config.types.simple.Simple0DConfiguration
The offset is the position of a slice within the n-dimensional data array of its parent tensor.
-
offset() - Method in class neureka.ndim.config.types.sliced.Sliced1DConfiguration
+
offset(int) - Method in class neureka.ndim.config.types.simple.Simple0DConfiguration
The offset is the position of a slice within the n-dimensional data array of its parent tensor.
-
offset() - Method in class neureka.ndim.config.types.sliced.Sliced2DConfiguration
+
offset() - Method in class neureka.ndim.config.types.simple.Simple1DConfiguration
The offset is the position of a slice within the n-dimensional data array of its parent tensor.
-
offset() - Method in class neureka.ndim.config.types.sliced.Sliced3DConfiguration
+
offset(int) - Method in class neureka.ndim.config.types.simple.Simple1DConfiguration
The offset is the position of a slice within the n-dimensional data array of its parent tensor.
-
offset() - Method in class neureka.ndim.config.types.sliced.SlicedNDConfiguration
+
offset() - Method in class neureka.ndim.config.types.simple.Simple2DConfiguration
The offset is the position of a slice within the n-dimensional data array of its parent tensor.
-
offset() - Method in class neureka.ndim.config.types.views.SimpleReshapeView
+
offset(int) - Method in class neureka.ndim.config.types.simple.Simple2DConfiguration
The offset is the position of a slice within the n-dimensional data array of its parent tensor.
-
offset() - Method in class neureka.ndim.config.types.views.virtual.VirtualNDConfiguration
+
offset() - Method in class neureka.ndim.config.types.simple.Simple3DConfiguration
The offset is the position of a slice within the n-dimensional data array of its parent tensor.
-
offset() - Method in interface neureka.ndim.NDimensional
+
offset(int) - Method in class neureka.ndim.config.types.simple.Simple3DConfiguration
The offset is the position of a slice within the n-dimensional - data array of its parent array.
+ data array of its parent tensor.
-
offset(int) - Method in interface neureka.ndim.config.NDConfiguration
+
offset() - Method in class neureka.ndim.config.types.simple.SimpleNDConfiguration
The offset is the position of a slice within the n-dimensional data array of its parent tensor.
-
offset(int) - Method in class neureka.ndim.config.types.permuted.Permuted1DConfiguration
+
offset(int) - Method in class neureka.ndim.config.types.simple.SimpleNDConfiguration
The offset is the position of a slice within the n-dimensional data array of its parent tensor.
-
offset(int) - Method in class neureka.ndim.config.types.permuted.Permuted2DConfiguration
+
offset() - Method in class neureka.ndim.config.types.sliced.Sliced0DConfiguration
The offset is the position of a slice within the n-dimensional data array of its parent tensor.
-
offset(int) - Method in class neureka.ndim.config.types.permuted.Permuted3DConfiguration
+
offset(int) - Method in class neureka.ndim.config.types.sliced.Sliced0DConfiguration
The offset is the position of a slice within the n-dimensional data array of its parent tensor.
-
offset(int) - Method in class neureka.ndim.config.types.permuted.PermutedNDConfiguration
+
offset() - Method in class neureka.ndim.config.types.sliced.Sliced1DConfiguration
The offset is the position of a slice within the n-dimensional data array of its parent tensor.
-
offset(int) - Method in class neureka.ndim.config.types.simple.Simple0DConfiguration
+
offset(int) - Method in class neureka.ndim.config.types.sliced.Sliced1DConfiguration
The offset is the position of a slice within the n-dimensional data array of its parent tensor.
-
offset(int) - Method in class neureka.ndim.config.types.simple.Simple1DConfiguration
+
offset() - Method in class neureka.ndim.config.types.sliced.Sliced2DConfiguration
The offset is the position of a slice within the n-dimensional data array of its parent tensor.
-
offset(int) - Method in class neureka.ndim.config.types.simple.Simple2DConfiguration
+
offset(int) - Method in class neureka.ndim.config.types.sliced.Sliced2DConfiguration
The offset is the position of a slice within the n-dimensional data array of its parent tensor.
-
offset(int) - Method in class neureka.ndim.config.types.simple.Simple3DConfiguration
+
offset() - Method in class neureka.ndim.config.types.sliced.Sliced3DConfiguration
The offset is the position of a slice within the n-dimensional data array of its parent tensor.
-
offset(int) - Method in class neureka.ndim.config.types.simple.SimpleNDConfiguration
+
offset(int) - Method in class neureka.ndim.config.types.sliced.Sliced3DConfiguration
The offset is the position of a slice within the n-dimensional data array of its parent tensor.
-
offset(int) - Method in class neureka.ndim.config.types.sliced.Sliced0DConfiguration
+
offset() - Method in class neureka.ndim.config.types.sliced.SlicedNDConfiguration
The offset is the position of a slice within the n-dimensional data array of its parent tensor.
-
offset(int) - Method in class neureka.ndim.config.types.sliced.Sliced1DConfiguration
+
offset(int) - Method in class neureka.ndim.config.types.sliced.SlicedNDConfiguration
The offset is the position of a slice within the n-dimensional data array of its parent tensor.
-
offset(int) - Method in class neureka.ndim.config.types.sliced.Sliced2DConfiguration
+
offset() - Method in class neureka.ndim.config.types.views.SimpleReshapeView
The offset is the position of a slice within the n-dimensional data array of its parent tensor.
-
offset(int) - Method in class neureka.ndim.config.types.sliced.Sliced3DConfiguration
+
offset(int) - Method in class neureka.ndim.config.types.views.SimpleReshapeView
The offset is the position of a slice within the n-dimensional data array of its parent tensor.
-
offset(int) - Method in class neureka.ndim.config.types.sliced.SlicedNDConfiguration
+
offset() - Method in class neureka.ndim.config.types.views.virtual.VirtualNDConfiguration
The offset is the position of a slice within the n-dimensional data array of its parent tensor.
-
offset(int) - Method in class neureka.ndim.config.types.views.SimpleReshapeView
+
offset(int) - Method in class neureka.ndim.config.types.views.virtual.VirtualNDConfiguration
The offset is the position of a slice within the n-dimensional data array of its parent tensor.
-
offset(int) - Method in class neureka.ndim.config.types.views.virtual.VirtualNDConfiguration
+
offset() - Method in interface neureka.ndim.NDimensional
The offset is the position of a slice within the n-dimensional - data array of its parent tensor.
+ data array of its parent array.
-
OFFSET_MATRIX - Enum constant in enum class neureka.ndim.config.NDTrait
+
ofGradient(Optimization<T>) - Static method in interface neureka.optimization.Optimizer
 
-
ofGradient(Optimization<T>) - Static method in interface neureka.optimization.Optimizer
-
 
-
ofInts() - Static method in interface neureka.Nda
+
ofInts() - Static method in interface neureka.Nda
This is a shortcut method for Nda.of(Integer.class) - used to build Ndas storing Integers.
+ used to build Ndas storing Integers.
-
ofInts() - Static method in interface neureka.Tensor
+
ofInts() - Static method in interface neureka.Tensor
-
This is a simple convenience method which is simply calling the Tensor.of(Class) +
This is a simple convenience method which is simply calling the Tensor.of(Class) method like so: of(Integer.class).
-
ofLongs() - Static method in interface neureka.Nda
+
ofLongs() - Static method in interface neureka.Nda
This is a shortcut method for Nda.of(Long.class) - used to build Ndas storing Longs.
+ used to build Ndas storing Longs.
-
ofNumbers() - Static method in interface neureka.Nda
+
ofNumbers() - Static method in interface neureka.Nda
This is a shortcut method for Nda.of(Number.class) - used to build Ndas storing Numbers.
+ used to build Ndas storing Numbers.
-
ofObjects() - Static method in interface neureka.Nda
+
ofObjects() - Static method in interface neureka.Nda
This is a shortcut method for Nda.of(Object.class) - used to build Ndas storing Objects.
+ used to build Ndas storing Objects.
-
ofRandom(Class<V>, int...) - Static method in interface neureka.Tensor
+
ofRandom(Class<V>, int...) - Static method in interface neureka.Tensor
This factory method produces a randomly populated tensor of the provided type and shape using a hard coded default seed.
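A one-liner sketch for the ofRandom entry above; since the seed is hard coded, repeated runs should produce the same values:

    import neureka.Tensor;

    public class RandomTensorExample {
        public static void main(String[] args) {
            // 2x3 tensor of pseudo random doubles, deterministic because the seed is fixed:
            Tensor<Double> r = Tensor.ofRandom(Double.class, 2, 3);
            System.out.println(r);
        }
    }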
-
ofShorts() - Static method in interface neureka.Nda
+
ofShorts() - Static method in interface neureka.Nda
This is a shortcut method for Nda.of(Short.class) - used to build Ndas storing Shorts.
+ used to build Ndas storing Shorts.
-
ofShorts() - Static method in interface neureka.Tensor
+
ofShorts() - Static method in interface neureka.Tensor
-
This is a simple convenience method which is simply calling the Tensor.of(Class) +
This is a simple convenience method which is simply calling the Tensor.of(Class) method like so: of(Short.class).
-
ofStrings() - Static method in interface neureka.Nda
+
ofStrings() - Static method in interface neureka.Nda
This is a shortcut method for Nda.of(String.class) - used to build Ndas storing Strings.
+ used to build Ndas storing Strings.
-
OKAY - Static variable in interface neureka.backend.api.fun.SuitabilityPredicate
+
OKAY - Static variable in interface neureka.backend.api.fun.SuitabilityPredicate
 
-
on(D) - Method in class neureka.backend.api.ExecutionCall.Builder
+
on(D) - Method in class neureka.backend.api.ExecutionCall.Builder
 
-
on(Device<V>) - Method in class neureka.fluent.building.NdaBuilder
+
on(Device<? super V>) - Method in class neureka.fluent.building.NdaBuilder
 
-
on(Device<V>) - Method in interface neureka.fluent.building.states.WithShapeOrScalarOrVectorOnDevice
+
on(Device<? super V>) - Method in interface neureka.fluent.building.states.WithShapeOrScalarOrVectorOnDevice
Use this to specify the device onto which the tensor should be stored.
-
ONE - Enum constant in enum class neureka.devices.host.concurrent.Parallelism
-
-
1
-
-
OpenCLDevice - Class in neureka.devices.opencl
+
OpenCLDevice - Class in neureka.devices.opencl
This class models OpenCL supporting accelerator hardware like GPUs or FPGAs for storing tensors and executing operations on them.
-
OpenCLDevice.Query - Class in neureka.devices.opencl
+
OpenCLDevice.Query - Class in neureka.devices.opencl
 
-
OpenCLDevice.Type - Enum Class in neureka.devices.opencl
+
OpenCLDevice.Type - Enum in neureka.devices.opencl
 
-
OpenCLPlatform - Class in neureka.devices.opencl
+
OpenCLPlatform - Class in neureka.devices.opencl
This class models the OpenCL concept of platforms, which refer to device vendors / or vendor OpenCL runtime drivers.
-
OpenCLPlatform(cl_platform_id) - Constructor for class neureka.devices.opencl.OpenCLPlatform
+
OpenCLPlatform(cl_platform_id) - Constructor for class neureka.devices.opencl.OpenCLPlatform
 
-
Operation - Interface in neureka.backend.api
+
Operation - Interface in neureka.backend.api
This interface is part of the backend API, and it embodies the top layer of the 3 tier backend architecture.
-
OperationBuilder - Class in neureka.backend.api.template.operations
+
OperationBuilder - Class in neureka.backend.api.template.operations
This builder class builds instances of the Operation interface.
-
OperationBuilder() - Constructor for class neureka.backend.api.template.operations.OperationBuilder
+
OperationBuilder() - Constructor for class neureka.backend.api.template.operations.OperationBuilder
 
-
OperationBuilder.Derivation - Interface in neureka.backend.api.template.operations
+
OperationBuilder.Derivation - Interface in neureka.backend.api.template.operations
 
-
OperationBuilder.Stringifier - Interface in neureka.backend.api.template.operations
+
OperationBuilder.Stringifier - Interface in neureka.backend.api.template.operations
 
-
operationForF32(boolean, long, long) - Static method in class neureka.backend.main.operations.linear.internal.blas.GEMM
+
operationForF32(boolean, long, long) - Static method in class neureka.backend.main.operations.linear.internal.blas.GEMM
 
-
operationForF64(boolean, long, long) - Static method in class neureka.backend.main.operations.linear.internal.blas.GEMM
+
operationForF64(boolean, long, long) - Static method in class neureka.backend.main.operations.linear.internal.blas.GEMM
 
-
operationForI32(boolean, long, long) - Static method in class neureka.backend.main.operations.linear.internal.blas.IGEMM
+
operationForI32(boolean, long, long) - Static method in class neureka.backend.main.operations.linear.internal.blas.IGEMM
 
-
operationForI64(boolean, long, long) - Static method in class neureka.backend.main.operations.linear.internal.blas.IGEMM
+
operationForI64(boolean, long, long) - Static method in class neureka.backend.main.operations.linear.internal.blas.IGEMM
 
-
operationName() - Method in class neureka.backend.api.template.operations.AbstractOperation
+
operationName() - Method in class neureka.backend.api.template.operations.AbstractOperation
Override this if you want your operation to have a string representation with a custom prefix which is something other than the simple class name!
-
operator(String) - Method in class neureka.backend.api.template.operations.OperationBuilder
+
operator(String) - Method in class neureka.backend.api.template.operations.OperationBuilder
 
-
Optimization<V> - Interface in neureka.optimization
+
Optimization<V> - Interface in neureka.optimization
 
-
optimize() - Method in class neureka.devices.opencl.utility.CLFunctionCompiler
+
optimize() - Method in class neureka.devices.opencl.utility.CLFunctionCompiler
 
-
optimize(Tensor<V>) - Method in class neureka.optimization.implementations.AdaGrad
+
optimize(Tensor<V>) - Method in class neureka.optimization.implementations.AdaGrad
 
-
optimize(Tensor<V>) - Method in class neureka.optimization.implementations.ADAM
+
optimize(Tensor<V>) - Method in class neureka.optimization.implementations.ADAM
 
-
optimize(Tensor<V>) - Method in class neureka.optimization.implementations.Momentum
+
optimize(Tensor<V>) - Method in class neureka.optimization.implementations.Momentum
 
-
optimize(Tensor<V>) - Method in class neureka.optimization.implementations.RMSProp
+
optimize(Tensor<V>) - Method in class neureka.optimization.implementations.RMSProp
 
-
optimize(Tensor<V>) - Method in class neureka.optimization.implementations.SGD
+
optimize(Tensor<V>) - Method in class neureka.optimization.implementations.SGD
 
-
optimize(Tensor<V>) - Method in interface neureka.optimization.Optimization
+
optimize(Tensor<V>) - Method in interface neureka.optimization.Optimization
 
-
optimizedFunctionOf(Function, String) - Method in interface neureka.devices.Device
+
optimizedFunctionOf(Function, String) - Method in interface neureka.devices.Device
This method tries to allow this device to produce an optimized Function based on the provided function.
-
optimizedOperationOf(Function, String) - Method in interface neureka.devices.Device
+
optimizedOperationOf(Function, String) - Method in interface neureka.devices.Device
This method tries to allow this device to produce an optimized Operation based on the provided function.
-
optimizedOperationOf(Function, String) - Method in class neureka.devices.file.FileDevice
+
optimizedOperationOf(Function, String) - Method in class neureka.devices.file.FileDevice
 
-
optimizedOperationOf(Function, String) - Method in class neureka.devices.host.CPU
+
optimizedOperationOf(Function, String) - Method in class neureka.devices.host.CPU
 
-
optimizedOperationOf(Function, String) - Method in class neureka.devices.opencl.OpenCLDevice
+
optimizedOperationOf(Function, String) - Method in class neureka.devices.opencl.OpenCLDevice
 
-
Optimizer<V> - Interface in neureka.optimization
+
Optimizer<V> - Interface in neureka.optimization
Optimizers are tensor components which implement the Optimization (functional) interface applying various optimization algorithms to the gradients of tensors.
-
OptimizerFactory - Interface in neureka.optimization
+
OptimizerFactory - Interface in neureka.optimization
 
-
orElse(T) - Method in interface neureka.backend.api.Call.Else
+
orElse(T) - Method in interface neureka.backend.api.Call.Else
 
-
orElse(V) - Method in interface neureka.Nda.Item
+
orElse(V) - Method in interface neureka.Nda.Item
Get the value at the targeted position or return the provided default value if the item does not exist.
-
orElseNull() - Method in interface neureka.Nda.Item
+
orElseNull() - Method in interface neureka.Nda.Item
Get the value at the targeted position or return null if the item does not exist.
-
OS_MEMORY_PAGE_SIZE - Static variable in class neureka.devices.host.machine.Hardware
+
OS_MEMORY_PAGE_SIZE - Static variable in class neureka.devices.host.machine.Hardware
Practically all architectures/OSes have a page size of 4k (one notable exception is Solaris/SPARC, which has 8k)
-
owner() - Method in interface neureka.Data
+
owner() - Method in interface neureka.Data
 
-
owner() - Method in class neureka.devices.AbstractDeviceData
+
owner() - Method in class neureka.devices.AbstractDeviceData
 
-

P

-
-
pad(int, String) - Static method in class neureka.view.NdaAsString.Util
-
 
-
pad(String, int) - Static method in class neureka.view.NdaAsString.Util

P

+
+
pad(int, String) - Static method in class neureka.view.NdaAsString.Util
 
-
parallelism(IntSupplier) - Method in class neureka.devices.host.concurrent.WorkScheduler.Divider
+
pad(String, int) - Static method in class neureka.view.NdaAsString.Util
 
-
Parallelism - Enum Class in neureka.devices.host.concurrent
+
Parallelism - Enum in neureka.devices.host.concurrent
A set of standard levels of parallelism derived from the number of available cores and optionally capped by reserving a specified amount of memory per thread.
-
PARALLELIZATION_THRESHOLD - Static variable in class neureka.devices.host.CPU
+
parallelism(IntSupplier) - Method in class neureka.devices.host.concurrent.WorkScheduler.Divider
 
-
parse(String, boolean) - Method in class neureka.math.parsing.FunctionParser
+
PARALLELIZATION_THRESHOLD - Static variable in class neureka.devices.host.CPU
 
-
parse(Operation, int, boolean) - Method in class neureka.math.parsing.FunctionParser
+
parse(Operation, int, boolean) - Method in class neureka.math.parsing.FunctionParser
 
-
ParsedCLImplementation - Class in neureka.backend.main.implementations
+
parse(String, boolean) - Method in class neureka.math.parsing.FunctionParser
 
-
ParsedCLImplementation(ImplementationFor<OpenCLDevice>, int, String, String, String, String, Function<KernelCode, KernelCode[]>) - Constructor for class neureka.backend.main.implementations.ParsedCLImplementation
+
ParsedCLImplementation - Class in neureka.backend.main.implementations
 
-
parsedOperation(String, int) - Static method in class neureka.math.parsing.ParseUtil
+
ParsedCLImplementation(ImplementationFor<OpenCLDevice>, int, String, String, String, String, Function<KernelCode, KernelCode[]>) - Constructor for class neureka.backend.main.implementations.ParsedCLImplementation
 
-
ParseUtil - Class in neureka.math.parsing
+
parsedOperation(String, int) - Static method in class neureka.math.parsing.ParseUtil
+
 
+
ParseUtil - Class in neureka.math.parsing
Utility for parsing function expressions.
-
partialDerivative() - Method in interface neureka.autograd.ADAction
-
 
-
pass(byte) - Method in class neureka.devices.opencl.KernelCaller
-
 
-
pass(byte...) - Method in class neureka.devices.opencl.KernelCaller
-
 
-
pass(double) - Method in class neureka.devices.opencl.KernelCaller
+
partialDerivative() - Method in interface neureka.autograd.ADAction
 
-
pass(double...) - Method in class neureka.devices.opencl.KernelCaller
-
 
-
pass(float) - Method in class neureka.devices.opencl.KernelCaller
-
 
-
pass(float...) - Method in class neureka.devices.opencl.KernelCaller
+
pass(Tensor<T>) - Method in class neureka.devices.opencl.KernelCaller
-
Use this to pass an array of float values to the kernel.
+
This method passes 1 argument to the kernel.
-
pass(int) - Method in class neureka.devices.opencl.KernelCaller
+
pass(int) - Method in class neureka.devices.opencl.KernelCaller
 
-
pass(int...) - Method in class neureka.devices.opencl.KernelCaller
+
pass(int...) - Method in class neureka.devices.opencl.KernelCaller
Use this to pass an array of int values to the kernel.
-
pass(long) - Method in class neureka.devices.opencl.KernelCaller
+
pass(float...) - Method in class neureka.devices.opencl.KernelCaller
+
+
Use this to pass an array of float values to the kernel.
+
+
pass(double...) - Method in class neureka.devices.opencl.KernelCaller
 
-
pass(long...) - Method in class neureka.devices.opencl.KernelCaller
+
pass(short...) - Method in class neureka.devices.opencl.KernelCaller
 
-
pass(short) - Method in class neureka.devices.opencl.KernelCaller
+
pass(long...) - Method in class neureka.devices.opencl.KernelCaller
 
-
pass(short...) - Method in class neureka.devices.opencl.KernelCaller
+
pass(byte...) - Method in class neureka.devices.opencl.KernelCaller
 
-
pass(Number) - Method in class neureka.devices.opencl.KernelCaller
+
pass(float) - Method in class neureka.devices.opencl.KernelCaller
 
-
pass(Tensor<T>) - Method in class neureka.devices.opencl.KernelCaller
-
-
This method passes 1 argument to the kernel.
-
-
passAllOf(Tensor<Number>) - Method in class neureka.devices.opencl.KernelCaller
+
pass(double) - Method in class neureka.devices.opencl.KernelCaller
+
 
+
pass(short) - Method in class neureka.devices.opencl.KernelCaller
+
 
+
pass(long) - Method in class neureka.devices.opencl.KernelCaller
+
 
+
pass(byte) - Method in class neureka.devices.opencl.KernelCaller
+
 
+
pass(Number) - Method in class neureka.devices.opencl.KernelCaller
+
 
+
passAllOf(Tensor<Number>) - Method in class neureka.devices.opencl.KernelCaller
This method passes 2 arguments to the kernel.
-
passConfOf(Tensor<Number>) - Method in class neureka.devices.opencl.KernelCaller
+
passConfOf(Tensor<Number>) - Method in class neureka.devices.opencl.KernelCaller
This method passes the ND-Configuration in the form of a flattened int array to the kernel.
-
passLocalFloats(long) - Method in class neureka.devices.opencl.KernelCaller
+
passLocalFloats(long) - Method in class neureka.devices.opencl.KernelCaller
+
 
+
pendingCount() - Method in class neureka.autograd.JITProp
 
-
pendingCount() - Method in class neureka.autograd.JITProp
+
PERFECT - Static variable in interface neureka.backend.api.fun.SuitabilityPredicate
 
-
PERFECT - Static variable in interface neureka.backend.api.fun.SuitabilityPredicate
+
Permute - Class in neureka.backend.main.operations.other
 
-
permute() - Method in class neureka.math.Functions
+
Permute() - Constructor for class neureka.backend.main.operations.other.Permute
 
-
permute(int...) - Method in interface neureka.Nda
+
permute() - Method in class neureka.math.Functions
+
 
+
permute(int...) - Method in interface neureka.Nda
Returns a view of the original tensor input with its dimensions permuted.
Consider a 3-dimensional tensor x with shape (2×3×5), then calling x.permute(1, 0, 2) will return a 3-dimensional tensor of shape (3×2×5).
-
permute(int...) - Method in interface neureka.Tensor
+
permute(int...) - Method in interface neureka.Tensor
Returns a view of the original tensor input with its dimensions permuted.
Consider a 3-dimensional tensor x with shape (2×3×5), then calling x.permute(1, 0, 2) will return a 3-dimensional tensor of shape (3×2×5).
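The permute entries above already contain the key example; spelled out as code (using Tensor.ofRandom, which is listed earlier in this index):

    import neureka.Tensor;

    public class PermuteExample {
        public static void main(String[] args) {
            Tensor<Double> x = Tensor.ofRandom(Double.class, 2, 3, 5); // shape (2x3x5)
            Tensor<Double> y = x.permute(1, 0, 2);                     // view with shape (3x2x5)
            System.out.println(y);
        }
    }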
-
Permute - Class in neureka.backend.main.operations.other
-
 
-
Permute() - Constructor for class neureka.backend.main.operations.other.Permute
+
Permuted1DConfiguration - Class in neureka.ndim.config.types.permuted
 
-
Permuted1DConfiguration - Class in neureka.ndim.config.types.permuted
+
Permuted1DConfiguration(int, int, int) - Constructor for class neureka.ndim.config.types.permuted.Permuted1DConfiguration
 
-
Permuted1DConfiguration(int, int, int) - Constructor for class neureka.ndim.config.types.permuted.Permuted1DConfiguration
+
Permuted2DCIterator - Class in neureka.ndim.iterator.types.permuted
 
-
Permuted2DCIterator - Class in neureka.ndim.iterator.types.permuted
+
Permuted2DCIterator(Permuted2DConfiguration) - Constructor for class neureka.ndim.iterator.types.permuted.Permuted2DCIterator
 
-
Permuted2DCIterator(Permuted2DConfiguration) - Constructor for class neureka.ndim.iterator.types.permuted.Permuted2DCIterator
+
Permuted2DConfiguration - Class in neureka.ndim.config.types.permuted
 
-
Permuted2DConfiguration - Class in neureka.ndim.config.types.permuted
+
Permuted2DConfiguration(int[], int[], int[]) - Constructor for class neureka.ndim.config.types.permuted.Permuted2DConfiguration
 
-
Permuted2DConfiguration(int[], int[], int[]) - Constructor for class neureka.ndim.config.types.permuted.Permuted2DConfiguration
+
Permuted3DCIterator - Class in neureka.ndim.iterator.types.permuted
 
-
Permuted3DCIterator - Class in neureka.ndim.iterator.types.permuted
+
Permuted3DCIterator(Permuted3DConfiguration) - Constructor for class neureka.ndim.iterator.types.permuted.Permuted3DCIterator
 
-
Permuted3DCIterator(Permuted3DConfiguration) - Constructor for class neureka.ndim.iterator.types.permuted.Permuted3DCIterator
+
Permuted3DConfiguration - Class in neureka.ndim.config.types.permuted
 
-
Permuted3DConfiguration - Class in neureka.ndim.config.types.permuted
+
Permuted3DConfiguration(int[], int[], int[]) - Constructor for class neureka.ndim.config.types.permuted.Permuted3DConfiguration
 
-
Permuted3DConfiguration(int[], int[], int[]) - Constructor for class neureka.ndim.config.types.permuted.Permuted3DConfiguration
+
PermutedNDConfiguration - Class in neureka.ndim.config.types.permuted
 
-
PermutedNDConfiguration - Class in neureka.ndim.config.types.permuted
+
PermutedNDConfiguration(int[], int[], int[]) - Constructor for class neureka.ndim.config.types.permuted.PermutedNDConfiguration
 
-
PermutedNDConfiguration(int[], int[], int[]) - Constructor for class neureka.ndim.config.types.permuted.PermutedNDConfiguration
+
plus() - Method in class neureka.math.Functions
 
-
plus() - Method in class neureka.math.Functions
-
 
-
plus(Tensor<V>) - Method in interface neureka.Tensor
+
plus(Tensor<V>) - Method in interface neureka.Tensor
This method will produce the addition of two tensors with the same rank (or two ranks which can be made compatible with padding ones), where the left operand is this Tensor instance and the right operand is the tensor passed to the method.
-
plus(V) - Method in interface neureka.Tensor
+
plus(V) - Method in interface neureka.Tensor
This method will create a new Tensor with the provided double scalar added to all elements of this Tensor.
-
plusAssign() - Method in class neureka.math.Functions
+
plusAssign() - Method in class neureka.math.Functions
 
-
plusAssign(Tensor<T>) - Method in interface neureka.MutateTensor
+
plusAssign(Tensor<T>) - Method in interface neureka.MutateTensor
Performs an addition of the passed tensor to this tensor.
-
pow() - Method in class neureka.math.Functions
+
pow() - Method in class neureka.math.Functions
+
 
+
powAssign() - Method in class neureka.math.Functions
 
-
powAssign() - Method in class neureka.math.Functions
+
Power - Class in neureka.backend.main.operations.operator
 
-
power(Tensor<V>) - Method in interface neureka.Tensor
+
Power() - Constructor for class neureka.backend.main.operations.operator.Power
+
 
+
power(Tensor<V>) - Method in interface neureka.Tensor
This will produce the power of two tensors with the same rank (or two ranks which can be made compatible with padding ones), where the left operand is this Tensor instance and the right operand is the tensor passed to the method.
-
power(V) - Method in interface neureka.Tensor
+
power(V) - Method in interface neureka.Tensor
Raises all items of this tensor to the power of the provided value.
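The plus and power entries above are elementwise and accept either another tensor of compatible rank or a single scalar value. A short sketch:

    import neureka.Shape;
    import neureka.Tensor;

    public class ElementwiseExample {
        public static void main(String[] args) {
            Tensor<Double> a = Tensor.of(Shape.of(2, 2), new double[]{1, 2, 3, 4});
            Tensor<Double> b = Tensor.of(Shape.of(2, 2), 10.0); // homogeneously filled
            Tensor<Double> sum     = a.plus(b);    // elementwise tensor + tensor
            Tensor<Double> shifted = a.plus(0.5);  // scalar added to every item
            Tensor<Double> squared = a.power(2.0); // every item raised to the power of 2
            System.out.println(sum + " " + shifted + " " + squared);
        }
    }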
-
Power - Class in neureka.backend.main.operations.operator
-
 
-
Power() - Constructor for class neureka.backend.main.operations.operator.Power
-
 
-
PREDEFINED - Static variable in class neureka.devices.host.machine.Hardware
+
PREDEFINED - Static variable in class neureka.devices.host.machine.Hardware
Should contain all available hardware in ascending "power" order.
-
prefVecWidthChar() - Method in class neureka.devices.opencl.OpenCLDevice
+
prefVecWidthChar() - Method in class neureka.devices.opencl.OpenCLDevice
 
-
prefVecWidthDouble() - Method in class neureka.devices.opencl.OpenCLDevice
+
prefVecWidthDouble() - Method in class neureka.devices.opencl.OpenCLDevice
 
-
prefVecWidthFloat() - Method in class neureka.devices.opencl.OpenCLDevice
+
prefVecWidthFloat() - Method in class neureka.devices.opencl.OpenCLDevice
 
-
prefVecWidthInt() - Method in class neureka.devices.opencl.OpenCLDevice
+
prefVecWidthInt() - Method in class neureka.devices.opencl.OpenCLDevice
 
-
prefVecWidthLong() - Method in class neureka.devices.opencl.OpenCLDevice
+
prefVecWidthLong() - Method in class neureka.devices.opencl.OpenCLDevice
 
-
prefVecWidthShort() - Method in class neureka.devices.opencl.OpenCLDevice
+
prefVecWidthShort() - Method in class neureka.devices.opencl.OpenCLDevice
 
-
prepare(ExecutionCall<? extends Device<?>>) - Method in interface neureka.backend.api.fun.ExecutionPreparation
+
prepare(ExecutionCall<? extends Device<?>>) - Method in interface neureka.backend.api.fun.ExecutionPreparation
 
-
prepare(ExecutionCall<? extends Device<?>>) - Method in class neureka.backend.api.template.algorithms.AbstractFunDeviceAlgorithm
+
prepare(ExecutionCall<? extends Device<?>>) - Method in class neureka.backend.api.template.algorithms.AbstractFunDeviceAlgorithm
Preparing refers to instantiating output tensors for the provided ExecutionCall.
-
prepare(ExecutionCall<? extends Device<?>>) - Method in class neureka.backend.api.template.algorithms.FallbackAlgorithm
+
prepare(ExecutionCall<? extends Device<?>>) - Method in class neureka.backend.api.template.algorithms.FallbackAlgorithm
 
-
prepareAndExecute(ExecutionCall<? extends Device<?>>, FinalExecutor) - Static method in class neureka.backend.api.template.algorithms.AbstractDeviceAlgorithm
+
prepareAndExecute(ExecutionCall<? extends Device<?>>, FinalExecutor) - Static method in class neureka.backend.api.template.algorithms.AbstractDeviceAlgorithm
 
-
process(T) - Method in class neureka.common.utility.Cache
+
process(T) - Method in class neureka.common.utility.Cache
 
-
produceNDC(boolean) - Method in interface neureka.ndim.NDConstructor
+
produceNDC(boolean) - Method in interface neureka.ndim.NDConstructor
 
-
Product - Class in neureka.backend.main.operations.indexer
+
Product - Class in neureka.backend.main.operations.indexer
This type of operation belongs to the same species as the Summation operation.
-
Product() - Constructor for class neureka.backend.main.operations.indexer.Product
+
Product() - Constructor for class neureka.backend.main.operations.indexer.Product
 
-
propertyOf(Tensor<?>) - Method in interface neureka.backend.api.Call.TensorProperty
+
propertyOf(Tensor<?>) - Method in interface neureka.backend.api.Call.TensorProperty
 
-
providesGradient() - Method in class neureka.math.implementations.FunctionInput
+
providesGradient() - Method in class neureka.math.implementations.FunctionInput
 
-
providesGradient() - Method in class neureka.math.implementations.FunctionVariable
+
providesGradient() - Method in class neureka.math.implementations.FunctionVariable
 
-
put(String, OpenCLDevice.cl_ad_hoc) - Method in class neureka.devices.opencl.KernelCache
+
put(String, OpenCLDevice.cl_ad_hoc) - Method in class neureka.devices.opencl.KernelCache
 
-
put(Function) - Method in class neureka.math.FunctionCache
+
put(Function) - Method in class neureka.math.FunctionCache
 
-
putAt(int[], T) - Method in interface neureka.MutateNda
+
putAt(Map<?, Integer>, Nda<T>) - Method in interface neureka.MutateNda
-
Use this to put a single item at a particular position within this nd-array.
+
This method enables assigning a provided nd-array to be a subset/slice of this nd-array! It takes a key which is used to configure a slice sharing the same underlying data as the original nd-array.
-
putAt(int[], T) - Method in interface neureka.MutateTensor
+
putAt(int[], T) - Method in interface neureka.MutateNda
Use this to put a single item at a particular position within this nd-array.
-
putAt(int, T) - Method in interface neureka.MutateNda
-
-
Individual entries for value items in this nd-array can be set via this method.
-
-
putAt(int, T) - Method in interface neureka.MutateTensor
+
putAt(int, T) - Method in interface neureka.MutateNda
Individual entries for value items in this nd-array can be set via this method.
-
putAt(List<?>, Nda<T>) - Method in interface neureka.MutateNda
+
putAt(List<?>, Nda<T>) - Method in interface neureka.MutateNda
This method enables injecting slices of nd-array to be assigned into this nd-array! It takes a key of various types which is used to configure a slice nd-array sharing the same underlying data as the original nd-array.
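Roughly, this is slice assignment; the sketch below is illustrative only, and both the fluent factory chain and the mut() accessor used to obtain the mutation view are assumptions:

    // Hedged sketch: assumed factory chain and mutation accessor; the key format is illustrative.
    Tensor<Double> target = Tensor.of(Double.class).withShape(4, 4).all(0d);
    Tensor<Double> patch  = Tensor.of(Double.class).withShape(2, 2).all(1d);
    target.mut().putAt(java.util.Arrays.asList(0, 1), patch); // write 'patch' into the addressed slice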
-
putAt(List<?>, Nda<T>) - Method in interface neureka.MutateTensor
-
-
This method enables injecting slices of nd-array to be assigned into this nd-array! It takes a key of various types which is used to configure a slice nd-array sharing the same underlying data as the original nd-array.
-
-
putAt(List<?>, T) - Method in interface neureka.MutateNda
-
-
Use this to place a single item at a particular position within this nd-array!
-
-
putAt(List<?>, T) - Method in interface neureka.MutateTensor
+
putAt(List<?>, T) - Method in interface neureka.MutateNda
Use this to place a single item at a particular position within this nd-array!
-
putAt(Map<?, Integer>, Nda<T>) - Method in interface neureka.MutateNda
+
putAt(Map<?, Integer>, Nda<T>) - Method in interface neureka.MutateTensor
This method enables assigning a provided nd-array to be a subset/slice of this nd-array! It takes a key which is used to configure a slice sharing the same underlying data as the original nd-array.
-
putAt(Map<?, Integer>, Nda<T>) - Method in interface neureka.MutateTensor
+
putAt(int[], T) - Method in interface neureka.MutateTensor
-
This method enables assigning a provided nd-array to be a subset/slice of this nd-array! It takes a key which is used to configure a slice sharing the same underlying data as the original nd-array.
+
Use this to put a single item at a particular position within this nd-array.
+
+
putAt(int, T) - Method in interface neureka.MutateTensor
+
+
Individual entries for value items in this nd-array can be set via this method.
+
+
putAt(List<?>, Nda<T>) - Method in interface neureka.MutateTensor
+
+
This method enables injecting slices of nd-array to be assigned into this nd-array! It takes a key of various types which is used to configure a slice nd-array sharing the same underlying data as the original nd-array.
+
+
putAt(List<?>, T) - Method in interface neureka.MutateTensor
+
+
Use this to place a single item at a particular position within this nd-array!
-

Q

-
-
quad() - Method in class neureka.math.Functions
+
+
quad() - Method in class neureka.math.Functions
+
 
+
QUADRATIC - Static variable in interface neureka.backend.main.implementations.fun.api.ScalarFun
 
-
Quadratic - Class in neureka.backend.main.operations.functions
+
Quadratic - Class in neureka.backend.main.operations.functions
 
-
Quadratic() - Constructor for class neureka.backend.main.operations.functions.Quadratic
+
Quadratic() - Constructor for class neureka.backend.main.operations.functions.Quadratic
 
-
QUADRATIC - Static variable in interface neureka.backend.main.implementations.fun.api.ScalarFun
+
Query() - Constructor for class neureka.devices.opencl.OpenCLDevice.Query
 
-
query() - Static method in class neureka.devices.opencl.utility.DeviceQuery
+
query() - Static method in class neureka.devices.opencl.utility.DeviceQuery
The entry point of this program
-
Query() - Constructor for class neureka.devices.opencl.OpenCLDevice.Query
-
 
-

R

-
-
random() - Method in class neureka.math.Functions
+
+
random() - Method in class neureka.math.Functions
 
-
Randomization - Class in neureka.backend.main.operations.other
+
Randomization - Class in neureka.backend.main.operations.other
This Operation takes an optional user seed, the shape of its input tensor, and … floats or doubles with a gaussian distribution where the mean is 0 and the standard deviation is 1.
-
Randomization() - Constructor for class neureka.backend.main.operations.other.Randomization
+
Randomization() - Constructor for class neureka.backend.main.operations.other.Randomization
 
-
rank() - Method in interface neureka.ndim.config.NDConfiguration
+
rank() - Method in interface neureka.ndim.config.NDConfiguration
This method returns the number of axis of a nd-array / Tensor which is equal to the length of the shape of an nd-array / Tensor.
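Put differently, the rank is just the length of the shape. A hedged sketch (the fluent factory chain used here is an assumption):

    // Hedged sketch: assumed factory chain.
    Tensor<Double> t = Tensor.of(Double.class).withShape(2, 3, 4).all(0d);
    int r = t.rank(); // 3, because the shape {2, 3, 4} has three axes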
-
rank() - Method in class neureka.ndim.config.types.permuted.Permuted1DConfiguration
+
rank() - Method in class neureka.ndim.config.types.permuted.Permuted1DConfiguration
This method returns the number of axis of a nd-array / Tensor which is equal to the length of the shape of an nd-array / Tensor.
-
rank() - Method in class neureka.ndim.config.types.permuted.Permuted2DConfiguration
+
rank() - Method in class neureka.ndim.config.types.permuted.Permuted2DConfiguration
This method returns the number of axis of a nd-array / Tensor which is equal to the length of the shape of an nd-array / Tensor.
-
rank() - Method in class neureka.ndim.config.types.permuted.Permuted3DConfiguration
+
rank() - Method in class neureka.ndim.config.types.permuted.Permuted3DConfiguration
This method returns the number of axis of a nd-array / Tensor which is equal to the length of the shape of an nd-array / Tensor.
-
rank() - Method in class neureka.ndim.config.types.permuted.PermutedNDConfiguration
+
rank() - Method in class neureka.ndim.config.types.permuted.PermutedNDConfiguration
This method returns the number of axis of a nd-array / Tensor which is equal to the length of the shape of an nd-array / Tensor.
-
rank() - Method in class neureka.ndim.config.types.simple.Simple0DConfiguration
+
rank() - Method in class neureka.ndim.config.types.simple.Simple0DConfiguration
This method returns the number of axis of a nd-array / Tensor which is equal to the length of the shape of an nd-array / Tensor.
-
rank() - Method in class neureka.ndim.config.types.simple.Simple1DConfiguration
+
rank() - Method in class neureka.ndim.config.types.simple.Simple1DConfiguration
This method returns the number of axis of a nd-array / Tensor which is equal to the length of the shape of an nd-array / Tensor.
-
rank() - Method in class neureka.ndim.config.types.simple.Simple2DConfiguration
+
rank() - Method in class neureka.ndim.config.types.simple.Simple2DConfiguration
This method returns the number of axis of a nd-array / Tensor which is equal to the length of the shape of an nd-array / Tensor.
-
rank() - Method in class neureka.ndim.config.types.simple.Simple3DConfiguration
+
rank() - Method in class neureka.ndim.config.types.simple.Simple3DConfiguration
This method returns the number of axis of a nd-array / Tensor which is equal to the length of the shape of an nd-array / Tensor.
-
rank() - Method in class neureka.ndim.config.types.simple.SimpleNDConfiguration
+
rank() - Method in class neureka.ndim.config.types.simple.SimpleNDConfiguration
This method returns the number of axis of a nd-array / Tensor which is equal to the length of the shape of an nd-array / Tensor.
-
rank() - Method in class neureka.ndim.config.types.sliced.Sliced0DConfiguration
+
rank() - Method in class neureka.ndim.config.types.sliced.Sliced0DConfiguration
This method returns the number of axis of a nd-array / Tensor which is equal to the length of the shape of an nd-array / Tensor.
-
rank() - Method in class neureka.ndim.config.types.sliced.Sliced1DConfiguration
+
rank() - Method in class neureka.ndim.config.types.sliced.Sliced1DConfiguration
This method returns the number of axis of a nd-array / Tensor which is equal to the length of the shape of an nd-array / Tensor.
-
rank() - Method in class neureka.ndim.config.types.sliced.Sliced2DConfiguration
+
rank() - Method in class neureka.ndim.config.types.sliced.Sliced2DConfiguration
This method returns the number of axis of a nd-array / Tensor which is equal to the length of the shape of an nd-array / Tensor.
-
rank() - Method in class neureka.ndim.config.types.sliced.Sliced3DConfiguration
+
rank() - Method in class neureka.ndim.config.types.sliced.Sliced3DConfiguration
This method returns the number of axis of a nd-array / Tensor which is equal to the length of the shape of an nd-array / Tensor.
-
rank() - Method in class neureka.ndim.config.types.sliced.SlicedNDConfiguration
+
rank() - Method in class neureka.ndim.config.types.sliced.SlicedNDConfiguration
This method returns the number of axis of a nd-array / Tensor which is equal to the length of the shape of an nd-array / Tensor.
-
rank() - Method in class neureka.ndim.config.types.views.SimpleReshapeView
+
rank() - Method in class neureka.ndim.config.types.views.SimpleReshapeView
This method returns the number of axis of a nd-array / Tensor which is equal to the length of the shape of an nd-array / Tensor.
-
rank() - Method in class neureka.ndim.config.types.views.virtual.VirtualNDConfiguration
+
rank() - Method in class neureka.ndim.config.types.views.virtual.VirtualNDConfiguration
This method returns the number of axis of a nd-array / Tensor which is equal to the length of the shape of an nd-array / Tensor.
-
rank() - Method in interface neureka.ndim.iterator.NDIterator
+
rank() - Method in interface neureka.ndim.iterator.NDIterator
 
-
rank() - Method in class neureka.ndim.iterator.types.sliced.SlicedNDIterator
+
rank() - Method in class neureka.ndim.iterator.types.sliced.SlicedNDIterator
 
-
rank() - Method in class neureka.ndim.iterator.types.virtual.VirtualNDIterator
+
rank() - Method in class neureka.ndim.iterator.types.virtual.VirtualNDIterator
 
-
rank() - Method in interface neureka.ndim.NDimensional
+
rank() - Method in interface neureka.ndim.NDimensional
 
-
read(List<Object>, Function<Object, Object>) - Static method in class neureka.common.utility.ListReader
+
read(List<Object>, Function<Object, Object>) - Static method in class neureka.common.utility.ListReader
Reads the provided data and turns it into a ListReader.Result object, containing a flattened list of the data alongside its shape and data type.
-
readAll(boolean) - Method in interface neureka.devices.Device.Access
+
readAll(boolean) - Method in interface neureka.devices.Device.Access
Use this to read the full data array of the accessed tensor.
-
readAndConvertForeignDataFrom(DataInput, int) - Method in class neureka.dtype.custom.F32
+
readAndConvertForeignDataFrom(DataInput, int) - Method in class neureka.dtype.custom.F32
 
-
readAndConvertForeignDataFrom(DataInput, int) - Method in class neureka.dtype.custom.F64
+
readAndConvertForeignDataFrom(Iterator<T>, int) - Method in class neureka.dtype.custom.F32
 
-
readAndConvertForeignDataFrom(DataInput, int) - Method in class neureka.dtype.custom.I16
+
readAndConvertForeignDataFrom(DataInput, int) - Method in class neureka.dtype.custom.F64
 
-
readAndConvertForeignDataFrom(DataInput, int) - Method in class neureka.dtype.custom.I32
+
readAndConvertForeignDataFrom(Iterator<T>, int) - Method in class neureka.dtype.custom.F64
 
-
readAndConvertForeignDataFrom(DataInput, int) - Method in class neureka.dtype.custom.I64
+
readAndConvertForeignDataFrom(DataInput, int) - Method in class neureka.dtype.custom.I16
 
-
readAndConvertForeignDataFrom(DataInput, int) - Method in class neureka.dtype.custom.I8
+
readAndConvertForeignDataFrom(Iterator<T>, int) - Method in class neureka.dtype.custom.I16
 
-
readAndConvertForeignDataFrom(DataInput, int) - Method in class neureka.dtype.custom.UI16
+
readAndConvertForeignDataFrom(DataInput, int) - Method in class neureka.dtype.custom.I32
 
-
readAndConvertForeignDataFrom(DataInput, int) - Method in class neureka.dtype.custom.UI32
+
readAndConvertForeignDataFrom(Iterator<T>, int) - Method in class neureka.dtype.custom.I32
 
-
readAndConvertForeignDataFrom(DataInput, int) - Method in class neureka.dtype.custom.UI64
+
readAndConvertForeignDataFrom(DataInput, int) - Method in class neureka.dtype.custom.I64
 
-
readAndConvertForeignDataFrom(DataInput, int) - Method in class neureka.dtype.custom.UI8
+
readAndConvertForeignDataFrom(Iterator<T>, int) - Method in class neureka.dtype.custom.I64
 
-
readAndConvertForeignDataFrom(DataInput, int) - Method in interface neureka.dtype.NumericType
-
-
This method expects the provided stream to spit out bytes which can be read as holder type elements.
-
-
readAndConvertForeignDataFrom(Iterator<T>, int) - Method in class neureka.dtype.custom.F32
+
readAndConvertForeignDataFrom(DataInput, int) - Method in class neureka.dtype.custom.I8
 
-
readAndConvertForeignDataFrom(Iterator<T>, int) - Method in class neureka.dtype.custom.F64
+
readAndConvertForeignDataFrom(Iterator<T>, int) - Method in class neureka.dtype.custom.I8
 
-
readAndConvertForeignDataFrom(Iterator<T>, int) - Method in class neureka.dtype.custom.I16
+
readAndConvertForeignDataFrom(DataInput, int) - Method in class neureka.dtype.custom.UI16
 
-
readAndConvertForeignDataFrom(Iterator<T>, int) - Method in class neureka.dtype.custom.I32
+
readAndConvertForeignDataFrom(Iterator<T>, int) - Method in class neureka.dtype.custom.UI16
 
-
readAndConvertForeignDataFrom(Iterator<T>, int) - Method in class neureka.dtype.custom.I64
+
readAndConvertForeignDataFrom(DataInput, int) - Method in class neureka.dtype.custom.UI32
 
-
readAndConvertForeignDataFrom(Iterator<T>, int) - Method in class neureka.dtype.custom.I8
+
readAndConvertForeignDataFrom(Iterator<T>, int) - Method in class neureka.dtype.custom.UI32
 
-
readAndConvertForeignDataFrom(Iterator<T>, int) - Method in class neureka.dtype.custom.UI16
+
readAndConvertForeignDataFrom(DataInput, int) - Method in class neureka.dtype.custom.UI64
 
-
readAndConvertForeignDataFrom(Iterator<T>, int) - Method in class neureka.dtype.custom.UI32
+
readAndConvertForeignDataFrom(Iterator<T>, int) - Method in class neureka.dtype.custom.UI64
 
-
readAndConvertForeignDataFrom(Iterator<T>, int) - Method in class neureka.dtype.custom.UI64
+
readAndConvertForeignDataFrom(DataInput, int) - Method in class neureka.dtype.custom.UI8
 
-
readAndConvertForeignDataFrom(Iterator<T>, int) - Method in class neureka.dtype.custom.UI8
+
readAndConvertForeignDataFrom(Iterator<T>, int) - Method in class neureka.dtype.custom.UI8
 
-
readAndConvertForeignDataFrom(Iterator<T>, int) - Method in interface neureka.dtype.NumericType
+
readAndConvertForeignDataFrom(DataInput, int) - Method in interface neureka.dtype.NumericType
+
+
This method expects the provided stream to spit out bytes which can be read as holder type elements.
+
+
readAndConvertForeignDataFrom(Iterator<T>, int) - Method in interface neureka.dtype.NumericType
This method expects the provided iterator to return elements which can be read as holder type elements.
-
readArray(Class<A>, int, int) - Method in interface neureka.devices.Device.Access
+
readArray(Class<A>, int, int) - Method in interface neureka.devices.Device.Access
Use this to read an array of items from the accessed tensor by specifying a start position of the chunk of data that should be read.
-
readAt(int) - Method in interface neureka.devices.Device.Access
+
readAt(int) - Method in interface neureka.devices.Device.Access
Find a particular tensor item by providing its location.
-
readForeignDataFrom(DataInput, int) - Method in class neureka.dtype.custom.F32
+
readForeignDataFrom(DataInput, int) - Method in class neureka.dtype.custom.F32
 
-
readForeignDataFrom(DataInput, int) - Method in class neureka.dtype.custom.F64
+
readForeignDataFrom(Iterator<T>, int) - Method in class neureka.dtype.custom.F32
 
-
readForeignDataFrom(DataInput, int) - Method in class neureka.dtype.custom.I16
+
readForeignDataFrom(DataInput, int) - Method in class neureka.dtype.custom.F64
 
-
readForeignDataFrom(DataInput, int) - Method in class neureka.dtype.custom.I32
+
readForeignDataFrom(Iterator<T>, int) - Method in class neureka.dtype.custom.F64
 
-
readForeignDataFrom(DataInput, int) - Method in class neureka.dtype.custom.I64
+
readForeignDataFrom(DataInput, int) - Method in class neureka.dtype.custom.I16
 
-
readForeignDataFrom(DataInput, int) - Method in class neureka.dtype.custom.I8
+
readForeignDataFrom(Iterator<T>, int) - Method in class neureka.dtype.custom.I16
 
-
readForeignDataFrom(DataInput, int) - Method in class neureka.dtype.custom.UI16
+
readForeignDataFrom(DataInput, int) - Method in class neureka.dtype.custom.I32
 
-
readForeignDataFrom(DataInput, int) - Method in class neureka.dtype.custom.UI32
+
readForeignDataFrom(Iterator<T>, int) - Method in class neureka.dtype.custom.I32
 
-
readForeignDataFrom(DataInput, int) - Method in class neureka.dtype.custom.UI64
+
readForeignDataFrom(DataInput, int) - Method in class neureka.dtype.custom.I64
 
-
readForeignDataFrom(DataInput, int) - Method in class neureka.dtype.custom.UI8
+
readForeignDataFrom(Iterator<T>, int) - Method in class neureka.dtype.custom.I64
 
-
readForeignDataFrom(DataInput, int) - Method in interface neureka.dtype.NumericType
-
-
This method expects the provided stream to spit out bytes which can be read as target type elements.
-
-
readForeignDataFrom(Iterator<T>, int) - Method in class neureka.dtype.custom.F32
+
readForeignDataFrom(DataInput, int) - Method in class neureka.dtype.custom.I8
 
-
readForeignDataFrom(Iterator<T>, int) - Method in class neureka.dtype.custom.F64
+
readForeignDataFrom(Iterator<T>, int) - Method in class neureka.dtype.custom.I8
 
-
readForeignDataFrom(Iterator<T>, int) - Method in class neureka.dtype.custom.I16
+
readForeignDataFrom(DataInput, int) - Method in class neureka.dtype.custom.UI16
 
-
readForeignDataFrom(Iterator<T>, int) - Method in class neureka.dtype.custom.I32
+
readForeignDataFrom(Iterator<T>, int) - Method in class neureka.dtype.custom.UI16
 
-
readForeignDataFrom(Iterator<T>, int) - Method in class neureka.dtype.custom.I64
+
readForeignDataFrom(DataInput, int) - Method in class neureka.dtype.custom.UI32
 
-
readForeignDataFrom(Iterator<T>, int) - Method in class neureka.dtype.custom.I8
+
readForeignDataFrom(Iterator<T>, int) - Method in class neureka.dtype.custom.UI32
 
-
readForeignDataFrom(Iterator<T>, int) - Method in class neureka.dtype.custom.UI16
+
readForeignDataFrom(DataInput, int) - Method in class neureka.dtype.custom.UI64
 
-
readForeignDataFrom(Iterator<T>, int) - Method in class neureka.dtype.custom.UI32
+
readForeignDataFrom(Iterator<T>, int) - Method in class neureka.dtype.custom.UI64
 
-
readForeignDataFrom(Iterator<T>, int) - Method in class neureka.dtype.custom.UI64
+
readForeignDataFrom(DataInput, int) - Method in class neureka.dtype.custom.UI8
 
-
readForeignDataFrom(Iterator<T>, int) - Method in class neureka.dtype.custom.UI8
+
readForeignDataFrom(Iterator<T>, int) - Method in class neureka.dtype.custom.UI8
 
-
readForeignDataFrom(Iterator<T>, int) - Method in interface neureka.dtype.NumericType
+
readForeignDataFrom(DataInput, int) - Method in interface neureka.dtype.NumericType
+
+
This method expects the provided stream to spit out bytes which can be read as target type elements.
+
+
readForeignDataFrom(Iterator<T>, int) - Method in interface neureka.dtype.NumericType
This method expects the provided iterator to return elements which can be read as holder type elements.
-
readResource(String) - Method in class neureka.Neureka.Utility
+
readResource(String) - Method in class neureka.Neureka.Utility
Helper method which reads the file with the given name and returns the contents of this file as a String.
-
rearrange(int[], int[]) - Static method in class neureka.ndim.config.NDConfiguration.Utility
+
rearrange(int[], int[], int[]) - Method in enum neureka.ndim.config.NDConfiguration.Layout
 
-
rearrange(int[], int[], int[]) - Method in enum class neureka.ndim.config.NDConfiguration.Layout
+
rearrange(int[], int[]) - Static method in class neureka.ndim.config.NDConfiguration.Utility
 
-
rearrangeInputs(int...) - Method in class neureka.backend.api.Call
+
rearrangeInputs(int...) - Method in class neureka.backend.api.Call
 
-
ReceiveForDevice<D extends Device<?>> - Interface in neureka.backend.api.ini
+
ReceiveForDevice<D extends Device<?>> - Interface in neureka.backend.api.ini
 
-
ReceiveForOperation<D extends Device<?>> - Interface in neureka.backend.api.ini
+
ReceiveForOperation<D extends Device<?>> - Interface in neureka.backend.api.ini
 
-
recompile() - Method in class neureka.devices.opencl.OpenCLPlatform
+
recompile() - Method in class neureka.devices.opencl.OpenCLPlatform
 
-
ReferenceCounter - Class in neureka.devices
+
ReferenceCounter - Class in neureka.devices
 
-
ReferenceCounter(Consumer<ReferenceCounter.ChangeEvent>) - Constructor for class neureka.devices.ReferenceCounter
+
ReferenceCounter(Consumer<ReferenceCounter.ChangeEvent>) - Constructor for class neureka.devices.ReferenceCounter
 
-
ReferenceCounter.ChangeEvent - Class in neureka.devices
+
ReferenceCounter.ChangeEvent - Class in neureka.devices
 
-
ReferenceCounter.ChangeType - Enum Class in neureka.devices
+
ReferenceCounter.ChangeType - Enum in neureka.devices
 
-
register(Object, Runnable) - Method in interface neureka.devices.DeviceCleaner
+
register(Object, Runnable) - Method in interface neureka.devices.DeviceCleaner
 
-
Relation<V> - Class in neureka.framing
+
Relation<V> - Class in neureka.framing
This class is an important tensor component responsible for managing the relationships between slices and the tensors from which they have been derived.
-
relayout() - Method in class neureka.math.Functions
+
ReLayout - Class in neureka.backend.main.operations.other
 
-
ReLayout - Class in neureka.backend.main.operations.other
+
ReLayout() - Constructor for class neureka.backend.main.operations.other.ReLayout
 
-
ReLayout() - Constructor for class neureka.backend.main.operations.other.ReLayout
+
relayout() - Method in class neureka.math.Functions
 
-
relu() - Method in class neureka.math.Functions
+
RELU - Static variable in interface neureka.backend.main.implementations.fun.api.ScalarFun
 
-
relu() - Method in interface neureka.Tensor
-
-
This method is functionally identical to the following alternatives:
-
-
ReLU - Class in neureka.backend.main.operations.functions
+
ReLU - Class in neureka.backend.main.operations.functions
 
-
ReLU() - Constructor for class neureka.backend.main.operations.functions.ReLU
+
ReLU() - Constructor for class neureka.backend.main.operations.functions.ReLU
 
-
RELU - Static variable in interface neureka.backend.main.implementations.fun.api.ScalarFun
+
relu() - Method in class neureka.math.Functions
 
-
rem(int) - Method in interface neureka.Tensor
+
relu() - Method in interface neureka.Tensor
+
+
This method is functionally identical to the following alternatives:
+
+
rem(int) - Method in interface neureka.Tensor
-
This method is synonymous to the Tensor.mod(int) method.
+
This method is synonymous to the Tensor.mod(int) method.
-
remove(Class<T>) - Method in class neureka.common.composition.AbstractComponentOwner
+
remove(Class<T>) - Method in class neureka.common.composition.AbstractComponentOwner
This method removes a component identified by the passed Class instance if found in the stored component collection.
-
remove(Class<T>) - Method in interface neureka.common.composition.ComponentOwner
+
remove(Class<T>) - Method in interface neureka.common.composition.ComponentOwner
Use this to remove a component of the specified component type class.
-
removeChild(Tensor<V>) - Method in class neureka.framing.Relation
-
 
-
REMOVED - Enum constant in enum class neureka.common.composition.Component.IsBeing
+
removeChild(Tensor<V>) - Method in class neureka.framing.Relation
 
-
replace(Object) - Method in class neureka.framing.fluent.AxisFrame
+
replace(Object) - Method in class neureka.framing.fluent.AxisFrame
 
-
replace(ValueType) - Method in interface neureka.framing.fluent.Replace
+
Replace<ValueType,ReplacementType,ReturnType> - Interface in neureka.framing.fluent
 
-
Replace<ValueType,ReplacementType,ReturnType> - Interface in neureka.framing.fluent
+
replace(ValueType) - Method in interface neureka.framing.fluent.Replace
 
-
REPLACED - Enum constant in enum class neureka.common.composition.Component.IsBeing
+
replacer(Replace<Object, Object, NDFrame<ValueType>>) - Method in class neureka.framing.fluent.AxisFrame.Builder
 
-
replacer(Replace<Object, Object, NDFrame<ValueType>>) - Method in class neureka.framing.fluent.AxisFrame.Builder
-
 
-
representing(Nda<?>) - Static method in class neureka.view.NdaAsString
+
representing(Nda<?>) - Static method in class neureka.view.NdaAsString
A builder providing multiple different configuration options for building a NdaAsString instance in a fluent way.
-
reset() - Method in class neureka.backend.api.BackendContext
+
reset() - Method in class neureka.backend.api.BackendContext
 
-
reset() - Method in interface neureka.backend.api.BackendExtension
+
reset() - Method in interface neureka.backend.api.BackendExtension
-
This will indirectly be called through the Neureka.reset() method, which is responsible for resetting the library settings.
+
This will indirectly be called through the Neureka.reset() method, which is responsible for resetting the library settings.
-
reset() - Method in class neureka.backend.ocl.CLBackend
+
reset() - Method in class neureka.backend.ocl.CLBackend
 
-
reset() - Method in class neureka.backend.ocl.CLSettings
+
reset() - Method in class neureka.backend.ocl.CLSettings
 
-
reset() - Method in class neureka.Neureka
+
reset() - Method in class neureka.Neureka
This method will try to reload the "library_settings.groovy" script which will re-configure the library wide Neureka.Settings instance nested inside Neureka.
-
reshape() - Method in class neureka.math.Functions
+
Reshape - Class in neureka.backend.main.operations.other
+
 
+
Reshape() - Constructor for class neureka.backend.main.operations.other.Reshape
 
-
reshape(int...) - Method in interface neureka.Nda
+
reshape() - Method in class neureka.math.Functions
+
 
+
reshape(int...) - Method in interface neureka.Nda
Returns a nd-array with the same data and number of elements as this nd-array, but with the specified shape.
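A small sketch of the contract, same data and element count under a new shape (the factory chain is an assumption):

    // Hedged sketch: assumed factory chain.
    Nda<Double> v = Tensor.of(Double.class).withShape(6).all(0d);
    Nda<Double> m = v.reshape(2, 3); // still 6 elements, now viewed as a 2x3 nd-array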
-
reshape(int...) - Method in interface neureka.Tensor
+
reshape(int...) - Method in interface neureka.Tensor
Returns a nd-array with the same data and number of elements as this nd-array, but with the specified shape.
-
Reshape - Class in neureka.backend.main.operations.other
-
 
-
Reshape() - Constructor for class neureka.backend.main.operations.other.Reshape
+
resolve() - Method in class neureka.fluent.slicing.AxisSliceBuilder
 
-
resolve() - Method in class neureka.fluent.slicing.AxisSliceBuilder
+
restore(Tensor<Object>) - Method in class neureka.devices.file.FileDevice
+
restore(Tensor<Object>) - Method in class neureka.devices.host.CPU
 
-
restore(Tensor<Number>) - Method in class neureka.devices.opencl.OpenCLDevice
+
restore(Tensor<Number>) - Method in class neureka.devices.opencl.OpenCLDevice
This method assumes that the passed tensor is stored on this device instance.
-
restore(Tensor<Object>) - Method in class neureka.devices.file.FileDevice
-
restore(Tensor<Object>) - Method in class neureka.devices.host.CPU
-
 
-
restore(Tensor<Number>) - Method in class neureka.devices.file.IDXHandle
-
 
-
restore(Tensor<V>) - Method in interface neureka.devices.Storage
+
restore(Tensor<V>) - Method in interface neureka.devices.Storage
 
-
Result - Class in neureka.backend.api
+
Result - Class in neureka.backend.api
An immutable wrapper for a tensor as a result of an Execution as well as an ADActionSupplier for providing auto-differentiation support.
-
RGB_1INT - Enum constant in enum class neureka.Tensor.ImageType
-
 
-
RGB_555_USHORT - Enum constant in enum class neureka.Tensor.ImageType
-
 
-
RGB_565_USHORT - Enum constant in enum class neureka.Tensor.ImageType
-
 
-
RMSProp<V extends Number> - Class in neureka.optimization.implementations
+
RMSProp<V extends java.lang.Number> - Class in neureka.optimization.implementations
Root Mean Squared Propagation, or RMSProp, is an extension of gradient descent and the AdaGrad version of gradient descent that uses a decaying average of partial gradients in the adaptation of the step size for each parameter.
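For reference, the textbook RMSProp update rule is shown below; the decay rate, learning rate and epsilon used by this particular class are not specified here and are assumed to follow the usual convention:

    \[
    v_t = \rho\, v_{t-1} + (1 - \rho)\, g_t^2, \qquad
    \theta_{t+1} = \theta_t - \frac{\eta}{\sqrt{v_t} + \epsilon}\, g_t
    \]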
-
RMSProp - Static variable in interface neureka.optimization.Optimizer
-
 
-
RMSPropFactory - Class in neureka.optimization.implementations
+
RMSProp - Static variable in interface neureka.optimization.Optimizer
 
-
RMSPropFactory() - Constructor for class neureka.optimization.implementations.RMSPropFactory
+
RMSPropFactory - Class in neureka.optimization.implementations
 
-
ROW_MAJOR - Enum constant in enum class neureka.ndim.config.NDConfiguration.Layout
+
RMSPropFactory() - Constructor for class neureka.optimization.implementations.RMSPropFactory
 
-
ROW_MAJOR - Enum constant in enum class neureka.ndim.config.NDTrait
-
 
-
rqsGradient() - Method in interface neureka.Tensor
+
rqsGradient() - Method in interface neureka.Tensor
This flag will indirectly trigger the activation of the autograd / auto-differentiation system of this library! If the flag is set to 'true' and the tensor is used for computation then it will also receive gradients when the Tensor.backward() method is being called on any descendant tensor within the computation graph.
-
run(Runnable) - Method in class neureka.backend.api.BackendContext.Runner
+
run(Runnable) - Method in class neureka.backend.api.BackendContext.Runner
Use this method to supply a lambda which will be executed in the BackendContext which produced this very BackendContext.Runner instance.
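A hedged sketch of the pattern; obtaining the context via Neureka.get().backend() is an assumption made for illustration:

    // Hedged sketch: the context accessor is assumed; runner() and run(Runnable) are listed in this index.
    BackendContext ctx = Neureka.get().backend();
    ctx.runner().run(() -> {
        // code here runs with 'ctx' as the active backend context;
        // the caller's own context is restored afterwards
    });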
-
run(ExecutionCall<D>) - Method in interface neureka.backend.api.ImplementationFor
+
run(ExecutionCall<D>) - Method in interface neureka.backend.api.ImplementationFor
This method is the entrypoint for a concrete implementation of the algorithm to which instances of this interface belong and the device on which this is implemented.
-
run(ExecutionCall<D>) - Method in class neureka.backend.api.template.implementations.AbstractImplementationFor
-
 
-
run(ExecutionCall<CPU>) - Method in class neureka.backend.main.implementations.broadcast.CPUBroadcast
+
run(ExecutionCall<D>) - Method in class neureka.backend.api.template.implementations.AbstractImplementationFor
 
-
run(ExecutionCall<CPU>) - Method in class neureka.backend.main.implementations.broadcast.CPUScalarBroadcast
+
run(ExecutionCall<OpenCLDevice>) - Method in class neureka.backend.main.implementations.broadcast.CLScalarBroadcastAddition
 
-
run(ExecutionCall<CPU>) - Method in class neureka.backend.main.implementations.broadcast.CPUScalarBroadcastAddition
+
run(ExecutionCall<OpenCLDevice>) - Method in class neureka.backend.main.implementations.broadcast.CLScalarBroadcastDivision
 
-
run(ExecutionCall<CPU>) - Method in class neureka.backend.main.implementations.broadcast.CPUScalarBroadcastMultiplication
+
run(ExecutionCall<OpenCLDevice>) - Method in class neureka.backend.main.implementations.broadcast.CLScalarBroadcastIdentity
 
-
run(ExecutionCall<CPU>) - Method in class neureka.backend.main.implementations.convolution.AbstractCPUConvolution
+
run(ExecutionCall<OpenCLDevice>) - Method in class neureka.backend.main.implementations.broadcast.CLScalarBroadcastModulo
 
-
run(ExecutionCall<CPU>) - Method in class neureka.backend.main.implementations.elementwise.CPUBiElementWise
+
run(ExecutionCall<OpenCLDevice>) - Method in class neureka.backend.main.implementations.broadcast.CLScalarBroadcastMultiplication
 
-
run(ExecutionCall<CPU>) - Method in class neureka.backend.main.implementations.elementwise.CPUElementwiseAssignFun
+
run(ExecutionCall<OpenCLDevice>) - Method in class neureka.backend.main.implementations.broadcast.CLScalarBroadcastPower
 
-
run(ExecutionCall<CPU>) - Method in class neureka.backend.main.implementations.elementwise.CPUElementwiseFunction
+
run(ExecutionCall<OpenCLDevice>) - Method in class neureka.backend.main.implementations.broadcast.CLScalarBroadcastSubtraction
 
-
run(ExecutionCall<CPU>) - Method in class neureka.backend.main.implementations.elementwise.CPURandomization
+
run(ExecutionCall<CPU>) - Method in class neureka.backend.main.implementations.broadcast.CPUBroadcast
 
-
run(ExecutionCall<CPU>) - Method in class neureka.backend.main.implementations.linear.CPUDot
+
run(ExecutionCall<CPU>) - Method in class neureka.backend.main.implementations.broadcast.CPUScalarBroadcast
 
-
run(ExecutionCall<CPU>) - Method in class neureka.backend.main.implementations.matmul.CPUMatMul
+
run(ExecutionCall<CPU>) - Method in class neureka.backend.main.implementations.broadcast.CPUScalarBroadcastAddition
 
-
run(ExecutionCall<CPU>) - Method in class neureka.backend.main.implementations.scalar.CPUScalarBroadcastFunction
+
run(ExecutionCall<CPU>) - Method in class neureka.backend.main.implementations.broadcast.CPUScalarBroadcastMultiplication
 
-
run(ExecutionCall<CPU>) - Method in class neureka.backend.main.implementations.scalar.CPUScalarFunction
+
run(ExecutionCall<CPU>) - Method in class neureka.backend.main.implementations.convolution.AbstractCPUConvolution
 
-
run(ExecutionCall<CPU>) - Method in class neureka.backend.main.operations.other.internal.CPUReduce
+
run(ExecutionCall<OpenCLDevice>) - Method in class neureka.backend.main.implementations.elementwise.CLRandomization
 
-
run(ExecutionCall<CPU>) - Method in class neureka.backend.main.operations.other.internal.CPUSum
+
run(ExecutionCall<CPU>) - Method in class neureka.backend.main.implementations.elementwise.CPUBiElementWise
 
-
run(ExecutionCall<OpenCLDevice>) - Method in class neureka.backend.main.implementations.broadcast.CLScalarBroadcastAddition
+
run(ExecutionCall<CPU>) - Method in class neureka.backend.main.implementations.elementwise.CPUElementwiseAssignFun
 
-
run(ExecutionCall<OpenCLDevice>) - Method in class neureka.backend.main.implementations.broadcast.CLScalarBroadcastDivision
+
run(ExecutionCall<CPU>) - Method in class neureka.backend.main.implementations.elementwise.CPUElementwiseFunction
 
-
run(ExecutionCall<OpenCLDevice>) - Method in class neureka.backend.main.implementations.broadcast.CLScalarBroadcastIdentity
+
run(ExecutionCall<CPU>) - Method in class neureka.backend.main.implementations.elementwise.CPURandomization
 
-
run(ExecutionCall<OpenCLDevice>) - Method in class neureka.backend.main.implementations.broadcast.CLScalarBroadcastModulo
+
run(ExecutionCall<OpenCLDevice>) - Method in class neureka.backend.main.implementations.linear.CLDot
 
-
run(ExecutionCall<OpenCLDevice>) - Method in class neureka.backend.main.implementations.broadcast.CLScalarBroadcastMultiplication
+
run(ExecutionCall<CPU>) - Method in class neureka.backend.main.implementations.linear.CPUDot
 
-
run(ExecutionCall<OpenCLDevice>) - Method in class neureka.backend.main.implementations.broadcast.CLScalarBroadcastPower
+
run(ExecutionCall<CPU>) - Method in class neureka.backend.main.implementations.matmul.CPUMatMul
 
-
run(ExecutionCall<OpenCLDevice>) - Method in class neureka.backend.main.implementations.broadcast.CLScalarBroadcastSubtraction
+
run(ExecutionCall<OpenCLDevice>) - Method in class neureka.backend.main.implementations.scalar.CLScalarFunction
 
-
run(ExecutionCall<OpenCLDevice>) - Method in class neureka.backend.main.implementations.elementwise.CLRandomization
+
run(ExecutionCall<CPU>) - Method in class neureka.backend.main.implementations.scalar.CPUScalarBroadcastFunction
 
-
run(ExecutionCall<OpenCLDevice>) - Method in class neureka.backend.main.implementations.linear.CLDot
+
run(ExecutionCall<CPU>) - Method in class neureka.backend.main.implementations.scalar.CPUScalarFunction
 
-
run(ExecutionCall<OpenCLDevice>) - Method in class neureka.backend.main.implementations.scalar.CLScalarFunction
+
run(ExecutionCall<OpenCLDevice>) - Method in class neureka.backend.main.operations.linear.internal.opencl.CLGEMM
 
-
run(ExecutionCall<OpenCLDevice>) - Method in class neureka.backend.main.operations.linear.internal.opencl.CLGEMM
+
run(ExecutionCall<OpenCLDevice>) - Method in class neureka.backend.main.operations.linear.internal.opencl.CLReduce
 
-
run(ExecutionCall<OpenCLDevice>) - Method in class neureka.backend.main.operations.linear.internal.opencl.CLReduce
+
run(ExecutionCall<OpenCLDevice>) - Method in class neureka.backend.main.operations.linear.internal.opencl.CLSum
 
-
run(ExecutionCall<OpenCLDevice>) - Method in class neureka.backend.main.operations.linear.internal.opencl.CLSum
-
 
-
run(Tensor<Float>, OpenCLDevice) - Static method in class neureka.backend.main.operations.linear.internal.opencl.CLSum
+
run(Tensor<Float>, OpenCLDevice) - Static method in class neureka.backend.main.operations.linear.internal.opencl.CLSum
This method compiles and executes the kernel that will return the sum of the elements in the in tensor.
-
runAndGet(Supplier<T>) - Method in class neureka.backend.api.BackendContext.Runner
+
run(ExecutionCall<CPU>) - Method in class neureka.backend.main.operations.other.internal.CPUReduce
+
 
+
run(ExecutionCall<CPU>) - Method in class neureka.backend.main.operations.other.internal.CPUSum
+
 
+
runAndGet(Supplier<T>) - Method in class neureka.backend.api.BackendContext.Runner
Use this method to supply a lambda which will be executed in the BackendContext which produced this very BackendContext.Runner instance.
-
runner() - Method in class neureka.backend.api.BackendContext
+
runner() - Method in class neureka.backend.api.BackendContext
A BackendContext.Runner wraps both the called context as well as the context of the caller in order to perform temporary context switching during the execution of lambdas passed to the BackendContext.Runner.
-
running(Operation) - Method in class neureka.backend.api.ExecutionCall.Builder
+
running(Operation) - Method in class neureka.backend.api.ExecutionCall.Builder
 
-

S

-
-
scalar(V) - Method in class neureka.fluent.building.NdaBuilder
+
+
scalar(V) - Method in class neureka.fluent.building.NdaBuilder
 
-
scalar(V) - Method in interface neureka.fluent.building.states.WithShapeOrScalarOrVector
+
scalar(V) - Method in interface neureka.fluent.building.states.WithShapeOrScalarOrVector
This method creates and returns a scalar Tensor instance which wraps the provided value.
-
scalar(V) - Method in interface neureka.fluent.building.states.WithShapeOrScalarOrVectorTensor
+
scalar(V) - Method in interface neureka.fluent.building.states.WithShapeOrScalarOrVectorTensor
This method creates and returns a scalar Tensor instance which wraps the provided value.
-
ScalarAbsolute - Class in neureka.backend.main.implementations.fun
+
ScalarAbsolute - Class in neureka.backend.main.implementations.fun
 
-
ScalarAbsolute() - Constructor for class neureka.backend.main.implementations.fun.ScalarAbsolute
+
ScalarAbsolute() - Constructor for class neureka.backend.main.implementations.fun.ScalarAbsolute
 
-
ScalarAlgorithm - Class in neureka.backend.main.algorithms
+
ScalarAlgorithm - Class in neureka.backend.main.algorithms
 
-
ScalarAlgorithm() - Constructor for class neureka.backend.main.algorithms.ScalarAlgorithm
+
ScalarAlgorithm() - Constructor for class neureka.backend.main.algorithms.ScalarAlgorithm
 
-
ScalarBroadcast - Class in neureka.backend.main.algorithms
+
ScalarBroadcast - Class in neureka.backend.main.algorithms
 
-
ScalarBroadcast(ScalarFun) - Constructor for class neureka.backend.main.algorithms.ScalarBroadcast
+
ScalarBroadcast(ScalarFun) - Constructor for class neureka.backend.main.algorithms.ScalarBroadcast
 
-
ScalarCbrt - Class in neureka.backend.main.implementations.fun
+
ScalarCbrt - Class in neureka.backend.main.implementations.fun
 
-
ScalarCbrt() - Constructor for class neureka.backend.main.implementations.fun.ScalarCbrt
+
ScalarCbrt() - Constructor for class neureka.backend.main.implementations.fun.ScalarCbrt
 
-
ScalarCosinus - Class in neureka.backend.main.implementations.fun
+
ScalarCosinus - Class in neureka.backend.main.implementations.fun
 
-
ScalarCosinus() - Constructor for class neureka.backend.main.implementations.fun.ScalarCosinus
+
ScalarCosinus() - Constructor for class neureka.backend.main.implementations.fun.ScalarCosinus
 
-
ScalarExp - Class in neureka.backend.main.implementations.fun
+
ScalarExp - Class in neureka.backend.main.implementations.fun
 
-
ScalarExp() - Constructor for class neureka.backend.main.implementations.fun.ScalarExp
+
ScalarExp() - Constructor for class neureka.backend.main.implementations.fun.ScalarExp
 
-
ScalarFun - Interface in neureka.backend.main.implementations.fun.api
+
ScalarFun - Interface in neureka.backend.main.implementations.fun.api
 
-
ScalarGaSU - Class in neureka.backend.main.implementations.fun
+
ScalarGaSU - Class in neureka.backend.main.implementations.fun
The Self Gated ScalarSoftsign Unit is based on the ScalarSoftsign function (a computationally cheap non-exponential quasi ScalarTanh) making it a polynomially based version of the ScalarGaTU function which is itself based on the ScalarTanh function.
-
ScalarGaSU() - Constructor for class neureka.backend.main.implementations.fun.ScalarGaSU
+
ScalarGaSU() - Constructor for class neureka.backend.main.implementations.fun.ScalarGaSU
 
-
ScalarGaTU - Class in neureka.backend.main.implementations.fun
+
ScalarGaTU - Class in neureka.backend.main.implementations.fun
The Self Gated ScalarTanh Unit is based on the ScalarTanh making it an exponentiation based version of the ScalarGaSU function which is itself based on the ScalarSoftsign function (a computationally cheap non-exponential quasi ScalarTanh).
-
ScalarGaTU() - Constructor for class neureka.backend.main.implementations.fun.ScalarGaTU
+
ScalarGaTU() - Constructor for class neureka.backend.main.implementations.fun.ScalarGaTU
 
-
ScalarGaussian - Class in neureka.backend.main.implementations.fun
+
ScalarGaussian - Class in neureka.backend.main.implementations.fun
 
-
ScalarGaussian() - Constructor for class neureka.backend.main.implementations.fun.ScalarGaussian
+
ScalarGaussian() - Constructor for class neureka.backend.main.implementations.fun.ScalarGaussian
 
-
ScalarGaussianFast - Class in neureka.backend.main.implementations.fun
+
ScalarGaussianFast - Class in neureka.backend.main.implementations.fun
 
-
ScalarGaussianFast() - Constructor for class neureka.backend.main.implementations.fun.ScalarGaussianFast
+
ScalarGaussianFast() - Constructor for class neureka.backend.main.implementations.fun.ScalarGaussianFast
 
-
ScalarGeLU - Class in neureka.backend.main.implementations.fun
+
ScalarGeLU - Class in neureka.backend.main.implementations.fun
The GELU activation function is based on the standard Gaussian cumulative distribution function and is defined as x Φ( x ) and implemented as x * sigmoid(x * 1.702).
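Written out, the two forms mentioned above are:

    \[
    \mathrm{GELU}(x) = x\,\Phi(x) \;\approx\; x\,\sigma(1.702\,x),
    \qquad \sigma(z) = \frac{1}{1 + e^{-z}}
    \]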
-
ScalarGeLU() - Constructor for class neureka.backend.main.implementations.fun.ScalarGeLU
+
ScalarGeLU() - Constructor for class neureka.backend.main.implementations.fun.ScalarGeLU
 
-
ScalarIdentity - Class in neureka.backend.main.implementations.fun
+
ScalarIdentity - Class in neureka.backend.main.implementations.fun
 
-
ScalarIdentity() - Constructor for class neureka.backend.main.implementations.fun.ScalarIdentity
+
ScalarIdentity() - Constructor for class neureka.backend.main.implementations.fun.ScalarIdentity
 
-
ScalarLog10 - Class in neureka.backend.main.implementations.fun
+
ScalarLog10 - Class in neureka.backend.main.implementations.fun
 
-
ScalarLog10() - Constructor for class neureka.backend.main.implementations.fun.ScalarLog10
+
ScalarLog10() - Constructor for class neureka.backend.main.implementations.fun.ScalarLog10
 
-
ScalarLogarithm - Class in neureka.backend.main.implementations.fun
+
ScalarLogarithm - Class in neureka.backend.main.implementations.fun
 
-
ScalarLogarithm() - Constructor for class neureka.backend.main.implementations.fun.ScalarLogarithm
+
ScalarLogarithm() - Constructor for class neureka.backend.main.implementations.fun.ScalarLogarithm
 
-
ScalarQuadratic - Class in neureka.backend.main.implementations.fun
+
ScalarQuadratic - Class in neureka.backend.main.implementations.fun
 
-
ScalarQuadratic() - Constructor for class neureka.backend.main.implementations.fun.ScalarQuadratic
+
ScalarQuadratic() - Constructor for class neureka.backend.main.implementations.fun.ScalarQuadratic
 
-
ScalarReLU - Class in neureka.backend.main.implementations.fun
+
ScalarReLU - Class in neureka.backend.main.implementations.fun
 
-
ScalarReLU() - Constructor for class neureka.backend.main.implementations.fun.ScalarReLU
+
ScalarReLU() - Constructor for class neureka.backend.main.implementations.fun.ScalarReLU
 
-
ScalarSeLU - Class in neureka.backend.main.implementations.fun
+
ScalarSeLU - Class in neureka.backend.main.implementations.fun
The Scaled Exponential Linear Unit, or SELU, is an activation function that induces self-normalizing properties.
-
ScalarSeLU() - Constructor for class neureka.backend.main.implementations.fun.ScalarSeLU
+
ScalarSeLU() - Constructor for class neureka.backend.main.implementations.fun.ScalarSeLU
 
-
ScalarSigmoid - Class in neureka.backend.main.implementations.fun
+
ScalarSigmoid - Class in neureka.backend.main.implementations.fun
 
-
ScalarSigmoid() - Constructor for class neureka.backend.main.implementations.fun.ScalarSigmoid
+
ScalarSigmoid() - Constructor for class neureka.backend.main.implementations.fun.ScalarSigmoid
 
-
ScalarSiLU - Class in neureka.backend.main.implementations.fun
+
ScalarSiLU - Class in neureka.backend.main.implementations.fun
The SiLu activation function, also known as the swish function, is defined as x * sigmoid(x).
-
ScalarSiLU() - Constructor for class neureka.backend.main.implementations.fun.ScalarSiLU
+
ScalarSiLU() - Constructor for class neureka.backend.main.implementations.fun.ScalarSiLU
 
-
ScalarSinus - Class in neureka.backend.main.implementations.fun
+
ScalarSinus - Class in neureka.backend.main.implementations.fun
 
-
ScalarSinus() - Constructor for class neureka.backend.main.implementations.fun.ScalarSinus
+
ScalarSinus() - Constructor for class neureka.backend.main.implementations.fun.ScalarSinus
 
-
ScalarSoftplus - Class in neureka.backend.main.implementations.fun
+
ScalarSoftplus - Class in neureka.backend.main.implementations.fun
SoftPlus is a smooth approximation to the ReLU function and can be used to constrain the output of a machine to always be positive.
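The usual closed form of the function described here is:

    \[
    \mathrm{softplus}(x) = \ln\left(1 + e^{x}\right) > 0 \quad \text{for all } x
    \]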
-
ScalarSoftplus() - Constructor for class neureka.backend.main.implementations.fun.ScalarSoftplus
+
ScalarSoftplus() - Constructor for class neureka.backend.main.implementations.fun.ScalarSoftplus
 
-
ScalarSoftsign - Class in neureka.backend.main.implementations.fun
+
ScalarSoftsign - Class in neureka.backend.main.implementations.fun
The softsign function, defined as x / ( 1 + Math.abs( x ) ), is a computationally cheap 0 centered activation function which rescales the inputs between -1 and 1, very much like the ScalarTanh function.
-
ScalarSoftsign() - Constructor for class neureka.backend.main.implementations.fun.ScalarSoftsign
+
ScalarSoftsign() - Constructor for class neureka.backend.main.implementations.fun.ScalarSoftsign
+
 
+
ScalarSqrt - Class in neureka.backend.main.implementations.fun
 
-
ScalarSqrt - Class in neureka.backend.main.implementations.fun
+
ScalarSqrt() - Constructor for class neureka.backend.main.implementations.fun.ScalarSqrt
 
-
ScalarSqrt() - Constructor for class neureka.backend.main.implementations.fun.ScalarSqrt
+
ScalarSumAlgorithm - Class in neureka.backend.main.algorithms
 
-
ScalarSumAlgorithm - Class in neureka.backend.main.algorithms
+
ScalarSumAlgorithm() - Constructor for class neureka.backend.main.algorithms.ScalarSumAlgorithm
 
-
ScalarSumAlgorithm() - Constructor for class neureka.backend.main.algorithms.ScalarSumAlgorithm
+
ScalarTanh - Class in neureka.backend.main.implementations.fun
 
-
ScalarTanh - Class in neureka.backend.main.implementations.fun
+
ScalarTanh() - Constructor for class neureka.backend.main.implementations.fun.ScalarTanh
 
-
ScalarTanh() - Constructor for class neureka.backend.main.implementations.fun.ScalarTanh
+
ScalarTanhFast - Class in neureka.backend.main.implementations.fun
 
-
ScalarTanhFast - Class in neureka.backend.main.implementations.fun
+
ScalarTanhFast() - Constructor for class neureka.backend.main.implementations.fun.ScalarTanhFast
 
-
ScalarTanhFast() - Constructor for class neureka.backend.main.implementations.fun.ScalarTanhFast
+
SELU - Static variable in interface neureka.backend.main.implementations.fun.api.ScalarFun
 
-
selu() - Method in class neureka.math.Functions
+
selu(double) - Static method in class neureka.backend.main.implementations.fun.ScalarSeLU
+
 
+
SeLU - Class in neureka.backend.main.operations.functions
The Scaled Exponential Linear Unit, or SELU, is an activation function that induces self-normalizing properties.
-
selu(double) - Static method in class neureka.backend.main.implementations.fun.ScalarSeLU
+
SeLU() - Constructor for class neureka.backend.main.operations.functions.SeLU
 
-
SeLU - Class in neureka.backend.main.operations.functions
+
selu() - Method in class neureka.math.Functions
The Scaled Exponential Linear Unit, or SELU, is an activation function that induces self-normalizing properties.
-
SeLU() - Constructor for class neureka.backend.main.operations.functions.SeLU
-
 
-
SELU - Static variable in interface neureka.backend.main.implementations.fun.api.ScalarFun
-
 
-
sequential(int, CPU.RangeWorkload) - Method in class neureka.devices.host.CPU.JVMExecutor
+
sequential(int, CPU.RangeWorkload) - Method in class neureka.devices.host.CPU.JVMExecutor
This method will simply execute the provided CPU.RangeWorkload lambda sequentially with 0 as the start index and workloadSize as the exclusive range.
-
set(int) - Method in interface neureka.framing.fluent.Set
+
set(BackendExtension) - Method in class neureka.backend.api.BackendContext
+
+
Registers the provided BackendExtension instance which can then be accessed via BackendContext.find(Class).
+
+
set(Class<? extends Operation>, Class<? extends A>, Function<LoadingContext, ImplementationFor<D>>) - Method in interface neureka.backend.api.ini.ReceiveForDevice
 
-
set(int[]) - Method in interface neureka.ndim.iterator.NDIterator
+
set(Class<? extends DeviceAlgorithm>, Function<LoadingContext, ImplementationFor<D>>) - Method in interface neureka.backend.api.ini.ReceiveForOperation
 
-
set(int[]) - Method in class neureka.ndim.iterator.types.permuted.Permuted2DCIterator
-
set(int[]) - Method in class neureka.ndim.iterator.types.permuted.Permuted3DCIterator
-
set(int[]) - Method in class neureka.ndim.iterator.types.simple.Simple1DCIterator
-
set(int[]) - Method in class neureka.ndim.iterator.types.simple.Simple2DCIterator
-
set(int[]) - Method in class neureka.ndim.iterator.types.simple.Simple3DCIterator
-
set(int[]) - Method in class neureka.ndim.iterator.types.sliced.Sliced1DCIterator
-
set(int[]) - Method in class neureka.ndim.iterator.types.sliced.Sliced2DCIterator
-
set(int[]) - Method in class neureka.ndim.iterator.types.sliced.Sliced3DCIterator
-
set(int[]) - Method in class neureka.ndim.iterator.types.sliced.SlicedNDIterator
+
set(T) - Method in class neureka.common.composition.AbstractComponentOwner
+
+
This method stores the passed component inside the component collection of this class...
+
+
set(T) - Method in interface neureka.common.composition.ComponentOwner
+
+
Use this to set a component.
+
+
Set<V> - Interface in neureka.framing.fluent
 
-
set(int[]) - Method in class neureka.ndim.iterator.types.virtual.VirtualNDIterator
+
set(int) - Method in interface neureka.framing.fluent.Set
 
-
set(int[], T) - Method in interface neureka.MutateNda
+
set(V) - Method in interface neureka.MutateNda.Item
-
Use this to place a single item at a particular position within this nd-array!
+
Set the value at the targeted position.
-
set(int[], T) - Method in interface neureka.MutateTensor
+
set(int[], T) - Method in interface neureka.MutateNda
Use this to place a single item at a particular position within this nd-array!
-
set(int, int) - Method in interface neureka.ndim.iterator.NDIterator
-
 
-
set(int, int) - Method in class neureka.ndim.iterator.types.permuted.Permuted2DCIterator
-
set(int, int) - Method in class neureka.ndim.iterator.types.permuted.Permuted3DCIterator
-
set(int, int) - Method in class neureka.ndim.iterator.types.simple.Simple1DCIterator
-
set(int, int) - Method in class neureka.ndim.iterator.types.simple.Simple2DCIterator
-
set(int, int) - Method in class neureka.ndim.iterator.types.simple.Simple3DCIterator
-
set(int, int) - Method in class neureka.ndim.iterator.types.sliced.Sliced1DCIterator
-
set(int, int) - Method in class neureka.ndim.iterator.types.sliced.Sliced2DCIterator
-
set(int, int) - Method in class neureka.ndim.iterator.types.sliced.Sliced3DCIterator
-
set(int, int) - Method in class neureka.ndim.iterator.types.sliced.SlicedNDIterator
-
 
-
set(int, int) - Method in class neureka.ndim.iterator.types.virtual.VirtualNDIterator
-
 
-
set(int, int, int, T) - Method in interface neureka.MutateNda
+
set(int, int, T) - Method in interface neureka.MutateNda
 
-
set(int, int, int, T) - Method in interface neureka.MutateTensor
-
set(int, int, T) - Method in interface neureka.MutateNda
+
set(int, int, int, T) - Method in interface neureka.MutateNda
 
-
set(int, int, T) - Method in interface neureka.MutateTensor
-
set(int, T) - Method in interface neureka.MutateNda
+
set(int, T) - Method in interface neureka.MutateNda
Individual entries for value items in this nd-array can be set via this method.
-
set(int, T) - Method in interface neureka.MutateTensor
+
set(int[], T) - Method in interface neureka.MutateTensor
+
+
Use this to place a single item at a particular position within this nd-array!
+
+
set(int, int, T) - Method in interface neureka.MutateTensor
+
set(int, int, int, T) - Method in interface neureka.MutateTensor
+
set(int, T) - Method in interface neureka.MutateTensor
Individual entries for value items in this nd-array can be set via this method.
-
set(Class<? extends DeviceAlgorithm>, Function<LoadingContext, ImplementationFor<D>>) - Method in interface neureka.backend.api.ini.ReceiveForOperation
+
set(int, int) - Method in interface neureka.ndim.iterator.NDIterator
 
-
set(Class<? extends Operation>, Class<? extends A>, Function<LoadingContext, ImplementationFor<D>>) - Method in interface neureka.backend.api.ini.ReceiveForDevice
+
set(int[]) - Method in interface neureka.ndim.iterator.NDIterator
 
-
set(BackendExtension) - Method in class neureka.backend.api.BackendContext
-
-
Registers the provided BackendExtension instance - which can then be accessed via BackendContext.find(Class).
-
-
set(Neureka) - Static method in class neureka.Neureka
+
set(int, int) - Method in class neureka.ndim.iterator.types.permuted.Permuted2DCIterator
+
set(int[]) - Method in class neureka.ndim.iterator.types.permuted.Permuted2DCIterator
+
set(int, int) - Method in class neureka.ndim.iterator.types.permuted.Permuted3DCIterator
+
set(int[]) - Method in class neureka.ndim.iterator.types.permuted.Permuted3DCIterator
+
set(int, int) - Method in class neureka.ndim.iterator.types.simple.Simple1DCIterator
+
set(int[]) - Method in class neureka.ndim.iterator.types.simple.Simple1DCIterator
+
set(int, int) - Method in class neureka.ndim.iterator.types.simple.Simple2DCIterator
+
set(int[]) - Method in class neureka.ndim.iterator.types.simple.Simple2DCIterator
+
set(int, int) - Method in class neureka.ndim.iterator.types.simple.Simple3DCIterator
+
set(int[]) - Method in class neureka.ndim.iterator.types.simple.Simple3DCIterator
+
set(int, int) - Method in class neureka.ndim.iterator.types.sliced.Sliced1DCIterator
+
set(int[]) - Method in class neureka.ndim.iterator.types.sliced.Sliced1DCIterator
+
set(int, int) - Method in class neureka.ndim.iterator.types.sliced.Sliced2DCIterator
+
set(int[]) - Method in class neureka.ndim.iterator.types.sliced.Sliced2DCIterator
+
set(int, int) - Method in class neureka.ndim.iterator.types.sliced.Sliced3DCIterator
+
set(int[]) - Method in class neureka.ndim.iterator.types.sliced.Sliced3DCIterator
+
set(int, int) - Method in class neureka.ndim.iterator.types.sliced.SlicedNDIterator
+
 
+
set(int[]) - Method in class neureka.ndim.iterator.types.sliced.SlicedNDIterator
+
 
+
set(int, int) - Method in class neureka.ndim.iterator.types.virtual.VirtualNDIterator
+
 
+
set(int[]) - Method in class neureka.ndim.iterator.types.virtual.VirtualNDIterator
+
 
+
set(Neureka) - Static method in class neureka.Neureka
Neureka is a thread local singleton.
-
set(OptimizerFactory) - Method in interface neureka.Tensor
+
set(OptimizerFactory) - Method in interface neureka.Tensor
Configures an Optimizer for this tensor based on the given OptimizerFactory which will be used to create a new Optimizer instance specific to this tensor.
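For illustration, a hedged sketch of attaching an optimizer factory to a trainable tensor. Optimizer.SGD is the static variable listed further down in this index and is assumed here to act as an OptimizerFactory; the builder chain is likewise an assumption.

    // Hedged sketch: equip a weight tensor with a stochastic-gradient-descent optimizer.
    Tensor<Float> w = Tensor.of(Float.class).withShape(3, 3).all(0.1f); // builder chain assumed
    w.setRqsGradient(true);  // let autograd accumulate gradients at this tensor
    w.set(Optimizer.SGD);    // the factory creates an Optimizer instance specific to this tensor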
-
set(T) - Method in class neureka.common.composition.AbstractComponentOwner
-
-
This method stores the passed component inside the component collection of this class...
-
-
set(T) - Method in interface neureka.common.composition.ComponentOwner
-
-
Use this to set a component.
-
-
set(V) - Method in interface neureka.MutateNda.Item
-
-
Set the value at the targeted position.
-
-
Set<V> - Interface in neureka.framing.fluent
-
 
-
setAlgorithm(Class<T>, T) - Method in interface neureka.backend.api.Operation
+
setAlgorithm(Class<T>, T) - Method in interface neureka.backend.api.Operation
Operation implementations embody a component system hosting unique Algorithm instances.
-
setAlgorithm(Class<T>, T) - Method in class neureka.backend.api.template.operations.AbstractOperation
+
setAlgorithm(T) - Method in interface neureka.backend.api.Operation
+
 
+
setAlgorithm(Class<T>, T) - Method in class neureka.backend.api.template.operations.AbstractOperation
Operation implementations embody a component system hosting unique Algorithm instances.
-
setAlgorithm(T) - Method in interface neureka.backend.api.Operation
+
setAutoConvertToFloat(boolean) - Method in class neureka.backend.ocl.CLSettings
 
-
setAutoConvertToFloat(boolean) - Method in class neureka.backend.ocl.CLSettings
-
 
-
setAutogradModeFor(ADSupportPredicate) - Method in class neureka.backend.api.template.algorithms.AbstractFunAlgorithm
+
setAutogradModeFor(ADSupportPredicate) - Method in class neureka.backend.api.template.algorithms.AbstractFunAlgorithm
A ADSupportPredicate lambda checks what kind of auto differentiation mode an Algorithm supports for a given ExecutionCall.
-
setAutogradModeFor(ADSupportPredicate) - Method in class neureka.backend.api.template.algorithms.AbstractFunDeviceAlgorithm
+
setAutogradModeFor(ADSupportPredicate) - Method in class neureka.backend.api.template.algorithms.AbstractFunDeviceAlgorithm
A ADSupportPredicate lambda checks what kind of auto differentiation mode an Algorithm supports for a given ExecutionCall.
-
setBackend(BackendContext) - Method in class neureka.Neureka
+
setBackend(BackendContext) - Method in class neureka.Neureka
Use this method to attach a backend context (for operations) to this thread local library context.
-
setCallPreparation(ExecutionPreparation) - Method in class neureka.backend.api.template.algorithms.AbstractFunDeviceAlgorithm
+
setCallPreparation(ExecutionPreparation) - Method in class neureka.backend.api.template.algorithms.AbstractFunDeviceAlgorithm
An Algorithm will produce a Result when executing an ExecutionCall.
-
setCellSize(int) - Method in class neureka.view.NDPrintSettings
+
setCellSize(int) - Method in class neureka.view.NDPrintSettings
A cell size refers to the number of characters reserved to the String representation of a single element.
-
setData(Data<T>) - Method in interface neureka.MutateTensor
+
setData(Data<T>) - Method in interface neureka.MutateTensor
At the heart of every tensor is the Data object, which holds the actual data array, a sequence of values of the same type.
-
setDataAt(int, T) - Method in interface neureka.MutateTensor
+
setDataAt(int, T) - Method in interface neureka.MutateTensor
A tensor ought to have some way to selectively modify its underlying data array.
-
setDefaultDataTypeClass(Class<?>) - Method in class neureka.Neureka.Settings.DType
+
setDefaultDataTypeClass(Class<?>) - Method in class neureka.Neureka.Settings.DType
The default data type is not relevant most of the time.
-
setDerivation(OperationBuilder.Derivation) - Method in class neureka.backend.api.template.operations.OperationBuilder
+
setDerivation(OperationBuilder.Derivation) - Method in class neureka.backend.api.template.operations.OperationBuilder
 
-
setExecution(Execution) - Method in class neureka.backend.api.template.algorithms.AbstractFunAlgorithm
+
setExecution(Execution) - Method in class neureka.backend.api.template.algorithms.AbstractFunAlgorithm
 
-
setExecution(Execution) - Method in class neureka.backend.api.template.algorithms.AbstractFunDeviceAlgorithm
+
setExecution(Execution) - Method in class neureka.backend.api.template.algorithms.AbstractFunDeviceAlgorithm
 
-
setGradientApplyRequested(boolean) - Method in interface neureka.Tensor
+
setGradientApplyRequested(boolean) - Method in interface neureka.Tensor
This flag works alongside two autograd features which can be enabled inside the library settings.
-
setHasDerivatives(boolean) - Method in class neureka.view.NDPrintSettings
-
 
-
setHasGradient(boolean) - Method in class neureka.view.NDPrintSettings
+
setHasDerivatives(boolean) - Method in class neureka.view.NDPrintSettings
 
-
setHasRecursiveGraph(boolean) - Method in class neureka.view.NDPrintSettings
+
setHasGradient(boolean) - Method in class neureka.view.NDPrintSettings
 
-
setHasShape(boolean) - Method in class neureka.view.NDPrintSettings
+
setHasRecursiveGraph(boolean) - Method in class neureka.view.NDPrintSettings
 
-
setHasSlimNumbers(boolean) - Method in class neureka.view.NDPrintSettings
+
setHasShape(boolean) - Method in class neureka.view.NDPrintSettings
 
-
setHasValue(boolean) - Method in class neureka.view.NDPrintSettings
+
setHasSlimNumbers(boolean) - Method in class neureka.view.NDPrintSettings
 
-
setImplementationFor(Class<D>, E) - Method in class neureka.backend.api.template.algorithms.AbstractDeviceAlgorithm
+
setHasValue(boolean) - Method in class neureka.view.NDPrintSettings
 
-
setImplementationFor(Class<D>, I) - Method in interface neureka.backend.api.DeviceAlgorithm
+
setImplementationFor(Class<D>, I) - Method in interface neureka.backend.api.DeviceAlgorithm
Implementations of the DeviceAlgorithm interface ought to express a compositional design pattern.
-
setIndent(String) - Method in class neureka.view.NDPrintSettings
+
setImplementationFor(Class<D>, E) - Method in class neureka.backend.api.template.algorithms.AbstractDeviceAlgorithm
 
-
setIndex(int) - Method in interface neureka.framing.fluent.AxisFrame.Set
+
setIndent(String) - Method in class neureka.view.NDPrintSettings
 
-
setIsApplyingGradientWhenRequested(boolean) - Method in class neureka.Neureka.Settings.AutoGrad
+
setIndex(int) - Method in interface neureka.framing.fluent.AxisFrame.Set
+
 
+
setIsApplyingGradientWhenRequested(boolean) - Method in class neureka.Neureka.Settings.AutoGrad
Gradients will only be applied if requested.
-
setIsApplyingGradientWhenTensorIsUsed(boolean) - Method in class neureka.Neureka.Settings.AutoGrad
+
setIsApplyingGradientWhenTensorIsUsed(boolean) - Method in class neureka.Neureka.Settings.AutoGrad
Gradients will automatically be applied (or JITed) to tensors as soon as they are being used for calculation (GraphNode instantiation).
-
setIsAutoConvertingExternalDataToJVMTypes(boolean) - Method in class neureka.Neureka.Settings.DType
+
setIsAutoConvertingExternalDataToJVMTypes(boolean) - Method in class neureka.Neureka.Settings.DType
This flag will determine if foreign data types will be converted into the next best fit (in terms of bits) or if they should be converted into something that does not mess with the representation of the data.
-
setIsCellBound(boolean) - Method in class neureka.view.NDPrintSettings
+
setIsCellBound(boolean) - Method in class neureka.view.NDPrintSettings
 
-
setIsDeletingIntermediateTensors(boolean) - Method in class neureka.Neureka.Settings.Debug
+
setIsDeletingIntermediateTensors(boolean) - Method in class neureka.Neureka.Settings.Debug
Function instances will produce hidden intermediate results when executing an array of inputs.
-
setIsIntermediate(boolean) - Method in interface neureka.MutateTensor
+
setIsIntermediate(boolean) - Method in interface neureka.MutateTensor
Intermediate tensors are internal non-user tensors which may be eligible for deletion when further consumed by a Function.
-
setIsKeepingDerivativeTargetPayloads(boolean) - Method in class neureka.Neureka.Settings.Debug
+
setIsKeepingDerivativeTargetPayloads(boolean) - Method in class neureka.Neureka.Settings.Debug
Every derivative is calculated with respect to some graph node.
-
setIsLegacy(boolean) - Method in class neureka.view.NDPrintSettings
+
setIsLegacy(boolean) - Method in class neureka.view.NDPrintSettings
This flag determines the usage of bracket types, where "[1x3]:(1, 2, 3)" would be the legacy version of "(1x3):[1, 2, 3]".
-
setIsLocked(boolean) - Method in class neureka.Neureka.Settings
+
setIsLocked(boolean) - Method in class neureka.Neureka.Settings
Can be used to lock or unlock the settings of the current thread-local Neureka instance.
-
setIsMultiline(boolean) - Method in class neureka.view.NDPrintSettings
+
setIsMultiline(boolean) - Method in class neureka.view.NDPrintSettings
 
-
setIsOnlyUsingDefaultNDConfiguration(boolean) - Method in class neureka.Neureka.Settings.NDim
+
setIsOnlyUsingDefaultNDConfiguration(boolean) - Method in class neureka.Neureka.Settings.NDim
Setting this flag determines which NDConfiguration implementations should be used for nd-arrays/tensors.
-
setIsPreventingInlineOperations(boolean) - Method in class neureka.Neureka.Settings.AutoGrad
+
setIsPreventingInlineOperations(boolean) - Method in class neureka.Neureka.Settings.AutoGrad
Inline operations are operations where the data of a tensor passed into an operation is being modified.
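For illustration, a hedged sketch of toggling autograd settings on the thread-local Neureka instance. The settings().autograd() accessor chain is inferred from the Neureka.Settings.AutoGrad entries in this index and should be treated as an assumption.

    // Hedged sketch: configure autograd behaviour for the current thread.
    Neureka.get().settings().autograd().setIsPreventingInlineOperations(true);
    Neureka.get().settings().autograd().setIsApplyingGradientWhenTensorIsUsed(false);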
-
setIsRetainingPendingErrorForJITProp(boolean) - Method in class neureka.Neureka.Settings.AutoGrad
+
setIsRetainingPendingErrorForJITProp(boolean) - Method in class neureka.Neureka.Settings.AutoGrad
This flag enables an optimization technique which only propagates error values to gradients if needed by a tensor (the tensor is used again) and otherwise accumulates them
@@ -7997,2283 +7952,2257 @@

S

If the flag is set to true
then error values will accumulate at such junction nodes.
-
setIsScientific(boolean) - Method in class neureka.view.NDPrintSettings
+
setIsScientific(boolean) - Method in class neureka.view.NDPrintSettings
 
-
setIsSuitableFor(SuitabilityPredicate) - Method in class neureka.backend.api.template.algorithms.AbstractFunAlgorithm
+
setIsSuitableFor(SuitabilityPredicate) - Method in class neureka.backend.api.template.algorithms.AbstractFunAlgorithm
The SuitabilityPredicate received by this method checks if a given instance of an ExecutionCall is suitable to be executed in ImplementationFor instances residing in this Algorithm as components.
-
setIsSuitableFor(SuitabilityPredicate) - Method in class neureka.backend.api.template.algorithms.AbstractFunDeviceAlgorithm
+
setIsSuitableFor(SuitabilityPredicate) - Method in class neureka.backend.api.template.algorithms.AbstractFunDeviceAlgorithm
The SuitabilityPredicate received by this method checks if a given instance of an ExecutionCall is suitable to be executed in ImplementationFor instances residing in this Algorithm as components.
-
setIsVirtual(boolean) - Method in interface neureka.MutateTensor
+
setIsVirtual(boolean) - Method in interface neureka.MutateTensor
Virtualizing is the opposite to actualizing a tensor.
-
setItemAt(int, T) - Method in interface neureka.MutateNda
+
setItemAt(int, T) - Method in interface neureka.MutateNda
An NDArray implementation ought to have some way to selectively modify its underlying value.
-
setItemAt(int, T) - Method in interface neureka.MutateTensor
+
setItemAt(int, T) - Method in interface neureka.MutateTensor
An NDArray implementation ought to have some way to selectively modify its underlying value.
-
setItems(Object) - Method in interface neureka.MutateNda
+
setItems(Object) - Method in interface neureka.MutateNda
This method will receive an object and try to interpret it or its contents to be set as the value for this nd-array.
-
setItems(Object) - Method in interface neureka.MutateTensor
+
setItems(Object) - Method in interface neureka.MutateTensor
This method will receive an object and try to interpret it or its contents to be set as the value for this nd-array.
-
setNDConf(NDConfiguration) - Method in interface neureka.MutateTensor
+
setNDConf(NDConfiguration) - Method in interface neureka.MutateTensor
This method sets the NDConfiguration of this NDArray.
-
setPostfix(String) - Method in class neureka.view.NDPrintSettings
+
setPostfix(String) - Method in class neureka.view.NDPrintSettings
 
-
setPrefix(String) - Method in class neureka.view.NDPrintSettings
+
setPrefix(String) - Method in class neureka.view.NDPrintSettings
 
-
setRowLimit(int) - Method in class neureka.view.NDPrintSettings
+
setRowLimit(int) - Method in class neureka.view.NDPrintSettings
Very large tensors with a rank larger than 1 might take a lot of vertical space when converted to a String.
-
setRqsGradient(boolean) - Method in interface neureka.Tensor
+
setRqsGradient(boolean) - Method in interface neureka.Tensor
Setting this flag to true will tell the autograd system to accumulate gradients at this tensor.
-
setSupplyADActionFor(ADActionSupplier) - Method in class neureka.backend.api.template.algorithms.AbstractFunDeviceAlgorithm
+
setSupplyADActionFor(ADActionSupplier) - Method in class neureka.backend.api.template.algorithms.AbstractFunDeviceAlgorithm
This method receives a ADActionSupplier which will supply ADAction instances which can perform backward and forward auto differentiation.
-
setter(At<Object, AxisFrame.Set<ValueType>>) - Method in class neureka.framing.fluent.AxisFrame.Builder
+
setter(At<Object, AxisFrame.Set<ValueType>>) - Method in class neureka.framing.fluent.AxisFrame.Builder
 
-
settings() - Method in class neureka.Neureka
+
settings() - Method in class neureka.Neureka
 
-
settings(Object) - Method in class neureka.Neureka
+
settings(Object) - Method in class neureka.Neureka
This allows you to configure Neureka using a Groovy DSL.
-
SettingsLoader - Class in neureka.common.utility
+
SettingsLoader - Class in neureka.common.utility
This class is a helper class for Neureka instances (Thread local singletons).
-
SGD<V> - Class in neureka.optimization.implementations
+
SGD<V> - Class in neureka.optimization.implementations
Stochastic Gradient Descent is an iterative optimization technique that uses the gradient of a weight variable to adjust said variable, in order to reduce the error used to calculate said gradient.
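For reference, the classic update rule this class embodies, written as plain Java rather than Neureka code; the method name and array-based representation are purely illustrative.

    // Plain-Java illustration of the SGD update rule (not Neureka code):
    // every weight is moved against its gradient, scaled by a learning rate.
    static void sgdStep(double[] weights, double[] gradients, double learningRate) {
        for (int i = 0; i < weights.length; i++)
            weights[i] -= learningRate * gradients[i];
    }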
-
SGD - Static variable in interface neureka.optimization.Optimizer
+
SGD - Static variable in interface neureka.optimization.Optimizer
 
-
SGDFactory - Class in neureka.optimization.implementations
+
SGDFactory - Class in neureka.optimization.implementations
 
-
SGDFactory() - Constructor for class neureka.optimization.implementations.SGDFactory
+
SGDFactory() - Constructor for class neureka.optimization.implementations.SGDFactory
 
-
shallowClone() - Method in interface neureka.Tensor
+
shallowClone() - Method in interface neureka.Tensor
 
-
shallowCopy() - Method in interface neureka.Nda
+
shallowCopy() - Method in interface neureka.Nda
This creates a copy where the underlying data is still the same.
-
shallowCopy() - Method in interface neureka.Tensor
+
shallowCopy() - Method in interface neureka.Tensor
This creates a copy where the underlying data is still the same.
-
shape() - Method in interface neureka.ndim.config.NDConfiguration
+
shape() - Method in interface neureka.ndim.config.NDConfiguration
This method returns an array of axis sizes.
-
shape() - Method in class neureka.ndim.config.types.permuted.Permuted1DConfiguration
+
shape(int) - Method in interface neureka.ndim.config.NDConfiguration
-
This method returns an array of axis sizes.
+
This method receives an axis index and returns the size of the axis.
-
shape() - Method in class neureka.ndim.config.types.permuted.Permuted2DConfiguration
+
shape() - Method in class neureka.ndim.config.types.permuted.Permuted1DConfiguration
This method returns an array of axis sizes.
-
shape() - Method in class neureka.ndim.config.types.permuted.Permuted3DConfiguration
+
shape(int) - Method in class neureka.ndim.config.types.permuted.Permuted1DConfiguration
-
This method returns an array of axis sizes.
+
This method receives an axis index and returns the size of the axis.
-
shape() - Method in class neureka.ndim.config.types.permuted.PermutedNDConfiguration
+
shape() - Method in class neureka.ndim.config.types.permuted.Permuted2DConfiguration
This method returns an array of axis sizes.
-
shape() - Method in class neureka.ndim.config.types.simple.Simple0DConfiguration
+
shape(int) - Method in class neureka.ndim.config.types.permuted.Permuted2DConfiguration
-
This method returns an array of axis sizes.
+
This method receives an axis index and returns the size of the axis.
-
shape() - Method in class neureka.ndim.config.types.simple.Simple1DConfiguration
+
shape() - Method in class neureka.ndim.config.types.permuted.Permuted3DConfiguration
This method returns an array of axis sizes.
-
shape() - Method in class neureka.ndim.config.types.simple.Simple2DConfiguration
+
shape(int) - Method in class neureka.ndim.config.types.permuted.Permuted3DConfiguration
-
This method returns an array of axis sizes.
+
This method receives an axis index and returns the size of the axis.
-
shape() - Method in class neureka.ndim.config.types.simple.Simple3DConfiguration
+
shape() - Method in class neureka.ndim.config.types.permuted.PermutedNDConfiguration
This method returns an array of axis sizes.
-
shape() - Method in class neureka.ndim.config.types.simple.SimpleNDConfiguration
+
shape(int) - Method in class neureka.ndim.config.types.permuted.PermutedNDConfiguration
-
This method returns an array of axis sizes.
+
This method receives an axis index and returns the size of the axis.
-
shape() - Method in class neureka.ndim.config.types.sliced.Sliced0DConfiguration
+
shape() - Method in class neureka.ndim.config.types.simple.Simple0DConfiguration
This method returns an array of axis sizes.
-
shape() - Method in class neureka.ndim.config.types.sliced.Sliced1DConfiguration
+
shape(int) - Method in class neureka.ndim.config.types.simple.Simple0DConfiguration
-
This method returns an array of axis sizes.
+
This method receives an axis index and returns the size of the axis.
-
shape() - Method in class neureka.ndim.config.types.sliced.Sliced2DConfiguration
+
shape() - Method in class neureka.ndim.config.types.simple.Simple1DConfiguration
This method returns an array of axis sizes.
-
shape() - Method in class neureka.ndim.config.types.sliced.Sliced3DConfiguration
+
shape(int) - Method in class neureka.ndim.config.types.simple.Simple1DConfiguration
-
This method returns an array of axis sizes.
+
This method receives an axis index and returns the size of the axis.
-
shape() - Method in class neureka.ndim.config.types.sliced.SlicedNDConfiguration
+
shape() - Method in class neureka.ndim.config.types.simple.Simple2DConfiguration
This method returns an array of axis sizes.
-
shape() - Method in class neureka.ndim.config.types.views.SimpleReshapeView
+
shape(int) - Method in class neureka.ndim.config.types.simple.Simple2DConfiguration
-
This method returns an array of axis sizes.
+
This method receives an axis index and returns the size of the axis.
-
shape() - Method in class neureka.ndim.config.types.views.virtual.VirtualNDConfiguration
+
shape() - Method in class neureka.ndim.config.types.simple.Simple3DConfiguration
This method returns an array of axis sizes.
-
shape() - Method in interface neureka.ndim.iterator.NDIterator
-
 
-
shape() - Method in class neureka.ndim.iterator.types.sliced.SlicedNDIterator
-
shape() - Method in class neureka.ndim.iterator.types.virtual.VirtualNDIterator
-
 
-
shape() - Method in interface neureka.ndim.NDimensional
-
 
-
shape(int) - Method in interface neureka.ndim.config.NDConfiguration
+
shape(int) - Method in class neureka.ndim.config.types.simple.Simple3DConfiguration
This method receives an axis index and returns the size of the axis.
-
shape(int) - Method in class neureka.ndim.config.types.permuted.Permuted1DConfiguration
+
shape() - Method in class neureka.ndim.config.types.simple.SimpleNDConfiguration
-
This method receives an axis index and returns the size of the axis.
+
This method returns an array of axis sizes.
-
shape(int) - Method in class neureka.ndim.config.types.permuted.Permuted2DConfiguration
+
shape(int) - Method in class neureka.ndim.config.types.simple.SimpleNDConfiguration
This method receives an axis index and returns the size of the axis.
-
shape(int) - Method in class neureka.ndim.config.types.permuted.Permuted3DConfiguration
+
shape() - Method in class neureka.ndim.config.types.sliced.Sliced0DConfiguration
-
This method receives an axis index and returns the size of the axis.
+
This method returns an array of axis sizes.
-
shape(int) - Method in class neureka.ndim.config.types.permuted.PermutedNDConfiguration
+
shape(int) - Method in class neureka.ndim.config.types.sliced.Sliced0DConfiguration
This method receives an axis index and returns the size of the axis.
-
shape(int) - Method in class neureka.ndim.config.types.simple.Simple0DConfiguration
+
shape() - Method in class neureka.ndim.config.types.sliced.Sliced1DConfiguration
-
This method receives an axis index and returns the size of the axis.
+
This method returns an array of axis sizes.
-
shape(int) - Method in class neureka.ndim.config.types.simple.Simple1DConfiguration
+
shape(int) - Method in class neureka.ndim.config.types.sliced.Sliced1DConfiguration
This method receives an axis index and returns the size of the axis.
-
shape(int) - Method in class neureka.ndim.config.types.simple.Simple2DConfiguration
+
shape() - Method in class neureka.ndim.config.types.sliced.Sliced2DConfiguration
-
This method receives an axis index and returns the size of the axis.
+
This method returns an array of axis sizes.
-
shape(int) - Method in class neureka.ndim.config.types.simple.Simple3DConfiguration
+
shape(int) - Method in class neureka.ndim.config.types.sliced.Sliced2DConfiguration
This method receives an axis index and returns the size of the axis.
-
shape(int) - Method in class neureka.ndim.config.types.simple.SimpleNDConfiguration
+
shape() - Method in class neureka.ndim.config.types.sliced.Sliced3DConfiguration
-
This method receives an axis index and returns the size of the axis.
+
This method returns an array of axis sizes.
-
shape(int) - Method in class neureka.ndim.config.types.sliced.Sliced0DConfiguration
+
shape(int) - Method in class neureka.ndim.config.types.sliced.Sliced3DConfiguration
This method receives an axis index and returns the size of the axis.
-
shape(int) - Method in class neureka.ndim.config.types.sliced.Sliced1DConfiguration
+
shape() - Method in class neureka.ndim.config.types.sliced.SlicedNDConfiguration
-
This method receives an axis index and returns the size of the axis.
+
This method returns an array of axis sizes.
-
shape(int) - Method in class neureka.ndim.config.types.sliced.Sliced2DConfiguration
+
shape(int) - Method in class neureka.ndim.config.types.sliced.SlicedNDConfiguration
This method receives an axis index and returns the size of the axis.
-
shape(int) - Method in class neureka.ndim.config.types.sliced.Sliced3DConfiguration
+
shape() - Method in class neureka.ndim.config.types.views.SimpleReshapeView
-
This method receives an axis index and returns the size of the axis.
+
This method returns an array of axis sizes.
-
shape(int) - Method in class neureka.ndim.config.types.sliced.SlicedNDConfiguration
+
shape(int) - Method in class neureka.ndim.config.types.views.SimpleReshapeView
This method receives an axis index and returns the size of the axis.
-
shape(int) - Method in class neureka.ndim.config.types.views.SimpleReshapeView
+
shape() - Method in class neureka.ndim.config.types.views.virtual.VirtualNDConfiguration
-
This method receives an axis index and returns the size of the axis.
+
This method returns an array of axis sizes.
-
shape(int) - Method in class neureka.ndim.config.types.views.virtual.VirtualNDConfiguration
+
shape(int) - Method in class neureka.ndim.config.types.views.virtual.VirtualNDConfiguration
This method receives an axis index and returns the size of the axis.
-
shape(int) - Method in interface neureka.ndim.iterator.NDIterator
+
shape(int) - Method in interface neureka.ndim.iterator.NDIterator
 
-
shape(int) - Method in class neureka.ndim.iterator.types.sliced.SlicedNDIterator
-
shape(int) - Method in class neureka.ndim.iterator.types.virtual.VirtualNDIterator
+
shape() - Method in interface neureka.ndim.iterator.NDIterator
 
-
shape(int) - Method in interface neureka.ndim.NDimensional
+
shape(int) - Method in class neureka.ndim.iterator.types.sliced.SlicedNDIterator
+
shape() - Method in class neureka.ndim.iterator.types.sliced.SlicedNDIterator
+
shape(int) - Method in class neureka.ndim.iterator.types.virtual.VirtualNDIterator
+
 
+
shape() - Method in class neureka.ndim.iterator.types.virtual.VirtualNDIterator
+
 
+
shape() - Method in interface neureka.ndim.NDimensional
+
 
+
shape(int) - Method in interface neureka.ndim.NDimensional
This method receives an axis index and returns the size of the targeted axis / dimension.
-
Shape - Interface in neureka
+
Shape - Interface in neureka
Basically a tuple of integers which is used to describe the shape of an array.
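For illustration, a hedged sketch of working with a Shape. The static factory Shape.of(...) is an assumption based on common Neureka conventions; size() is listed for neureka.Shape elsewhere in this index.

    // Hedged sketch: a shape is a tuple of axis sizes.
    Shape shape = Shape.of(2, 3, 4); // Shape.of(...) is assumed, not verified here
    int rank = shape.size();         // size() per this index; assumed to be the number of axes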
-
shaped(int...) - Static method in interface neureka.Nda
+
shaped(int...) - Static method in interface neureka.Nda
Returns a Collector that accumulates the input elements into a new Nda with the specified shape.
-
shaped(int...) - Static method in interface neureka.Tensor
+
shaped(int...) - Static method in interface neureka.Tensor
Returns a Collector that accumulates the input elements into a new Tensor with the specified shape.
-
shaped(Shape) - Static method in interface neureka.Tensor
+
shaped(Shape) - Static method in interface neureka.Tensor
Returns a Collector that accumulates the input elements into a new Tensor with the specified shape.
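For illustration, a hedged usage sketch of the Collector returned by shaped(...): a stream of values is gathered directly into a tensor of the given shape. Element order and exact generics are assumptions.

    import java.util.stream.Stream;

    // Hedged sketch: gather six stream elements into a 2x3 tensor via the Collector
    // returned by Tensor.shaped(...). Ordering and generics are assumptions.
    Tensor<Integer> t = Stream.of(1, 2, 3, 4, 5, 6).collect(Tensor.shaped(2, 3));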
-
shapeOfCon(int[], int[]) - Static method in class neureka.backend.main.operations.ConvUtil
+
shapeOfCon(int[], int[]) - Static method in class neureka.backend.main.operations.ConvUtil
+
 
+
shapeString(int[]) - Static method in class neureka.ndim.NDUtil
 
-
shapeString(int[]) - Static method in class neureka.ndim.NDUtil
+
shortToBigInteger(short[]) - Static method in class neureka.common.utility.DataConverter.Utility
 
-
shortToBigInteger(short[]) - Static method in class neureka.common.utility.DataConverter.Utility
+
shortToByte(short[]) - Static method in class neureka.common.utility.DataConverter.Utility
 
-
shortToByte(short[]) - Static method in class neureka.common.utility.DataConverter.Utility
+
shortToDouble(short[]) - Static method in class neureka.common.utility.DataConverter.Utility
 
-
shortToDouble(short[]) - Static method in class neureka.common.utility.DataConverter.Utility
+
shortToFloat(short[]) - Static method in class neureka.common.utility.DataConverter.Utility
 
-
shortToFloat(short[]) - Static method in class neureka.common.utility.DataConverter.Utility
+
shortToInt(short[]) - Static method in class neureka.common.utility.DataConverter.Utility
 
-
shortToInt(short[]) - Static method in class neureka.common.utility.DataConverter.Utility
+
shortToLong(short[]) - Static method in class neureka.common.utility.DataConverter.Utility
 
-
shortToLong(short[]) - Static method in class neureka.common.utility.DataConverter.Utility
+
sig(double) - Static method in class neureka.backend.main.implementations.fun.ScalarSigmoid
 
-
sig() - Method in interface neureka.Tensor
+
sig() - Method in interface neureka.Tensor
This method is functionally identical to the following alternatives:
-
sig(double) - Static method in class neureka.backend.main.implementations.fun.ScalarSigmoid
+
SIGMOID - Static variable in interface neureka.backend.main.implementations.fun.api.ScalarFun
 
-
sigmoid() - Method in class neureka.math.Functions
+
Sigmoid - Class in neureka.backend.main.operations.functions
 
-
sigmoid() - Method in interface neureka.Tensor
+
Sigmoid() - Constructor for class neureka.backend.main.operations.functions.Sigmoid
 
-
Sigmoid - Class in neureka.backend.main.operations.functions
+
sigmoid() - Method in class neureka.math.Functions
 
-
Sigmoid() - Constructor for class neureka.backend.main.operations.functions.Sigmoid
+
sigmoid() - Method in interface neureka.Tensor
 
-
SIGMOID - Static variable in interface neureka.backend.main.implementations.fun.api.ScalarFun
+
signed() - Method in class neureka.dtype.custom.F32
 
-
signed() - Method in class neureka.dtype.custom.F32
+
signed() - Method in class neureka.dtype.custom.F64
 
-
signed() - Method in class neureka.dtype.custom.F64
+
signed() - Method in class neureka.dtype.custom.I16
 
-
signed() - Method in class neureka.dtype.custom.I16
+
signed() - Method in class neureka.dtype.custom.I32
 
-
signed() - Method in class neureka.dtype.custom.I32
+
signed() - Method in class neureka.dtype.custom.I64
 
-
signed() - Method in class neureka.dtype.custom.I64
+
signed() - Method in class neureka.dtype.custom.I8
 
-
signed() - Method in class neureka.dtype.custom.I8
+
signed() - Method in class neureka.dtype.custom.UI16
 
-
signed() - Method in class neureka.dtype.custom.UI16
+
signed() - Method in class neureka.dtype.custom.UI32
 
-
signed() - Method in class neureka.dtype.custom.UI32
+
signed() - Method in class neureka.dtype.custom.UI64
 
-
signed() - Method in class neureka.dtype.custom.UI64
+
signed() - Method in class neureka.dtype.custom.UI8
 
-
signed() - Method in class neureka.dtype.custom.UI8
-
 
-
signed() - Method in interface neureka.dtype.NumericType
+
signed() - Method in interface neureka.dtype.NumericType
This boolean value tells if the data-type represented by concrete instances of implementations of this interface is signed!
-
silu() - Method in class neureka.math.Functions
+
SILU - Static variable in interface neureka.backend.main.implementations.fun.api.ScalarFun
+
 
+
silu(double) - Static method in class neureka.backend.main.implementations.fun.ScalarSiLU
+
 
+
SiLU - Class in neureka.backend.main.operations.functions
The SiLu activation function, also known as the swish function, is defined as x * sigmoid(x).
-
silu(double) - Static method in class neureka.backend.main.implementations.fun.ScalarSiLU
+
SiLU() - Constructor for class neureka.backend.main.operations.functions.SiLU
 
-
SiLU - Class in neureka.backend.main.operations.functions
+
silu() - Method in class neureka.math.Functions
The SiLu activation function, also known as the swish function, is defined as x * sigmoid(x).
-
SiLU() - Constructor for class neureka.backend.main.operations.functions.SiLU
-
 
-
SILU - Static variable in interface neureka.backend.main.implementations.fun.api.ScalarFun
-
 
-
similarity(String, String) - Static method in class neureka.math.parsing.ParseUtil
+
similarity(String, String) - Static method in class neureka.math.parsing.ParseUtil
-
This method estimates the similarity between 2 provided String instances.
+
This method estimates the similarity between 2 provided String instances.
-
SIMPLE - Enum constant in enum class neureka.autograd.GraphNode.Print
-
 
-
SIMPLE - Enum constant in enum class neureka.ndim.config.NDTrait
+
Simple0DConfiguration - Class in neureka.ndim.config.types.simple
 
-
Simple0DConfiguration - Class in neureka.ndim.config.types.simple
+
Simple1DCIterator - Class in neureka.ndim.iterator.types.simple
 
-
Simple1DCIterator - Class in neureka.ndim.iterator.types.simple
+
Simple1DCIterator(Simple1DConfiguration) - Constructor for class neureka.ndim.iterator.types.simple.Simple1DCIterator
 
-
Simple1DCIterator(Simple1DConfiguration) - Constructor for class neureka.ndim.iterator.types.simple.Simple1DCIterator
+
Simple1DConfiguration - Class in neureka.ndim.config.types.simple
 
-
Simple1DConfiguration - Class in neureka.ndim.config.types.simple
+
Simple1DConfiguration(int, int) - Constructor for class neureka.ndim.config.types.simple.Simple1DConfiguration
 
-
Simple1DConfiguration(int, int) - Constructor for class neureka.ndim.config.types.simple.Simple1DConfiguration
+
Simple2DCIterator - Class in neureka.ndim.iterator.types.simple
 
-
Simple2DCIterator - Class in neureka.ndim.iterator.types.simple
+
Simple2DCIterator(Simple2DConfiguration) - Constructor for class neureka.ndim.iterator.types.simple.Simple2DCIterator
 
-
Simple2DCIterator(Simple2DConfiguration) - Constructor for class neureka.ndim.iterator.types.simple.Simple2DCIterator
+
Simple2DConfiguration - Class in neureka.ndim.config.types.simple
 
-
Simple2DConfiguration - Class in neureka.ndim.config.types.simple
+
Simple2DConfiguration(int[], int[]) - Constructor for class neureka.ndim.config.types.simple.Simple2DConfiguration
 
-
Simple2DConfiguration(int[], int[]) - Constructor for class neureka.ndim.config.types.simple.Simple2DConfiguration
+
Simple3DCIterator - Class in neureka.ndim.iterator.types.simple
 
-
Simple3DCIterator - Class in neureka.ndim.iterator.types.simple
+
Simple3DCIterator(Simple3DConfiguration) - Constructor for class neureka.ndim.iterator.types.simple.Simple3DCIterator
 
-
Simple3DCIterator(Simple3DConfiguration) - Constructor for class neureka.ndim.iterator.types.simple.Simple3DCIterator
+
Simple3DConfiguration - Class in neureka.ndim.config.types.simple
 
-
Simple3DConfiguration - Class in neureka.ndim.config.types.simple
+
Simple3DConfiguration(int[], int[]) - Constructor for class neureka.ndim.config.types.simple.Simple3DConfiguration
 
-
Simple3DConfiguration(int[], int[]) - Constructor for class neureka.ndim.config.types.simple.Simple3DConfiguration
+
SimpleCLImplementation - Class in neureka.backend.main.implementations
 
-
SimpleCLImplementation - Class in neureka.backend.main.implementations
+
SimpleCLImplementation(ImplementationFor<OpenCLDevice>, int, String, String) - Constructor for class neureka.backend.main.implementations.SimpleCLImplementation
 
-
SimpleCLImplementation(ImplementationFor<OpenCLDevice>, int, String, String) - Constructor for class neureka.backend.main.implementations.SimpleCLImplementation
+
SimpleNDConfiguration - Class in neureka.ndim.config.types.simple
 
-
SimpleNDConfiguration - Class in neureka.ndim.config.types.simple
+
SimpleNDConfiguration(int[], int[]) - Constructor for class neureka.ndim.config.types.simple.SimpleNDConfiguration
 
-
SimpleNDConfiguration(int[], int[]) - Constructor for class neureka.ndim.config.types.simple.SimpleNDConfiguration
+
SimpleReshapeView - Class in neureka.ndim.config.types.views
 
-
SimpleReshapeView - Class in neureka.ndim.config.types.views
+
SimpleReshapeView(int[], NDConfiguration) - Constructor for class neureka.ndim.config.types.views.SimpleReshapeView
 
-
SimpleReshapeView(int[], NDConfiguration) - Constructor for class neureka.ndim.config.types.views.SimpleReshapeView
+
sin() - Method in class neureka.math.Functions
 
-
sin() - Method in class neureka.math.Functions
-
 
-
sin() - Method in interface neureka.Tensor
+
sin() - Method in interface neureka.Tensor
This method is functionally identical to the following alternatives:
-
singleFPConfig() - Method in class neureka.devices.opencl.OpenCLDevice
+
singleFPConfig() - Method in class neureka.devices.opencl.OpenCLDevice
 
-
Sinus - Class in neureka.backend.main.operations.functions
+
SINUS - Static variable in interface neureka.backend.main.implementations.fun.api.ScalarFun
 
-
Sinus() - Constructor for class neureka.backend.main.operations.functions.Sinus
+
Sinus - Class in neureka.backend.main.operations.functions
 
-
SINUS - Static variable in interface neureka.backend.main.implementations.fun.api.ScalarFun
+
Sinus() - Constructor for class neureka.backend.main.operations.functions.Sinus
 
-
size() - Method in class neureka.autograd.GraphNode
+
size() - Method in class neureka.autograd.GraphNode
This is the number of AD-actions stored inside this node.
-
size() - Method in class neureka.backend.api.BackendContext
+
size() - Method in class neureka.backend.api.BackendContext
+
 
+
size() - Method in class neureka.common.utility.Cache
 
-
size() - Method in class neureka.common.utility.Cache
+
size() - Method in interface neureka.ndim.config.NDConfiguration
 
-
size() - Method in interface neureka.ndim.config.NDConfiguration
+
size() - Method in interface neureka.ndim.NDimensional
 
-
size() - Method in interface neureka.ndim.NDimensional
+
size() - Method in interface neureka.Shape
 
-
size() - Method in interface neureka.Shape
+
sizeOfShape(int[]) - Static method in class neureka.ndim.config.NDConfiguration.Utility
 
-
sizeOfShape(int[]) - Static method in class neureka.ndim.config.NDConfiguration.Utility
+
Slice - Class in neureka.backend.main.operations.other
 
-
slice() - Method in interface neureka.Nda
+
Slice() - Constructor for class neureka.backend.main.operations.other.Slice
+
 
+
slice(Object[], Tensor<ValType>) - Static method in class neureka.fluent.slicing.SmartSlicer
+
 
+
slice() - Method in interface neureka.Nda
This method returns a SliceBuilder instance exposing a simple builder API which enables the configuration of a slice of the current nd-array via method chaining.
-
slice() - Method in interface neureka.Tensor
+
slice(int, int) - Method in interface neureka.Shape
+
 
+
slice(int) - Method in interface neureka.Shape
+
 
+
slice() - Method in interface neureka.Tensor
This method returns a SliceBuilder instance exposing a simple builder API which enables the configuration of a slice of the current nd-array via method chaining.
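For illustration, a hedged sketch of the fluent slicing chain started by slice(). The step names (axis/from/to/at/get) are taken from the SliceBuilder and AxisSliceBuilder entries in this index, but the exact chain is an assumption.

    // Hedged sketch of the slice builder API; the step names are assumptions.
    Tensor<Double> block = t.slice()
                            .axis(0).from(0).to(1)  // keep rows 0..1
                            .axis(1).at(2)          // keep only column 2
                            .get();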
-
slice(int) - Method in interface neureka.Shape
-
 
-
slice(int, int) - Method in interface neureka.Shape
-
 
-
slice(Object[], Tensor<ValType>) - Static method in class neureka.fluent.slicing.SmartSlicer
-
 
-
Slice - Class in neureka.backend.main.operations.other
-
 
-
Slice() - Constructor for class neureka.backend.main.operations.other.Slice
-
 
-
SliceBuilder<V> - Class in neureka.fluent.slicing
+
SliceBuilder<V> - Class in neureka.fluent.slicing
This class is the heart of the slice builder API, collecting range configurations by exposing an API consisting of multiple interfaces which form a call state transition graph.
-
SliceBuilder(Tensor<V>) - Constructor for class neureka.fluent.slicing.SliceBuilder
+
SliceBuilder(Tensor<V>) - Constructor for class neureka.fluent.slicing.SliceBuilder
An instance of a slice builder does not perform the actual slicing itself! Instead, it merely serves as a collector of slice configuration data.
-
sliceCount() - Method in interface neureka.Nda
+
sliceCount() - Method in interface neureka.Nda
This method returns the number of slices which have been created from this nd-array.
-
sliceCount() - Method in interface neureka.Tensor
+
sliceCount() - Method in interface neureka.Tensor
This method returns the number of slices which have been created from this nd-array.
-
Sliced0DConfiguration - Class in neureka.ndim.config.types.sliced
+
Sliced0DConfiguration - Class in neureka.ndim.config.types.sliced
 
-
Sliced0DConfiguration(int, int) - Constructor for class neureka.ndim.config.types.sliced.Sliced0DConfiguration
+
Sliced0DConfiguration(int, int) - Constructor for class neureka.ndim.config.types.sliced.Sliced0DConfiguration
 
-
Sliced1DCIterator - Class in neureka.ndim.iterator.types.sliced
+
Sliced1DCIterator - Class in neureka.ndim.iterator.types.sliced
 
-
Sliced1DCIterator(Sliced1DConfiguration) - Constructor for class neureka.ndim.iterator.types.sliced.Sliced1DCIterator
+
Sliced1DCIterator(Sliced1DConfiguration) - Constructor for class neureka.ndim.iterator.types.sliced.Sliced1DCIterator
 
-
Sliced1DConfiguration - Class in neureka.ndim.config.types.sliced
+
Sliced1DConfiguration - Class in neureka.ndim.config.types.sliced
 
-
Sliced1DConfiguration(int, int, int, int, int) - Constructor for class neureka.ndim.config.types.sliced.Sliced1DConfiguration
+
Sliced1DConfiguration(int, int, int, int, int) - Constructor for class neureka.ndim.config.types.sliced.Sliced1DConfiguration
 
-
Sliced2DCIterator - Class in neureka.ndim.iterator.types.sliced
+
Sliced2DCIterator - Class in neureka.ndim.iterator.types.sliced
 
-
Sliced2DCIterator(Sliced2DConfiguration) - Constructor for class neureka.ndim.iterator.types.sliced.Sliced2DCIterator
+
Sliced2DCIterator(Sliced2DConfiguration) - Constructor for class neureka.ndim.iterator.types.sliced.Sliced2DCIterator
 
-
Sliced2DConfiguration - Class in neureka.ndim.config.types.sliced
+
Sliced2DConfiguration - Class in neureka.ndim.config.types.sliced
 
-
Sliced2DConfiguration(int[], int[], int[], int[], int[]) - Constructor for class neureka.ndim.config.types.sliced.Sliced2DConfiguration
+
Sliced2DConfiguration(int[], int[], int[], int[], int[]) - Constructor for class neureka.ndim.config.types.sliced.Sliced2DConfiguration
 
-
Sliced3DCIterator - Class in neureka.ndim.iterator.types.sliced
+
Sliced3DCIterator - Class in neureka.ndim.iterator.types.sliced
 
-
Sliced3DCIterator(Sliced3DConfiguration) - Constructor for class neureka.ndim.iterator.types.sliced.Sliced3DCIterator
+
Sliced3DCIterator(Sliced3DConfiguration) - Constructor for class neureka.ndim.iterator.types.sliced.Sliced3DCIterator
 
-
Sliced3DConfiguration - Class in neureka.ndim.config.types.sliced
+
Sliced3DConfiguration - Class in neureka.ndim.config.types.sliced
 
-
Sliced3DConfiguration(int[], int[], int[], int[], int[]) - Constructor for class neureka.ndim.config.types.sliced.Sliced3DConfiguration
+
Sliced3DConfiguration(int[], int[], int[], int[], int[]) - Constructor for class neureka.ndim.config.types.sliced.Sliced3DConfiguration
 
-
SlicedNDConfiguration - Class in neureka.ndim.config.types.sliced
+
SlicedNDConfiguration - Class in neureka.ndim.config.types.sliced
 
-
SlicedNDConfiguration(int[], int[], int[], int[], int[]) - Constructor for class neureka.ndim.config.types.sliced.SlicedNDConfiguration
+
SlicedNDConfiguration(int[], int[], int[], int[], int[]) - Constructor for class neureka.ndim.config.types.sliced.SlicedNDConfiguration
 
-
SlicedNDIterator - Class in neureka.ndim.iterator.types.sliced
+
SlicedNDIterator - Class in neureka.ndim.iterator.types.sliced
 
-
SlicedNDIterator(NDConfiguration) - Constructor for class neureka.ndim.iterator.types.sliced.SlicedNDIterator
+
SlicedNDIterator(NDConfiguration) - Constructor for class neureka.ndim.iterator.types.sliced.SlicedNDIterator
 
-
SmartSlicer - Class in neureka.fluent.slicing
+
SmartSlicer - Class in neureka.fluent.slicing
This class is responsible for receiving any input and trying to interpret it so that a slice can be formed.
-
SmartSlicer() - Constructor for class neureka.fluent.slicing.SmartSlicer
+
SmartSlicer() - Constructor for class neureka.fluent.slicing.SmartSlicer
 
-
softmax() - Method in interface neureka.Tensor
+
softmax() - Method in interface neureka.Tensor
 
-
softmax(int) - Method in interface neureka.Tensor
+
softmax(int) - Method in interface neureka.Tensor
 
-
softmax(int...) - Method in interface neureka.Tensor
+
softmax(int...) - Method in interface neureka.Tensor
Calculates the softmax function along the specified axes.
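For reference, the plain definition of softmax that this operation computes along the chosen axes, written as ordinary Java rather than Neureka code.

    import java.util.Arrays;

    // Reference definition (not Neureka code): y_i = exp(x_i - max) / sum_j exp(x_j - max),
    // shifted by the maximum for numerical stability.
    static double[] softmax(double[] x) {
        double max = Arrays.stream(x).max().orElse(0.0);
        double[] e = Arrays.stream(x).map(v -> Math.exp(v - max)).toArray();
        double sum = Arrays.stream(e).sum();
        return Arrays.stream(e).map(v -> v / sum).toArray();
    }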
-
softplus() - Method in class neureka.math.Functions
+
SOFTPLUS - Static variable in interface neureka.backend.main.implementations.fun.api.ScalarFun
+
 
+
Softplus - Class in neureka.backend.main.operations.functions
+
+
SoftPlus is a smooth approximation to the ReLU function and can be used to constrain the output of a machine to always be positive.
+
+
Softplus() - Constructor for class neureka.backend.main.operations.functions.Softplus
+
 
+
softplus() - Method in class neureka.math.Functions
SoftPlus is a smooth approximation to the ReLU function and can be used to constrain the output of a machine to always be positive.
-
softplus() - Method in interface neureka.Tensor
+
softplus() - Method in interface neureka.Tensor
This method is functionally identical to the following alternatives:
-
Softplus - Class in neureka.backend.main.operations.functions
-
-
SoftPlus is a smooth approximation to the ReLU function and can be used to constrain the output of a machine to always be positive.
-
-
Softplus() - Constructor for class neureka.backend.main.operations.functions.Softplus
+
SOFTSIGN - Static variable in interface neureka.backend.main.implementations.fun.api.ScalarFun
+
 
+
softsign(double) - Static method in class neureka.backend.main.implementations.fun.ScalarSoftsign
 
-
SOFTPLUS - Static variable in interface neureka.backend.main.implementations.fun.api.ScalarFun
+
softsign(float) - Static method in class neureka.backend.main.implementations.fun.ScalarSoftsign
 
-
softsign() - Method in class neureka.math.Functions
+
Softsign - Class in neureka.backend.main.operations.functions
The softsign function, defined as x / ( 1 + Math.abs( x ) ), is a computationally cheap 0 centered activation function which rescales the inputs between -1 and 1, very much like the Tanh function.
-
softsign(double) - Static method in class neureka.backend.main.implementations.fun.ScalarSoftsign
+
Softsign() - Constructor for class neureka.backend.main.operations.functions.Softsign
 
-
softsign(float) - Static method in class neureka.backend.main.implementations.fun.ScalarSoftsign
-
 
-
Softsign - Class in neureka.backend.main.operations.functions
+
softsign() - Method in class neureka.math.Functions
The softsign function, defined as x / ( 1 + Math.abs( x ) ), is a computationally cheap 0 centered activation function which rescales the inputs between -1 and 1, very much like the Tanh function.
-
Softsign() - Constructor for class neureka.backend.main.operations.functions.Softsign
-
 
-
SOFTSIGN - Static variable in interface neureka.backend.main.implementations.fun.api.ScalarFun
+
spaces(int) - Static method in class neureka.view.NdaAsString.Util
 
-
spaces(int) - Static method in class neureka.view.NdaAsString.Util
-
 
-
spread() - Method in interface neureka.ndim.config.NDConfiguration
+
spread() - Method in interface neureka.ndim.config.NDConfiguration
The spread is the access step size of a slice within the n-dimensional data array of its parent tensor.
-
spread() - Method in class neureka.ndim.config.types.permuted.Permuted1DConfiguration
+
spread(int) - Method in interface neureka.ndim.config.NDConfiguration
The spread is the access step size of a slice within the n-dimensional data array of its parent tensor.
-
spread() - Method in class neureka.ndim.config.types.permuted.Permuted2DConfiguration
+
spread() - Method in class neureka.ndim.config.types.permuted.Permuted1DConfiguration
The spread is the access step size of a slice within the n-dimensional data array of its parent tensor.
-
spread() - Method in class neureka.ndim.config.types.permuted.Permuted3DConfiguration
+
spread(int) - Method in class neureka.ndim.config.types.permuted.Permuted1DConfiguration
The spread is the access step size of a slice within the n-dimensional data array of its parent tensor.
-
spread() - Method in class neureka.ndim.config.types.permuted.PermutedNDConfiguration
+
spread() - Method in class neureka.ndim.config.types.permuted.Permuted2DConfiguration
The spread is the access step size of a slice within the n-dimensional data array of its parent tensor.
-
spread() - Method in class neureka.ndim.config.types.simple.Simple0DConfiguration
+
spread(int) - Method in class neureka.ndim.config.types.permuted.Permuted2DConfiguration
The spread is the access step size of a slice within the n-dimensional data array of its parent tensor.
-
spread() - Method in class neureka.ndim.config.types.simple.Simple1DConfiguration
+
spread() - Method in class neureka.ndim.config.types.permuted.Permuted3DConfiguration
The spread is the access step size of a slice within the n-dimensional data array of its parent tensor.
-
spread() - Method in class neureka.ndim.config.types.simple.Simple2DConfiguration
+
spread(int) - Method in class neureka.ndim.config.types.permuted.Permuted3DConfiguration
The spread is the access step size of a slice within the n-dimensional data array of its parent tensor.
-
spread() - Method in class neureka.ndim.config.types.simple.Simple3DConfiguration
+
spread() - Method in class neureka.ndim.config.types.permuted.PermutedNDConfiguration
The spread is the access step size of a slice within the n-dimensional data array of its parent tensor.
-
spread() - Method in class neureka.ndim.config.types.simple.SimpleNDConfiguration
+
spread(int) - Method in class neureka.ndim.config.types.permuted.PermutedNDConfiguration
The spread is the access step size of a slice within the n-dimensional data array of its parent tensor.
-
spread() - Method in class neureka.ndim.config.types.sliced.Sliced0DConfiguration
+
spread() - Method in class neureka.ndim.config.types.simple.Simple0DConfiguration
The spread is the access step size of a slice within the n-dimensional data array of its parent tensor.
-
spread() - Method in class neureka.ndim.config.types.sliced.Sliced1DConfiguration
+
spread(int) - Method in class neureka.ndim.config.types.simple.Simple0DConfiguration
The spread is the access step size of a slice within the n-dimensional data array of its parent tensor.
-
spread() - Method in class neureka.ndim.config.types.sliced.Sliced2DConfiguration
+
spread() - Method in class neureka.ndim.config.types.simple.Simple1DConfiguration
The spread is the access step size of a slice within the n-dimensional data array of its parent tensor.
-
spread() - Method in class neureka.ndim.config.types.sliced.Sliced3DConfiguration
+
spread(int) - Method in class neureka.ndim.config.types.simple.Simple1DConfiguration
The spread is the access step size of a slice within the n-dimensional data array of its parent tensor.
-
spread() - Method in class neureka.ndim.config.types.sliced.SlicedNDConfiguration
+
spread() - Method in class neureka.ndim.config.types.simple.Simple2DConfiguration
The spread is the access step size of a slice within the n-dimensional data array of its parent tensor.
-
spread() - Method in class neureka.ndim.config.types.views.SimpleReshapeView
+
spread(int) - Method in class neureka.ndim.config.types.simple.Simple2DConfiguration
The spread is the access step size of a slice within the n-dimensional data array of its parent tensor.
-
spread() - Method in class neureka.ndim.config.types.views.virtual.VirtualNDConfiguration
+
spread() - Method in class neureka.ndim.config.types.simple.Simple3DConfiguration
The spread is the access step size of a slice within the n-dimensional data array of its parent tensor.
-
spread() - Method in interface neureka.ndim.NDimensional
-
 
-
spread(int) - Method in interface neureka.ndim.config.NDConfiguration
+
spread(int) - Method in class neureka.ndim.config.types.simple.Simple3DConfiguration
The spread is the access step size of a slice within the n-dimensional data array of its parent tensor.
-
spread(int) - Method in class neureka.ndim.config.types.permuted.Permuted1DConfiguration
+
spread() - Method in class neureka.ndim.config.types.simple.SimpleNDConfiguration
The spread is the access step size of a slice within the n-dimensional data array of its parent tensor.
-
spread(int) - Method in class neureka.ndim.config.types.permuted.Permuted2DConfiguration
+
spread(int) - Method in class neureka.ndim.config.types.simple.SimpleNDConfiguration
The spread is the access step size of a slice within the n-dimensional data array of its parent tensor.
-
spread(int) - Method in class neureka.ndim.config.types.permuted.Permuted3DConfiguration
+
spread() - Method in class neureka.ndim.config.types.sliced.Sliced0DConfiguration
The spread is the access step size of a slice within the n-dimensional data array of its parent tensor.
-
spread(int) - Method in class neureka.ndim.config.types.permuted.PermutedNDConfiguration
+
spread(int) - Method in class neureka.ndim.config.types.sliced.Sliced0DConfiguration
The spread is the access step size of a slice within the n-dimensional data array of its parent tensor.
-
spread(int) - Method in class neureka.ndim.config.types.simple.Simple0DConfiguration
+
spread() - Method in class neureka.ndim.config.types.sliced.Sliced1DConfiguration
The spread is the access step size of a slice within the n-dimensional data array of its parent tensor.
-
spread(int) - Method in class neureka.ndim.config.types.simple.Simple1DConfiguration
+
spread(int) - Method in class neureka.ndim.config.types.sliced.Sliced1DConfiguration
The spread is the access step size of a slice within the n-dimensional data array of its parent tensor.
-
spread(int) - Method in class neureka.ndim.config.types.simple.Simple2DConfiguration
+
spread() - Method in class neureka.ndim.config.types.sliced.Sliced2DConfiguration
The spread is the access step size of a slice within the n-dimensional data array of its parent tensor.
-
spread(int) - Method in class neureka.ndim.config.types.simple.Simple3DConfiguration
+
spread(int) - Method in class neureka.ndim.config.types.sliced.Sliced2DConfiguration
The spread is the access step size of a slice within the n-dimensional data array of its parent tensor.
-
spread(int) - Method in class neureka.ndim.config.types.simple.SimpleNDConfiguration
+
spread() - Method in class neureka.ndim.config.types.sliced.Sliced3DConfiguration
The spread is the access step size of a slice within the n-dimensional data array of its parent tensor.
-
spread(int) - Method in class neureka.ndim.config.types.sliced.Sliced0DConfiguration
+
spread(int) - Method in class neureka.ndim.config.types.sliced.Sliced3DConfiguration
The spread is the access step size of a slice within the n-dimensional data array of its parent tensor.
-
spread(int) - Method in class neureka.ndim.config.types.sliced.Sliced1DConfiguration
+
spread() - Method in class neureka.ndim.config.types.sliced.SlicedNDConfiguration
The spread is the access step size of a slice within the n-dimensional data array of its parent tensor.
-
spread(int) - Method in class neureka.ndim.config.types.sliced.Sliced2DConfiguration
+
spread(int) - Method in class neureka.ndim.config.types.sliced.SlicedNDConfiguration
The spread is the access step size of a slice within the n-dimensional data array of its parent tensor.
-
spread(int) - Method in class neureka.ndim.config.types.sliced.Sliced3DConfiguration
+
spread() - Method in class neureka.ndim.config.types.views.SimpleReshapeView
The spread is the access step size of a slice within the n-dimensional data array of its parent tensor.
-
spread(int) - Method in class neureka.ndim.config.types.sliced.SlicedNDConfiguration
+
spread(int) - Method in class neureka.ndim.config.types.views.SimpleReshapeView
The spread is the access step size of a slice within the n-dimensional data array of its parent tensor.
-
spread(int) - Method in class neureka.ndim.config.types.views.SimpleReshapeView
+
spread() - Method in class neureka.ndim.config.types.views.virtual.VirtualNDConfiguration
The spread is the access step size of a slice within the n-dimensional data array of its parent tensor.
-
spread(int) - Method in class neureka.ndim.config.types.views.virtual.VirtualNDConfiguration
+
spread(int) - Method in class neureka.ndim.config.types.views.virtual.VirtualNDConfiguration
The spread is the access step size of a slice within the n-dimensional data array of its parent tensor.
-
sqrt() - Method in class neureka.math.Functions
+
spread() - Method in interface neureka.ndim.NDimensional
 
-
sqrt() - Method in interface neureka.Tensor
-
-
This method is functionally identical to the following alternatives:
-
-
Sqrt - Class in neureka.backend.main.operations.functions
+
SQRT - Static variable in interface neureka.backend.main.implementations.fun.api.ScalarFun
+
 
+
Sqrt - Class in neureka.backend.main.operations.functions
 
-
Sqrt() - Constructor for class neureka.backend.main.operations.functions.Sqrt
+
Sqrt() - Constructor for class neureka.backend.main.operations.functions.Sqrt
 
-
SQRT - Static variable in interface neureka.backend.main.implementations.fun.api.ScalarFun
+
sqrt() - Method in class neureka.math.Functions
 
-
StaticKernelSource - Interface in neureka.devices.opencl
+
sqrt() - Method in interface neureka.Tensor
+
+
This method is functionally identical to the following alternatives:
+
+
StaticKernelSource - Interface in neureka.devices.opencl
 
-
step(double) - Method in class neureka.fluent.building.NdaBuilder
+
step(double) - Method in class neureka.fluent.building.NdaBuilder
 
-
step(double) - Method in interface neureka.fluent.building.states.Step
+
Step<V> - Interface in neureka.fluent.building.states
+
+
This interface defines the last step in the call transition graph of the fluent builder API when + building a Tensor instance populated based on the values within a defined range.
+
+
step(double) - Method in interface neureka.fluent.building.states.Step
This is the last step in the call transition graph of the fluent builder API when building a Tensor instance populated based on the values within a defined range.
-
step(double) - Method in interface neureka.fluent.building.states.StepForTensor
+
step(double) - Method in interface neureka.fluent.building.states.StepForTensor
This is the last step in the call transition graph of the fluent builder API when building a Tensor instance populated based on the values within a defined range.
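For orientation, a hedged sketch of how the range-building steps above fit together. Tensor.of(Class), withShape(...) and the exact chaining order are assumptions here; andFillFrom(Object), to(V) and step(double) are the entries listed in this index.

    import neureka.Tensor;

    public final class RangeBuildDemo {
        public static void main(String[] args) {
            Tensor<Double> t = Tensor.of(Double.class) // assumed entry point into the fluent builder
                    .withShape(2, 4)                   // assumed shape step
                    .andFillFrom(0.0)                  // start of the value range
                    .to(7.0)                           // end of the value range
                    .step(1.0);                        // step(double): the last step of the builder
            System.out.println(t);
        }
    }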
-
step(int) - Method in class neureka.fluent.slicing.AxisSliceBuilder
+
step(int) - Method in class neureka.fluent.slicing.AxisSliceBuilder
This method returns this very AxisSliceBuilder instance disguised by the AxisOrGet interface.
-
step(int) - Method in interface neureka.fluent.slicing.states.StepsOrAxisOrGet
+
step(int) - Method in interface neureka.fluent.slicing.states.StepsOrAxisOrGet
This method allows one to specify a step size within the slice range previously specified for the currently sliced axis.
-
step(int) - Method in interface neureka.fluent.slicing.states.StepsOrAxisOrGetTensor
+
step(int) - Method in interface neureka.fluent.slicing.states.StepsOrAxisOrGetTensor
This method allows one to specify a step size within the slice range previously specified for the currently sliced axis.
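A hedged sketch of the slice-builder transition these step(int) entries belong to. Only AxisSliceBuilder, step(int) and the to(int) entries further down are taken from this index; Tensor.of(...), slice(), axis(...), from(...) and get() are assumptions about the surrounding API.

    import neureka.Tensor;

    public final class SliceDemo {
        public static void main(String[] args) {
            Tensor<Integer> t = Tensor.of(0, 1, 2, 3, 4, 5, 6, 7); // assumed factory method
            Tensor<Integer> s = t.slice()  // assumed entry point returning the slice builder
                    .axis(0)               // assumed axis selector (AxisSliceBuilder)
                    .from(1)               // assumed start of the slice range
                    .to(6)                 // to(int): end of the slice range (listed in this index)
                    .step(2)               // step(int): take every 2nd element of that range
                    .get();                // assumed terminal call producing the slice
            System.out.println(s); // expected to hold 1, 3, 5
        }
    }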
-
Step<V> - Interface in neureka.fluent.building.states
-
-
This interface defines the last step in the call transition graph of the fluent builder API when - building a Tensor instance populated based on the values within a defined range.
-
-
StepForTensor<V> - Interface in neureka.fluent.building.states
+
StepForTensor<V> - Interface in neureka.fluent.building.states
 
-
StepsOrAxisOrGet<V> - Interface in neureka.fluent.slicing.states
+
StepsOrAxisOrGet<V> - Interface in neureka.fluent.slicing.states
This interface extends the AxisOrGet interface which provides the option to either continue slicing another axis or simply trigger the creation and return of a slice instance based on the already provided slice configuration.
-
StepsOrAxisOrGetTensor<V> - Interface in neureka.fluent.slicing.states
+
StepsOrAxisOrGetTensor<V> - Interface in neureka.fluent.slicing.states
 
-
Storage<V> - Interface in neureka.devices
+
Storage<V> - Interface in neureka.devices
This is an abstract interface which simply describes "a thing that stores tensors".
-
store(Tensor<T>) - Method in class neureka.devices.AbstractDevice
+
store(Tensor<T>) - Method in class neureka.devices.AbstractDevice
Implementations of this method ought to store the data of the tensor in whatever format suits the underlying implementation and/or final type.
-
store(Tensor<T>) - Method in class neureka.devices.file.CSVHandle
+
store(Tensor<T>) - Method in class neureka.devices.file.CSVHandle
 
-
store(Tensor<T>) - Method in class neureka.devices.file.FileDevice
+
store(Tensor<T>) - Method in class neureka.devices.file.FileDevice
Implementations of this method ought to store the data of the tensor in whatever format suits the underlying implementation and/or final type.
-
store(Tensor<T>) - Method in class neureka.devices.file.IDXHandle
-
 
-
store(Tensor<T>) - Method in class neureka.devices.host.CPU
-
 
-
store(Tensor<T>) - Method in interface neureka.devices.Storage
+
store(Tensor<T>, String) - Method in class neureka.devices.file.FileDevice
-
Implementations of this method ought to store the data - of the tensor in whatever format suits the underlying - implementation and/or final type.
+
Stores the given tensor in the file system with the given filename.
-
store(Tensor<T>, String) - Method in class neureka.devices.file.FileDevice
+
store(Tensor<T>, String, Map<String, Object>) - Method in class neureka.devices.file.FileDevice
Stores the given tensor in the file system with the given filename.
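A hedged usage sketch for the store(Tensor, String) overloads above. FileDevice.at(...) and Tensor.of(...) are assumed factory methods; only the store(...) signatures themselves come from this index.

    import neureka.Tensor;
    import neureka.devices.file.FileDevice;

    public final class StoreDemo {
        public static void main(String[] args) {
            FileDevice device = FileDevice.at("data/tensors"); // assumed factory method
            Tensor<Double> t = Tensor.of(1.0, 2.0, 3.0);       // assumed factory method
            device.store(t, "my-tensor.idx");                  // store under the given file name
        }
    }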
-
store(Tensor<T>, String, Map<String, Object>) - Method in class neureka.devices.file.FileDevice
+
store(Tensor<T>) - Method in class neureka.devices.file.IDXHandle
+
 
+
store(Tensor<T>) - Method in class neureka.devices.host.CPU
+
 
+
store(Tensor<T>) - Method in interface neureka.devices.Storage
-
Stores the given tensor in the file system with the given filename.
+
Implementations of this method ought to store the data + of the tensor in whatever format suits the underlying + implementation and/or final type.
-
stream() - Method in interface neureka.Nda
+
stream() - Method in interface neureka.Nda
 
-
stream() - Method in interface neureka.Shape
+
stream() - Method in interface neureka.Shape
 
-
strides() - Method in interface neureka.ndim.config.NDConfiguration
+
strides() - Method in interface neureka.ndim.config.NDConfiguration
The array returned by this method is used to translate an array of axis indices to a single data array index.
-
strides() - Method in class neureka.ndim.config.types.permuted.Permuted1DConfiguration
+
strides(int) - Method in interface neureka.ndim.config.NDConfiguration
-
The array returned by this method is used to translate an array - of axis indices to a single data array index.
+
This method receives an axis index and returns the + translation value for the targeted axis.
-
strides() - Method in class neureka.ndim.config.types.permuted.Permuted2DConfiguration
+
strides() - Method in class neureka.ndim.config.types.permuted.Permuted1DConfiguration
The array returned by this method is used to translate an array of axis indices to a single data array index.
-
strides() - Method in class neureka.ndim.config.types.permuted.Permuted3DConfiguration
+
strides(int) - Method in class neureka.ndim.config.types.permuted.Permuted1DConfiguration
-
The array returned by this method is used to translate an array - of axis indices to a single data array index.
+
This method receives an axis index and returns the + translation value for the targeted axis.
-
strides() - Method in class neureka.ndim.config.types.permuted.PermutedNDConfiguration
+
strides() - Method in class neureka.ndim.config.types.permuted.Permuted2DConfiguration
The array returned by this method is used to translate an array of axis indices to a single data array index.
-
strides() - Method in class neureka.ndim.config.types.simple.Simple0DConfiguration
+
strides(int) - Method in class neureka.ndim.config.types.permuted.Permuted2DConfiguration
-
The array returned by this method is used to translate an array - of axis indices to a single data array index.
+
This method receives an axis index and returns the + translation value for the targeted axis.
-
strides() - Method in class neureka.ndim.config.types.simple.Simple1DConfiguration
+
strides() - Method in class neureka.ndim.config.types.permuted.Permuted3DConfiguration
The array returned by this method is used to translate an array of axis indices to a single data array index.
-
strides() - Method in class neureka.ndim.config.types.simple.Simple2DConfiguration
+
strides(int) - Method in class neureka.ndim.config.types.permuted.Permuted3DConfiguration
-
The array returned by this method is used to translate an array - of axis indices to a single data array index.
+
This method receives an axis index and returns the + translation value for the targeted axis.
-
strides() - Method in class neureka.ndim.config.types.simple.Simple3DConfiguration
+
strides() - Method in class neureka.ndim.config.types.permuted.PermutedNDConfiguration
The array returned by this method is used to translate an array of axis indices to a single data array index.
-
strides() - Method in class neureka.ndim.config.types.simple.SimpleNDConfiguration
+
strides(int) - Method in class neureka.ndim.config.types.permuted.PermutedNDConfiguration
-
The array returned by this method is used to translate an array - of axis indices to a single data array index.
+
This method receives an axis index and returns the + translation value for the targeted axis.
-
strides() - Method in class neureka.ndim.config.types.sliced.Sliced0DConfiguration
+
strides() - Method in class neureka.ndim.config.types.simple.Simple0DConfiguration
The array returned by this method is used to translate an array of axis indices to a single data array index.
-
strides() - Method in class neureka.ndim.config.types.sliced.Sliced1DConfiguration
+
strides(int) - Method in class neureka.ndim.config.types.simple.Simple0DConfiguration
-
The array returned by this method is used to translate an array - of axis indices to a single data array index.
+
This method receives an axis index and returns the + translation value for the targeted axis.
-
strides() - Method in class neureka.ndim.config.types.sliced.Sliced2DConfiguration
+
strides() - Method in class neureka.ndim.config.types.simple.Simple1DConfiguration
The array returned by this method is used to translate an array of axis indices to a single data array index.
-
strides() - Method in class neureka.ndim.config.types.sliced.Sliced3DConfiguration
+
strides(int) - Method in class neureka.ndim.config.types.simple.Simple1DConfiguration
-
The array returned by this method is used to translate an array - of axis indices to a single data array index.
+
This method receives an axis index and returns the + translation value for the targeted axis.
-
strides() - Method in class neureka.ndim.config.types.sliced.SlicedNDConfiguration
+
strides() - Method in class neureka.ndim.config.types.simple.Simple2DConfiguration
The array returned by this method is used to translate an array of axis indices to a single data array index.
-
strides() - Method in class neureka.ndim.config.types.views.SimpleReshapeView
+
strides(int) - Method in class neureka.ndim.config.types.simple.Simple2DConfiguration
-
The array returned by this method is used to translate an array - of axis indices to a single data array index.
+
This method receives an axis index and returns the + translation value for the targeted axis.
-
strides() - Method in class neureka.ndim.config.types.views.virtual.VirtualNDConfiguration
+
strides() - Method in class neureka.ndim.config.types.simple.Simple3DConfiguration
The array returned by this method is used to translate an array of axis indices to a single data array index.
-
strides() - Method in interface neureka.ndim.NDimensional
-
 
-
strides(int) - Method in interface neureka.ndim.config.NDConfiguration
+
strides(int) - Method in class neureka.ndim.config.types.simple.Simple3DConfiguration
This method receives an axis index and returns the translation value for the targeted axis.
-
strides(int) - Method in class neureka.ndim.config.types.permuted.Permuted1DConfiguration
+
strides() - Method in class neureka.ndim.config.types.simple.SimpleNDConfiguration
-
This method receives an axis index and returns the - translation value for the targeted axis.
+
The array returned by this method is used to translate an array + of axis indices to a single data array index.
-
strides(int) - Method in class neureka.ndim.config.types.permuted.Permuted2DConfiguration
+
strides(int) - Method in class neureka.ndim.config.types.simple.SimpleNDConfiguration
This method receives an axis index and returns the translation value for the targeted axis.
-
strides(int) - Method in class neureka.ndim.config.types.permuted.Permuted3DConfiguration
+
strides() - Method in class neureka.ndim.config.types.sliced.Sliced0DConfiguration
-
This method receives an axis index and returns the - translation value for the targeted axis.
+
The array returned by this method is used to translate an array + of axis indices to a single data array index.
-
strides(int) - Method in class neureka.ndim.config.types.permuted.PermutedNDConfiguration
+
strides(int) - Method in class neureka.ndim.config.types.sliced.Sliced0DConfiguration
This method receives an axis index and returns the translation value for the targeted axis.
-
strides(int) - Method in class neureka.ndim.config.types.simple.Simple0DConfiguration
+
strides() - Method in class neureka.ndim.config.types.sliced.Sliced1DConfiguration
-
This method receives an axis index and returns the - translation value for the targeted axis.
+
The array returned by this method is used to translate an array + of axis indices to a single data array index.
-
strides(int) - Method in class neureka.ndim.config.types.simple.Simple1DConfiguration
+
strides(int) - Method in class neureka.ndim.config.types.sliced.Sliced1DConfiguration
This method receives an axis index and returns the translation value for the targeted axis.
-
strides(int) - Method in class neureka.ndim.config.types.simple.Simple2DConfiguration
+
strides() - Method in class neureka.ndim.config.types.sliced.Sliced2DConfiguration
-
This method receives an axis index and returns the - translation value for the targeted axis.
+
The array returned by this method is used to translate an array + of axis indices to a single data array index.
-
strides(int) - Method in class neureka.ndim.config.types.simple.Simple3DConfiguration
+
strides(int) - Method in class neureka.ndim.config.types.sliced.Sliced2DConfiguration
This method receives an axis index and returns the translation value for the targeted axis.
-
strides(int) - Method in class neureka.ndim.config.types.simple.SimpleNDConfiguration
+
strides() - Method in class neureka.ndim.config.types.sliced.Sliced3DConfiguration
-
This method receives an axis index and returns the - translation value for the targeted axis.
+
The array returned by this method is used to translate an array + of axis indices to a single data array index.
-
strides(int) - Method in class neureka.ndim.config.types.sliced.Sliced0DConfiguration
+
strides(int) - Method in class neureka.ndim.config.types.sliced.Sliced3DConfiguration
This method receives an axis index and returns the translation value for the targeted axis.
-
strides(int) - Method in class neureka.ndim.config.types.sliced.Sliced1DConfiguration
+
strides() - Method in class neureka.ndim.config.types.sliced.SlicedNDConfiguration
-
This method receives an axis index and returns the - translation value for the targeted axis.
+
The array returned by this method is used to translate an array + of axis indices to a single data array index.
-
strides(int) - Method in class neureka.ndim.config.types.sliced.Sliced2DConfiguration
+
strides(int) - Method in class neureka.ndim.config.types.sliced.SlicedNDConfiguration
This method receives an axis index and returns the translation value for the targeted axis.
-
strides(int) - Method in class neureka.ndim.config.types.sliced.Sliced3DConfiguration
+
strides() - Method in class neureka.ndim.config.types.views.SimpleReshapeView
-
This method receives an axis index and returns the - translation value for the targeted axis.
+
The array returned by this method is used to translate an array + of axis indices to a single data array index.
-
strides(int) - Method in class neureka.ndim.config.types.sliced.SlicedNDConfiguration
+
strides(int) - Method in class neureka.ndim.config.types.views.SimpleReshapeView
This method receives an axis index and returns the translation value for the targeted axis.
-
strides(int) - Method in class neureka.ndim.config.types.views.SimpleReshapeView
+
strides() - Method in class neureka.ndim.config.types.views.virtual.VirtualNDConfiguration
-
This method receives an axis index and returns the - translation value for the targeted axis.
+
The array returned by this method is used to translate an array + of axis indices to a single data array index.
-
strides(int) - Method in class neureka.ndim.config.types.views.virtual.VirtualNDConfiguration
+
strides(int) - Method in class neureka.ndim.config.types.views.virtual.VirtualNDConfiguration
This method receives an axis index and returns the translation value for the targeted axis.
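To make the strides entries above concrete, here is a minimal, self-contained sketch of the translation they describe: each axis index is multiplied by the stride (the translation value) of its axis and the products are summed into a single data array index. This is illustrative plain Java, not Neureka's internal implementation.

    // Minimal sketch of the translation described by strides():
    // an array of axis indices -> one flat data array index.
    public final class StridesDemo {
        static int flatIndex(int[] strides, int[] indices) {
            int flat = 0;
            for (int axis = 0; axis < strides.length; axis++)
                flat += strides[axis] * indices[axis]; // stride = translation value of the axis
            return flat;
        }
        public static void main(String[] args) {
            int[] strides = {12, 4, 1}; // row-major strides of a tensor with shape [2, 3, 4]
            int[] indices = {1, 2, 3};  // axis indices
            System.out.println(flatIndex(strides, indices)); // prints 23
        }
    }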
-
stringifier(OperationBuilder.Stringifier) - Method in class neureka.backend.api.template.operations.OperationBuilder
+
strides() - Method in interface neureka.ndim.NDimensional
 
-
stringify(String[]) - Method in interface neureka.backend.api.Operation
+
stringifier(OperationBuilder.Stringifier) - Method in class neureka.backend.api.template.operations.OperationBuilder
 
-
stringify(String[]) - Method in class neureka.backend.api.template.operations.AbstractOperation
-
stringify(String[]) - Method in interface neureka.backend.api.template.operations.OperationBuilder.Stringifier
+
stringify(String[]) - Method in interface neureka.backend.api.Operation
 
-
stringify(String[]) - Method in class neureka.backend.main.operations.functions.SiLU
+
stringify(String[]) - Method in class neureka.backend.api.template.operations.AbstractOperation
+
stringify(String[]) - Method in interface neureka.backend.api.template.operations.OperationBuilder.Stringifier
 
-
stringify(String[]) - Method in class neureka.backend.main.operations.linear.XConvLeft
+
stringify(String[]) - Method in class neureka.backend.main.operations.linear.XConvLeft
 
-
stringify(String[]) - Method in class neureka.backend.main.operations.linear.XConvRight
+
stringify(String[]) - Method in class neureka.backend.main.operations.linear.XConvRight
 
-
stringify(String[]) - Method in class neureka.backend.main.operations.other.AssignLeft
+
stringify(String[]) - Method in class neureka.backend.main.operations.other.AssignLeft
 
-
stringify(String[]) - Method in class neureka.backend.main.operations.other.Permute
+
stringify(String[]) - Method in class neureka.backend.main.operations.other.Permute
 
-
submit(int, CPU.IndexedWorkload) - Method in class neureka.devices.host.concurrent.WorkScheduler.Divider
+
submit(int, CPU.IndexedWorkload) - Method in class neureka.devices.host.concurrent.WorkScheduler.Divider
 
-
Subtraction - Class in neureka.backend.main.operations.operator
+
Subtraction - Class in neureka.backend.main.operations.operator
 
-
Subtraction() - Constructor for class neureka.backend.main.operations.operator.Subtraction
+
Subtraction() - Constructor for class neureka.backend.main.operations.operator.Subtraction
 
-
suitabilityIfValid(float) - Method in class neureka.backend.api.Call.Validator
+
suitabilityIfValid(float) - Method in class neureka.backend.api.Call.Validator
 
-
SuitabilityPredicate - Interface in neureka.backend.api.fun
+
SuitabilityPredicate - Interface in neureka.backend.api.fun
The SuitabilityPredicate checks if a given instance of an ExecutionCall is suitable to be executed in ImplementationFor instances residing in this Algorithm as components.
-
sum() - Method in class neureka.math.Functions
+
Sum - Class in neureka.backend.main.operations.other
 
-
sum() - Method in interface neureka.Tensor
+
Sum() - Constructor for class neureka.backend.main.operations.other.Sum
+
 
+
sum() - Method in class neureka.math.Functions
+
 
+
sum() - Method in interface neureka.Tensor
Calculates the sum of all values within this tensor and returns it in the form of a scalar tensor.
-
sum(int) - Method in interface neureka.Tensor
+
sum(int) - Method in interface neureka.Tensor
Calculates the sum of all values within this tensor along the specified axis and returns it in the form of a tensor.
-
sum(int...) - Method in interface neureka.Tensor
+
sum(int...) - Method in interface neureka.Tensor
Calculates the sum of all values within this tensor along the specified axes and returns it in the form of a tensor.
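A hedged sketch of the three sum overloads above. The Tensor.of(Double.class).withShape(...).andFill(...) construction is an assumption; the sum(), sum(int) and sum(int...) calls are the entries listed here.

    import neureka.Tensor;

    public final class SumDemo {
        public static void main(String[] args) {
            Tensor<Double> t = Tensor.of(Double.class) // assumed construction
                    .withShape(2, 3)
                    .andFill(1.0, 2.0, 3.0, 4.0, 5.0, 6.0);
            System.out.println(t.sum());     // scalar tensor holding 21.0
            System.out.println(t.sum(0));    // sum along axis 0
            System.out.println(t.sum(0, 1)); // sum along both axes
        }
    }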
-
Sum - Class in neureka.backend.main.operations.other
-
 
-
Sum() - Constructor for class neureka.backend.main.operations.other.Sum
-
 
-
SumAlgorithm - Class in neureka.backend.main.algorithms
+
SumAlgorithm - Class in neureka.backend.main.algorithms
 
-
SumAlgorithm() - Constructor for class neureka.backend.main.algorithms.SumAlgorithm
+
SumAlgorithm() - Constructor for class neureka.backend.main.algorithms.SumAlgorithm
 
-
Summation - Class in neureka.backend.main.operations.indexer
+
Summation - Class in neureka.backend.main.operations.indexer
This type of operation belongs to the same species as the Product operation.
-
Summation() - Constructor for class neureka.backend.main.operations.indexer.Summation
+
Summation() - Constructor for class neureka.backend.main.operations.indexer.Summation
 
-
supplyADActionFor(Function, ExecutionCall<? extends Device<?>>) - Method in interface neureka.backend.api.fun.ADActionSupplier
+
supplyADActionFor(Function, ExecutionCall<? extends Device<?>>) - Method in interface neureka.backend.api.fun.ADActionSupplier
This method ought to return a new instance of the ADAction class responsible for performing automatic differentiation both for forward and backward mode differentiation.
-
supplyADActionFor(Function, ExecutionCall<? extends Device<?>>) - Method in class neureka.backend.api.template.algorithms.FallbackAlgorithm
+
supplyADActionFor(Function, ExecutionCall<? extends Device<?>>) - Method in class neureka.backend.api.template.algorithms.FallbackAlgorithm
 
-
supports(Class<T>) - Method in interface neureka.backend.api.Operation
+
supports(Class<T>) - Method in interface neureka.backend.api.Operation
 
-
supports(Class<T>) - Method in class neureka.backend.api.template.operations.AbstractOperation
+
supports(Class<T>) - Method in class neureka.backend.api.template.operations.AbstractOperation
 
-
supportsAlgorithm(Class<T>) - Method in interface neureka.backend.api.Operation
+
supportsAlgorithm(Class<T>) - Method in interface neureka.backend.api.Operation
This method checks if this Operation contains an instance of the Algorithm implementation specified via its type class.
-
supportsAlgorithm(Class<T>) - Method in class neureka.backend.api.template.operations.AbstractOperation
+
supportsAlgorithm(Class<T>) - Method in class neureka.backend.api.template.operations.AbstractOperation
This method checks if this Operation contains an instance of the Algorithm implementation specified via its type class.
-
SYMMETRIC - Enum constant in enum class neureka.ndim.config.NDConfiguration.Layout
-
 
-

T

-
-
T() - Method in interface neureka.Tensor
+ + + +

T

+
+
T() - Method in interface neureka.Tensor
Creates and returns a new Tensor instance which is a transposed twin of this instance.
- This is a shorter alternative to the functionally identical Tensor.getT() method.
+ This is a shorter alternative to the functionally identical Tensor.getT() method.
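A hedged sketch contrasting T() with the getT() form that the entry above calls functionally identical. The Tensor.of(...).withShape(...).andFill(...) construction and the shape() call are assumptions; T() and getT() are the listed methods.

    import neureka.Tensor;

    public final class TransposeDemo {
        public static void main(String[] args) {
            Tensor<Double> m = Tensor.of(Double.class) // assumed construction
                    .withShape(2, 3)
                    .andFill(1.0, 2.0, 3.0);
            System.out.println(m.T().shape());    // transposed view, expected shape [3, 2]
            System.out.println(m.getT().shape()); // same result via the getter form
        }
    }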
-
tanh() - Method in class neureka.math.Functions
+
TANH - Static variable in interface neureka.backend.main.implementations.fun.api.ScalarFun
 
-
tanh() - Method in interface neureka.Tensor
-
-
This method is functionally identical to the following alternatives:
-
-
tanh(double) - Static method in class neureka.backend.main.implementations.fun.ScalarTanh
+
tanh(double) - Static method in class neureka.backend.main.implementations.fun.ScalarTanh
 
-
tanh(float) - Static method in class neureka.backend.main.implementations.fun.ScalarTanh
+
tanh(float) - Static method in class neureka.backend.main.implementations.fun.ScalarTanh
 
-
Tanh - Class in neureka.backend.main.operations.functions
+
Tanh - Class in neureka.backend.main.operations.functions
 
-
Tanh() - Constructor for class neureka.backend.main.operations.functions.Tanh
+
Tanh() - Constructor for class neureka.backend.main.operations.functions.Tanh
 
-
TANH - Static variable in interface neureka.backend.main.implementations.fun.api.ScalarFun
+
tanh() - Method in class neureka.math.Functions
 
-
TANH_FAST - Static variable in interface neureka.backend.main.implementations.fun.api.ScalarFun
+
tanh() - Method in interface neureka.Tensor
+
+
This method is functionally identical to the following alternatives:
+
+
TANH_FAST - Static variable in interface neureka.backend.main.implementations.fun.api.ScalarFun
 
-
TanhFast - Class in neureka.backend.main.operations.functions
+
TanhFast - Class in neureka.backend.main.operations.functions
 
-
TanhFast() - Constructor for class neureka.backend.main.operations.functions.TanhFast
+
TanhFast() - Constructor for class neureka.backend.main.operations.functions.TanhFast
 
-
targetArrayType() - Method in class neureka.dtype.custom.F32
+
targetArrayType() - Method in class neureka.dtype.custom.F32
 
-
targetArrayType() - Method in class neureka.dtype.custom.F64
+
targetArrayType() - Method in class neureka.dtype.custom.F64
 
-
targetArrayType() - Method in class neureka.dtype.custom.I16
+
targetArrayType() - Method in class neureka.dtype.custom.I16
 
-
targetArrayType() - Method in class neureka.dtype.custom.I32
+
targetArrayType() - Method in class neureka.dtype.custom.I32
 
-
targetArrayType() - Method in class neureka.dtype.custom.I64
+
targetArrayType() - Method in class neureka.dtype.custom.I64
 
-
targetArrayType() - Method in class neureka.dtype.custom.I8
+
targetArrayType() - Method in class neureka.dtype.custom.I8
 
-
targetArrayType() - Method in class neureka.dtype.custom.UI16
+
targetArrayType() - Method in class neureka.dtype.custom.UI16
 
-
targetArrayType() - Method in class neureka.dtype.custom.UI32
+
targetArrayType() - Method in class neureka.dtype.custom.UI32
 
-
targetArrayType() - Method in class neureka.dtype.custom.UI64
+
targetArrayType() - Method in class neureka.dtype.custom.UI64
 
-
targetArrayType() - Method in class neureka.dtype.custom.UI8
+
targetArrayType() - Method in class neureka.dtype.custom.UI8
 
-
targetArrayType() - Method in interface neureka.dtype.NumericType
+
targetArrayType() - Method in interface neureka.dtype.NumericType
The target type is the targeted JVM data-type which can represent the holder type.
-
targetToForeignHolderBytes(Byte) - Method in class neureka.dtype.custom.I8
+
targetToForeignHolderBytes(Float) - Method in class neureka.dtype.custom.F32
 
-
targetToForeignHolderBytes(Double) - Method in class neureka.dtype.custom.F64
+
targetToForeignHolderBytes(Double) - Method in class neureka.dtype.custom.F64
 
-
targetToForeignHolderBytes(Float) - Method in class neureka.dtype.custom.F32
+
targetToForeignHolderBytes(Short) - Method in class neureka.dtype.custom.I16
 
-
targetToForeignHolderBytes(Integer) - Method in class neureka.dtype.custom.I32
+
targetToForeignHolderBytes(Integer) - Method in class neureka.dtype.custom.I32
 
-
targetToForeignHolderBytes(Integer) - Method in class neureka.dtype.custom.UI16
+
targetToForeignHolderBytes(Long) - Method in class neureka.dtype.custom.I64
 
-
targetToForeignHolderBytes(Long) - Method in class neureka.dtype.custom.I64
+
targetToForeignHolderBytes(Byte) - Method in class neureka.dtype.custom.I8
 
-
targetToForeignHolderBytes(Long) - Method in class neureka.dtype.custom.UI32
+
targetToForeignHolderBytes(Integer) - Method in class neureka.dtype.custom.UI16
 
-
targetToForeignHolderBytes(Short) - Method in class neureka.dtype.custom.I16
+
targetToForeignHolderBytes(Long) - Method in class neureka.dtype.custom.UI32
 
-
targetToForeignHolderBytes(Short) - Method in class neureka.dtype.custom.UI8
+
targetToForeignHolderBytes(BigInteger) - Method in class neureka.dtype.custom.UI64
 
-
targetToForeignHolderBytes(BigInteger) - Method in class neureka.dtype.custom.UI64
+
targetToForeignHolderBytes(Short) - Method in class neureka.dtype.custom.UI8
 
-
targetToForeignHolderBytes(TargetType) - Method in interface neureka.dtype.NumericType
+
targetToForeignHolderBytes(TargetType) - Method in interface neureka.dtype.NumericType
 
-
targetType() - Method in class neureka.dtype.custom.F32
+
targetType() - Method in class neureka.dtype.custom.F32
 
-
targetType() - Method in class neureka.dtype.custom.F64
+
targetType() - Method in class neureka.dtype.custom.F64
 
-
targetType() - Method in class neureka.dtype.custom.I16
+
targetType() - Method in class neureka.dtype.custom.I16
 
-
targetType() - Method in class neureka.dtype.custom.I32
+
targetType() - Method in class neureka.dtype.custom.I32
 
-
targetType() - Method in class neureka.dtype.custom.I64
+
targetType() - Method in class neureka.dtype.custom.I64
 
-
targetType() - Method in class neureka.dtype.custom.I8
+
targetType() - Method in class neureka.dtype.custom.I8
 
-
targetType() - Method in class neureka.dtype.custom.UI16
+
targetType() - Method in class neureka.dtype.custom.UI16
 
-
targetType() - Method in class neureka.dtype.custom.UI32
+
targetType() - Method in class neureka.dtype.custom.UI32
 
-
targetType() - Method in class neureka.dtype.custom.UI64
+
targetType() - Method in class neureka.dtype.custom.UI64
 
-
targetType() - Method in class neureka.dtype.custom.UI8
+
targetType() - Method in class neureka.dtype.custom.UI8
 
-
targetType() - Method in interface neureka.dtype.NumericType
+
targetType() - Method in interface neureka.dtype.NumericType
The target type is the targeted JVM data-type which can represent the holder type.
-
Tensor<V> - Interface in neureka
+
Tensor<V> - Interface in neureka
A Tensor is a mathematical concept and type of multidimensional data-structure with certain transformation properties.
-
Tensor.ImageType - Enum Class in neureka
+
Tensor.ImageType - Enum in neureka
-
Use this enum as argument for the Tensor.asImage(Tensor.ImageType) method to +
Use this enum as argument for the Tensor.asImage(Tensor.ImageType) method to specify the type of image that should be returned.
-
tensors(Call.TensorsCondition) - Method in class neureka.backend.api.Call.Validator
+
tensors(Call.TensorsCondition) - Method in class neureka.backend.api.Call.Validator
 
-
TERRIBLE - Static variable in interface neureka.backend.api.fun.SuitabilityPredicate
+
TERRIBLE - Static variable in interface neureka.backend.api.fun.SuitabilityPredicate
 
-
THREAD_PREFIX - Static variable in class neureka.devices.host.CPU
+
THREAD_PREFIX - Static variable in class neureka.devices.host.CPU
 
-
threaded(int, int, CPU.RangeWorkload) - Method in class neureka.devices.host.CPU.JVMExecutor
+
threaded(int, CPU.RangeWorkload) - Method in class neureka.devices.host.CPU.JVMExecutor
-
Takes the provided range and divides it into multithreaded workloads.
+
This method slices the provided workload size into multiple ranges which can be executed in parallel.
-
threaded(int, CPU.IndexedWorkload) - Method in class neureka.devices.host.CPU.JVMExecutor
+
threaded(int, CPU.IndexedWorkload) - Method in class neureka.devices.host.CPU.JVMExecutor
Executes the provided workload lambda across multiple threads where the provided worker lambda will receive the index/id of the current worker.
-
threaded(int, CPU.RangeWorkload) - Method in class neureka.devices.host.CPU.JVMExecutor
+
threaded(int, int, CPU.RangeWorkload) - Method in class neureka.devices.host.CPU.JVMExecutor
-
This method slices the provided workload size into multiple ranges which can be executed in parallel.
+
Takes the provided range and divides it into multithreaded workloads.
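A hedged sketch of the threaded(int, int, CPU.RangeWorkload) entry above. CPU.get() and getExecutor() are assumptions about how the JVMExecutor is obtained, and the two-int lambda shape of RangeWorkload is inferred from its name; only the threaded(...) overloads come from this index.

    import neureka.devices.host.CPU;

    public final class ThreadedDemo {
        public static void main(String[] args) {
            double[] data = new double[1_000_000];
            CPU.get().getExecutor().threaded(0, data.length, (start, end) -> {
                for (int i = start; i < end; i++)
                    data[i] = Math.sqrt(i); // each worker fills its own sub-range
            });
            System.out.println(data[data.length - 1]);
        }
    }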
-
threads - Variable in class neureka.devices.host.machine.BasicMachine
+
threads - Variable in class neureka.devices.host.machine.BasicMachine
 
-
THREADS - Enum constant in enum class neureka.devices.host.concurrent.Parallelism
-
-
The total number of threads (incl.
-
-
threshold(int) - Method in class neureka.devices.host.concurrent.WorkScheduler.Divider
+
threshold(int) - Method in class neureka.devices.host.concurrent.WorkScheduler.Divider
 
-
times(Tensor<V>) - Method in interface neureka.Tensor
+
times(Tensor<V>) - Method in interface neureka.Tensor
-
This is a functionally identical synonym to the Tensor.multiply(Tensor) method.
+
This is a functionally identical synonym to the Tensor.multiply(Tensor) method.
-
times(V) - Method in interface neureka.Tensor
-
 
-
timesAssign(Tensor<T>) - Method in interface neureka.MutateTensor
+
times(V) - Method in interface neureka.Tensor
 
-
timesAssign(T) - Method in interface neureka.MutateTensor
+
timesAssign(Tensor<T>) - Method in interface neureka.MutateTensor
 
-
to(int) - Method in class neureka.fluent.slicing.AxisSliceBuilder
-
-
This method returns this very AxisSliceBuilder instance - disguised by the StepsOrAxisOrGet interface.
-
-
to(int) - Method in interface neureka.fluent.slicing.states.To
-
-
This is the second part for defining the slice range of a specified axis within - the call transition graph exposed by the slice fluent builder API.
-
-
to(int) - Method in interface neureka.fluent.slicing.states.ToForTensor
-
-
This is the second part for defining the slice range of a specified axis within - the call transition graph exposed by the slice fluent builder API.
-
-
to(String) - Method in interface neureka.Tensor
+
timesAssign(T) - Method in interface neureka.MutateTensor
 
-
to(Device<?>) - Method in interface neureka.Tensor
-
-
This method takes a Device and tries to migrate the contents of this Tensor - instance to that Device!
-
-
to(T) - Static method in class neureka.backend.api.Call
+
to(T) - Static method in class neureka.backend.api.Call
 
-
to(V) - Method in class neureka.fluent.building.NdaBuilder
+
to(V) - Method in class neureka.fluent.building.NdaBuilder
 
-
to(V) - Method in interface neureka.fluent.building.states.To
+
To<V> - Interface in neureka.fluent.building.states
This step in the call transition graph of the fluent builder API is a follow-up call from the IterByOrIterFromOrAll.andFillFrom(Object) method which expects a range to be specified whose values will be used to populate the Tensor instance.
-
to(V) - Method in interface neureka.fluent.building.states.ToForTensor
+
to(V) - Method in interface neureka.fluent.building.states.To
This step in the call transition graph of the fluent builder API is a follow-up call from the IterByOrIterFromOrAll.andFillFrom(Object) method which expects a range to be specified whose values will be used to populate the Tensor instance.
-
To<V> - Interface in neureka.fluent.building.states
+
to(V) - Method in interface neureka.fluent.building.states.ToForTensor
This step in the call transition graph of the fluent builder API is a follow-up call from the IterByOrIterFromOrAll.andFillFrom(Object) method which expects a range to be specified whose values will be used to populate the Tensor instance.
-
To<V> - Interface in neureka.fluent.slicing.states
+
to(int) - Method in class neureka.fluent.slicing.AxisSliceBuilder
+
+
This method returns this very AxisSliceBuilder instance + disguised by the StepsOrAxisOrGet interface.
+
+
To<V> - Interface in neureka.fluent.slicing.states
This is the second part for defining the slice range of a specified axis within the call transition graph exposed by the slice builder API.
-
toByteArray(Function<Integer, Number>) - Method in class neureka.common.utility.DataConverter.ForTensor
+
to(int) - Method in interface neureka.fluent.slicing.states.To
+
+
This is the second part for defining the slice range of a specified axis within + the call transition graph exposed by the slice fluent builder API.
+
+
to(int) - Method in interface neureka.fluent.slicing.states.ToForTensor
+
+
This is the second part for defining the slice range of a specified axis within + the call transition graph exposed by the slice fluent builder API.
+
+
to(Device<?>) - Method in interface neureka.Tensor
+
+
This method takes a Device and tries to migrate the contents of this Tensor + instance to that Device!
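A hedged sketch of tensor migration via the to(...) entries above. Tensor.of(...) is an assumed factory and the device search keys ("GPU", "CPU") are assumptions; to(Device<?>) and to(String) are the listed methods.

    import neureka.Tensor;

    public final class MigrateDemo {
        public static void main(String[] args) {
            Tensor<Float> t = Tensor.of(1f, 2f, 3f); // assumed factory method
            t.to("GPU"); // to(String): migrate by a device search key (assumed key)
            t.to("CPU"); // move the contents back to the host
        }
    }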
+
+
to(String) - Method in interface neureka.Tensor
+
 
+
toByteArray(Function<Integer, Number>) - Method in class neureka.common.utility.DataConverter.ForTensor
+
 
+
toDoubleArray(Function<Integer, Number>) - Method in class neureka.common.utility.DataConverter.ForTensor
 
-
toDoubleArray(Function<Integer, Number>) - Method in class neureka.common.utility.DataConverter.ForTensor
+
toFloatArray(Function<Integer, Number>) - Method in class neureka.common.utility.DataConverter.ForTensor
 
-
toFloatArray(Function<Integer, Number>) - Method in class neureka.common.utility.DataConverter.ForTensor
+
ToForTensor<V> - Interface in neureka.fluent.building.states
 
-
ToForTensor<V> - Interface in neureka.fluent.building.states
+
ToForTensor<V> - Interface in neureka.fluent.slicing.states
 
-
ToForTensor<V> - Interface in neureka.fluent.slicing.states
+
toIntArray(Function<Integer, Number>) - Method in class neureka.common.utility.DataConverter.ForTensor
 
-
toIntArray() - Method in interface neureka.Shape
+
toIntArray() - Method in interface neureka.Shape
 
-
toIntArray(Function<Integer, Number>) - Method in class neureka.common.utility.DataConverter.ForTensor
+
toLayout(Tensor<?>, NDConfiguration.Layout) - Static method in class neureka.backend.main.operations.other.ReLayout
 
-
toLayout(NDConfiguration.Layout) - Method in interface neureka.MutateTensor
+
toLayout(NDConfiguration.Layout) - Method in interface neureka.MutateTensor
This method allows you to modify the data-layout of this AbstractNda.
-
toLayout(Tensor<?>, NDConfiguration.Layout) - Static method in class neureka.backend.main.operations.other.ReLayout
+
toLongArray(Function<Integer, Number>) - Method in class neureka.common.utility.DataConverter.ForTensor
 
-
toLongArray(Function<Integer, Number>) - Method in class neureka.common.utility.DataConverter.ForTensor
+
toObjectArray(Function<Integer, Object>) - Method in class neureka.common.utility.DataConverter.ForTensor
 
-
toObjectArray(Function<Integer, Object>) - Method in class neureka.common.utility.DataConverter.ForTensor
-
 
-
toOptional() - Method in interface neureka.Nda.Item
+
toOptional() - Method in interface neureka.Nda.Item
Converts this item into an optional value.
-
toShortArray(Function<Integer, Number>) - Method in class neureka.common.utility.DataConverter.ForTensor
+
toShortArray(Function<Integer, Number>) - Method in class neureka.common.utility.DataConverter.ForTensor
+
 
+
toString() - Method in class neureka.autograd.GraphNode
 
-
toString() - Method in class neureka.autograd.GraphNode
+
toString(GraphNode.Print) - Method in class neureka.autograd.GraphNode
 
-
toString() - Method in class neureka.autograd.JITProp
+
toString() - Method in class neureka.autograd.JITProp
 
-
toString() - Method in class neureka.backend.api.BackendContext
+
toString() - Method in class neureka.backend.api.BackendContext
 
-
toString() - Method in class neureka.backend.api.ExecutionCall
+
toString() - Method in class neureka.backend.api.ExecutionCall
 
-
toString() - Method in class neureka.backend.api.LazyRef
+
toString() - Method in class neureka.backend.api.LazyRef
 
-
toString() - Method in class neureka.backend.api.template.algorithms.AbstractDeviceAlgorithm
+
toString() - Method in class neureka.backend.api.template.algorithms.AbstractDeviceAlgorithm
 
-
toString() - Method in class neureka.backend.api.template.operations.AbstractOperation
+
toString() - Method in class neureka.backend.api.template.operations.AbstractOperation
 
-
toString() - Method in class neureka.backend.ocl.CLBackend
+
toString() - Method in class neureka.backend.ocl.CLBackend
 
-
toString() - Method in class neureka.devices.file.FileDevice
+
toString() - Method in class neureka.devices.file.FileDevice
 
-
toString() - Method in class neureka.devices.host.CPU
+
toString() - Method in class neureka.devices.host.CPU
 
-
toString() - Method in class neureka.devices.host.machine.BasicMachine
+
toString() - Method in class neureka.devices.host.machine.BasicMachine
 
-
toString() - Method in class neureka.devices.host.machine.ConcreteMachine
+
toString() - Method in class neureka.devices.host.machine.ConcreteMachine
 
-
toString() - Method in class neureka.devices.host.machine.Hardware
+
toString() - Method in class neureka.devices.host.machine.Hardware
 
-
toString() - Method in class neureka.devices.opencl.OpenCLDevice
+
toString() - Method in class neureka.devices.opencl.OpenCLDevice
 
-
toString() - Method in class neureka.devices.opencl.OpenCLPlatform
+
toString() - Method in class neureka.devices.opencl.OpenCLPlatform
 
-
toString() - Method in class neureka.dtype.DataType
+
toString() - Method in class neureka.dtype.DataType
 
-
toString() - Method in class neureka.framing.NDFrame
+
toString() - Method in class neureka.framing.NDFrame
 
-
toString() - Method in class neureka.framing.Relation
+
toString() - Method in class neureka.framing.Relation
 
-
toString() - Method in class neureka.math.args.Arg
+
toString() - Method in class neureka.math.args.Arg
 
-
toString() - Method in interface neureka.math.Function
+
toString() - Method in interface neureka.math.Function
Turns this function into a string representation which can be used to reconstruct this function or combine it with other function strings to parse entirely new functions...
-
toString() - Method in class neureka.math.FunctionCache
+
toString() - Method in class neureka.math.FunctionCache
 
-
toString() - Method in class neureka.math.Functions
+
toString() - Method in class neureka.math.Functions
 
-
toString() - Method in class neureka.math.implementations.FunctionConstant
+
toString() - Method in class neureka.math.implementations.FunctionConstant
 
-
toString() - Method in class neureka.math.implementations.FunctionInput
+
toString() - Method in class neureka.math.implementations.FunctionInput
 
-
toString() - Method in class neureka.math.implementations.FunctionNode
+
toString() - Method in class neureka.math.implementations.FunctionNode
 
-
toString() - Method in class neureka.math.implementations.FunctionVariable
+
toString() - Method in class neureka.math.implementations.FunctionVariable
 
-
toString() - Method in interface neureka.Nda
+
toString(NDPrintSettings) - Method in interface neureka.Nda
-
This method returns a String representation of this nd-array.
+
Use this to turn this nd-array into a String instance based on the provided + NDPrintSettings instance, which allows you to configure things + like the number of chars per entry, delimiters, the number of items per line, etc.
-
toString() - Method in class neureka.ndim.config.AbstractNDC
-
 
-
toString() - Method in class neureka.Neureka.Settings.AutoGrad
-
 
-
toString() - Method in class neureka.Neureka.Settings.Debug
+
toString(Consumer<NDPrintSettings>) - Method in interface neureka.Nda
+
+
This allows you to provide a lambda which configures how this nd-array should be + converted to String instances.
+
+
toString() - Method in interface neureka.Nda
+
+
This method returns a String representation of this nd-array.
+
+
toString() - Method in class neureka.ndim.config.AbstractNDC
 
-
toString() - Method in class neureka.Neureka.Settings.DType
+
toString() - Method in class neureka.Neureka.Settings.AutoGrad
 
-
toString() - Method in class neureka.Neureka.Settings.NDim
+
toString() - Method in class neureka.Neureka.Settings.Debug
 
-
toString() - Method in class neureka.Neureka.Settings
+
toString() - Method in class neureka.Neureka.Settings.DType
 
-
toString() - Method in class neureka.Neureka.Settings.View
+
toString() - Method in class neureka.Neureka.Settings.NDim
 
-
toString() - Method in class neureka.Neureka
+
toString() - Method in class neureka.Neureka.Settings
 
-
toString() - Method in class neureka.view.NdaAsString
+
toString() - Method in class neureka.Neureka.Settings.View
 
-
toString(String) - Method in interface neureka.Tensor
+
toString() - Method in class neureka.Neureka
 
-
toString(Consumer<NDPrintSettings>) - Method in interface neureka.Nda
-
-
This allows you to provide a lambda which configures how this nd-array should be - converted to String instances.
-
-
toString(Consumer<NDPrintSettings>) - Method in interface neureka.Tensor
-
-
This allows you to provide a lambda which configures how this nd-array should be - converted to String instances.
-
-
toString(GraphNode.Print) - Method in class neureka.autograd.GraphNode
+
toString(String) - Method in interface neureka.Tensor
 
-
toString(NDPrintSettings) - Method in interface neureka.Nda
+
toString(NDPrintSettings) - Method in interface neureka.Tensor
-
Use this to turn this nd-array into a String instance based on the provided +
Use this to turn this nd-array into a String instance based on the provided NDPrintSettings instance, which allows you to configure things like the number of chars per entry, delimiters, the number of items per line, etc.
-
toString(NDPrintSettings) - Method in interface neureka.Tensor
+
toString(Consumer<NDPrintSettings>) - Method in interface neureka.Tensor
-
Use this to turn this nd-array into a String instance based on the provided - NDPrintSettings instance, which allows you to configure things - like the number of chars per entry, delimiters, the number of items per line, etc.
+
This allows you to provide a lambda which configures how this nd-array should be + converted to String instances.
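A hedged sketch of the toString(Consumer<NDPrintSettings>) entry above. Tensor.of(...) and the NDPrintSettings setter names are assumptions; only the overload itself comes from this index.

    import neureka.Tensor;

    public final class PrintDemo {
        public static void main(String[] args) {
            Tensor<Double> t = Tensor.of(1.0, 2.0, 3.0, 4.0); // assumed factory method
            String pretty = t.toString(settings -> {
                settings.setCellSize(8);  // assumed setter: chars per printed entry
                settings.setRowLimit(50); // assumed setter: cap on printed rows
            });
            System.out.println(pretty);
        }
    }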
-
toTarget(HolderType) - Method in interface neureka.dtype.NumericType
+
toString() - Method in class neureka.view.NdaAsString
 
-
toTarget(Byte) - Method in class neureka.dtype.custom.I8
+
toTarget(Float) - Method in class neureka.dtype.custom.F32
 
-
toTarget(Byte) - Method in class neureka.dtype.custom.UI8
+
toTarget(Double) - Method in class neureka.dtype.custom.F64
 
-
toTarget(Double) - Method in class neureka.dtype.custom.F64
+
toTarget(Short) - Method in class neureka.dtype.custom.I16
 
-
toTarget(Float) - Method in class neureka.dtype.custom.F32
+
toTarget(Integer) - Method in class neureka.dtype.custom.I32
 
-
toTarget(Integer) - Method in class neureka.dtype.custom.I32
+
toTarget(Long) - Method in class neureka.dtype.custom.I64
 
-
toTarget(Integer) - Method in class neureka.dtype.custom.UI32
+
toTarget(Byte) - Method in class neureka.dtype.custom.I8
 
-
toTarget(Long) - Method in class neureka.dtype.custom.I64
+
toTarget(Short) - Method in class neureka.dtype.custom.UI16
 
-
toTarget(Long) - Method in class neureka.dtype.custom.UI64
+
toTarget(Integer) - Method in class neureka.dtype.custom.UI32
 
-
toTarget(Short) - Method in class neureka.dtype.custom.I16
+
toTarget(Long) - Method in class neureka.dtype.custom.UI64
 
-
toTarget(Short) - Method in class neureka.dtype.custom.UI16
+
toTarget(Byte) - Method in class neureka.dtype.custom.UI8
 
-
toType(Class<V>) - Method in interface neureka.MutateNda
+
toTarget(HolderType) - Method in interface neureka.dtype.NumericType
+
 
+
toType(Class<V>) - Method in interface neureka.MutateNda
This method is an inline operation which changes the underlying data of this tensor.
-
toType(Class<V>) - Method in interface neureka.MutateTensor
+
toType(Class<V>) - Method in interface neureka.MutateTensor
This method is an inline operation which changes the underlying data of this tensor.
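A hedged sketch of the inline toType(Class) mutation described above. Tensor.of(...) and the mut() accessor used to reach MutateTensor are assumptions, as is the return of the retyped tensor; only toType(Class<V>) itself comes from this index.

    import neureka.Tensor;

    public final class ToTypeDemo {
        public static void main(String[] args) {
            Tensor<Integer> ints = Tensor.of(1, 2, 3);                // assumed factory method
            Tensor<Double> doubles = ints.mut().toType(Double.class); // assumed to return the retyped tensor
            System.out.println(doubles);
        }
    }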
-
transpose(int, int) - Method in interface neureka.Nda
+
transpose(Tensor<T>) - Static method in class neureka.backend.main.algorithms.Util
+
 
+
transpose(int, int) - Method in interface neureka.Nda
Returns a view of the original tensor input in which the targeted axes are swapped / transposed.
-
transpose(int, int) - Method in interface neureka.Tensor
+
transpose(int, int) - Method in interface neureka.Tensor
Returns a view of the original tensor input in which the targeted axes are swapped / transposed.
-
transpose(Tensor<T>) - Static method in class neureka.backend.main.algorithms.Util
-
 
-
transpose2D() - Method in class neureka.math.Functions
-
 
-
TRUE - Enum constant in enum class neureka.ndim.iterator.NDIterator.NonVirtual
+
transpose2D() - Method in class neureka.math.Functions
 
-
tryGroovyClosureOn(Object, Object) - Static method in class neureka.common.utility.SettingsLoader
+
tryGroovyClosureOn(Object, Object) - Static method in class neureka.common.utility.SettingsLoader
This method makes it possible to configure the library via a Groovy DSL!
-
tryGroovyScriptsOn(Neureka, Consumer<String>) - Static method in class neureka.common.utility.SettingsLoader
+
tryGroovyScriptsOn(Neureka, Consumer<String>) - Static method in class neureka.common.utility.SettingsLoader
 
-
TWO - Enum constant in enum class neureka.devices.host.concurrent.Parallelism
-
-
2
-
-
type() - Method in class neureka.autograd.GraphNode
+
type() - Method in class neureka.autograd.GraphNode
 
-
type() - Method in interface neureka.common.composition.Component.OwnerChangeRequest
+
TYPE - Static variable in class neureka.backend.main.implementations.broadcast.CLScalarBroadcast
+
 
+
type() - Method in interface neureka.common.composition.Component.OwnerChangeRequest
-
type() - Method in class neureka.devices.opencl.OpenCLDevice
-
 
-
type() - Method in class neureka.devices.ReferenceCounter.ChangeEvent
+
type() - Method in class neureka.devices.opencl.OpenCLDevice
 
-
TYPE - Static variable in class neureka.backend.main.implementations.broadcast.CLScalarBroadcast
+
type() - Method in class neureka.devices.ReferenceCounter.ChangeEvent
 
-
typeClassImplements(Class<?>) - Method in class neureka.dtype.DataType
+
typeClassImplements(Class<?>) - Method in class neureka.dtype.DataType
 
-

U

-
-
UBUNTU - Enum constant in enum class neureka.devices.opencl.utility.Messages.Tips
+ + + +

U

+
+
UI16 - Class in neureka.dtype.custom
 
-
UI16 - Class in neureka.dtype.custom
+
UI16() - Constructor for class neureka.dtype.custom.UI16
 
-
UI16() - Constructor for class neureka.dtype.custom.UI16
+
UI32 - Class in neureka.dtype.custom
 
-
UI32 - Class in neureka.dtype.custom
+
UI32() - Constructor for class neureka.dtype.custom.UI32
 
-
UI32() - Constructor for class neureka.dtype.custom.UI32
+
UI64 - Class in neureka.dtype.custom
 
-
UI64 - Class in neureka.dtype.custom
+
UI64() - Constructor for class neureka.dtype.custom.UI64
 
-
UI64() - Constructor for class neureka.dtype.custom.UI64
+
UI8 - Class in neureka.dtype.custom
 
-
UI8 - Class in neureka.dtype.custom
+
UI8() - Constructor for class neureka.dtype.custom.UI8
 
-
UI8() - Constructor for class neureka.dtype.custom.UI8
-
 
-
units - Variable in class neureka.devices.host.machine.CommonMachine
+
units - Variable in class neureka.devices.host.machine.CommonMachine
The number of top level (L3 or L2) cache units.
-
UNITS - Enum constant in enum class neureka.devices.host.concurrent.Parallelism
-
-
The number of top level (L2 or L3) cache units
-
-
UNKNOWN - Enum constant in enum class neureka.devices.opencl.OpenCLDevice.Type
-
 
-
UNKNOWN - Enum constant in enum class neureka.devices.opencl.utility.Messages.Tips
+
unpackAndCorrect(String) - Static method in class neureka.math.parsing.ParseUtil
 
-
unpackAndCorrect(String) - Static method in class neureka.math.parsing.ParseUtil
+
UNSUITABLE - Static variable in interface neureka.backend.api.fun.SuitabilityPredicate
 
-
UNSPECIFIC - Enum constant in enum class neureka.ndim.config.NDConfiguration.Layout
-
 
-
UNSUITABLE - Static variable in interface neureka.backend.api.fun.SuitabilityPredicate
-
 
-
upcast(Class<U>) - Method in interface neureka.MutateTensor
+
upcast(Class<U>) - Method in interface neureka.MutateTensor
Use this to do a runtime checked upcast of the type parameter of the tensor.
-
update(Component.OwnerChangeRequest<Extensions>) - Method in class neureka.backend.ocl.CLBackend
+
update(Component.OwnerChangeRequest<Tensor<V>>) - Method in class neureka.autograd.GraphNode
+
 
+
update(Component.OwnerChangeRequest<Extensions>) - Method in class neureka.backend.ocl.CLBackend
Updating the CLContext will cause the list of existing OpenCLPlatform instances to be cleared and refilled with completely new OpenCLPlatform instances.
-
update(Component.OwnerChangeRequest<Args>) - Method in class neureka.math.args.Arg
-
 
-
update(Component.OwnerChangeRequest<Tensor<Number>>) - Method in class neureka.devices.opencl.OpenCLDevice
-
 
-
update(Component.OwnerChangeRequest<Tensor<Object>>) - Method in class neureka.devices.file.FileDevice
-
 
-
update(Component.OwnerChangeRequest<Tensor<Object>>) - Method in class neureka.devices.host.CPU
+
update(Component.OwnerChangeRequest<O>) - Method in interface neureka.common.composition.Component
-
This method is part of the component system built into the Tensor class.
+
Components are not the slaves of their owners.
-
update(Component.OwnerChangeRequest<Tensor<V>>) - Method in class neureka.autograd.GraphNode
-
 
-
update(Component.OwnerChangeRequest<Tensor<V>>) - Method in class neureka.devices.AbstractDevice
+
update(Component.OwnerChangeRequest<Tensor<V>>) - Method in class neureka.devices.AbstractDevice
A Device is a component of a tensor.
-
update(Component.OwnerChangeRequest<Tensor<V>>) - Method in class neureka.framing.NDFrame
+
update(Component.OwnerChangeRequest<Tensor<Object>>) - Method in class neureka.devices.file.FileDevice
+
 
+
update(Component.OwnerChangeRequest<Tensor<Object>>) - Method in class neureka.devices.host.CPU
+
+
This method is part of the component system built into the Tensor class.
+
+
update(Component.OwnerChangeRequest<Tensor<Number>>) - Method in class neureka.devices.opencl.OpenCLDevice
+
 
+
update(Component.OwnerChangeRequest<Tensor<V>>) - Method in class neureka.framing.NDFrame
 
-
update(Component.OwnerChangeRequest<Tensor<V>>) - Method in class neureka.framing.Relation
+
update(Component.OwnerChangeRequest<Tensor<V>>) - Method in class neureka.framing.Relation
 
-
update(Component.OwnerChangeRequest<Tensor<V>>) - Method in interface neureka.Tensor
+
update(Component.OwnerChangeRequest<Args>) - Method in class neureka.math.args.Arg
+
 
+
update(Component.OwnerChangeRequest<Tensor<V>>) - Method in interface neureka.Tensor
Important : Components of type Tensor are simply gradients! Currently, this method is used only to catch illegal arguments which is for example the case when trying to attach a gradient with a different shape...
-
update(Component.OwnerChangeRequest<O>) - Method in interface neureka.common.composition.Component
-
-
Components are not the slaves of their owners.
-
-
UPDATED - Enum constant in enum class neureka.common.composition.Component.IsBeing
-
 
-
usages() - Method in interface neureka.Data
+
usages() - Method in interface neureka.Data
This method returns the number of times this data object is currently in use by an nd-array, meaning that the number of usages is also the number of nd-arrays which are currently referencing this data object.
-
usages() - Method in class neureka.devices.AbstractDeviceData
+
usages() - Method in class neureka.devices.AbstractDeviceData
 
-
usesAD() - Method in class neureka.autograd.GraphNode
+
usesAD() - Method in class neureka.autograd.GraphNode
This gradient node is involved in auto-differentiation.
-
usesForwardAD() - Method in class neureka.autograd.GraphNode
+
usesForwardAD() - Method in class neureka.autograd.GraphNode
This node propagates forward.
-
usesReverseAD() - Method in class neureka.autograd.GraphNode
+
usesReverseAD() - Method in class neureka.autograd.GraphNode
This node propagates backward.
-
Util - Class in neureka.backend.main.algorithms
+
Util - Class in neureka.backend.main.algorithms
 
-
Util() - Constructor for class neureka.backend.main.algorithms.Util
+
Util() - Constructor for class neureka.backend.main.algorithms.Util
 
-
Util() - Constructor for class neureka.view.NdaAsString.Util
+
Util() - Constructor for class neureka.view.NdaAsString.Util
 
-
utility() - Method in class neureka.Neureka
+
Utility() - Constructor for class neureka.common.utility.DataConverter.Utility
 
-
Utility() - Constructor for class neureka.common.utility.DataConverter.Utility
+
Utility() - Constructor for class neureka.ndim.config.NDConfiguration.Utility
 
-
Utility() - Constructor for class neureka.ndim.config.NDConfiguration.Utility
+
utility() - Method in class neureka.Neureka
 
-
Utility() - Constructor for class neureka.Neureka.Utility
+
Utility() - Constructor for class neureka.Neureka.Utility
 
-

V

-
-
validate() - Method in class neureka.backend.api.Call
+
+
+
+

V

+
+
validate() - Method in class neureka.backend.api.Call
 
-
Validator() - Constructor for class neureka.backend.api.Call.Validator
+
Validator() - Constructor for class neureka.backend.api.Call.Validator
 
-
valOf(Class<T>) - Method in class neureka.math.args.Args
+
valOf(Class<T>) - Method in class neureka.math.args.Args
 
-
valOfOr(Class<T>, V) - Method in class neureka.math.args.Args
+
valOfOr(Class<T>, V) - Method in class neureka.math.args.Args
 
-
value() - Method in class neureka.math.implementations.FunctionConstant
+
value() - Method in class neureka.math.implementations.FunctionConstant
 
-
valueOf(String) - Static method in enum class neureka.autograd.GraphNode.Print
+
valueOf(String) - Static method in enum neureka.autograd.GraphNode.Print
-
Returns the enum constant of this class with the specified name.
+
Returns the enum constant of this type with the specified name.
-
valueOf(String) - Static method in enum class neureka.backend.api.AutoDiffMode
+
valueOf(String) - Static method in enum neureka.backend.api.AutoDiffMode
-
Returns the enum constant of this class with the specified name.
+
Returns the enum constant of this type with the specified name.
-
valueOf(String) - Static method in enum class neureka.backend.main.operations.linear.internal.opencl.CLReduce.Type
+
valueOf(String) - Static method in enum neureka.backend.main.operations.linear.internal.opencl.CLReduce.Type
-
Returns the enum constant of this class with the specified name.
+
Returns the enum constant of this type with the specified name.
-
valueOf(String) - Static method in enum class neureka.backend.main.operations.other.internal.CPUReduce.Type
+
valueOf(String) - Static method in enum neureka.backend.main.operations.other.internal.CPUReduce.Type
-
Returns the enum constant of this class with the specified name.
+
Returns the enum constant of this type with the specified name.
-
valueOf(String) - Static method in enum class neureka.common.composition.Component.IsBeing
+
valueOf(String) - Static method in enum neureka.common.composition.Component.IsBeing
-
Returns the enum constant of this class with the specified name.
+
Returns the enum constant of this type with the specified name.
-
valueOf(String) - Static method in enum class neureka.devices.host.concurrent.Parallelism
+
valueOf(String) - Static method in enum neureka.devices.host.concurrent.Parallelism
-
Returns the enum constant of this class with the specified name.
+
Returns the enum constant of this type with the specified name.
-
valueOf(String) - Static method in enum class neureka.devices.opencl.OpenCLDevice.Type
+
valueOf(String) - Static method in enum neureka.devices.opencl.OpenCLDevice.Type
-
Returns the enum constant of this class with the specified name.
+
Returns the enum constant of this type with the specified name.
-
valueOf(String) - Static method in enum class neureka.devices.opencl.utility.Messages.Tips
+
valueOf(String) - Static method in enum neureka.devices.opencl.utility.Messages.Tips
-
Returns the enum constant of this class with the specified name.
+
Returns the enum constant of this type with the specified name.
-
valueOf(String) - Static method in enum class neureka.devices.ReferenceCounter.ChangeType
+
valueOf(String) - Static method in enum neureka.devices.ReferenceCounter.ChangeType
-
Returns the enum constant of this class with the specified name.
+
Returns the enum constant of this type with the specified name.
-
valueOf(String) - Static method in enum class neureka.ndim.config.NDConfiguration.Layout
+
valueOf(String) - Static method in enum neureka.ndim.config.NDConfiguration.Layout
-
Returns the enum constant of this class with the specified name.
+
Returns the enum constant of this type with the specified name.
-
valueOf(String) - Static method in enum class neureka.ndim.config.NDTrait
+
valueOf(String) - Static method in enum neureka.ndim.config.NDTrait
-
Returns the enum constant of this class with the specified name.
+
Returns the enum constant of this type with the specified name.
-
valueOf(String) - Static method in enum class neureka.ndim.iterator.NDIterator.NonVirtual
+
valueOf(String) - Static method in enum neureka.ndim.iterator.NDIterator.NonVirtual
-
Returns the enum constant of this class with the specified name.
+
Returns the enum constant of this type with the specified name.
-
valueOf(String) - Static method in enum class neureka.Tensor.ImageType
+
valueOf(String) - Static method in enum neureka.Tensor.ImageType
-
Returns the enum constant of this class with the specified name.
+
Returns the enum constant of this type with the specified name.
-
values() - Static method in enum class neureka.autograd.GraphNode.Print
+
values() - Static method in enum neureka.autograd.GraphNode.Print
-
Returns an array containing the constants of this enum class, in the order they are declared.
+
Returns an array containing the constants of this enum type, in the order they are declared.
-
values() - Static method in enum class neureka.backend.api.AutoDiffMode
+
values() - Static method in enum neureka.backend.api.AutoDiffMode
-
Returns an array containing the constants of this enum class, in the order they are declared.
+
Returns an array containing the constants of this enum type, in the order they are declared.
-
values() - Static method in enum class neureka.backend.main.operations.linear.internal.opencl.CLReduce.Type
+
values() - Static method in enum neureka.backend.main.operations.linear.internal.opencl.CLReduce.Type
-
Returns an array containing the constants of this enum class, in the order they are declared.
+
Returns an array containing the constants of this enum type, in the order they are declared.
-
values() - Static method in enum class neureka.backend.main.operations.other.internal.CPUReduce.Type
+
values() - Static method in enum neureka.backend.main.operations.other.internal.CPUReduce.Type
-
Returns an array containing the constants of this enum class, in the order they are declared.
+
Returns an array containing the constants of this enum type, in the order they are declared.
-
values() - Static method in enum class neureka.common.composition.Component.IsBeing
+
values() - Static method in enum neureka.common.composition.Component.IsBeing
-
Returns an array containing the constants of this enum class, in the order they are declared.
+
Returns an array containing the constants of this enum type, in the order they are declared.
-
values() - Static method in enum class neureka.devices.host.concurrent.Parallelism
+
values() - Static method in enum neureka.devices.host.concurrent.Parallelism
-
Returns an array containing the constants of this enum class, in the order they are declared.
+
Returns an array containing the constants of this enum type, in the order they are declared.
-
values() - Static method in enum class neureka.devices.opencl.OpenCLDevice.Type
+
values() - Static method in enum neureka.devices.opencl.OpenCLDevice.Type
-
Returns an array containing the constants of this enum class, in the order they are declared.
+
Returns an array containing the constants of this enum type, in the order they are declared.
-
values() - Static method in enum class neureka.devices.opencl.utility.Messages.Tips
+
values() - Static method in enum neureka.devices.opencl.utility.Messages.Tips
-
Returns an array containing the constants of this enum class, in the order they are declared.
+
Returns an array containing the constants of this enum type, in the order they are declared.
-
values() - Static method in enum class neureka.devices.ReferenceCounter.ChangeType
+
values() - Static method in enum neureka.devices.ReferenceCounter.ChangeType
-
Returns an array containing the constants of this enum class, in the order they are declared.
+
Returns an array containing the constants of this enum type, in the order they are declared.
-
values() - Static method in enum class neureka.ndim.config.NDConfiguration.Layout
+
values() - Static method in enum neureka.ndim.config.NDConfiguration.Layout
-
Returns an array containing the constants of this enum class, in the order they are declared.
+
Returns an array containing the constants of this enum type, in the order they are declared.
-
values() - Static method in enum class neureka.ndim.config.NDTrait
+
values() - Static method in enum neureka.ndim.config.NDTrait
-
Returns an array containing the constants of this enum class, in the order they are declared.
+
Returns an array containing the constants of this enum type, in the order they are declared.
-
values() - Static method in enum class neureka.ndim.iterator.NDIterator.NonVirtual
+
values() - Static method in enum neureka.ndim.iterator.NDIterator.NonVirtual
-
Returns an array containing the constants of this enum class, in the order they are declared.
+
Returns an array containing the constants of this enum type, in the order they are declared.
-
values() - Static method in enum class neureka.Tensor.ImageType
+
values() - Static method in enum neureka.Tensor.ImageType
-
Returns an array containing the constants of this enum class, in the order they are declared.
+
Returns an array containing the constants of this enum type, in the order they are declared.
-
vector(Iterable<V>) - Method in interface neureka.fluent.building.states.WithShapeOrScalarOrVector
+
vector(Object[]) - Method in class neureka.fluent.building.NdaBuilder
+
 
+
vector(V...) - Method in interface neureka.fluent.building.states.WithShapeOrScalarOrVector
This method creates and returns a vector Tensor instance which wraps the provided values.
-
vector(Iterable<V>) - Method in interface neureka.fluent.building.states.WithShapeOrScalarOrVectorTensor
+
vector(List<V>) - Method in interface neureka.fluent.building.states.WithShapeOrScalarOrVector
This method creates and returns a vector Tensor instance which wraps the provided values.
-
vector(Object[]) - Method in class neureka.fluent.building.NdaBuilder
-
 
-
vector(List<V>) - Method in interface neureka.fluent.building.states.WithShapeOrScalarOrVector
+
vector(Iterable<V>) - Method in interface neureka.fluent.building.states.WithShapeOrScalarOrVector
This method creates and returns a vector Tensor instance which wraps the provided values.
-
vector(List<V>) - Method in interface neureka.fluent.building.states.WithShapeOrScalarOrVectorTensor
+
vector(V...) - Method in interface neureka.fluent.building.states.WithShapeOrScalarOrVectorTensor
This method creates and returns a vector Tensor instance which wraps the provided values.
-
vector(V...) - Method in interface neureka.fluent.building.states.WithShapeOrScalarOrVector
+
vector(List<V>) - Method in interface neureka.fluent.building.states.WithShapeOrScalarOrVectorTensor
This method creates and returns a vector Tensor instance which wraps the provided values.
-
vector(V...) - Method in interface neureka.fluent.building.states.WithShapeOrScalarOrVectorTensor
+
vector(Iterable<V>) - Method in interface neureka.fluent.building.states.WithShapeOrScalarOrVectorTensor
This method creates and returns a vector Tensor instance which wraps the provided values.
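As a rough orientation for the vector(...) entries above, a vector is typically created through the fluent builder roughly as follows. This is a minimal sketch only, assuming Tensor.of(Class) as the builder entry point; the printed shape format may differ between versions.

    import neureka.Tensor;

    public class VectorSketch {
        public static void main(String[] args) {
            // Wrap three values in a 1D tensor (a vector) via the fluent builder.
            Tensor<Double> v = Tensor.of(Double.class).vector(1.0, 2.0, 3.0);
            System.out.println(v.shape()); // expected: [3]
        }
    }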
-
vendor() - Method in class neureka.devices.opencl.OpenCLDevice
+
vendor() - Method in class neureka.devices.opencl.OpenCLDevice
 
-
version() - Method in class neureka.devices.opencl.OpenCLDevice
+
version() - Method in class neureka.devices.opencl.OpenCLDevice
 
-
version() - Static method in class neureka.Neureka
+
version() - Static method in class neureka.Neureka
 
-
VERY_GOOD - Static variable in interface neureka.backend.api.fun.SuitabilityPredicate
+
VERY_GOOD - Static variable in interface neureka.backend.api.fun.SuitabilityPredicate
 
-
view() - Method in class neureka.Neureka.Settings
+
view() - Method in class neureka.Neureka.Settings
 
-
view(Object) - Method in class neureka.Neureka.Settings
+
view(Object) - Method in class neureka.Neureka.Settings
 
-
virtualize() - Method in interface neureka.devices.Device.Access
+
virtualize() - Method in interface neureka.devices.Device.Access
 
-
virtualize() - Method in class neureka.devices.host.machine.Hardware
+
virtualize() - Method in class neureka.devices.host.machine.Hardware
 
-
VirtualNDConfiguration - Class in neureka.ndim.config.types.views.virtual
+
VirtualNDConfiguration - Class in neureka.ndim.config.types.views.virtual
VirtualNDConfigurations represent tensors which are filled homogeneously with a single value exclusively, like for example a tensor filled with only zeros.
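To illustrate the kind of tensor such a virtual configuration backs, a homogeneously filled tensor can be built roughly as follows. This is a sketch only; the all(...) builder step used here is an assumption about the fluent API.

    import neureka.Tensor;

    public class HomogeneousSketch {
        public static void main(String[] args) {
            // Every item of this 2x3 tensor is the same value, which is exactly the
            // homogeneous case a virtual ND-configuration is meant to represent.
            Tensor<Double> zeros = Tensor.of(Double.class).withShape(2, 3).all(0.0);
            System.out.println(zeros);
        }
    }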
-
VirtualNDIterator - Class in neureka.ndim.iterator.types.virtual
+
VirtualNDIterator - Class in neureka.ndim.iterator.types.virtual
 
-
VirtualNDIterator(VirtualNDConfiguration) - Constructor for class neureka.ndim.iterator.types.virtual.VirtualNDIterator
+
VirtualNDIterator(VirtualNDConfiguration) - Constructor for class neureka.ndim.iterator.types.virtual.VirtualNDIterator
 
-

W

-
-
WINDOWS - Enum constant in enum class neureka.devices.opencl.utility.Messages.Tips
+
+
+
+

W

+
+
with(Tensor<N>...) - Method in class neureka.backend.api.Call.Builder
+
 
+
with(F) - Method in interface neureka.backend.main.algorithms.internal.WithForward
 
-
with(F) - Method in interface neureka.backend.main.algorithms.internal.WithForward
+
With<ValueType,TargetType> - Interface in neureka.framing.fluent
 
-
with(String) - Method in class neureka.view.NDPrintSettings
+
with(ValueType) - Method in interface neureka.framing.fluent.With
 
-
with(Arg<?>...) - Method in interface neureka.math.Function
+
with(Arg<?>...) - Method in interface neureka.math.Function
Use this to call this Function along with some additional meta-arguments which will be passed to the underlying Operation(s).
-
with(Args) - Method in interface neureka.math.Function
+
with(Args) - Method in interface neureka.math.Function
Use this to call this Function along with some additional meta-arguments which will be passed to the underlying Operation(s).
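A minimal sketch of calling a Function together with meta-arguments; the Arg.DerivIdx argument type and the chained with(...).call(...) usage are assumptions based on these index entries rather than verified API details.

    import neureka.Tensor;
    import neureka.math.Function;
    import neureka.math.args.Arg;

    public class FunctionArgsSketch {
        public static void main(String[] args) {
            Tensor<Double> a = Tensor.of(Double.class).vector(2.0, 4.0);
            Tensor<Double> b = Tensor.of(Double.class).vector(3.0, 5.0);
            Function mul = Function.of("i0 * i1");
            // Call the function together with a meta-argument, here a derivative index
            // asking for the derivative with respect to the first input.
            Tensor<?> derivative = mul.with(Arg.DerivIdx.of(0)).call(a, b);
            System.out.println(derivative);
        }
    }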
-
with(Tensor<N>...) - Method in class neureka.backend.api.Call.Builder
-
 
-
with(NDPrintSettings) - Method in class neureka.view.NDPrintSettings
-
 
-
with(ValueType) - Method in interface neureka.framing.fluent.With
+
with(NDPrintSettings) - Method in class neureka.view.NDPrintSettings
 
-
With<ValueType,TargetType> - Interface in neureka.framing.fluent
+
with(String) - Method in class neureka.view.NDPrintSettings
 
-
withADAction(ADAction) - Method in class neureka.backend.api.Result
+
withADAction(ADAction) - Method in class neureka.backend.api.Result
 
-
withAddedInputAt(int, Tensor<?>) - Method in class neureka.backend.api.ExecutionCall
+
withAddedInputAt(int, Tensor<?>) - Method in class neureka.backend.api.ExecutionCall
 
-
withArgs(Arg<?>...) - Method in class neureka.backend.api.ExecutionCall
+
withArgs(Arg<?>...) - Method in class neureka.backend.api.ExecutionCall
Use this to produce a clone with a new set of meta arguments.
-
withArity(int) - Static method in class neureka.backend.main.implementations.CPUImplementation
+
withArity(int) - Static method in class neureka.backend.main.implementations.CPUImplementation
 
-
withAutoDiff(ADActionSupplier) - Method in class neureka.backend.api.Result
+
withAutoDiff(ADActionSupplier) - Method in class neureka.backend.api.Result
 
-
withAxesLabels(List<List<Object>>) - Method in class neureka.framing.NDFrame
+
withAxesLabels(List<List<Object>>) - Method in class neureka.framing.NDFrame
 
-
withConfig(String) - Method in interface neureka.view.NdaAsString.Builder
+
withConfig(NDPrintSettings) - Method in interface neureka.view.NdaAsString.Builder
 
-
withConfig(NDPrintSettings) - Method in interface neureka.view.NdaAsString.Builder
+
withConfig(String) - Method in interface neureka.view.NdaAsString.Builder
 
-
withDecayRate(double) - Method in class neureka.optimization.implementations.MomentumFactory
+
withDecayRate(double) - Method in class neureka.optimization.implementations.MomentumFactory
 
-
withDecayRate(double) - Method in class neureka.optimization.implementations.RMSPropFactory
+
withDecayRate(double) - Method in class neureka.optimization.implementations.RMSPropFactory
 
-
WithForward<F> - Interface in neureka.backend.main.algorithms.internal
+
WithForward<F> - Interface in neureka.backend.main.algorithms.internal
 
-
withInputAt(int, Tensor<?>) - Method in class neureka.backend.api.ExecutionCall
+
withInputAt(int, Tensor<?>) - Method in class neureka.backend.api.ExecutionCall
 
-
withInputs(Tensor<?>...) - Method in class neureka.backend.api.ExecutionCall
+
withInputs(Tensor<?>...) - Method in class neureka.backend.api.ExecutionCall
Use this to produce a clone with a new array of input tensors.
-
withLabel(String) - Method in class neureka.framing.NDFrame
+
withLabel(String) - Method in class neureka.framing.NDFrame
 
-
withLabel(String) - Method in interface neureka.Nda
+
withLabel(String) - Method in interface neureka.Nda
 
-
withLabel(String) - Method in interface neureka.Tensor
-
withLabels(String[]...) - Method in interface neureka.Nda
+
withLabel(String) - Method in interface neureka.Tensor
+
withLabels(String[]...) - Method in interface neureka.Nda
-
This method receives a nested String array which ought to contain a label for the index of this nd-array.
+
This method receives a nested String array which ought to contain a label for the index of this nd-array.
-
withLabels(String[]...) - Method in interface neureka.Tensor
+
withLabels(List<List<Object>>) - Method in interface neureka.Nda
-
This method receives a nested String array which ought to contain a label for the index of this nd-array.
+
This method receives a nested String list which ought to contain a label for the index of this nd-array.
-
withLabels(List<List<Object>>) - Method in interface neureka.Nda
+
withLabels(Map<Object, List<Object>>) - Method in interface neureka.Nda
-
This method receives a nested String list which ought to contain a label for the index of this nd-array.
+
This method provides the ability to label not only the indices of the shape of this nd-array, but also the dimension of the shape.
-
withLabels(List<List<Object>>) - Method in interface neureka.Tensor
+
withLabels(String[]...) - Method in interface neureka.Tensor
-
This method receives a nested String list which +
This method receives a nested String array which ought to contain a label for the index of this nd-array.
-
withLabels(Map<Object, List<Object>>) - Method in interface neureka.Nda
+
withLabels(List<List<Object>>) - Method in interface neureka.Tensor
-
This method provides the ability to label not only the indices of the shape of this nd-array, but also the dimension of the shape.
+
This method receives a nested String list which ought to contain a label for the index of this nd-array.
-
withLabels(Map<Object, List<Object>>) - Method in interface neureka.Tensor
+
withLabels(Map<Object, List<Object>>) - Method in interface neureka.Tensor
This method provides the ability to label not only the indices of the shape of this nd-array, but also the dimension of the shape.
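A small sketch of the axis-and-index labeling described above; the withShape(...).andFill(...) builder steps and the return type of withLabels(...) are assumptions.

    import neureka.Tensor;
    import java.util.List;
    import java.util.Map;

    public class LabelingSketch {
        public static void main(String[] args) {
            Tensor<Double> t = Tensor.of(Double.class).withShape(2, 3).andFill(1.0, 2.0, 3.0);
            // Keys name the dimensions of the shape, the lists label the indices along each of them.
            Map<Object, List<Object>> labels = Map.of(
                    "rows", List.of("r1", "r2"),
                    "cols", List.of("a", "b", "c")
            );
            Tensor<Double> labeled = t.withLabels(labels);
            System.out.println(labeled);
        }
    }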
-
withLearningRate(double) - Method in class neureka.optimization.implementations.AdaGradFactory
+
withLearningRate(double) - Method in class neureka.optimization.implementations.AdaGradFactory
 
-
withLearningRate(double) - Method in class neureka.optimization.implementations.ADAMFactory
+
withLearningRate(double) - Method in class neureka.optimization.implementations.ADAMFactory
 
-
withLearningRate(double) - Method in class neureka.optimization.implementations.MomentumFactory
+
withLearningRate(double) - Method in class neureka.optimization.implementations.MomentumFactory
 
-
withLearningRate(double) - Method in class neureka.optimization.implementations.RMSPropFactory
+
withLearningRate(double) - Method in class neureka.optimization.implementations.RMSPropFactory
 
-
withLearningRate(double) - Method in class neureka.optimization.implementations.SGDFactory
+
withLearningRate(double) - Method in class neureka.optimization.implementations.SGDFactory
 
-
withName(String) - Static method in interface neureka.backend.api.Algorithm
+
withName(String) - Static method in interface neureka.backend.api.Algorithm
This is a factory method for creating a new instance of this FunAlgorithm class.
-
withName(String) - Static method in interface neureka.backend.api.DeviceAlgorithm
+
withName(String) - Static method in interface neureka.backend.api.DeviceAlgorithm
This is a factory method for creating a new instance of this FunDeviceAlgorithm class.
-
withOperation(Operation) - Method in class neureka.backend.api.ExecutionCall
+
withOperation(Operation) - Method in class neureka.backend.api.ExecutionCall
 
-
withRemovedInputAt(int) - Method in class neureka.backend.api.ExecutionCall
+
withRemovedInputAt(int) - Method in class neureka.backend.api.ExecutionCall
 
-
withShape(int...) - Method in class neureka.fluent.building.NdaBuilder
+
withShape(int...) - Method in class neureka.fluent.building.NdaBuilder
 
-
withShape(int...) - Method in interface neureka.fluent.building.states.WithShapeOrScalarOrVector
+
withShape(int...) - Method in interface neureka.fluent.building.states.WithShapeOrScalarOrVector
Define a tensor shape by passing an array of int values to this method, which represent the shape of the Tensor that should be built.
-
withShape(int...) - Method in interface neureka.fluent.building.states.WithShapeOrScalarOrVectorTensor
+
withShape(List<N>) - Method in interface neureka.fluent.building.states.WithShapeOrScalarOrVector
-
Define a tensor shape by passing an array of int values to this method, which represent the shape of the Tensor that should be built.
+
Define a tensor shape by passing a list of numbers to this method, which represent the shape of the Tensor that should be built.
-
withShape(List<N>) - Method in interface neureka.fluent.building.states.WithShapeOrScalarOrVector
+
withShape(int...) - Method in interface neureka.fluent.building.states.WithShapeOrScalarOrVectorTensor
-
Define a tensor shape by passing a list of numbers to this method, which represent the shape of the Tensor that should be built.
+
Define a tensor shape by passing an array of int values to this method, which represent the shape of the Tensor that should be built.
-
withShape(List<N>) - Method in interface neureka.fluent.building.states.WithShapeOrScalarOrVectorTensor
+
withShape(List<N>) - Method in interface neureka.fluent.building.states.WithShapeOrScalarOrVectorTensor
Define a tensor shape by passing a list of numbers to this method, which represent the shape of the Tensor that should be built.
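To make the two withShape(...) flavors above concrete, a hedged sketch follows; the terminal andFill(...) step is an assumption about the builder chain.

    import neureka.Tensor;
    import java.util.List;

    public class ShapeSketch {
        public static void main(String[] args) {
            // Shape given as int varargs...
            Tensor<Integer> a = Tensor.of(Integer.class).withShape(2, 3).andFill(0);
            // ...or as a list of numbers.
            Tensor<Integer> b = Tensor.of(Integer.class).withShape(List.of(2, 3)).andFill(0);
            System.out.println(a.shape() + " and " + b.shape()); // expected: [2, 3] and [2, 3]
        }
    }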
-
WithShapeOrScalarOrVector<V> - Interface in neureka.fluent.building.states
+
WithShapeOrScalarOrVector<V> - Interface in neureka.fluent.building.states
 
-
WithShapeOrScalarOrVectorOnDevice<V> - Interface in neureka.fluent.building.states
+
WithShapeOrScalarOrVectorOnDevice<V> - Interface in neureka.fluent.building.states
 
-
WithShapeOrScalarOrVectorTensor<V> - Interface in neureka.fluent.building.states
+
WithShapeOrScalarOrVectorTensor<V> - Interface in neureka.fluent.building.states
 
-
withTime(long) - Method in class neureka.optimization.implementations.ADAMFactory
+
withTime(long) - Method in class neureka.optimization.implementations.ADAMFactory
 
-
WorkScheduler - Class in neureka.devices.host.concurrent
+
WorkScheduler - Class in neureka.devices.host.concurrent
An API for registering workloads which will be divided into smaller workloads so that they can be executed efficiently by a thread pool...
-
WorkScheduler() - Constructor for class neureka.devices.host.concurrent.WorkScheduler
+
WorkScheduler() - Constructor for class neureka.devices.host.concurrent.WorkScheduler
 
-
WorkScheduler.Divider - Class in neureka.devices.host.concurrent
+
WorkScheduler.Divider - Class in neureka.devices.host.concurrent
Divides workloads until they can be processed efficiently and then submits them to a thread pool for execution...
-
write(V) - Method in interface neureka.devices.Device.Access
+
write(V) - Method in interface neureka.devices.Device.Access
Use this to write a single scalar item into the accessed tensor at one or more positions within the tensor.
-
writeDataTo(DataOutput, Iterator<Byte>) - Method in class neureka.dtype.custom.I8
-
 
-
writeDataTo(DataOutput, Iterator<TargetType>) - Method in interface neureka.dtype.NumericType
+
writeDataTo(DataOutput, Iterator<TargetType>) - Method in interface neureka.dtype.NumericType
This method takes all the target type elements returned by the provided iterator and writes them into the provided "DataOutput" stream as bytes.
-
writeFrom(Object) - Method in interface neureka.devices.Device.Access
+
writeFrom(Object, int) - Method in interface neureka.devices.Device.Access
-
Use this method to write data to the provided tensor, given that the tensor is already stored on this device!

+
Use this to write data from an array into the accessed tensor.
-
writeFrom(Object, int) - Method in interface neureka.devices.Device.Access
+
writeFrom(Object) - Method in interface neureka.devices.Device.Access
-
Use this to write data from an array into the accessed tensor.
+
Use this method to write data to the provided tensor, given that the tensor is already stored on this device!

-

X

-
-
XConvLeft - Class in neureka.backend.main.operations.linear
+
+
+
+

X

+
+
XConvLeft - Class in neureka.backend.main.operations.linear
 
-
XConvLeft() - Constructor for class neureka.backend.main.operations.linear.XConvLeft
+
XConvLeft() - Constructor for class neureka.backend.main.operations.linear.XConvLeft
 
-
XConvRight - Class in neureka.backend.main.operations.linear
+
XConvRight - Class in neureka.backend.main.operations.linear
 
-
XConvRight() - Constructor for class neureka.backend.main.operations.linear.XConvRight
+
XConvRight() - Constructor for class neureka.backend.main.operations.linear.XConvRight
 
-
xor(double) - Method in interface neureka.Tensor
+
xor(Tensor<V>) - Method in interface neureka.Tensor
-
This method is a functionally identical synonym to the Tensor.power(Tensor) method.
+
This method is a functionally identical synonym to the Tensor.power(Tensor) method.
-
xor(Tensor<V>) - Method in interface neureka.Tensor
+
xor(double) - Method in interface neureka.Tensor
-
This method is a functionally identical synonym to the Tensor.power(Tensor) method.
+
This method is a functionally identical synonym to the Tensor.power(Tensor) method.
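Since xor(...) is documented as a synonym of power(...), the following sketch illustrates that equivalence; the builder calls are assumptions, as in the other sketches.

    import neureka.Tensor;

    public class XorPowerSketch {
        public static void main(String[] args) {
            Tensor<Double> base     = Tensor.of(Double.class).vector(2.0, 3.0);
            Tensor<Double> exponent = Tensor.of(Double.class).vector(2.0, 2.0);
            // 'xor' is documented as a functionally identical synonym of 'power',
            // so both results should hold the items [4.0, 9.0].
            Tensor<Double> viaPower = base.power(exponent);
            Tensor<Double> viaXor   = base.xor(exponent);
            System.out.println(viaPower + " vs " + viaXor);
        }
    }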
-

_

-
-
_actualize(Tensor<?>) - Method in class neureka.devices.AbstractDevice
+
+
+
+

_

+
+
_actualize(Tensor<?>) - Method in class neureka.devices.AbstractDevice
 
-
_actualize(Tensor<?>) - Method in class neureka.devices.host.CPU
+
_actualize(Tensor<?>) - Method in class neureka.devices.host.CPU
 
-
_actualize(Tensor<?>) - Method in class neureka.devices.opencl.OpenCLDevice
+
_actualize(Tensor<?>) - Method in class neureka.devices.opencl.OpenCLDevice
 
-
_approveExecutionOf(Tensor<?>[], int, Operation) - Method in class neureka.devices.AbstractDevice
+
_approveExecutionOf(Tensor<?>[], int, Operation) - Method in class neureka.devices.AbstractDevice
This method is the internal approval routine called by its public counterpart and implemented by classes extending this very abstract class.
-
_approveExecutionOf(Tensor<?>[], int, Operation) - Method in class neureka.devices.host.CPU
+
_approveExecutionOf(Tensor<?>[], int, Operation) - Method in class neureka.devices.host.CPU
 
-
_approveExecutionOf(Tensor<?>[], int, Operation) - Method in class neureka.devices.opencl.OpenCLDevice
+
_approveExecutionOf(Tensor<?>[], int, Operation) - Method in class neureka.devices.opencl.OpenCLDevice
 
-
_arguments - Variable in class neureka.backend.api.Call
+
_arguments - Variable in class neureka.backend.api.Call
Meta arguments which are usually specific to certain operations.
-
_arity - Variable in class neureka.backend.api.template.operations.AbstractOperation
+
_arity - Variable in class neureka.backend.api.template.operations.AbstractOperation
Arity is the number of arguments or operands that this function or operation takes.
-
_cacheArray(int[]) - Static method in class neureka.ndim.config.AbstractNDC
+
_cacheArray(int[]) - Static method in class neureka.ndim.config.AbstractNDC
This method receives an int array and returns an int array which can either be the one provided or an array found in the global int array cache residing inside this class.
-
_cached(T) - Static method in class neureka.ndim.config.AbstractNDC
+
_cached(T) - Static method in class neureka.ndim.config.AbstractNDC
 
-
_cleaning(Object, Runnable) - Method in class neureka.devices.AbstractDevice
+
_cleaning(Object, Runnable) - Method in class neureka.devices.AbstractDevice
 
-
_dataRef - Variable in class neureka.devices.AbstractDeviceData
+
_dataRef - Variable in class neureka.devices.AbstractDeviceData
 
-
_dataType - Variable in class neureka.devices.AbstractDeviceData
+
_dataType - Variable in class neureka.devices.AbstractDeviceData
 
-
_dataTypeOf(Object) - Method in class neureka.devices.AbstractDevice
+
_dataTypeOf(Object) - Method in class neureka.devices.AbstractDevice
 
-
_dataTypeOf(Object) - Method in class neureka.devices.host.CPU
+
_dataTypeOf(Object) - Method in class neureka.devices.host.CPU
 
-
_dataTypeOf(Object) - Method in class neureka.devices.opencl.OpenCLDevice
+
_dataTypeOf(Object) - Method in class neureka.devices.opencl.OpenCLDevice
 
-
_deleteComponents() - Method in class neureka.common.composition.AbstractComponentOwner
+
_deleteComponents() - Method in class neureka.common.composition.AbstractComponentOwner
This method deletes the array of components of this component owner by nulling the array variable field.
-
_device - Variable in class neureka.backend.api.Call
+
_device - Variable in class neureka.backend.api.Call
This field references the device on which this ExecutionCall should be executed.
-
_fileName - Variable in class neureka.devices.file.IDXHandle
-
 
-
_function - Variable in class neureka.backend.api.template.operations.AbstractOperation
+
_function - Variable in class neureka.backend.api.template.operations.AbstractOperation
An operation may have two ways in which it can describe itself as String within a Function AST.
-
_getDeriveAt0() - Method in class neureka.backend.main.implementations.broadcast.CPUBroadcast
+
_getDeriveAt0() - Method in class neureka.backend.main.implementations.broadcast.CPUBroadcast
 
-
_getDeriveAt0() - Method in class neureka.backend.main.implementations.broadcast.CPUBroadcastAddition
+
_getDeriveAt0() - Method in class neureka.backend.main.implementations.broadcast.CPUBroadcastAddition
 
-
_getDeriveAt0() - Method in class neureka.backend.main.implementations.broadcast.CPUBroadcastDivision
+
_getDeriveAt0() - Method in class neureka.backend.main.implementations.broadcast.CPUBroadcastDivision
 
-
_getDeriveAt0() - Method in class neureka.backend.main.implementations.broadcast.CPUBroadcastModulo
+
_getDeriveAt0() - Method in class neureka.backend.main.implementations.broadcast.CPUBroadcastModulo
 
-
_getDeriveAt0() - Method in class neureka.backend.main.implementations.broadcast.CPUBroadcastMultiplication
+
_getDeriveAt0() - Method in class neureka.backend.main.implementations.broadcast.CPUBroadcastMultiplication
 
-
_getDeriveAt0() - Method in class neureka.backend.main.implementations.broadcast.CPUBroadcastPower
+
_getDeriveAt0() - Method in class neureka.backend.main.implementations.broadcast.CPUBroadcastPower
 
-
_getDeriveAt0() - Method in class neureka.backend.main.implementations.broadcast.CPUBroadcastSubtraction
+
_getDeriveAt0() - Method in class neureka.backend.main.implementations.broadcast.CPUBroadcastSubtraction
 
-
_getDeriveAt0() - Method in class neureka.backend.main.implementations.broadcast.CPUBroadcastSummation
+
_getDeriveAt0() - Method in class neureka.backend.main.implementations.broadcast.CPUBroadcastSummation
 
-
_getDeriveAt0() - Method in class neureka.backend.main.implementations.broadcast.CPUScalaBroadcastPower
+
_getDeriveAt0() - Method in class neureka.backend.main.implementations.broadcast.CPUScalaBroadcastPower
 
-
_getDeriveAt0() - Method in class neureka.backend.main.implementations.broadcast.CPUScalarBroadcast
+
_getDeriveAt0() - Method in class neureka.backend.main.implementations.broadcast.CPUScalarBroadcast
 
-
_getDeriveAt0() - Method in class neureka.backend.main.implementations.broadcast.CPUScalarBroadcastAddition
+
_getDeriveAt0() - Method in class neureka.backend.main.implementations.broadcast.CPUScalarBroadcastAddition
 
-
_getDeriveAt0() - Method in class neureka.backend.main.implementations.broadcast.CPUScalarBroadcastDivision
+
_getDeriveAt0() - Method in class neureka.backend.main.implementations.broadcast.CPUScalarBroadcastDivision
 
-
_getDeriveAt0() - Method in class neureka.backend.main.implementations.broadcast.CPUScalarBroadcastIdentity
+
_getDeriveAt0() - Method in class neureka.backend.main.implementations.broadcast.CPUScalarBroadcastIdentity
 
-
_getDeriveAt0() - Method in class neureka.backend.main.implementations.broadcast.CPUScalarBroadcastModulo
+
_getDeriveAt0() - Method in class neureka.backend.main.implementations.broadcast.CPUScalarBroadcastModulo
 
-
_getDeriveAt0() - Method in class neureka.backend.main.implementations.broadcast.CPUScalarBroadcastMultiplication
+
_getDeriveAt0() - Method in class neureka.backend.main.implementations.broadcast.CPUScalarBroadcastMultiplication
 
-
_getDeriveAt0() - Method in class neureka.backend.main.implementations.broadcast.CPUScalarBroadcastSubtraction
+
_getDeriveAt0() - Method in class neureka.backend.main.implementations.broadcast.CPUScalarBroadcastSubtraction
 
-
_getDeriveAt0() - Method in class neureka.backend.main.implementations.elementwise.CPUBiElementWise
+
_getDeriveAt0() - Method in class neureka.backend.main.implementations.elementwise.CPUBiElementWise
 
-
_getDeriveAt0() - Method in class neureka.backend.main.implementations.elementwise.CPUBiElementWiseAddition
+
_getDeriveAt0() - Method in class neureka.backend.main.implementations.elementwise.CPUBiElementWiseAddition
 
-
_getDeriveAt0() - Method in class neureka.backend.main.implementations.elementwise.CPUBiElementWiseDivision
+
_getDeriveAt0() - Method in class neureka.backend.main.implementations.elementwise.CPUBiElementWiseDivision
 
-
_getDeriveAt0() - Method in class neureka.backend.main.implementations.elementwise.CPUBiElementWiseModulo
+
_getDeriveAt0() - Method in class neureka.backend.main.implementations.elementwise.CPUBiElementWiseModulo
 
-
_getDeriveAt0() - Method in class neureka.backend.main.implementations.elementwise.CPUBiElementWiseMultiplication
+
_getDeriveAt0() - Method in class neureka.backend.main.implementations.elementwise.CPUBiElementWiseMultiplication
 
-
_getDeriveAt0() - Method in class neureka.backend.main.implementations.elementwise.CPUBiElementWisePower
+
_getDeriveAt0() - Method in class neureka.backend.main.implementations.elementwise.CPUBiElementWisePower
 
-
_getDeriveAt0() - Method in class neureka.backend.main.implementations.elementwise.CPUBiElementWiseSubtraction
+
_getDeriveAt0() - Method in class neureka.backend.main.implementations.elementwise.CPUBiElementWiseSubtraction
 
-
_getDeriveAt1() - Method in class neureka.backend.main.implementations.broadcast.CPUBroadcast
+
_getDeriveAt1() - Method in class neureka.backend.main.implementations.broadcast.CPUBroadcast
 
-
_getDeriveAt1() - Method in class neureka.backend.main.implementations.broadcast.CPUBroadcastAddition
+
_getDeriveAt1() - Method in class neureka.backend.main.implementations.broadcast.CPUBroadcastAddition
 
-
_getDeriveAt1() - Method in class neureka.backend.main.implementations.broadcast.CPUBroadcastDivision
+
_getDeriveAt1() - Method in class neureka.backend.main.implementations.broadcast.CPUBroadcastDivision
 
-
_getDeriveAt1() - Method in class neureka.backend.main.implementations.broadcast.CPUBroadcastModulo
+
_getDeriveAt1() - Method in class neureka.backend.main.implementations.broadcast.CPUBroadcastModulo
 
-
_getDeriveAt1() - Method in class neureka.backend.main.implementations.broadcast.CPUBroadcastMultiplication
+
_getDeriveAt1() - Method in class neureka.backend.main.implementations.broadcast.CPUBroadcastMultiplication
 
-
_getDeriveAt1() - Method in class neureka.backend.main.implementations.broadcast.CPUBroadcastPower
+
_getDeriveAt1() - Method in class neureka.backend.main.implementations.broadcast.CPUBroadcastPower
 
-
_getDeriveAt1() - Method in class neureka.backend.main.implementations.broadcast.CPUBroadcastSubtraction
+
_getDeriveAt1() - Method in class neureka.backend.main.implementations.broadcast.CPUBroadcastSubtraction
 
-
_getDeriveAt1() - Method in class neureka.backend.main.implementations.broadcast.CPUBroadcastSummation
+
_getDeriveAt1() - Method in class neureka.backend.main.implementations.broadcast.CPUBroadcastSummation
 
-
_getDeriveAt1() - Method in class neureka.backend.main.implementations.broadcast.CPUScalaBroadcastPower
+
_getDeriveAt1() - Method in class neureka.backend.main.implementations.broadcast.CPUScalaBroadcastPower
 
-
_getDeriveAt1() - Method in class neureka.backend.main.implementations.broadcast.CPUScalarBroadcast
+
_getDeriveAt1() - Method in class neureka.backend.main.implementations.broadcast.CPUScalarBroadcast
 
-
_getDeriveAt1() - Method in class neureka.backend.main.implementations.broadcast.CPUScalarBroadcastAddition
+
_getDeriveAt1() - Method in class neureka.backend.main.implementations.broadcast.CPUScalarBroadcastAddition
 
-
_getDeriveAt1() - Method in class neureka.backend.main.implementations.broadcast.CPUScalarBroadcastDivision
+
_getDeriveAt1() - Method in class neureka.backend.main.implementations.broadcast.CPUScalarBroadcastDivision
 
-
_getDeriveAt1() - Method in class neureka.backend.main.implementations.broadcast.CPUScalarBroadcastIdentity
+
_getDeriveAt1() - Method in class neureka.backend.main.implementations.broadcast.CPUScalarBroadcastIdentity
 
-
_getDeriveAt1() - Method in class neureka.backend.main.implementations.broadcast.CPUScalarBroadcastModulo
+
_getDeriveAt1() - Method in class neureka.backend.main.implementations.broadcast.CPUScalarBroadcastModulo
 
-
_getDeriveAt1() - Method in class neureka.backend.main.implementations.broadcast.CPUScalarBroadcastMultiplication
+
_getDeriveAt1() - Method in class neureka.backend.main.implementations.broadcast.CPUScalarBroadcastMultiplication
 
-
_getDeriveAt1() - Method in class neureka.backend.main.implementations.broadcast.CPUScalarBroadcastSubtraction
+
_getDeriveAt1() - Method in class neureka.backend.main.implementations.broadcast.CPUScalarBroadcastSubtraction
 
-
_getDeriveAt1() - Method in class neureka.backend.main.implementations.elementwise.CPUBiElementWise
+
_getDeriveAt1() - Method in class neureka.backend.main.implementations.elementwise.CPUBiElementWise
 
-
_getDeriveAt1() - Method in class neureka.backend.main.implementations.elementwise.CPUBiElementWiseAddition
+
_getDeriveAt1() - Method in class neureka.backend.main.implementations.elementwise.CPUBiElementWiseAddition
 
-
_getDeriveAt1() - Method in class neureka.backend.main.implementations.elementwise.CPUBiElementWiseDivision
+
_getDeriveAt1() - Method in class neureka.backend.main.implementations.elementwise.CPUBiElementWiseDivision
 
-
_getDeriveAt1() - Method in class neureka.backend.main.implementations.elementwise.CPUBiElementWiseModulo
+
_getDeriveAt1() - Method in class neureka.backend.main.implementations.elementwise.CPUBiElementWiseModulo
 
-
_getDeriveAt1() - Method in class neureka.backend.main.implementations.elementwise.CPUBiElementWiseMultiplication
+
_getDeriveAt1() - Method in class neureka.backend.main.implementations.elementwise.CPUBiElementWiseMultiplication
 
-
_getDeriveAt1() - Method in class neureka.backend.main.implementations.elementwise.CPUBiElementWisePower
+
_getDeriveAt1() - Method in class neureka.backend.main.implementations.elementwise.CPUBiElementWisePower
 
-
_getDeriveAt1() - Method in class neureka.backend.main.implementations.elementwise.CPUBiElementWiseSubtraction
+
_getDeriveAt1() - Method in class neureka.backend.main.implementations.elementwise.CPUBiElementWiseSubtraction
 
-
_getFun() - Method in class neureka.backend.main.implementations.broadcast.CPUBroadcast
+
_getFun() - Method in class neureka.backend.main.implementations.broadcast.CPUBroadcast
 
-
_getFun() - Method in class neureka.backend.main.implementations.broadcast.CPUBroadcastAddition
+
_getFun() - Method in class neureka.backend.main.implementations.broadcast.CPUBroadcastAddition
 
-
_getFun() - Method in class neureka.backend.main.implementations.broadcast.CPUBroadcastDivision
+
_getFun() - Method in class neureka.backend.main.implementations.broadcast.CPUBroadcastDivision
 
-
_getFun() - Method in class neureka.backend.main.implementations.broadcast.CPUBroadcastModulo
+
_getFun() - Method in class neureka.backend.main.implementations.broadcast.CPUBroadcastModulo
 
-
_getFun() - Method in class neureka.backend.main.implementations.broadcast.CPUBroadcastMultiplication
+
_getFun() - Method in class neureka.backend.main.implementations.broadcast.CPUBroadcastMultiplication
 
-
_getFun() - Method in class neureka.backend.main.implementations.broadcast.CPUBroadcastPower
+
_getFun() - Method in class neureka.backend.main.implementations.broadcast.CPUBroadcastPower
 
-
_getFun() - Method in class neureka.backend.main.implementations.broadcast.CPUBroadcastSubtraction
+
_getFun() - Method in class neureka.backend.main.implementations.broadcast.CPUBroadcastSubtraction
 
-
_getFun() - Method in class neureka.backend.main.implementations.broadcast.CPUBroadcastSummation
+
_getFun() - Method in class neureka.backend.main.implementations.broadcast.CPUBroadcastSummation
 
-
_getFun() - Method in class neureka.backend.main.implementations.broadcast.CPUScalaBroadcastPower
+
_getFun() - Method in class neureka.backend.main.implementations.broadcast.CPUScalaBroadcastPower
 
-
_getFun() - Method in class neureka.backend.main.implementations.broadcast.CPUScalarBroadcast
+
_getFun() - Method in class neureka.backend.main.implementations.broadcast.CPUScalarBroadcast
 
-
_getFun() - Method in class neureka.backend.main.implementations.broadcast.CPUScalarBroadcastAddition
+
_getFun() - Method in class neureka.backend.main.implementations.broadcast.CPUScalarBroadcastAddition
 
-
_getFun() - Method in class neureka.backend.main.implementations.broadcast.CPUScalarBroadcastDivision
+
_getFun() - Method in class neureka.backend.main.implementations.broadcast.CPUScalarBroadcastDivision
 
-
_getFun() - Method in class neureka.backend.main.implementations.broadcast.CPUScalarBroadcastIdentity
+
_getFun() - Method in class neureka.backend.main.implementations.broadcast.CPUScalarBroadcastIdentity
 
-
_getFun() - Method in class neureka.backend.main.implementations.broadcast.CPUScalarBroadcastModulo
+
_getFun() - Method in class neureka.backend.main.implementations.broadcast.CPUScalarBroadcastModulo
 
-
_getFun() - Method in class neureka.backend.main.implementations.broadcast.CPUScalarBroadcastMultiplication
+
_getFun() - Method in class neureka.backend.main.implementations.broadcast.CPUScalarBroadcastMultiplication
 
-
_getFun() - Method in class neureka.backend.main.implementations.broadcast.CPUScalarBroadcastSubtraction
+
_getFun() - Method in class neureka.backend.main.implementations.broadcast.CPUScalarBroadcastSubtraction
 
-
_getFun() - Method in class neureka.backend.main.implementations.convolution.AbstractCPUConvolution
+
_getFun() - Method in class neureka.backend.main.implementations.convolution.AbstractCPUConvolution
 
-
_getFun() - Method in class neureka.backend.main.implementations.convolution.CPUConvolution
+
_getFun() - Method in class neureka.backend.main.implementations.convolution.CPUConvolution
 
-
_getFun() - Method in class neureka.backend.main.implementations.elementwise.CPUBiElementWise
+
_getFun() - Method in class neureka.backend.main.implementations.elementwise.CPUBiElementWise
 
-
_getFun() - Method in class neureka.backend.main.implementations.elementwise.CPUBiElementWiseAddition
+
_getFun() - Method in class neureka.backend.main.implementations.elementwise.CPUBiElementWiseAddition
 
-
_getFun() - Method in class neureka.backend.main.implementations.elementwise.CPUBiElementWiseDivision
+
_getFun() - Method in class neureka.backend.main.implementations.elementwise.CPUBiElementWiseDivision
 
-
_getFun() - Method in class neureka.backend.main.implementations.elementwise.CPUBiElementWiseModulo
+
_getFun() - Method in class neureka.backend.main.implementations.elementwise.CPUBiElementWiseModulo
 
-
_getFun() - Method in class neureka.backend.main.implementations.elementwise.CPUBiElementWiseMultiplication
+
_getFun() - Method in class neureka.backend.main.implementations.elementwise.CPUBiElementWiseMultiplication
 
-
_getFun() - Method in class neureka.backend.main.implementations.elementwise.CPUBiElementWisePower
+
_getFun() - Method in class neureka.backend.main.implementations.elementwise.CPUBiElementWisePower
 
-
_getFun() - Method in class neureka.backend.main.implementations.elementwise.CPUBiElementWiseSubtraction
+
_getFun() - Method in class neureka.backend.main.implementations.elementwise.CPUBiElementWiseSubtraction
 
-
_implementations - Variable in class neureka.backend.api.template.algorithms.AbstractDeviceAlgorithm
+
_implementations - Variable in class neureka.backend.api.template.algorithms.AbstractDeviceAlgorithm
 
-
_inputs - Variable in class neureka.backend.api.Call
+
_inputs - Variable in class neureka.backend.api.Call
The tensor arguments from which an operation will either read or to which it will write.
-
_isDifferentiable - Variable in class neureka.backend.api.template.operations.AbstractOperation
+
_isDifferentiable - Variable in class neureka.backend.api.template.operations.AbstractOperation
Certain operations are not differentiable, meaning they cannot participate in either forward or reverse mode differentiation.
-
_isIndexer - Variable in class neureka.backend.api.template.operations.AbstractOperation
+
_isIndexer - Variable in class neureka.backend.api.template.operations.AbstractOperation
This flag determines if this operation is auto-indexing passed input arguments.
-
_isInline - Variable in class neureka.backend.api.template.operations.AbstractOperation
+
_isInline - Variable in class neureka.backend.api.template.operations.AbstractOperation
Inline operations are operations which change the state of the arguments passed to them.
-
_isOperator - Variable in class neureka.backend.api.template.operations.AbstractOperation
+
_isOperator - Variable in class neureka.backend.api.template.operations.AbstractOperation
 
-
_loadData() - Method in class neureka.devices.file.CSVHandle
+
_loadData() - Method in class neureka.devices.file.CSVHandle
 
-
_loadData() - Method in class neureka.devices.file.IDXHandle
+
_loadData() - Method in class neureka.devices.file.IDXHandle
 
-
_loadFile() - Method in class neureka.devices.file.IDXHandle
+
_log - Variable in class neureka.devices.AbstractDevice
 
-
_loadFileInputStream() - Method in class neureka.devices.file.IDXHandle
+
_numberOfDataObjects - Variable in class neureka.devices.AbstractBaseDevice
 
-
_log - Variable in class neureka.devices.AbstractDevice
+
_numberOfTensors - Variable in class neureka.devices.AbstractBaseDevice
 
-
_LOG - Static variable in class neureka.devices.file.IDXHandle
-
 
-
_numberOfDataObjects - Variable in class neureka.devices.AbstractBaseDevice
-
 
-
_numberOfTensors - Variable in class neureka.devices.AbstractBaseDevice
-
 
-
_operator - Variable in class neureka.backend.api.template.operations.AbstractOperation
+
_operator - Variable in class neureka.backend.api.template.operations.AbstractOperation
An operation may have two ways in which it can describe itself as String within a Function AST.
-
_owner - Variable in class neureka.devices.AbstractDeviceData
+
_owner - Variable in class neureka.devices.AbstractDeviceData
 
-
_prepareForExecution(ExecutionCall<? extends Device<?>>) - Static method in class neureka.backend.api.template.algorithms.AbstractDeviceAlgorithm
+
_prepareForExecution(ExecutionCall<? extends Device<?>>) - Static method in class neureka.backend.api.template.algorithms.AbstractDeviceAlgorithm
 
-
_readAll(Tensor<T>, boolean) - Method in class neureka.devices.AbstractDevice
+
_readAll(Tensor<T>, boolean) - Method in class neureka.devices.AbstractDevice
 
-
_readAll(Tensor<T>, boolean) - Method in class neureka.devices.host.CPU
+
_readAll(Tensor<T>, boolean) - Method in class neureka.devices.host.CPU
 
-
_readAll(Tensor<T>, boolean) - Method in class neureka.devices.opencl.OpenCLDevice
+
_readAll(Tensor<T>, boolean) - Method in class neureka.devices.opencl.OpenCLDevice
 
-
_readArray(Tensor<T>, Class<A>, int, int) - Method in class neureka.devices.AbstractDevice
+
_readArray(Tensor<T>, Class<A>, int, int) - Method in class neureka.devices.AbstractDevice
 
-
_readArray(Tensor<T>, Class<A>, int, int) - Method in class neureka.devices.host.CPU
+
_readArray(Tensor<T>, Class<A>, int, int) - Method in class neureka.devices.host.CPU
 
-
_readArray(Tensor<T>, Class<A>, int, int) - Method in class neureka.devices.opencl.OpenCLDevice
+
_readArray(Tensor<T>, Class<A>, int, int) - Method in class neureka.devices.opencl.OpenCLDevice
 
-
_readItem(Tensor<T>, int) - Method in class neureka.devices.AbstractDevice
+
_readItem(Tensor<T>, int) - Method in class neureka.devices.AbstractDevice
 
-
_readItem(Tensor<T>, int) - Method in class neureka.devices.host.CPU
+
_readItem(Tensor<T>, int) - Method in class neureka.devices.host.CPU
 
-
_readItem(Tensor<T>, int) - Method in class neureka.devices.opencl.OpenCLDevice
+
_readItem(Tensor<T>, int) - Method in class neureka.devices.opencl.OpenCLDevice
 
-
_refCounter - Variable in class neureka.devices.AbstractDeviceData
+
_refCounter - Variable in class neureka.devices.AbstractDeviceData
 
-
_removeOrReject(T) - Method in class neureka.backend.api.Extensions
+
_removeOrReject(T) - Method in class neureka.backend.api.Extensions
 
-
_removeOrReject(T) - Method in class neureka.common.composition.AbstractComponentOwner
+
_removeOrReject(T) - Method in class neureka.common.composition.AbstractComponentOwner
An implementation of this method checks if the passed component should be removed from the component collection of this class or its removal should be "rejected".
-
_removeOrReject(T) - Method in class neureka.math.args.Args
+
_removeOrReject(T) - Method in class neureka.math.args.Args
 
-
_set(Component<T>) - Method in class neureka.common.composition.AbstractComponentOwner
+
_set(Component<T>) - Method in class neureka.common.composition.AbstractComponentOwner
 
-
_setOrReject(T) - Method in class neureka.backend.api.Extensions
+
_setOrReject(T) - Method in class neureka.backend.api.Extensions
 
-
_setOrReject(T) - Method in class neureka.common.composition.AbstractComponentOwner
+
_setOrReject(T) - Method in class neureka.common.composition.AbstractComponentOwner
This abstract method ought to be implemented further down the inheritance hierarchy where it's responsibility
@@ -10282,111 +10211,153 @@

_

should be added or "rejected" to the component collection of this class.
-
_setOrReject(T) - Method in class neureka.math.args.Args
+
_setOrReject(T) - Method in class neureka.math.args.Args
 
-
_shape - Variable in class neureka.ndim.config.types.permuted.Permuted1DConfiguration
+
_shape - Variable in class neureka.ndim.config.types.permuted.Permuted1DConfiguration
The shape of the NDArray.
-
_shape - Variable in class neureka.ndim.config.types.simple.Simple1DConfiguration
+
_shape - Variable in class neureka.ndim.config.types.simple.Simple1DConfiguration
The shape of the NDArray.
-
_shape - Variable in class neureka.ndim.config.types.sliced.Sliced1DConfiguration
+
_shape - Variable in class neureka.ndim.config.types.sliced.Sliced1DConfiguration
The shape of the NDArray.
-
_shape1 - Variable in class neureka.ndim.config.types.permuted.Permuted2DConfiguration
+
_shape1 - Variable in class neureka.ndim.config.types.permuted.Permuted2DConfiguration
The shape of the NDArray.
-
_shape1 - Variable in class neureka.ndim.config.types.permuted.Permuted3DConfiguration
+
_shape1 - Variable in class neureka.ndim.config.types.permuted.Permuted3DConfiguration
The shape of the NDArray.
-
_shape1 - Variable in class neureka.ndim.config.types.simple.Simple2DConfiguration
+
_shape1 - Variable in class neureka.ndim.config.types.simple.Simple2DConfiguration
The shape of the NDArray.
-
_shape1 - Variable in class neureka.ndim.config.types.simple.Simple3DConfiguration
+
_shape1 - Variable in class neureka.ndim.config.types.simple.Simple3DConfiguration
The shape of the NDArray.
-
_shape1 - Variable in class neureka.ndim.config.types.sliced.Sliced2DConfiguration
+
_shape1 - Variable in class neureka.ndim.config.types.sliced.Sliced2DConfiguration
The shape of the NDArray.
-
_shape1 - Variable in class neureka.ndim.config.types.sliced.Sliced3DConfiguration
+
_shape1 - Variable in class neureka.ndim.config.types.sliced.Sliced3DConfiguration
The shape of the NDArray.
-
_shape2 - Variable in class neureka.ndim.config.types.permuted.Permuted2DConfiguration
-
 
-
_shape2 - Variable in class neureka.ndim.config.types.permuted.Permuted3DConfiguration
+
_shape2 - Variable in class neureka.ndim.config.types.permuted.Permuted2DConfiguration
 
-
_shape2 - Variable in class neureka.ndim.config.types.simple.Simple2DConfiguration
+
_shape2 - Variable in class neureka.ndim.config.types.permuted.Permuted3DConfiguration
 
-
_shape2 - Variable in class neureka.ndim.config.types.simple.Simple3DConfiguration
+
_shape2 - Variable in class neureka.ndim.config.types.simple.Simple2DConfiguration
 
-
_shape2 - Variable in class neureka.ndim.config.types.sliced.Sliced2DConfiguration
+
_shape2 - Variable in class neureka.ndim.config.types.simple.Simple3DConfiguration
 
-
_shape2 - Variable in class neureka.ndim.config.types.sliced.Sliced3DConfiguration
+
_shape2 - Variable in class neureka.ndim.config.types.sliced.Sliced2DConfiguration
 
-
_shape3 - Variable in class neureka.ndim.config.types.permuted.Permuted3DConfiguration
+
_shape2 - Variable in class neureka.ndim.config.types.sliced.Sliced3DConfiguration
 
-
_shape3 - Variable in class neureka.ndim.config.types.simple.Simple3DConfiguration
+
_shape3 - Variable in class neureka.ndim.config.types.permuted.Permuted3DConfiguration
 
-
_shape3 - Variable in class neureka.ndim.config.types.sliced.Sliced3DConfiguration
+
_shape3 - Variable in class neureka.ndim.config.types.simple.Simple3DConfiguration
 
-
_simpleReshape(int[], NDConfiguration) - Static method in class neureka.ndim.config.AbstractNDC
+
_shape3 - Variable in class neureka.ndim.config.types.sliced.Sliced3DConfiguration
 
-
_size - Variable in class neureka.devices.file.IDXHandle
+
_simpleReshape(int[], NDConfiguration) - Static method in class neureka.ndim.config.AbstractNDC
 
-
_sizeOccupiedBy(Tensor<T>) - Method in class neureka.devices.AbstractDevice
+
_sizeOccupiedBy(Tensor<T>) - Method in class neureka.devices.AbstractDevice
 
-
_sizeOccupiedBy(Tensor<T>) - Method in class neureka.devices.host.CPU
+
_sizeOccupiedBy(Tensor<T>) - Method in class neureka.devices.host.CPU
 
-
_sizeOccupiedBy(Tensor<T>) - Method in class neureka.devices.opencl.OpenCLDevice
+
_sizeOccupiedBy(Tensor<T>) - Method in class neureka.devices.opencl.OpenCLDevice
 
-
_swap(Tensor<T>, Tensor<T>) - Method in class neureka.devices.AbstractDevice
+
_swap(Tensor<T>, Tensor<T>) - Method in class neureka.devices.AbstractDevice
This method is mostly used internally and should not be used directly in most cases.
-
_swap(Tensor<T>, Tensor<T>) - Method in class neureka.devices.host.CPU
+
_swap(Tensor<T>, Tensor<T>) - Method in class neureka.devices.host.CPU
 
-
_swap(Tensor<T>, Tensor<T>) - Method in class neureka.devices.opencl.OpenCLDevice
+
_swap(Tensor<T>, Tensor<T>) - Method in class neureka.devices.opencl.OpenCLDevice
 
-
_this() - Method in class neureka.common.composition.AbstractComponentOwner
+
_this() - Method in class neureka.common.composition.AbstractComponentOwner
 
-
_transferFrom(AbstractComponentOwner<C>) - Method in class neureka.common.composition.AbstractComponentOwner
+
_transferFrom(AbstractComponentOwner<C>) - Method in class neureka.common.composition.AbstractComponentOwner
A component owner might need to exchange components.
-
_virtualize(Tensor<?>) - Method in class neureka.devices.AbstractDevice
+
_virtualize(Tensor<?>) - Method in class neureka.devices.AbstractDevice
 
-
_virtualize(Tensor<?>) - Method in class neureka.devices.host.CPU
+
_virtualize(Tensor<?>) - Method in class neureka.devices.host.CPU
 
-
_virtualize(Tensor<?>) - Method in class neureka.devices.opencl.OpenCLDevice
+
_virtualize(Tensor<?>) - Method in class neureka.devices.opencl.OpenCLDevice
 
-
_work(int, int) - Method in class neureka.devices.host.concurrent.WorkScheduler
+
_work(int, int) - Method in class neureka.devices.host.concurrent.WorkScheduler
 
-
_workloadFor(ExecutionCall<CPU>) - Method in class neureka.backend.main.implementations.broadcast.CPUScalarBroadcast
+
_workloadFor(ExecutionCall<CPU>) - Method in class neureka.backend.main.implementations.broadcast.CPUScalarBroadcast
 
-
_writeArray(Tensor<T>, Object, int, int, int) - Method in class neureka.devices.AbstractDevice
+
_writeArray(Tensor<T>, Object, int, int, int) - Method in class neureka.devices.AbstractDevice
 
-
_writeArray(Tensor<T>, Object, int, int, int) - Method in class neureka.devices.host.CPU
+
_writeArray(Tensor<T>, Object, int, int, int) - Method in class neureka.devices.host.CPU
 
-
_writeArray(Tensor<T>, Object, int, int, int) - Method in class neureka.devices.opencl.OpenCLDevice
+
_writeArray(Tensor<T>, Object, int, int, int) - Method in class neureka.devices.opencl.OpenCLDevice
 
-
_writeItem(Tensor<T>, T, int, int) - Method in class neureka.devices.AbstractDevice
+
_writeItem(Tensor<T>, T, int, int) - Method in class neureka.devices.AbstractDevice
 
-
_writeItem(Tensor<T>, T, int, int) - Method in class neureka.devices.host.CPU
+
_writeItem(Tensor<T>, T, int, int) - Method in class neureka.devices.host.CPU
 
-
_writeItem(Tensor<T>, T, int, int) - Method in class neureka.devices.opencl.OpenCLDevice
+
_writeItem(Tensor<T>, T, int, int) - Method in class neureka.devices.opencl.OpenCLDevice
 
-A B C D E F G H I J K L M N O P Q R S T U V W X _ 
 All Classes and Interfaces|All Packages|Constant Field Values
+A B C D E F G H I J K L M N O P Q R S T U V W X _ 
+
+
+
+
+
+
+
+
+
+ + diff --git a/docs/jdocs/index.html b/docs/jdocs/index.html index 263403389..394815a3f 100644 --- a/docs/jdocs/index.html +++ b/docs/jdocs/index.html @@ -1,263 +1,75 @@ - + + - -Overview (neureka 1.0.0 API) - - - - - - - - - - + +neureka 1.0.1 API + - - + + + + + + + <noscript> <div>JavaScript is disabled on your browser.</div> </noscript> -<div class="flex-box"> -<header role="banner" class="flex-header"> -<nav role="navigation"> -<!-- ========= START OF TOP NAVBAR ======= --> -<div class="top-nav" id="navbar-top"> -<div class="skip-nav"><a href="#skip-navbar-top" title="Skip navigation links">Skip navigation links</a></div> -<ul id="navbar-top-firstrow" class="nav-list" title="Navigation"> -<li class="nav-bar-cell1-rev">Overview</li> -<li>Package</li> -<li>Class</li> -<li><a href="overview-tree.html">Tree</a></li> -<li><a href="deprecated-list.html">Deprecated</a></li> -<li><a href="index-all.html">Index</a></li> -<li><a href="help-doc.html#overview">Help</a></li> -</ul> -</div> -<div class="sub-nav"> -<div class="nav-list-search"><label for="search-input">SEARCH:</label> -<input type="text" id="search-input" value="search" disabled="disabled"> -<input type="reset" id="reset-button" value="reset" disabled="disabled"> -</div> -</div> -<!-- ========= END OF TOP NAVBAR ========= --> -<span class="skip-nav" id="skip-navbar-top"></span></nav> -</header> -<div class="flex-content"> -<main role="main"> -<div class="header"> -<h1 class="title">neureka 1.0.0 API</h1> -</div> -<div id="all-packages-table"> -<div class="caption"><span>Packages</span></div> -<div class="summary-table two-column-summary"> -<div class="table-header col-first">Package</div> -<div class="table-header col-last">Description</div> -<div class="col-first even-row-color all-packages-table all-packages-table-tab1"><a href="neureka/package-summary.html">neureka</a></div> -<div class="col-last even-row-color all-packages-table all-packages-table-tab1">&nbsp;</div> -<div class="col-first odd-row-color all-packages-table all-packages-table-tab1"><a href="neureka/autograd/package-summary.html">neureka.autograd</a></div> -<div class="col-last odd-row-color all-packages-table all-packages-table-tab1">&nbsp;</div> -<div class="col-first even-row-color all-packages-table all-packages-table-tab1"><a href="neureka/backend/api/package-summary.html">neureka.backend.api</a></div> -<div class="col-last even-row-color all-packages-table all-packages-table-tab1">&nbsp;</div> -<div class="col-first odd-row-color all-packages-table all-packages-table-tab1"><a href="neureka/backend/api/fun/package-summary.html">neureka.backend.api.fun</a></div> -<div class="col-last odd-row-color all-packages-table all-packages-table-tab1">&nbsp;</div> -<div class="col-first even-row-color all-packages-table all-packages-table-tab1"><a href="neureka/backend/api/ini/package-summary.html">neureka.backend.api.ini</a></div> -<div class="col-last even-row-color all-packages-table all-packages-table-tab1">&nbsp;</div> -<div class="col-first odd-row-color all-packages-table all-packages-table-tab1"><a href="neureka/backend/api/template/algorithms/package-summary.html">neureka.backend.api.template.algorithms</a></div> -<div class="col-last odd-row-color all-packages-table all-packages-table-tab1">&nbsp;</div> -<div class="col-first even-row-color all-packages-table all-packages-table-tab1"><a href="neureka/backend/api/template/implementations/package-summary.html">neureka.backend.api.template.implementations</a></div> -<div class="col-last even-row-color 
all-packages-table all-packages-table-tab1">&nbsp;</div> -<div class="col-first odd-row-color all-packages-table all-packages-table-tab1"><a href="neureka/backend/api/template/operations/package-summary.html">neureka.backend.api.template.operations</a></div> -<div class="col-last odd-row-color all-packages-table all-packages-table-tab1">&nbsp;</div> -<div class="col-first even-row-color all-packages-table all-packages-table-tab1"><a href="neureka/backend/cpu/package-summary.html">neureka.backend.cpu</a></div> -<div class="col-last even-row-color all-packages-table all-packages-table-tab1">&nbsp;</div> -<div class="col-first odd-row-color all-packages-table all-packages-table-tab1"><a href="neureka/backend/main/algorithms/package-summary.html">neureka.backend.main.algorithms</a></div> -<div class="col-last odd-row-color all-packages-table all-packages-table-tab1"> -<div class="block">Everything in this package should be considered library-private! - <b>DO NOT DEPEND ON CLASSES INSIDE THIS PACKAGE!</b> - Code inside this package or any sub-packages might change frequently...</div> -</div> -<div class="col-first even-row-color all-packages-table all-packages-table-tab1"><a href="neureka/backend/main/algorithms/internal/package-summary.html">neureka.backend.main.algorithms.internal</a></div> -<div class="col-last even-row-color all-packages-table all-packages-table-tab1">&nbsp;</div> -<div class="col-first odd-row-color all-packages-table all-packages-table-tab1"><a href="neureka/backend/main/implementations/package-summary.html">neureka.backend.main.implementations</a></div> -<div class="col-last odd-row-color all-packages-table all-packages-table-tab1"> -<div class="block">Everything in this package should be considered library-private! - <b>DO NOT DEPEND ON CLASSES INSIDE THIS PACKAGE!</b> - Code inside this package or any sub-packages might change frequently...</div> -</div> -<div class="col-first even-row-color all-packages-table all-packages-table-tab1"><a href="neureka/backend/main/implementations/broadcast/package-summary.html">neureka.backend.main.implementations.broadcast</a></div> -<div class="col-last even-row-color all-packages-table all-packages-table-tab1">&nbsp;</div> -<div class="col-first odd-row-color all-packages-table all-packages-table-tab1"><a href="neureka/backend/main/implementations/convolution/package-summary.html">neureka.backend.main.implementations.convolution</a></div> -<div class="col-last odd-row-color all-packages-table all-packages-table-tab1">&nbsp;</div> -<div class="col-first even-row-color all-packages-table all-packages-table-tab1"><a href="neureka/backend/main/implementations/elementwise/package-summary.html">neureka.backend.main.implementations.elementwise</a></div> -<div class="col-last even-row-color all-packages-table all-packages-table-tab1">&nbsp;</div> -<div class="col-first odd-row-color all-packages-table all-packages-table-tab1"><a href="neureka/backend/main/implementations/fun/package-summary.html">neureka.backend.main.implementations.fun</a></div> -<div class="col-last odd-row-color all-packages-table all-packages-table-tab1">&nbsp;</div> -<div class="col-first even-row-color all-packages-table all-packages-table-tab1"><a href="neureka/backend/main/implementations/fun/api/package-summary.html">neureka.backend.main.implementations.fun.api</a></div> -<div class="col-last even-row-color all-packages-table all-packages-table-tab1">&nbsp;</div> -<div class="col-first odd-row-color all-packages-table all-packages-table-tab1"><a 
href="neureka/backend/main/implementations/linear/package-summary.html">neureka.backend.main.implementations.linear</a></div> -<div class="col-last odd-row-color all-packages-table all-packages-table-tab1">&nbsp;</div> -<div class="col-first even-row-color all-packages-table all-packages-table-tab1"><a href="neureka/backend/main/implementations/matmul/package-summary.html">neureka.backend.main.implementations.matmul</a></div> -<div class="col-last even-row-color all-packages-table all-packages-table-tab1">&nbsp;</div> -<div class="col-first odd-row-color all-packages-table all-packages-table-tab1"><a href="neureka/backend/main/implementations/scalar/package-summary.html">neureka.backend.main.implementations.scalar</a></div> -<div class="col-last odd-row-color all-packages-table all-packages-table-tab1">&nbsp;</div> -<div class="col-first even-row-color all-packages-table all-packages-table-tab1"><a href="neureka/backend/main/internal/package-summary.html">neureka.backend.main.internal</a></div> -<div class="col-last even-row-color all-packages-table all-packages-table-tab1"> -<div class="block">Everything in this package should be considered library-private! - <b>DO NOT DEPEND ON CLASSES INSIDE THIS PACKAGE!</b> - Code inside this package or any sub-packages might change frequently...</div> -</div> -<div class="col-first odd-row-color all-packages-table all-packages-table-tab1"><a href="neureka/backend/main/memory/package-summary.html">neureka.backend.main.memory</a></div> -<div class="col-last odd-row-color all-packages-table all-packages-table-tab1"> -<div class="block">Everything in this package should be considered library-private! - <b>DO NOT DEPEND ON CLASSES INSIDE THIS PACKAGE!</b> - Code inside this package or any sub-packages might change frequently...</div> -</div> -<div class="col-first even-row-color all-packages-table all-packages-table-tab1"><a href="neureka/backend/main/operations/package-summary.html">neureka.backend.main.operations</a></div> -<div class="col-last even-row-color all-packages-table all-packages-table-tab1"> -<div class="block">Everything in this package should be considered library-private! - <b>DO NOT DEPEND ON CLASSES INSIDE THIS PACKAGE!</b> - Code inside this package or any sub-packages might change frequently...</div> -</div> -<div class="col-first odd-row-color all-packages-table all-packages-table-tab1"><a href="neureka/backend/main/operations/functions/package-summary.html">neureka.backend.main.operations.functions</a></div> -<div class="col-last odd-row-color all-packages-table all-packages-table-tab1"> -<div class="block">Everything in this package should be considered library-private! - <b>DO NOT DEPEND ON CLASSES INSIDE THIS PACKAGE!</b> - Code inside this package or any sub-packages might change frequently...</div> -</div> -<div class="col-first even-row-color all-packages-table all-packages-table-tab1"><a href="neureka/backend/main/operations/indexer/package-summary.html">neureka.backend.main.operations.indexer</a></div> -<div class="col-last even-row-color all-packages-table all-packages-table-tab1"> -<div class="block">Everything in this package should be considered library-private! 
- <b>DO NOT DEPEND ON CLASSES INSIDE THIS PACKAGE!</b> - Code inside this package or any sub-packages might change frequently...</div> -</div> -<div class="col-first odd-row-color all-packages-table all-packages-table-tab1"><a href="neureka/backend/main/operations/linear/package-summary.html">neureka.backend.main.operations.linear</a></div> -<div class="col-last odd-row-color all-packages-table all-packages-table-tab1"> -<div class="block">Everything in this package should be considered library-private! - <b>DO NOT DEPEND ON CLASSES INSIDE THIS PACKAGE!</b> - Code inside this package or any sub-packages might change frequently...</div> -</div> -<div class="col-first even-row-color all-packages-table all-packages-table-tab1"><a href="neureka/backend/main/operations/linear/internal/blas/package-summary.html">neureka.backend.main.operations.linear.internal.blas</a></div> -<div class="col-last even-row-color all-packages-table all-packages-table-tab1"> -<div class="block">Everything in this package should be considered library-private! - DO NOT USE CLASSES INSIDE THIS PACKAGE!</div> -</div> -<div class="col-first odd-row-color all-packages-table all-packages-table-tab1"><a href="neureka/backend/main/operations/linear/internal/opencl/package-summary.html">neureka.backend.main.operations.linear.internal.opencl</a></div> -<div class="col-last odd-row-color all-packages-table all-packages-table-tab1">&nbsp;</div> -<div class="col-first even-row-color all-packages-table all-packages-table-tab1"><a href="neureka/backend/main/operations/operator/package-summary.html">neureka.backend.main.operations.operator</a></div> -<div class="col-last even-row-color all-packages-table all-packages-table-tab1"> -<div class="block">Everything in this package should be considered library-private! - <b>DO NOT DEPEND ON CLASSES INSIDE THIS PACKAGE!</b> - Code inside this package or any sub-packages might change frequently...</div> -</div> -<div class="col-first odd-row-color all-packages-table all-packages-table-tab1"><a href="neureka/backend/main/operations/other/package-summary.html">neureka.backend.main.operations.other</a></div> -<div class="col-last odd-row-color all-packages-table all-packages-table-tab1"> -<div class="block">Everything in this package should be considered library-private! 
- <b>DO NOT DEPEND ON CLASSES INSIDE THIS PACKAGE!</b> - Code inside this package or any sub-packages might change frequently...</div> -</div> -<div class="col-first even-row-color all-packages-table all-packages-table-tab1"><a href="neureka/backend/main/operations/other/internal/package-summary.html">neureka.backend.main.operations.other.internal</a></div> -<div class="col-last even-row-color all-packages-table all-packages-table-tab1">&nbsp;</div> -<div class="col-first odd-row-color all-packages-table all-packages-table-tab1"><a href="neureka/backend/ocl/package-summary.html">neureka.backend.ocl</a></div> -<div class="col-last odd-row-color all-packages-table all-packages-table-tab1">&nbsp;</div> -<div class="col-first even-row-color all-packages-table all-packages-table-tab1"><a href="neureka/common/composition/package-summary.html">neureka.common.composition</a></div> -<div class="col-last even-row-color all-packages-table all-packages-table-tab1">&nbsp;</div> -<div class="col-first odd-row-color all-packages-table all-packages-table-tab1"><a href="neureka/common/utility/package-summary.html">neureka.common.utility</a></div> -<div class="col-last odd-row-color all-packages-table all-packages-table-tab1">&nbsp;</div> -<div class="col-first even-row-color all-packages-table all-packages-table-tab1"><a href="neureka/devices/package-summary.html">neureka.devices</a></div> -<div class="col-last even-row-color all-packages-table all-packages-table-tab1">&nbsp;</div> -<div class="col-first odd-row-color all-packages-table all-packages-table-tab1"><a href="neureka/devices/file/package-summary.html">neureka.devices.file</a></div> -<div class="col-last odd-row-color all-packages-table all-packages-table-tab1">&nbsp;</div> -<div class="col-first even-row-color all-packages-table all-packages-table-tab1"><a href="neureka/devices/host/package-summary.html">neureka.devices.host</a></div> -<div class="col-last even-row-color all-packages-table all-packages-table-tab1">&nbsp;</div> -<div class="col-first odd-row-color all-packages-table all-packages-table-tab1"><a href="neureka/devices/host/concurrent/package-summary.html">neureka.devices.host.concurrent</a></div> -<div class="col-last odd-row-color all-packages-table all-packages-table-tab1"> -<div class="block">Everything in this package should be considered library-private! - DO NOT USE CLASSES INSIDE THIS PACKAGE!</div> -</div> -<div class="col-first even-row-color all-packages-table all-packages-table-tab1"><a href="neureka/devices/host/machine/package-summary.html">neureka.devices.host.machine</a></div> -<div class="col-last even-row-color all-packages-table all-packages-table-tab1"> -<div class="block">Everything in this package should be considered library-private! 
- DO NOT USE CLASSES INSIDE THIS PACKAGE!</div> -</div> -<div class="col-first odd-row-color all-packages-table all-packages-table-tab1"><a href="neureka/devices/opencl/package-summary.html">neureka.devices.opencl</a></div> -<div class="col-last odd-row-color all-packages-table all-packages-table-tab1">&nbsp;</div> -<div class="col-first even-row-color all-packages-table all-packages-table-tab1"><a href="neureka/devices/opencl/utility/package-summary.html">neureka.devices.opencl.utility</a></div> -<div class="col-last even-row-color all-packages-table all-packages-table-tab1">&nbsp;</div> -<div class="col-first odd-row-color all-packages-table all-packages-table-tab1"><a href="neureka/dtype/package-summary.html">neureka.dtype</a></div> -<div class="col-last odd-row-color all-packages-table all-packages-table-tab1">&nbsp;</div> -<div class="col-first even-row-color all-packages-table all-packages-table-tab1"><a href="neureka/dtype/custom/package-summary.html">neureka.dtype.custom</a></div> -<div class="col-last even-row-color all-packages-table all-packages-table-tab1"> -<div class="block">Everything in this package should be considered library-private! - <b>DO NOT DEPEND ON CLASSES INSIDE THIS PACKAGE!</b> - Code inside this package or any sub-packages might change frequently...</div> -</div> -<div class="col-first odd-row-color all-packages-table all-packages-table-tab1"><a href="neureka/fluent/building/package-summary.html">neureka.fluent.building</a></div> -<div class="col-last odd-row-color all-packages-table all-packages-table-tab1">&nbsp;</div> -<div class="col-first even-row-color all-packages-table all-packages-table-tab1"><a href="neureka/fluent/building/states/package-summary.html">neureka.fluent.building.states</a></div> -<div class="col-last even-row-color all-packages-table all-packages-table-tab1">&nbsp;</div> -<div class="col-first odd-row-color all-packages-table all-packages-table-tab1"><a href="neureka/fluent/slicing/package-summary.html">neureka.fluent.slicing</a></div> -<div class="col-last odd-row-color all-packages-table all-packages-table-tab1">&nbsp;</div> -<div class="col-first even-row-color all-packages-table all-packages-table-tab1"><a href="neureka/fluent/slicing/states/package-summary.html">neureka.fluent.slicing.states</a></div> -<div class="col-last even-row-color all-packages-table all-packages-table-tab1">&nbsp;</div> -<div class="col-first odd-row-color all-packages-table all-packages-table-tab1"><a href="neureka/framing/package-summary.html">neureka.framing</a></div> -<div class="col-last odd-row-color all-packages-table all-packages-table-tab1">&nbsp;</div> -<div class="col-first even-row-color all-packages-table all-packages-table-tab1"><a href="neureka/framing/fluent/package-summary.html">neureka.framing.fluent</a></div> -<div class="col-last even-row-color all-packages-table all-packages-table-tab1">&nbsp;</div> -<div class="col-first odd-row-color all-packages-table all-packages-table-tab1"><a href="neureka/math/package-summary.html">neureka.math</a></div> -<div class="col-last odd-row-color all-packages-table all-packages-table-tab1">&nbsp;</div> -<div class="col-first even-row-color all-packages-table all-packages-table-tab1"><a href="neureka/math/args/package-summary.html">neureka.math.args</a></div> -<div class="col-last even-row-color all-packages-table all-packages-table-tab1">&nbsp;</div> -<div class="col-first odd-row-color all-packages-table all-packages-table-tab1"><a 
href="neureka/math/implementations/package-summary.html">neureka.math.implementations</a></div> -<div class="col-last odd-row-color all-packages-table all-packages-table-tab1"> -<div class="block">Everything in this package should be considered library-private! - <b>DO NOT DEPEND ON CLASSES INSIDE THIS PACKAGE!</b> - Code inside this package or any sub-packages might change frequently...</div> -</div> -<div class="col-first even-row-color all-packages-table all-packages-table-tab1"><a href="neureka/math/parsing/package-summary.html">neureka.math.parsing</a></div> -<div class="col-last even-row-color all-packages-table all-packages-table-tab1"> -<div class="block">Everything in this package should be considered library-private! - <b>DO NOT DEPEND ON CLASSES INSIDE THIS PACKAGE!</b> - Code inside this package or any sub-packages might change frequently...</div> -</div> -<div class="col-first odd-row-color all-packages-table all-packages-table-tab1"><a href="neureka/ndim/package-summary.html">neureka.ndim</a></div> -<div class="col-last odd-row-color all-packages-table all-packages-table-tab1">&nbsp;</div> -<div class="col-first even-row-color all-packages-table all-packages-table-tab1"><a href="neureka/ndim/config/package-summary.html">neureka.ndim.config</a></div> -<div class="col-last even-row-color all-packages-table all-packages-table-tab1">&nbsp;</div> -<div class="col-first odd-row-color all-packages-table all-packages-table-tab1"><a href="neureka/ndim/config/types/package-summary.html">neureka.ndim.config.types</a></div> -<div class="col-last odd-row-color all-packages-table all-packages-table-tab1">&nbsp;</div> -<div class="col-first even-row-color all-packages-table all-packages-table-tab1"><a href="neureka/ndim/config/types/permuted/package-summary.html">neureka.ndim.config.types.permuted</a></div> -<div class="col-last even-row-color all-packages-table all-packages-table-tab1">&nbsp;</div> -<div class="col-first odd-row-color all-packages-table all-packages-table-tab1"><a href="neureka/ndim/config/types/simple/package-summary.html">neureka.ndim.config.types.simple</a></div> -<div class="col-last odd-row-color all-packages-table all-packages-table-tab1">&nbsp;</div> -<div class="col-first even-row-color all-packages-table all-packages-table-tab1"><a href="neureka/ndim/config/types/sliced/package-summary.html">neureka.ndim.config.types.sliced</a></div> -<div class="col-last even-row-color all-packages-table all-packages-table-tab1">&nbsp;</div> -<div class="col-first odd-row-color all-packages-table all-packages-table-tab1"><a href="neureka/ndim/config/types/views/package-summary.html">neureka.ndim.config.types.views</a></div> -<div class="col-last odd-row-color all-packages-table all-packages-table-tab1">&nbsp;</div> -<div class="col-first even-row-color all-packages-table all-packages-table-tab1"><a href="neureka/ndim/config/types/views/virtual/package-summary.html">neureka.ndim.config.types.views.virtual</a></div> -<div class="col-last even-row-color all-packages-table all-packages-table-tab1">&nbsp;</div> -<div class="col-first odd-row-color all-packages-table all-packages-table-tab1"><a href="neureka/ndim/iterator/package-summary.html">neureka.ndim.iterator</a></div> -<div class="col-last odd-row-color all-packages-table all-packages-table-tab1">&nbsp;</div> -<div class="col-first even-row-color all-packages-table all-packages-table-tab1"><a href="neureka/ndim/iterator/types/permuted/package-summary.html">neureka.ndim.iterator.types.permuted</a></div> -<div class="col-last 
even-row-color all-packages-table all-packages-table-tab1">&nbsp;</div> -<div class="col-first odd-row-color all-packages-table all-packages-table-tab1"><a href="neureka/ndim/iterator/types/simple/package-summary.html">neureka.ndim.iterator.types.simple</a></div> -<div class="col-last odd-row-color all-packages-table all-packages-table-tab1">&nbsp;</div> -<div class="col-first even-row-color all-packages-table all-packages-table-tab1"><a href="neureka/ndim/iterator/types/sliced/package-summary.html">neureka.ndim.iterator.types.sliced</a></div> -<div class="col-last even-row-color all-packages-table all-packages-table-tab1">&nbsp;</div> -<div class="col-first odd-row-color all-packages-table all-packages-table-tab1"><a href="neureka/ndim/iterator/types/virtual/package-summary.html">neureka.ndim.iterator.types.virtual</a></div> -<div class="col-last odd-row-color all-packages-table all-packages-table-tab1">&nbsp;</div> -<div class="col-first even-row-color all-packages-table all-packages-table-tab1"><a href="neureka/optimization/package-summary.html">neureka.optimization</a></div> -<div class="col-last even-row-color all-packages-table all-packages-table-tab1">&nbsp;</div> -<div class="col-first odd-row-color all-packages-table all-packages-table-tab1"><a href="neureka/optimization/implementations/package-summary.html">neureka.optimization.implementations</a></div> -<div class="col-last odd-row-color all-packages-table all-packages-table-tab1">&nbsp;</div> -<div class="col-first even-row-color all-packages-table all-packages-table-tab1"><a href="neureka/view/package-summary.html">neureka.view</a></div> -<div class="col-last even-row-color all-packages-table all-packages-table-tab1">&nbsp;</div> -</div> -</div> -</main> -</div> -</div> -</body> +<h2>Frame Alert</h2> +<p>This document is designed to be viewed using the frames feature. If you see this message, you are using a non-frame-capable web client. Link to <a href="overview-summary.html">Non-frame version</a>.</p> + + diff --git a/docs/jdocs/jquery-ui.overrides.css b/docs/jdocs/jquery-ui.overrides.css deleted file mode 100644 index f89acb632..000000000 --- a/docs/jdocs/jquery-ui.overrides.css +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. Oracle designates this - * particular file as subject to the "Classpath" exception as provided - * by Oracle in the LICENSE file that accompanied this code. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -.ui-state-active, -.ui-widget-content .ui-state-active, -.ui-widget-header .ui-state-active, -a.ui-button:active, -.ui-button:active, -.ui-button.ui-state-active:hover { - /* Overrides the color of selection used in jQuery UI */ - background: #F8981D; -} diff --git a/docs/jdocs/jquery/external/jquery/jquery.js b/docs/jdocs/jquery/external/jquery/jquery.js deleted file mode 100644 index 50937333b..000000000 --- a/docs/jdocs/jquery/external/jquery/jquery.js +++ /dev/null @@ -1,10872 +0,0 @@ -/*! - * jQuery JavaScript Library v3.5.1 - * https://jquery.com/ - * - * Includes Sizzle.js - * https://sizzlejs.com/ - * - * Copyright JS Foundation and other contributors - * Released under the MIT license - * https://jquery.org/license - * - * Date: 2020-05-04T22:49Z - */ -( function( global, factory ) { - - "use strict"; - - if ( typeof module === "object" && typeof module.exports === "object" ) { - - // For CommonJS and CommonJS-like environments where a proper `window` - // is present, execute the factory and get jQuery. - // For environments that do not have a `window` with a `document` - // (such as Node.js), expose a factory as module.exports. - // This accentuates the need for the creation of a real `window`. - // e.g. var jQuery = require("jquery")(window); - // See ticket #14549 for more info. - module.exports = global.document ? - factory( global, true ) : - function( w ) { - if ( !w.document ) { - throw new Error( "jQuery requires a window with a document" ); - } - return factory( w ); - }; - } else { - factory( global ); - } - -// Pass this if window is not defined yet -} )( typeof window !== "undefined" ? window : this, function( window, noGlobal ) { - -// Edge <= 12 - 13+, Firefox <=18 - 45+, IE 10 - 11, Safari 5.1 - 9+, iOS 6 - 9.1 -// throw exceptions when non-strict code (e.g., ASP.NET 4.5) accesses strict mode -// arguments.callee.caller (trac-13335). But as of jQuery 3.0 (2016), strict mode should be common -// enough that all such attempts are guarded in a try block. -"use strict"; - -var arr = []; - -var getProto = Object.getPrototypeOf; - -var slice = arr.slice; - -var flat = arr.flat ? function( array ) { - return arr.flat.call( array ); -} : function( array ) { - return arr.concat.apply( [], array ); -}; - - -var push = arr.push; - -var indexOf = arr.indexOf; - -var class2type = {}; - -var toString = class2type.toString; - -var hasOwn = class2type.hasOwnProperty; - -var fnToString = hasOwn.toString; - -var ObjectFunctionString = fnToString.call( Object ); - -var support = {}; - -var isFunction = function isFunction( obj ) { - - // Support: Chrome <=57, Firefox <=52 - // In some browsers, typeof returns "function" for HTML elements - // (i.e., `typeof document.createElement( "object" ) === "function"`). - // We don't want to classify *any* DOM node as a function. - return typeof obj === "function" && typeof obj.nodeType !== "number"; - }; - - -var isWindow = function isWindow( obj ) { - return obj != null && obj === obj.window; - }; - - -var document = window.document; - - - - var preservedScriptAttributes = { - type: true, - src: true, - nonce: true, - noModule: true - }; - - function DOMEval( code, node, doc ) { - doc = doc || document; - - var i, val, - script = doc.createElement( "script" ); - - script.text = code; - if ( node ) { - for ( i in preservedScriptAttributes ) { - - // Support: Firefox 64+, Edge 18+ - // Some browsers don't support the "nonce" property on scripts. 
- // On the other hand, just using `getAttribute` is not enough as - // the `nonce` attribute is reset to an empty string whenever it - // becomes browsing-context connected. - // See https://github.com/whatwg/html/issues/2369 - // See https://html.spec.whatwg.org/#nonce-attributes - // The `node.getAttribute` check was added for the sake of - // `jQuery.globalEval` so that it can fake a nonce-containing node - // via an object. - val = node[ i ] || node.getAttribute && node.getAttribute( i ); - if ( val ) { - script.setAttribute( i, val ); - } - } - } - doc.head.appendChild( script ).parentNode.removeChild( script ); - } - - -function toType( obj ) { - if ( obj == null ) { - return obj + ""; - } - - // Support: Android <=2.3 only (functionish RegExp) - return typeof obj === "object" || typeof obj === "function" ? - class2type[ toString.call( obj ) ] || "object" : - typeof obj; -} -/* global Symbol */ -// Defining this global in .eslintrc.json would create a danger of using the global -// unguarded in another place, it seems safer to define global only for this module - - - -var - version = "3.5.1", - - // Define a local copy of jQuery - jQuery = function( selector, context ) { - - // The jQuery object is actually just the init constructor 'enhanced' - // Need init if jQuery is called (just allow error to be thrown if not included) - return new jQuery.fn.init( selector, context ); - }; - -jQuery.fn = jQuery.prototype = { - - // The current version of jQuery being used - jquery: version, - - constructor: jQuery, - - // The default length of a jQuery object is 0 - length: 0, - - toArray: function() { - return slice.call( this ); - }, - - // Get the Nth element in the matched element set OR - // Get the whole matched element set as a clean array - get: function( num ) { - - // Return all the elements in a clean array - if ( num == null ) { - return slice.call( this ); - } - - // Return just the one element from the set - return num < 0 ? this[ num + this.length ] : this[ num ]; - }, - - // Take an array of elements and push it onto the stack - // (returning the new matched element set) - pushStack: function( elems ) { - - // Build a new jQuery matched element set - var ret = jQuery.merge( this.constructor(), elems ); - - // Add the old object onto the stack (as a reference) - ret.prevObject = this; - - // Return the newly-formed element set - return ret; - }, - - // Execute a callback for every element in the matched set. - each: function( callback ) { - return jQuery.each( this, callback ); - }, - - map: function( callback ) { - return this.pushStack( jQuery.map( this, function( elem, i ) { - return callback.call( elem, i, elem ); - } ) ); - }, - - slice: function() { - return this.pushStack( slice.apply( this, arguments ) ); - }, - - first: function() { - return this.eq( 0 ); - }, - - last: function() { - return this.eq( -1 ); - }, - - even: function() { - return this.pushStack( jQuery.grep( this, function( _elem, i ) { - return ( i + 1 ) % 2; - } ) ); - }, - - odd: function() { - return this.pushStack( jQuery.grep( this, function( _elem, i ) { - return i % 2; - } ) ); - }, - - eq: function( i ) { - var len = this.length, - j = +i + ( i < 0 ? len : 0 ); - return this.pushStack( j >= 0 && j < len ? [ this[ j ] ] : [] ); - }, - - end: function() { - return this.prevObject || this.constructor(); - }, - - // For internal use only. - // Behaves like an Array's method, not like a jQuery method. 
- push: push, - sort: arr.sort, - splice: arr.splice -}; - -jQuery.extend = jQuery.fn.extend = function() { - var options, name, src, copy, copyIsArray, clone, - target = arguments[ 0 ] || {}, - i = 1, - length = arguments.length, - deep = false; - - // Handle a deep copy situation - if ( typeof target === "boolean" ) { - deep = target; - - // Skip the boolean and the target - target = arguments[ i ] || {}; - i++; - } - - // Handle case when target is a string or something (possible in deep copy) - if ( typeof target !== "object" && !isFunction( target ) ) { - target = {}; - } - - // Extend jQuery itself if only one argument is passed - if ( i === length ) { - target = this; - i--; - } - - for ( ; i < length; i++ ) { - - // Only deal with non-null/undefined values - if ( ( options = arguments[ i ] ) != null ) { - - // Extend the base object - for ( name in options ) { - copy = options[ name ]; - - // Prevent Object.prototype pollution - // Prevent never-ending loop - if ( name === "__proto__" || target === copy ) { - continue; - } - - // Recurse if we're merging plain objects or arrays - if ( deep && copy && ( jQuery.isPlainObject( copy ) || - ( copyIsArray = Array.isArray( copy ) ) ) ) { - src = target[ name ]; - - // Ensure proper type for the source value - if ( copyIsArray && !Array.isArray( src ) ) { - clone = []; - } else if ( !copyIsArray && !jQuery.isPlainObject( src ) ) { - clone = {}; - } else { - clone = src; - } - copyIsArray = false; - - // Never move original objects, clone them - target[ name ] = jQuery.extend( deep, clone, copy ); - - // Don't bring in undefined values - } else if ( copy !== undefined ) { - target[ name ] = copy; - } - } - } - } - - // Return the modified object - return target; -}; - -jQuery.extend( { - - // Unique for each copy of jQuery on the page - expando: "jQuery" + ( version + Math.random() ).replace( /\D/g, "" ), - - // Assume jQuery is ready without the ready module - isReady: true, - - error: function( msg ) { - throw new Error( msg ); - }, - - noop: function() {}, - - isPlainObject: function( obj ) { - var proto, Ctor; - - // Detect obvious negatives - // Use toString instead of jQuery.type to catch host objects - if ( !obj || toString.call( obj ) !== "[object Object]" ) { - return false; - } - - proto = getProto( obj ); - - // Objects with no prototype (e.g., `Object.create( null )`) are plain - if ( !proto ) { - return true; - } - - // Objects with prototype are plain iff they were constructed by a global Object function - Ctor = hasOwn.call( proto, "constructor" ) && proto.constructor; - return typeof Ctor === "function" && fnToString.call( Ctor ) === ObjectFunctionString; - }, - - isEmptyObject: function( obj ) { - var name; - - for ( name in obj ) { - return false; - } - return true; - }, - - // Evaluates a script in a provided context; falls back to the global one - // if not specified. 
- globalEval: function( code, options, doc ) { - DOMEval( code, { nonce: options && options.nonce }, doc ); - }, - - each: function( obj, callback ) { - var length, i = 0; - - if ( isArrayLike( obj ) ) { - length = obj.length; - for ( ; i < length; i++ ) { - if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { - break; - } - } - } else { - for ( i in obj ) { - if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { - break; - } - } - } - - return obj; - }, - - // results is for internal usage only - makeArray: function( arr, results ) { - var ret = results || []; - - if ( arr != null ) { - if ( isArrayLike( Object( arr ) ) ) { - jQuery.merge( ret, - typeof arr === "string" ? - [ arr ] : arr - ); - } else { - push.call( ret, arr ); - } - } - - return ret; - }, - - inArray: function( elem, arr, i ) { - return arr == null ? -1 : indexOf.call( arr, elem, i ); - }, - - // Support: Android <=4.0 only, PhantomJS 1 only - // push.apply(_, arraylike) throws on ancient WebKit - merge: function( first, second ) { - var len = +second.length, - j = 0, - i = first.length; - - for ( ; j < len; j++ ) { - first[ i++ ] = second[ j ]; - } - - first.length = i; - - return first; - }, - - grep: function( elems, callback, invert ) { - var callbackInverse, - matches = [], - i = 0, - length = elems.length, - callbackExpect = !invert; - - // Go through the array, only saving the items - // that pass the validator function - for ( ; i < length; i++ ) { - callbackInverse = !callback( elems[ i ], i ); - if ( callbackInverse !== callbackExpect ) { - matches.push( elems[ i ] ); - } - } - - return matches; - }, - - // arg is for internal usage only - map: function( elems, callback, arg ) { - var length, value, - i = 0, - ret = []; - - // Go through the array, translating each of the items to their new values - if ( isArrayLike( elems ) ) { - length = elems.length; - for ( ; i < length; i++ ) { - value = callback( elems[ i ], i, arg ); - - if ( value != null ) { - ret.push( value ); - } - } - - // Go through every key on the object, - } else { - for ( i in elems ) { - value = callback( elems[ i ], i, arg ); - - if ( value != null ) { - ret.push( value ); - } - } - } - - // Flatten any nested arrays - return flat( ret ); - }, - - // A global GUID counter for objects - guid: 1, - - // jQuery.support is not used in Core but other projects attach their - // properties to it so it needs to exist. - support: support -} ); - -if ( typeof Symbol === "function" ) { - jQuery.fn[ Symbol.iterator ] = arr[ Symbol.iterator ]; -} - -// Populate the class2type map -jQuery.each( "Boolean Number String Function Array Date RegExp Object Error Symbol".split( " " ), -function( _i, name ) { - class2type[ "[object " + name + "]" ] = name.toLowerCase(); -} ); - -function isArrayLike( obj ) { - - // Support: real iOS 8.2 only (not reproducible in simulator) - // `in` check used to prevent JIT error (gh-2145) - // hasOwn isn't used here due to false negatives - // regarding Nodelist length in IE - var length = !!obj && "length" in obj && obj.length, - type = toType( obj ); - - if ( isFunction( obj ) || isWindow( obj ) ) { - return false; - } - - return type === "array" || length === 0 || - typeof length === "number" && length > 0 && ( length - 1 ) in obj; -} -var Sizzle = -/*! 
- * Sizzle CSS Selector Engine v2.3.5 - * https://sizzlejs.com/ - * - * Copyright JS Foundation and other contributors - * Released under the MIT license - * https://js.foundation/ - * - * Date: 2020-03-14 - */ -( function( window ) { -var i, - support, - Expr, - getText, - isXML, - tokenize, - compile, - select, - outermostContext, - sortInput, - hasDuplicate, - - // Local document vars - setDocument, - document, - docElem, - documentIsHTML, - rbuggyQSA, - rbuggyMatches, - matches, - contains, - - // Instance-specific data - expando = "sizzle" + 1 * new Date(), - preferredDoc = window.document, - dirruns = 0, - done = 0, - classCache = createCache(), - tokenCache = createCache(), - compilerCache = createCache(), - nonnativeSelectorCache = createCache(), - sortOrder = function( a, b ) { - if ( a === b ) { - hasDuplicate = true; - } - return 0; - }, - - // Instance methods - hasOwn = ( {} ).hasOwnProperty, - arr = [], - pop = arr.pop, - pushNative = arr.push, - push = arr.push, - slice = arr.slice, - - // Use a stripped-down indexOf as it's faster than native - // https://jsperf.com/thor-indexof-vs-for/5 - indexOf = function( list, elem ) { - var i = 0, - len = list.length; - for ( ; i < len; i++ ) { - if ( list[ i ] === elem ) { - return i; - } - } - return -1; - }, - - booleans = "checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|" + - "ismap|loop|multiple|open|readonly|required|scoped", - - // Regular expressions - - // http://www.w3.org/TR/css3-selectors/#whitespace - whitespace = "[\\x20\\t\\r\\n\\f]", - - // https://www.w3.org/TR/css-syntax-3/#ident-token-diagram - identifier = "(?:\\\\[\\da-fA-F]{1,6}" + whitespace + - "?|\\\\[^\\r\\n\\f]|[\\w-]|[^\0-\\x7f])+", - - // Attribute selectors: http://www.w3.org/TR/selectors/#attribute-selectors - attributes = "\\[" + whitespace + "*(" + identifier + ")(?:" + whitespace + - - // Operator (capture 2) - "*([*^$|!~]?=)" + whitespace + - - // "Attribute values must be CSS identifiers [capture 5] - // or strings [capture 3 or capture 4]" - "*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|(" + identifier + "))|)" + - whitespace + "*\\]", - - pseudos = ":(" + identifier + ")(?:\\((" + - - // To reduce the number of selectors needing tokenize in the preFilter, prefer arguments: - // 1. quoted (capture 3; capture 4 or capture 5) - "('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|" + - - // 2. simple (capture 6) - "((?:\\\\.|[^\\\\()[\\]]|" + attributes + ")*)|" + - - // 3. 
anything else (capture 2) - ".*" + - ")\\)|)", - - // Leading and non-escaped trailing whitespace, capturing some non-whitespace characters preceding the latter - rwhitespace = new RegExp( whitespace + "+", "g" ), - rtrim = new RegExp( "^" + whitespace + "+|((?:^|[^\\\\])(?:\\\\.)*)" + - whitespace + "+$", "g" ), - - rcomma = new RegExp( "^" + whitespace + "*," + whitespace + "*" ), - rcombinators = new RegExp( "^" + whitespace + "*([>+~]|" + whitespace + ")" + whitespace + - "*" ), - rdescend = new RegExp( whitespace + "|>" ), - - rpseudo = new RegExp( pseudos ), - ridentifier = new RegExp( "^" + identifier + "$" ), - - matchExpr = { - "ID": new RegExp( "^#(" + identifier + ")" ), - "CLASS": new RegExp( "^\\.(" + identifier + ")" ), - "TAG": new RegExp( "^(" + identifier + "|[*])" ), - "ATTR": new RegExp( "^" + attributes ), - "PSEUDO": new RegExp( "^" + pseudos ), - "CHILD": new RegExp( "^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\(" + - whitespace + "*(even|odd|(([+-]|)(\\d*)n|)" + whitespace + "*(?:([+-]|)" + - whitespace + "*(\\d+)|))" + whitespace + "*\\)|)", "i" ), - "bool": new RegExp( "^(?:" + booleans + ")$", "i" ), - - // For use in libraries implementing .is() - // We use this for POS matching in `select` - "needsContext": new RegExp( "^" + whitespace + - "*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\(" + whitespace + - "*((?:-\\d)?\\d*)" + whitespace + "*\\)|)(?=[^-]|$)", "i" ) - }, - - rhtml = /HTML$/i, - rinputs = /^(?:input|select|textarea|button)$/i, - rheader = /^h\d$/i, - - rnative = /^[^{]+\{\s*\[native \w/, - - // Easily-parseable/retrievable ID or TAG or CLASS selectors - rquickExpr = /^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/, - - rsibling = /[+~]/, - - // CSS escapes - // http://www.w3.org/TR/CSS21/syndata.html#escaped-characters - runescape = new RegExp( "\\\\[\\da-fA-F]{1,6}" + whitespace + "?|\\\\([^\\r\\n\\f])", "g" ), - funescape = function( escape, nonHex ) { - var high = "0x" + escape.slice( 1 ) - 0x10000; - - return nonHex ? - - // Strip the backslash prefix from a non-hex escape sequence - nonHex : - - // Replace a hexadecimal escape sequence with the encoded Unicode code point - // Support: IE <=11+ - // For values outside the Basic Multilingual Plane (BMP), manually construct a - // surrogate pair - high < 0 ? 
- String.fromCharCode( high + 0x10000 ) : - String.fromCharCode( high >> 10 | 0xD800, high & 0x3FF | 0xDC00 ); - }, - - // CSS string/identifier serialization - // https://drafts.csswg.org/cssom/#common-serializing-idioms - rcssescape = /([\0-\x1f\x7f]|^-?\d)|^-$|[^\0-\x1f\x7f-\uFFFF\w-]/g, - fcssescape = function( ch, asCodePoint ) { - if ( asCodePoint ) { - - // U+0000 NULL becomes U+FFFD REPLACEMENT CHARACTER - if ( ch === "\0" ) { - return "\uFFFD"; - } - - // Control characters and (dependent upon position) numbers get escaped as code points - return ch.slice( 0, -1 ) + "\\" + - ch.charCodeAt( ch.length - 1 ).toString( 16 ) + " "; - } - - // Other potentially-special ASCII characters get backslash-escaped - return "\\" + ch; - }, - - // Used for iframes - // See setDocument() - // Removing the function wrapper causes a "Permission Denied" - // error in IE - unloadHandler = function() { - setDocument(); - }, - - inDisabledFieldset = addCombinator( - function( elem ) { - return elem.disabled === true && elem.nodeName.toLowerCase() === "fieldset"; - }, - { dir: "parentNode", next: "legend" } - ); - -// Optimize for push.apply( _, NodeList ) -try { - push.apply( - ( arr = slice.call( preferredDoc.childNodes ) ), - preferredDoc.childNodes - ); - - // Support: Android<4.0 - // Detect silently failing push.apply - // eslint-disable-next-line no-unused-expressions - arr[ preferredDoc.childNodes.length ].nodeType; -} catch ( e ) { - push = { apply: arr.length ? - - // Leverage slice if possible - function( target, els ) { - pushNative.apply( target, slice.call( els ) ); - } : - - // Support: IE<9 - // Otherwise append directly - function( target, els ) { - var j = target.length, - i = 0; - - // Can't trust NodeList.length - while ( ( target[ j++ ] = els[ i++ ] ) ) {} - target.length = j - 1; - } - }; -} - -function Sizzle( selector, context, results, seed ) { - var m, i, elem, nid, match, groups, newSelector, - newContext = context && context.ownerDocument, - - // nodeType defaults to 9, since context defaults to document - nodeType = context ? 
context.nodeType : 9; - - results = results || []; - - // Return early from calls with invalid selector or context - if ( typeof selector !== "string" || !selector || - nodeType !== 1 && nodeType !== 9 && nodeType !== 11 ) { - - return results; - } - - // Try to shortcut find operations (as opposed to filters) in HTML documents - if ( !seed ) { - setDocument( context ); - context = context || document; - - if ( documentIsHTML ) { - - // If the selector is sufficiently simple, try using a "get*By*" DOM method - // (excepting DocumentFragment context, where the methods don't exist) - if ( nodeType !== 11 && ( match = rquickExpr.exec( selector ) ) ) { - - // ID selector - if ( ( m = match[ 1 ] ) ) { - - // Document context - if ( nodeType === 9 ) { - if ( ( elem = context.getElementById( m ) ) ) { - - // Support: IE, Opera, Webkit - // TODO: identify versions - // getElementById can match elements by name instead of ID - if ( elem.id === m ) { - results.push( elem ); - return results; - } - } else { - return results; - } - - // Element context - } else { - - // Support: IE, Opera, Webkit - // TODO: identify versions - // getElementById can match elements by name instead of ID - if ( newContext && ( elem = newContext.getElementById( m ) ) && - contains( context, elem ) && - elem.id === m ) { - - results.push( elem ); - return results; - } - } - - // Type selector - } else if ( match[ 2 ] ) { - push.apply( results, context.getElementsByTagName( selector ) ); - return results; - - // Class selector - } else if ( ( m = match[ 3 ] ) && support.getElementsByClassName && - context.getElementsByClassName ) { - - push.apply( results, context.getElementsByClassName( m ) ); - return results; - } - } - - // Take advantage of querySelectorAll - if ( support.qsa && - !nonnativeSelectorCache[ selector + " " ] && - ( !rbuggyQSA || !rbuggyQSA.test( selector ) ) && - - // Support: IE 8 only - // Exclude object elements - ( nodeType !== 1 || context.nodeName.toLowerCase() !== "object" ) ) { - - newSelector = selector; - newContext = context; - - // qSA considers elements outside a scoping root when evaluating child or - // descendant combinators, which is not what we want. - // In such cases, we work around the behavior by prefixing every selector in the - // list with an ID selector referencing the scope context. - // The technique has to be used as well when a leading combinator is used - // as such selectors are not recognized by querySelectorAll. - // Thanks to Andrew Dupont for this technique. - if ( nodeType === 1 && - ( rdescend.test( selector ) || rcombinators.test( selector ) ) ) { - - // Expand context for sibling selectors - newContext = rsibling.test( selector ) && testContext( context.parentNode ) || - context; - - // We can use :scope instead of the ID hack if the browser - // supports it & if we're not changing the context. - if ( newContext !== context || !support.scope ) { - - // Capture the context ID, setting it first if necessary - if ( ( nid = context.getAttribute( "id" ) ) ) { - nid = nid.replace( rcssescape, fcssescape ); - } else { - context.setAttribute( "id", ( nid = expando ) ); - } - } - - // Prefix every selector in the list - groups = tokenize( selector ); - i = groups.length; - while ( i-- ) { - groups[ i ] = ( nid ? 
"#" + nid : ":scope" ) + " " + - toSelector( groups[ i ] ); - } - newSelector = groups.join( "," ); - } - - try { - push.apply( results, - newContext.querySelectorAll( newSelector ) - ); - return results; - } catch ( qsaError ) { - nonnativeSelectorCache( selector, true ); - } finally { - if ( nid === expando ) { - context.removeAttribute( "id" ); - } - } - } - } - } - - // All others - return select( selector.replace( rtrim, "$1" ), context, results, seed ); -} - -/** - * Create key-value caches of limited size - * @returns {function(string, object)} Returns the Object data after storing it on itself with - * property name the (space-suffixed) string and (if the cache is larger than Expr.cacheLength) - * deleting the oldest entry - */ -function createCache() { - var keys = []; - - function cache( key, value ) { - - // Use (key + " ") to avoid collision with native prototype properties (see Issue #157) - if ( keys.push( key + " " ) > Expr.cacheLength ) { - - // Only keep the most recent entries - delete cache[ keys.shift() ]; - } - return ( cache[ key + " " ] = value ); - } - return cache; -} - -/** - * Mark a function for special use by Sizzle - * @param {Function} fn The function to mark - */ -function markFunction( fn ) { - fn[ expando ] = true; - return fn; -} - -/** - * Support testing using an element - * @param {Function} fn Passed the created element and returns a boolean result - */ -function assert( fn ) { - var el = document.createElement( "fieldset" ); - - try { - return !!fn( el ); - } catch ( e ) { - return false; - } finally { - - // Remove from its parent by default - if ( el.parentNode ) { - el.parentNode.removeChild( el ); - } - - // release memory in IE - el = null; - } -} - -/** - * Adds the same handler for all of the specified attrs - * @param {String} attrs Pipe-separated list of attributes - * @param {Function} handler The method that will be applied - */ -function addHandle( attrs, handler ) { - var arr = attrs.split( "|" ), - i = arr.length; - - while ( i-- ) { - Expr.attrHandle[ arr[ i ] ] = handler; - } -} - -/** - * Checks document order of two siblings - * @param {Element} a - * @param {Element} b - * @returns {Number} Returns less than 0 if a precedes b, greater than 0 if a follows b - */ -function siblingCheck( a, b ) { - var cur = b && a, - diff = cur && a.nodeType === 1 && b.nodeType === 1 && - a.sourceIndex - b.sourceIndex; - - // Use IE sourceIndex if available on both nodes - if ( diff ) { - return diff; - } - - // Check if b follows a - if ( cur ) { - while ( ( cur = cur.nextSibling ) ) { - if ( cur === b ) { - return -1; - } - } - } - - return a ? 
1 : -1; -} - -/** - * Returns a function to use in pseudos for input types - * @param {String} type - */ -function createInputPseudo( type ) { - return function( elem ) { - var name = elem.nodeName.toLowerCase(); - return name === "input" && elem.type === type; - }; -} - -/** - * Returns a function to use in pseudos for buttons - * @param {String} type - */ -function createButtonPseudo( type ) { - return function( elem ) { - var name = elem.nodeName.toLowerCase(); - return ( name === "input" || name === "button" ) && elem.type === type; - }; -} - -/** - * Returns a function to use in pseudos for :enabled/:disabled - * @param {Boolean} disabled true for :disabled; false for :enabled - */ -function createDisabledPseudo( disabled ) { - - // Known :disabled false positives: fieldset[disabled] > legend:nth-of-type(n+2) :can-disable - return function( elem ) { - - // Only certain elements can match :enabled or :disabled - // https://html.spec.whatwg.org/multipage/scripting.html#selector-enabled - // https://html.spec.whatwg.org/multipage/scripting.html#selector-disabled - if ( "form" in elem ) { - - // Check for inherited disabledness on relevant non-disabled elements: - // * listed form-associated elements in a disabled fieldset - // https://html.spec.whatwg.org/multipage/forms.html#category-listed - // https://html.spec.whatwg.org/multipage/forms.html#concept-fe-disabled - // * option elements in a disabled optgroup - // https://html.spec.whatwg.org/multipage/forms.html#concept-option-disabled - // All such elements have a "form" property. - if ( elem.parentNode && elem.disabled === false ) { - - // Option elements defer to a parent optgroup if present - if ( "label" in elem ) { - if ( "label" in elem.parentNode ) { - return elem.parentNode.disabled === disabled; - } else { - return elem.disabled === disabled; - } - } - - // Support: IE 6 - 11 - // Use the isDisabled shortcut property to check for disabled fieldset ancestors - return elem.isDisabled === disabled || - - // Where there is no isDisabled, check manually - /* jshint -W018 */ - elem.isDisabled !== !disabled && - inDisabledFieldset( elem ) === disabled; - } - - return elem.disabled === disabled; - - // Try to winnow out elements that can't be disabled before trusting the disabled property. - // Some victims get caught in our net (label, legend, menu, track), but it shouldn't - // even exist on them, let alone have a boolean value. 
- } else if ( "label" in elem ) { - return elem.disabled === disabled; - } - - // Remaining elements are neither :enabled nor :disabled - return false; - }; -} - -/** - * Returns a function to use in pseudos for positionals - * @param {Function} fn - */ -function createPositionalPseudo( fn ) { - return markFunction( function( argument ) { - argument = +argument; - return markFunction( function( seed, matches ) { - var j, - matchIndexes = fn( [], seed.length, argument ), - i = matchIndexes.length; - - // Match elements found at the specified indexes - while ( i-- ) { - if ( seed[ ( j = matchIndexes[ i ] ) ] ) { - seed[ j ] = !( matches[ j ] = seed[ j ] ); - } - } - } ); - } ); -} - -/** - * Checks a node for validity as a Sizzle context - * @param {Element|Object=} context - * @returns {Element|Object|Boolean} The input node if acceptable, otherwise a falsy value - */ -function testContext( context ) { - return context && typeof context.getElementsByTagName !== "undefined" && context; -} - -// Expose support vars for convenience -support = Sizzle.support = {}; - -/** - * Detects XML nodes - * @param {Element|Object} elem An element or a document - * @returns {Boolean} True iff elem is a non-HTML XML node - */ -isXML = Sizzle.isXML = function( elem ) { - var namespace = elem.namespaceURI, - docElem = ( elem.ownerDocument || elem ).documentElement; - - // Support: IE <=8 - // Assume HTML when documentElement doesn't yet exist, such as inside loading iframes - // https://bugs.jquery.com/ticket/4833 - return !rhtml.test( namespace || docElem && docElem.nodeName || "HTML" ); -}; - -/** - * Sets document-related variables once based on the current document - * @param {Element|Object} [doc] An element or document object to use to set the document - * @returns {Object} Returns the current document - */ -setDocument = Sizzle.setDocument = function( node ) { - var hasCompare, subWindow, - doc = node ? node.ownerDocument || node : preferredDoc; - - // Return early if doc is invalid or already selected - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. - // eslint-disable-next-line eqeqeq - if ( doc == document || doc.nodeType !== 9 || !doc.documentElement ) { - return document; - } - - // Update global variables - document = doc; - docElem = document.documentElement; - documentIsHTML = !isXML( document ); - - // Support: IE 9 - 11+, Edge 12 - 18+ - // Accessing iframe documents after unload throws "permission denied" errors (jQuery #13936) - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. - // eslint-disable-next-line eqeqeq - if ( preferredDoc != document && - ( subWindow = document.defaultView ) && subWindow.top !== subWindow ) { - - // Support: IE 11, Edge - if ( subWindow.addEventListener ) { - subWindow.addEventListener( "unload", unloadHandler, false ); - - // Support: IE 9 - 10 only - } else if ( subWindow.attachEvent ) { - subWindow.attachEvent( "onunload", unloadHandler ); - } - } - - // Support: IE 8 - 11+, Edge 12 - 18+, Chrome <=16 - 25 only, Firefox <=3.6 - 31 only, - // Safari 4 - 5 only, Opera <=11.6 - 12.x only - // IE/Edge & older browsers don't support the :scope pseudo-class. - // Support: Safari 6.0 only - // Safari 6.0 supports :scope but it's an alias of :root there. 
- support.scope = assert( function( el ) { - docElem.appendChild( el ).appendChild( document.createElement( "div" ) ); - return typeof el.querySelectorAll !== "undefined" && - !el.querySelectorAll( ":scope fieldset div" ).length; - } ); - - /* Attributes - ---------------------------------------------------------------------- */ - - // Support: IE<8 - // Verify that getAttribute really returns attributes and not properties - // (excepting IE8 booleans) - support.attributes = assert( function( el ) { - el.className = "i"; - return !el.getAttribute( "className" ); - } ); - - /* getElement(s)By* - ---------------------------------------------------------------------- */ - - // Check if getElementsByTagName("*") returns only elements - support.getElementsByTagName = assert( function( el ) { - el.appendChild( document.createComment( "" ) ); - return !el.getElementsByTagName( "*" ).length; - } ); - - // Support: IE<9 - support.getElementsByClassName = rnative.test( document.getElementsByClassName ); - - // Support: IE<10 - // Check if getElementById returns elements by name - // The broken getElementById methods don't pick up programmatically-set names, - // so use a roundabout getElementsByName test - support.getById = assert( function( el ) { - docElem.appendChild( el ).id = expando; - return !document.getElementsByName || !document.getElementsByName( expando ).length; - } ); - - // ID filter and find - if ( support.getById ) { - Expr.filter[ "ID" ] = function( id ) { - var attrId = id.replace( runescape, funescape ); - return function( elem ) { - return elem.getAttribute( "id" ) === attrId; - }; - }; - Expr.find[ "ID" ] = function( id, context ) { - if ( typeof context.getElementById !== "undefined" && documentIsHTML ) { - var elem = context.getElementById( id ); - return elem ? [ elem ] : []; - } - }; - } else { - Expr.filter[ "ID" ] = function( id ) { - var attrId = id.replace( runescape, funescape ); - return function( elem ) { - var node = typeof elem.getAttributeNode !== "undefined" && - elem.getAttributeNode( "id" ); - return node && node.value === attrId; - }; - }; - - // Support: IE 6 - 7 only - // getElementById is not reliable as a find shortcut - Expr.find[ "ID" ] = function( id, context ) { - if ( typeof context.getElementById !== "undefined" && documentIsHTML ) { - var node, i, elems, - elem = context.getElementById( id ); - - if ( elem ) { - - // Verify the id attribute - node = elem.getAttributeNode( "id" ); - if ( node && node.value === id ) { - return [ elem ]; - } - - // Fall back on getElementsByName - elems = context.getElementsByName( id ); - i = 0; - while ( ( elem = elems[ i++ ] ) ) { - node = elem.getAttributeNode( "id" ); - if ( node && node.value === id ) { - return [ elem ]; - } - } - } - - return []; - } - }; - } - - // Tag - Expr.find[ "TAG" ] = support.getElementsByTagName ? 
- function( tag, context ) { - if ( typeof context.getElementsByTagName !== "undefined" ) { - return context.getElementsByTagName( tag ); - - // DocumentFragment nodes don't have gEBTN - } else if ( support.qsa ) { - return context.querySelectorAll( tag ); - } - } : - - function( tag, context ) { - var elem, - tmp = [], - i = 0, - - // By happy coincidence, a (broken) gEBTN appears on DocumentFragment nodes too - results = context.getElementsByTagName( tag ); - - // Filter out possible comments - if ( tag === "*" ) { - while ( ( elem = results[ i++ ] ) ) { - if ( elem.nodeType === 1 ) { - tmp.push( elem ); - } - } - - return tmp; - } - return results; - }; - - // Class - Expr.find[ "CLASS" ] = support.getElementsByClassName && function( className, context ) { - if ( typeof context.getElementsByClassName !== "undefined" && documentIsHTML ) { - return context.getElementsByClassName( className ); - } - }; - - /* QSA/matchesSelector - ---------------------------------------------------------------------- */ - - // QSA and matchesSelector support - - // matchesSelector(:active) reports false when true (IE9/Opera 11.5) - rbuggyMatches = []; - - // qSa(:focus) reports false when true (Chrome 21) - // We allow this because of a bug in IE8/9 that throws an error - // whenever `document.activeElement` is accessed on an iframe - // So, we allow :focus to pass through QSA all the time to avoid the IE error - // See https://bugs.jquery.com/ticket/13378 - rbuggyQSA = []; - - if ( ( support.qsa = rnative.test( document.querySelectorAll ) ) ) { - - // Build QSA regex - // Regex strategy adopted from Diego Perini - assert( function( el ) { - - var input; - - // Select is set to empty string on purpose - // This is to test IE's treatment of not explicitly - // setting a boolean content attribute, - // since its presence should be enough - // https://bugs.jquery.com/ticket/12359 - docElem.appendChild( el ).innerHTML = "" + - ""; - - // Support: IE8, Opera 11-12.16 - // Nothing should be selected when empty strings follow ^= or $= or *= - // The test attribute must be unknown in Opera but "safe" for WinRT - // https://msdn.microsoft.com/en-us/library/ie/hh465388.aspx#attribute_section - if ( el.querySelectorAll( "[msallowcapture^='']" ).length ) { - rbuggyQSA.push( "[*^$]=" + whitespace + "*(?:''|\"\")" ); - } - - // Support: IE8 - // Boolean attributes and "value" are not treated correctly - if ( !el.querySelectorAll( "[selected]" ).length ) { - rbuggyQSA.push( "\\[" + whitespace + "*(?:value|" + booleans + ")" ); - } - - // Support: Chrome<29, Android<4.4, Safari<7.0+, iOS<7.0+, PhantomJS<1.9.8+ - if ( !el.querySelectorAll( "[id~=" + expando + "-]" ).length ) { - rbuggyQSA.push( "~=" ); - } - - // Support: IE 11+, Edge 15 - 18+ - // IE 11/Edge don't find elements on a `[name='']` query in some cases. - // Adding a temporary attribute to the document before the selection works - // around the issue. - // Interestingly, IE 10 & older don't seem to have the issue. 
- input = document.createElement( "input" ); - input.setAttribute( "name", "" ); - el.appendChild( input ); - if ( !el.querySelectorAll( "[name='']" ).length ) { - rbuggyQSA.push( "\\[" + whitespace + "*name" + whitespace + "*=" + - whitespace + "*(?:''|\"\")" ); - } - - // Webkit/Opera - :checked should return selected option elements - // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked - // IE8 throws error here and will not see later tests - if ( !el.querySelectorAll( ":checked" ).length ) { - rbuggyQSA.push( ":checked" ); - } - - // Support: Safari 8+, iOS 8+ - // https://bugs.webkit.org/show_bug.cgi?id=136851 - // In-page `selector#id sibling-combinator selector` fails - if ( !el.querySelectorAll( "a#" + expando + "+*" ).length ) { - rbuggyQSA.push( ".#.+[+~]" ); - } - - // Support: Firefox <=3.6 - 5 only - // Old Firefox doesn't throw on a badly-escaped identifier. - el.querySelectorAll( "\\\f" ); - rbuggyQSA.push( "[\\r\\n\\f]" ); - } ); - - assert( function( el ) { - el.innerHTML = "" + - ""; - - // Support: Windows 8 Native Apps - // The type and name attributes are restricted during .innerHTML assignment - var input = document.createElement( "input" ); - input.setAttribute( "type", "hidden" ); - el.appendChild( input ).setAttribute( "name", "D" ); - - // Support: IE8 - // Enforce case-sensitivity of name attribute - if ( el.querySelectorAll( "[name=d]" ).length ) { - rbuggyQSA.push( "name" + whitespace + "*[*^$|!~]?=" ); - } - - // FF 3.5 - :enabled/:disabled and hidden elements (hidden elements are still enabled) - // IE8 throws error here and will not see later tests - if ( el.querySelectorAll( ":enabled" ).length !== 2 ) { - rbuggyQSA.push( ":enabled", ":disabled" ); - } - - // Support: IE9-11+ - // IE's :disabled selector does not pick up the children of disabled fieldsets - docElem.appendChild( el ).disabled = true; - if ( el.querySelectorAll( ":disabled" ).length !== 2 ) { - rbuggyQSA.push( ":enabled", ":disabled" ); - } - - // Support: Opera 10 - 11 only - // Opera 10-11 does not throw on post-comma invalid pseudos - el.querySelectorAll( "*,:x" ); - rbuggyQSA.push( ",.*:" ); - } ); - } - - if ( ( support.matchesSelector = rnative.test( ( matches = docElem.matches || - docElem.webkitMatchesSelector || - docElem.mozMatchesSelector || - docElem.oMatchesSelector || - docElem.msMatchesSelector ) ) ) ) { - - assert( function( el ) { - - // Check to see if it's possible to do matchesSelector - // on a disconnected node (IE 9) - support.disconnectedMatch = matches.call( el, "*" ); - - // This should fail with an exception - // Gecko does not error, returns false instead - matches.call( el, "[s!='']:x" ); - rbuggyMatches.push( "!=", pseudos ); - } ); - } - - rbuggyQSA = rbuggyQSA.length && new RegExp( rbuggyQSA.join( "|" ) ); - rbuggyMatches = rbuggyMatches.length && new RegExp( rbuggyMatches.join( "|" ) ); - - /* Contains - ---------------------------------------------------------------------- */ - hasCompare = rnative.test( docElem.compareDocumentPosition ); - - // Element contains another - // Purposefully self-exclusive - // As in, an element does not contain itself - contains = hasCompare || rnative.test( docElem.contains ) ? - function( a, b ) { - var adown = a.nodeType === 9 ? a.documentElement : a, - bup = b && b.parentNode; - return a === bup || !!( bup && bup.nodeType === 1 && ( - adown.contains ? 
- adown.contains( bup ) : - a.compareDocumentPosition && a.compareDocumentPosition( bup ) & 16 - ) ); - } : - function( a, b ) { - if ( b ) { - while ( ( b = b.parentNode ) ) { - if ( b === a ) { - return true; - } - } - } - return false; - }; - - /* Sorting - ---------------------------------------------------------------------- */ - - // Document order sorting - sortOrder = hasCompare ? - function( a, b ) { - - // Flag for duplicate removal - if ( a === b ) { - hasDuplicate = true; - return 0; - } - - // Sort on method existence if only one input has compareDocumentPosition - var compare = !a.compareDocumentPosition - !b.compareDocumentPosition; - if ( compare ) { - return compare; - } - - // Calculate position if both inputs belong to the same document - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. - // eslint-disable-next-line eqeqeq - compare = ( a.ownerDocument || a ) == ( b.ownerDocument || b ) ? - a.compareDocumentPosition( b ) : - - // Otherwise we know they are disconnected - 1; - - // Disconnected nodes - if ( compare & 1 || - ( !support.sortDetached && b.compareDocumentPosition( a ) === compare ) ) { - - // Choose the first element that is related to our preferred document - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. - // eslint-disable-next-line eqeqeq - if ( a == document || a.ownerDocument == preferredDoc && - contains( preferredDoc, a ) ) { - return -1; - } - - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. - // eslint-disable-next-line eqeqeq - if ( b == document || b.ownerDocument == preferredDoc && - contains( preferredDoc, b ) ) { - return 1; - } - - // Maintain original order - return sortInput ? - ( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) : - 0; - } - - return compare & 4 ? -1 : 1; - } : - function( a, b ) { - - // Exit early if the nodes are identical - if ( a === b ) { - hasDuplicate = true; - return 0; - } - - var cur, - i = 0, - aup = a.parentNode, - bup = b.parentNode, - ap = [ a ], - bp = [ b ]; - - // Parentless nodes are either documents or disconnected - if ( !aup || !bup ) { - - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. - /* eslint-disable eqeqeq */ - return a == document ? -1 : - b == document ? 1 : - /* eslint-enable eqeqeq */ - aup ? -1 : - bup ? 1 : - sortInput ? - ( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) : - 0; - - // If the nodes are siblings, we can do a quick check - } else if ( aup === bup ) { - return siblingCheck( a, b ); - } - - // Otherwise we need full lists of their ancestors for comparison - cur = a; - while ( ( cur = cur.parentNode ) ) { - ap.unshift( cur ); - } - cur = b; - while ( ( cur = cur.parentNode ) ) { - bp.unshift( cur ); - } - - // Walk down the tree looking for a discrepancy - while ( ap[ i ] === bp[ i ] ) { - i++; - } - - return i ? - - // Do a sibling check if the nodes have a common ancestor - siblingCheck( ap[ i ], bp[ i ] ) : - - // Otherwise nodes in our document sort first - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. 
- /* eslint-disable eqeqeq */ - ap[ i ] == preferredDoc ? -1 : - bp[ i ] == preferredDoc ? 1 : - /* eslint-enable eqeqeq */ - 0; - }; - - return document; -}; - -Sizzle.matches = function( expr, elements ) { - return Sizzle( expr, null, null, elements ); -}; - -Sizzle.matchesSelector = function( elem, expr ) { - setDocument( elem ); - - if ( support.matchesSelector && documentIsHTML && - !nonnativeSelectorCache[ expr + " " ] && - ( !rbuggyMatches || !rbuggyMatches.test( expr ) ) && - ( !rbuggyQSA || !rbuggyQSA.test( expr ) ) ) { - - try { - var ret = matches.call( elem, expr ); - - // IE 9's matchesSelector returns false on disconnected nodes - if ( ret || support.disconnectedMatch || - - // As well, disconnected nodes are said to be in a document - // fragment in IE 9 - elem.document && elem.document.nodeType !== 11 ) { - return ret; - } - } catch ( e ) { - nonnativeSelectorCache( expr, true ); - } - } - - return Sizzle( expr, document, null, [ elem ] ).length > 0; -}; - -Sizzle.contains = function( context, elem ) { - - // Set document vars if needed - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. - // eslint-disable-next-line eqeqeq - if ( ( context.ownerDocument || context ) != document ) { - setDocument( context ); - } - return contains( context, elem ); -}; - -Sizzle.attr = function( elem, name ) { - - // Set document vars if needed - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. - // eslint-disable-next-line eqeqeq - if ( ( elem.ownerDocument || elem ) != document ) { - setDocument( elem ); - } - - var fn = Expr.attrHandle[ name.toLowerCase() ], - - // Don't get fooled by Object.prototype properties (jQuery #13807) - val = fn && hasOwn.call( Expr.attrHandle, name.toLowerCase() ) ? - fn( elem, name, !documentIsHTML ) : - undefined; - - return val !== undefined ? - val : - support.attributes || !documentIsHTML ? - elem.getAttribute( name ) : - ( val = elem.getAttributeNode( name ) ) && val.specified ? 
- val.value : - null; -}; - -Sizzle.escape = function( sel ) { - return ( sel + "" ).replace( rcssescape, fcssescape ); -}; - -Sizzle.error = function( msg ) { - throw new Error( "Syntax error, unrecognized expression: " + msg ); -}; - -/** - * Document sorting and removing duplicates - * @param {ArrayLike} results - */ -Sizzle.uniqueSort = function( results ) { - var elem, - duplicates = [], - j = 0, - i = 0; - - // Unless we *know* we can detect duplicates, assume their presence - hasDuplicate = !support.detectDuplicates; - sortInput = !support.sortStable && results.slice( 0 ); - results.sort( sortOrder ); - - if ( hasDuplicate ) { - while ( ( elem = results[ i++ ] ) ) { - if ( elem === results[ i ] ) { - j = duplicates.push( i ); - } - } - while ( j-- ) { - results.splice( duplicates[ j ], 1 ); - } - } - - // Clear input after sorting to release objects - // See https://github.com/jquery/sizzle/pull/225 - sortInput = null; - - return results; -}; - -/** - * Utility function for retrieving the text value of an array of DOM nodes - * @param {Array|Element} elem - */ -getText = Sizzle.getText = function( elem ) { - var node, - ret = "", - i = 0, - nodeType = elem.nodeType; - - if ( !nodeType ) { - - // If no nodeType, this is expected to be an array - while ( ( node = elem[ i++ ] ) ) { - - // Do not traverse comment nodes - ret += getText( node ); - } - } else if ( nodeType === 1 || nodeType === 9 || nodeType === 11 ) { - - // Use textContent for elements - // innerText usage removed for consistency of new lines (jQuery #11153) - if ( typeof elem.textContent === "string" ) { - return elem.textContent; - } else { - - // Traverse its children - for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { - ret += getText( elem ); - } - } - } else if ( nodeType === 3 || nodeType === 4 ) { - return elem.nodeValue; - } - - // Do not include comment or processing instruction nodes - - return ret; -}; - -Expr = Sizzle.selectors = { - - // Can be adjusted by the user - cacheLength: 50, - - createPseudo: markFunction, - - match: matchExpr, - - attrHandle: {}, - - find: {}, - - relative: { - ">": { dir: "parentNode", first: true }, - " ": { dir: "parentNode" }, - "+": { dir: "previousSibling", first: true }, - "~": { dir: "previousSibling" } - }, - - preFilter: { - "ATTR": function( match ) { - match[ 1 ] = match[ 1 ].replace( runescape, funescape ); - - // Move the given value to match[3] whether quoted or unquoted - match[ 3 ] = ( match[ 3 ] || match[ 4 ] || - match[ 5 ] || "" ).replace( runescape, funescape ); - - if ( match[ 2 ] === "~=" ) { - match[ 3 ] = " " + match[ 3 ] + " "; - } - - return match.slice( 0, 4 ); - }, - - "CHILD": function( match ) { - - /* matches from matchExpr["CHILD"] - 1 type (only|nth|...) - 2 what (child|of-type) - 3 argument (even|odd|\d*|\d*n([+-]\d+)?|...) - 4 xn-component of xn+y argument ([+-]?\d*n|) - 5 sign of xn-component - 6 x of xn-component - 7 sign of y-component - 8 y of y-component - */ - match[ 1 ] = match[ 1 ].toLowerCase(); - - if ( match[ 1 ].slice( 0, 3 ) === "nth" ) { - - // nth-* requires argument - if ( !match[ 3 ] ) { - Sizzle.error( match[ 0 ] ); - } - - // numeric x and y parameters for Expr.filter.CHILD - // remember that false/true cast respectively to 0/1 - match[ 4 ] = +( match[ 4 ] ? 
- match[ 5 ] + ( match[ 6 ] || 1 ) : - 2 * ( match[ 3 ] === "even" || match[ 3 ] === "odd" ) ); - match[ 5 ] = +( ( match[ 7 ] + match[ 8 ] ) || match[ 3 ] === "odd" ); - - // other types prohibit arguments - } else if ( match[ 3 ] ) { - Sizzle.error( match[ 0 ] ); - } - - return match; - }, - - "PSEUDO": function( match ) { - var excess, - unquoted = !match[ 6 ] && match[ 2 ]; - - if ( matchExpr[ "CHILD" ].test( match[ 0 ] ) ) { - return null; - } - - // Accept quoted arguments as-is - if ( match[ 3 ] ) { - match[ 2 ] = match[ 4 ] || match[ 5 ] || ""; - - // Strip excess characters from unquoted arguments - } else if ( unquoted && rpseudo.test( unquoted ) && - - // Get excess from tokenize (recursively) - ( excess = tokenize( unquoted, true ) ) && - - // advance to the next closing parenthesis - ( excess = unquoted.indexOf( ")", unquoted.length - excess ) - unquoted.length ) ) { - - // excess is a negative index - match[ 0 ] = match[ 0 ].slice( 0, excess ); - match[ 2 ] = unquoted.slice( 0, excess ); - } - - // Return only captures needed by the pseudo filter method (type and argument) - return match.slice( 0, 3 ); - } - }, - - filter: { - - "TAG": function( nodeNameSelector ) { - var nodeName = nodeNameSelector.replace( runescape, funescape ).toLowerCase(); - return nodeNameSelector === "*" ? - function() { - return true; - } : - function( elem ) { - return elem.nodeName && elem.nodeName.toLowerCase() === nodeName; - }; - }, - - "CLASS": function( className ) { - var pattern = classCache[ className + " " ]; - - return pattern || - ( pattern = new RegExp( "(^|" + whitespace + - ")" + className + "(" + whitespace + "|$)" ) ) && classCache( - className, function( elem ) { - return pattern.test( - typeof elem.className === "string" && elem.className || - typeof elem.getAttribute !== "undefined" && - elem.getAttribute( "class" ) || - "" - ); - } ); - }, - - "ATTR": function( name, operator, check ) { - return function( elem ) { - var result = Sizzle.attr( elem, name ); - - if ( result == null ) { - return operator === "!="; - } - if ( !operator ) { - return true; - } - - result += ""; - - /* eslint-disable max-len */ - - return operator === "=" ? result === check : - operator === "!=" ? result !== check : - operator === "^=" ? check && result.indexOf( check ) === 0 : - operator === "*=" ? check && result.indexOf( check ) > -1 : - operator === "$=" ? check && result.slice( -check.length ) === check : - operator === "~=" ? ( " " + result.replace( rwhitespace, " " ) + " " ).indexOf( check ) > -1 : - operator === "|=" ? result === check || result.slice( 0, check.length + 1 ) === check + "-" : - false; - /* eslint-enable max-len */ - - }; - }, - - "CHILD": function( type, what, _argument, first, last ) { - var simple = type.slice( 0, 3 ) !== "nth", - forward = type.slice( -4 ) !== "last", - ofType = what === "of-type"; - - return first === 1 && last === 0 ? - - // Shortcut for :nth-*(n) - function( elem ) { - return !!elem.parentNode; - } : - - function( elem, _context, xml ) { - var cache, uniqueCache, outerCache, node, nodeIndex, start, - dir = simple !== forward ? "nextSibling" : "previousSibling", - parent = elem.parentNode, - name = ofType && elem.nodeName.toLowerCase(), - useCache = !xml && !ofType, - diff = false; - - if ( parent ) { - - // :(first|last|only)-(child|of-type) - if ( simple ) { - while ( dir ) { - node = elem; - while ( ( node = node[ dir ] ) ) { - if ( ofType ? 
- node.nodeName.toLowerCase() === name : - node.nodeType === 1 ) { - - return false; - } - } - - // Reverse direction for :only-* (if we haven't yet done so) - start = dir = type === "only" && !start && "nextSibling"; - } - return true; - } - - start = [ forward ? parent.firstChild : parent.lastChild ]; - - // non-xml :nth-child(...) stores cache data on `parent` - if ( forward && useCache ) { - - // Seek `elem` from a previously-cached index - - // ...in a gzip-friendly way - node = parent; - outerCache = node[ expando ] || ( node[ expando ] = {} ); - - // Support: IE <9 only - // Defend against cloned attroperties (jQuery gh-1709) - uniqueCache = outerCache[ node.uniqueID ] || - ( outerCache[ node.uniqueID ] = {} ); - - cache = uniqueCache[ type ] || []; - nodeIndex = cache[ 0 ] === dirruns && cache[ 1 ]; - diff = nodeIndex && cache[ 2 ]; - node = nodeIndex && parent.childNodes[ nodeIndex ]; - - while ( ( node = ++nodeIndex && node && node[ dir ] || - - // Fallback to seeking `elem` from the start - ( diff = nodeIndex = 0 ) || start.pop() ) ) { - - // When found, cache indexes on `parent` and break - if ( node.nodeType === 1 && ++diff && node === elem ) { - uniqueCache[ type ] = [ dirruns, nodeIndex, diff ]; - break; - } - } - - } else { - - // Use previously-cached element index if available - if ( useCache ) { - - // ...in a gzip-friendly way - node = elem; - outerCache = node[ expando ] || ( node[ expando ] = {} ); - - // Support: IE <9 only - // Defend against cloned attroperties (jQuery gh-1709) - uniqueCache = outerCache[ node.uniqueID ] || - ( outerCache[ node.uniqueID ] = {} ); - - cache = uniqueCache[ type ] || []; - nodeIndex = cache[ 0 ] === dirruns && cache[ 1 ]; - diff = nodeIndex; - } - - // xml :nth-child(...) - // or :nth-last-child(...) or :nth(-last)?-of-type(...) - if ( diff === false ) { - - // Use the same loop as above to seek `elem` from the start - while ( ( node = ++nodeIndex && node && node[ dir ] || - ( diff = nodeIndex = 0 ) || start.pop() ) ) { - - if ( ( ofType ? - node.nodeName.toLowerCase() === name : - node.nodeType === 1 ) && - ++diff ) { - - // Cache the index of each encountered element - if ( useCache ) { - outerCache = node[ expando ] || - ( node[ expando ] = {} ); - - // Support: IE <9 only - // Defend against cloned attroperties (jQuery gh-1709) - uniqueCache = outerCache[ node.uniqueID ] || - ( outerCache[ node.uniqueID ] = {} ); - - uniqueCache[ type ] = [ dirruns, diff ]; - } - - if ( node === elem ) { - break; - } - } - } - } - } - - // Incorporate the offset, then check against cycle size - diff -= last; - return diff === first || ( diff % first === 0 && diff / first >= 0 ); - } - }; - }, - - "PSEUDO": function( pseudo, argument ) { - - // pseudo-class names are case-insensitive - // http://www.w3.org/TR/selectors/#pseudo-classes - // Prioritize by case sensitivity in case custom pseudos are added with uppercase letters - // Remember that setFilters inherits from pseudos - var args, - fn = Expr.pseudos[ pseudo ] || Expr.setFilters[ pseudo.toLowerCase() ] || - Sizzle.error( "unsupported pseudo: " + pseudo ); - - // The user may use createPseudo to indicate that - // arguments are needed to create the filter function - // just as Sizzle does - if ( fn[ expando ] ) { - return fn( argument ); - } - - // But maintain support for old signatures - if ( fn.length > 1 ) { - args = [ pseudo, pseudo, "", argument ]; - return Expr.setFilters.hasOwnProperty( pseudo.toLowerCase() ) ? 
- markFunction( function( seed, matches ) { - var idx, - matched = fn( seed, argument ), - i = matched.length; - while ( i-- ) { - idx = indexOf( seed, matched[ i ] ); - seed[ idx ] = !( matches[ idx ] = matched[ i ] ); - } - } ) : - function( elem ) { - return fn( elem, 0, args ); - }; - } - - return fn; - } - }, - - pseudos: { - - // Potentially complex pseudos - "not": markFunction( function( selector ) { - - // Trim the selector passed to compile - // to avoid treating leading and trailing - // spaces as combinators - var input = [], - results = [], - matcher = compile( selector.replace( rtrim, "$1" ) ); - - return matcher[ expando ] ? - markFunction( function( seed, matches, _context, xml ) { - var elem, - unmatched = matcher( seed, null, xml, [] ), - i = seed.length; - - // Match elements unmatched by `matcher` - while ( i-- ) { - if ( ( elem = unmatched[ i ] ) ) { - seed[ i ] = !( matches[ i ] = elem ); - } - } - } ) : - function( elem, _context, xml ) { - input[ 0 ] = elem; - matcher( input, null, xml, results ); - - // Don't keep the element (issue #299) - input[ 0 ] = null; - return !results.pop(); - }; - } ), - - "has": markFunction( function( selector ) { - return function( elem ) { - return Sizzle( selector, elem ).length > 0; - }; - } ), - - "contains": markFunction( function( text ) { - text = text.replace( runescape, funescape ); - return function( elem ) { - return ( elem.textContent || getText( elem ) ).indexOf( text ) > -1; - }; - } ), - - // "Whether an element is represented by a :lang() selector - // is based solely on the element's language value - // being equal to the identifier C, - // or beginning with the identifier C immediately followed by "-". - // The matching of C against the element's language value is performed case-insensitively. - // The identifier C does not have to be a valid language name." - // http://www.w3.org/TR/selectors/#lang-pseudo - "lang": markFunction( function( lang ) { - - // lang value must be a valid identifier - if ( !ridentifier.test( lang || "" ) ) { - Sizzle.error( "unsupported lang: " + lang ); - } - lang = lang.replace( runescape, funescape ).toLowerCase(); - return function( elem ) { - var elemLang; - do { - if ( ( elemLang = documentIsHTML ? 
- elem.lang : - elem.getAttribute( "xml:lang" ) || elem.getAttribute( "lang" ) ) ) { - - elemLang = elemLang.toLowerCase(); - return elemLang === lang || elemLang.indexOf( lang + "-" ) === 0; - } - } while ( ( elem = elem.parentNode ) && elem.nodeType === 1 ); - return false; - }; - } ), - - // Miscellaneous - "target": function( elem ) { - var hash = window.location && window.location.hash; - return hash && hash.slice( 1 ) === elem.id; - }, - - "root": function( elem ) { - return elem === docElem; - }, - - "focus": function( elem ) { - return elem === document.activeElement && - ( !document.hasFocus || document.hasFocus() ) && - !!( elem.type || elem.href || ~elem.tabIndex ); - }, - - // Boolean properties - "enabled": createDisabledPseudo( false ), - "disabled": createDisabledPseudo( true ), - - "checked": function( elem ) { - - // In CSS3, :checked should return both checked and selected elements - // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked - var nodeName = elem.nodeName.toLowerCase(); - return ( nodeName === "input" && !!elem.checked ) || - ( nodeName === "option" && !!elem.selected ); - }, - - "selected": function( elem ) { - - // Accessing this property makes selected-by-default - // options in Safari work properly - if ( elem.parentNode ) { - // eslint-disable-next-line no-unused-expressions - elem.parentNode.selectedIndex; - } - - return elem.selected === true; - }, - - // Contents - "empty": function( elem ) { - - // http://www.w3.org/TR/selectors/#empty-pseudo - // :empty is negated by element (1) or content nodes (text: 3; cdata: 4; entity ref: 5), - // but not by others (comment: 8; processing instruction: 7; etc.) - // nodeType < 6 works because attributes (2) do not appear as children - for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { - if ( elem.nodeType < 6 ) { - return false; - } - } - return true; - }, - - "parent": function( elem ) { - return !Expr.pseudos[ "empty" ]( elem ); - }, - - // Element/input types - "header": function( elem ) { - return rheader.test( elem.nodeName ); - }, - - "input": function( elem ) { - return rinputs.test( elem.nodeName ); - }, - - "button": function( elem ) { - var name = elem.nodeName.toLowerCase(); - return name === "input" && elem.type === "button" || name === "button"; - }, - - "text": function( elem ) { - var attr; - return elem.nodeName.toLowerCase() === "input" && - elem.type === "text" && - - // Support: IE<8 - // New HTML5 attribute values (e.g., "search") appear with elem.type === "text" - ( ( attr = elem.getAttribute( "type" ) ) == null || - attr.toLowerCase() === "text" ); - }, - - // Position-in-collection - "first": createPositionalPseudo( function() { - return [ 0 ]; - } ), - - "last": createPositionalPseudo( function( _matchIndexes, length ) { - return [ length - 1 ]; - } ), - - "eq": createPositionalPseudo( function( _matchIndexes, length, argument ) { - return [ argument < 0 ? argument + length : argument ]; - } ), - - "even": createPositionalPseudo( function( matchIndexes, length ) { - var i = 0; - for ( ; i < length; i += 2 ) { - matchIndexes.push( i ); - } - return matchIndexes; - } ), - - "odd": createPositionalPseudo( function( matchIndexes, length ) { - var i = 1; - for ( ; i < length; i += 2 ) { - matchIndexes.push( i ); - } - return matchIndexes; - } ), - - "lt": createPositionalPseudo( function( matchIndexes, length, argument ) { - var i = argument < 0 ? - argument + length : - argument > length ? 
- length : - argument; - for ( ; --i >= 0; ) { - matchIndexes.push( i ); - } - return matchIndexes; - } ), - - "gt": createPositionalPseudo( function( matchIndexes, length, argument ) { - var i = argument < 0 ? argument + length : argument; - for ( ; ++i < length; ) { - matchIndexes.push( i ); - } - return matchIndexes; - } ) - } -}; - -Expr.pseudos[ "nth" ] = Expr.pseudos[ "eq" ]; - -// Add button/input type pseudos -for ( i in { radio: true, checkbox: true, file: true, password: true, image: true } ) { - Expr.pseudos[ i ] = createInputPseudo( i ); -} -for ( i in { submit: true, reset: true } ) { - Expr.pseudos[ i ] = createButtonPseudo( i ); -} - -// Easy API for creating new setFilters -function setFilters() {} -setFilters.prototype = Expr.filters = Expr.pseudos; -Expr.setFilters = new setFilters(); - -tokenize = Sizzle.tokenize = function( selector, parseOnly ) { - var matched, match, tokens, type, - soFar, groups, preFilters, - cached = tokenCache[ selector + " " ]; - - if ( cached ) { - return parseOnly ? 0 : cached.slice( 0 ); - } - - soFar = selector; - groups = []; - preFilters = Expr.preFilter; - - while ( soFar ) { - - // Comma and first run - if ( !matched || ( match = rcomma.exec( soFar ) ) ) { - if ( match ) { - - // Don't consume trailing commas as valid - soFar = soFar.slice( match[ 0 ].length ) || soFar; - } - groups.push( ( tokens = [] ) ); - } - - matched = false; - - // Combinators - if ( ( match = rcombinators.exec( soFar ) ) ) { - matched = match.shift(); - tokens.push( { - value: matched, - - // Cast descendant combinators to space - type: match[ 0 ].replace( rtrim, " " ) - } ); - soFar = soFar.slice( matched.length ); - } - - // Filters - for ( type in Expr.filter ) { - if ( ( match = matchExpr[ type ].exec( soFar ) ) && ( !preFilters[ type ] || - ( match = preFilters[ type ]( match ) ) ) ) { - matched = match.shift(); - tokens.push( { - value: matched, - type: type, - matches: match - } ); - soFar = soFar.slice( matched.length ); - } - } - - if ( !matched ) { - break; - } - } - - // Return the length of the invalid excess - // if we're just parsing - // Otherwise, throw an error or return tokens - return parseOnly ? - soFar.length : - soFar ? - Sizzle.error( selector ) : - - // Cache the tokens - tokenCache( selector, groups ).slice( 0 ); -}; - -function toSelector( tokens ) { - var i = 0, - len = tokens.length, - selector = ""; - for ( ; i < len; i++ ) { - selector += tokens[ i ].value; - } - return selector; -} - -function addCombinator( matcher, combinator, base ) { - var dir = combinator.dir, - skip = combinator.next, - key = skip || dir, - checkNonElements = base && key === "parentNode", - doneName = done++; - - return combinator.first ? 
- - // Check against closest ancestor/preceding element - function( elem, context, xml ) { - while ( ( elem = elem[ dir ] ) ) { - if ( elem.nodeType === 1 || checkNonElements ) { - return matcher( elem, context, xml ); - } - } - return false; - } : - - // Check against all ancestor/preceding elements - function( elem, context, xml ) { - var oldCache, uniqueCache, outerCache, - newCache = [ dirruns, doneName ]; - - // We can't set arbitrary data on XML nodes, so they don't benefit from combinator caching - if ( xml ) { - while ( ( elem = elem[ dir ] ) ) { - if ( elem.nodeType === 1 || checkNonElements ) { - if ( matcher( elem, context, xml ) ) { - return true; - } - } - } - } else { - while ( ( elem = elem[ dir ] ) ) { - if ( elem.nodeType === 1 || checkNonElements ) { - outerCache = elem[ expando ] || ( elem[ expando ] = {} ); - - // Support: IE <9 only - // Defend against cloned attroperties (jQuery gh-1709) - uniqueCache = outerCache[ elem.uniqueID ] || - ( outerCache[ elem.uniqueID ] = {} ); - - if ( skip && skip === elem.nodeName.toLowerCase() ) { - elem = elem[ dir ] || elem; - } else if ( ( oldCache = uniqueCache[ key ] ) && - oldCache[ 0 ] === dirruns && oldCache[ 1 ] === doneName ) { - - // Assign to newCache so results back-propagate to previous elements - return ( newCache[ 2 ] = oldCache[ 2 ] ); - } else { - - // Reuse newcache so results back-propagate to previous elements - uniqueCache[ key ] = newCache; - - // A match means we're done; a fail means we have to keep checking - if ( ( newCache[ 2 ] = matcher( elem, context, xml ) ) ) { - return true; - } - } - } - } - } - return false; - }; -} - -function elementMatcher( matchers ) { - return matchers.length > 1 ? - function( elem, context, xml ) { - var i = matchers.length; - while ( i-- ) { - if ( !matchers[ i ]( elem, context, xml ) ) { - return false; - } - } - return true; - } : - matchers[ 0 ]; -} - -function multipleContexts( selector, contexts, results ) { - var i = 0, - len = contexts.length; - for ( ; i < len; i++ ) { - Sizzle( selector, contexts[ i ], results ); - } - return results; -} - -function condense( unmatched, map, filter, context, xml ) { - var elem, - newUnmatched = [], - i = 0, - len = unmatched.length, - mapped = map != null; - - for ( ; i < len; i++ ) { - if ( ( elem = unmatched[ i ] ) ) { - if ( !filter || filter( elem, context, xml ) ) { - newUnmatched.push( elem ); - if ( mapped ) { - map.push( i ); - } - } - } - } - - return newUnmatched; -} - -function setMatcher( preFilter, selector, matcher, postFilter, postFinder, postSelector ) { - if ( postFilter && !postFilter[ expando ] ) { - postFilter = setMatcher( postFilter ); - } - if ( postFinder && !postFinder[ expando ] ) { - postFinder = setMatcher( postFinder, postSelector ); - } - return markFunction( function( seed, results, context, xml ) { - var temp, i, elem, - preMap = [], - postMap = [], - preexisting = results.length, - - // Get initial elements from seed or context - elems = seed || multipleContexts( - selector || "*", - context.nodeType ? [ context ] : context, - [] - ), - - // Prefilter to get matcher input, preserving a map for seed-results synchronization - matcherIn = preFilter && ( seed || !selector ) ? - condense( elems, preMap, preFilter, context, xml ) : - elems, - - matcherOut = matcher ? - - // If we have a postFinder, or filtered seed, or non-seed postFilter or preexisting results, - postFinder || ( seed ? preFilter : preexisting || postFilter ) ? 
- - // ...intermediate processing is necessary - [] : - - // ...otherwise use results directly - results : - matcherIn; - - // Find primary matches - if ( matcher ) { - matcher( matcherIn, matcherOut, context, xml ); - } - - // Apply postFilter - if ( postFilter ) { - temp = condense( matcherOut, postMap ); - postFilter( temp, [], context, xml ); - - // Un-match failing elements by moving them back to matcherIn - i = temp.length; - while ( i-- ) { - if ( ( elem = temp[ i ] ) ) { - matcherOut[ postMap[ i ] ] = !( matcherIn[ postMap[ i ] ] = elem ); - } - } - } - - if ( seed ) { - if ( postFinder || preFilter ) { - if ( postFinder ) { - - // Get the final matcherOut by condensing this intermediate into postFinder contexts - temp = []; - i = matcherOut.length; - while ( i-- ) { - if ( ( elem = matcherOut[ i ] ) ) { - - // Restore matcherIn since elem is not yet a final match - temp.push( ( matcherIn[ i ] = elem ) ); - } - } - postFinder( null, ( matcherOut = [] ), temp, xml ); - } - - // Move matched elements from seed to results to keep them synchronized - i = matcherOut.length; - while ( i-- ) { - if ( ( elem = matcherOut[ i ] ) && - ( temp = postFinder ? indexOf( seed, elem ) : preMap[ i ] ) > -1 ) { - - seed[ temp ] = !( results[ temp ] = elem ); - } - } - } - - // Add elements to results, through postFinder if defined - } else { - matcherOut = condense( - matcherOut === results ? - matcherOut.splice( preexisting, matcherOut.length ) : - matcherOut - ); - if ( postFinder ) { - postFinder( null, results, matcherOut, xml ); - } else { - push.apply( results, matcherOut ); - } - } - } ); -} - -function matcherFromTokens( tokens ) { - var checkContext, matcher, j, - len = tokens.length, - leadingRelative = Expr.relative[ tokens[ 0 ].type ], - implicitRelative = leadingRelative || Expr.relative[ " " ], - i = leadingRelative ? 1 : 0, - - // The foundational matcher ensures that elements are reachable from top-level context(s) - matchContext = addCombinator( function( elem ) { - return elem === checkContext; - }, implicitRelative, true ), - matchAnyContext = addCombinator( function( elem ) { - return indexOf( checkContext, elem ) > -1; - }, implicitRelative, true ), - matchers = [ function( elem, context, xml ) { - var ret = ( !leadingRelative && ( xml || context !== outermostContext ) ) || ( - ( checkContext = context ).nodeType ? - matchContext( elem, context, xml ) : - matchAnyContext( elem, context, xml ) ); - - // Avoid hanging onto element (issue #299) - checkContext = null; - return ret; - } ]; - - for ( ; i < len; i++ ) { - if ( ( matcher = Expr.relative[ tokens[ i ].type ] ) ) { - matchers = [ addCombinator( elementMatcher( matchers ), matcher ) ]; - } else { - matcher = Expr.filter[ tokens[ i ].type ].apply( null, tokens[ i ].matches ); - - // Return special upon seeing a positional matcher - if ( matcher[ expando ] ) { - - // Find the next relative operator (if any) for proper handling - j = ++i; - for ( ; j < len; j++ ) { - if ( Expr.relative[ tokens[ j ].type ] ) { - break; - } - } - return setMatcher( - i > 1 && elementMatcher( matchers ), - i > 1 && toSelector( - - // If the preceding token was a descendant combinator, insert an implicit any-element `*` - tokens - .slice( 0, i - 1 ) - .concat( { value: tokens[ i - 2 ].type === " " ? 
"*" : "" } ) - ).replace( rtrim, "$1" ), - matcher, - i < j && matcherFromTokens( tokens.slice( i, j ) ), - j < len && matcherFromTokens( ( tokens = tokens.slice( j ) ) ), - j < len && toSelector( tokens ) - ); - } - matchers.push( matcher ); - } - } - - return elementMatcher( matchers ); -} - -function matcherFromGroupMatchers( elementMatchers, setMatchers ) { - var bySet = setMatchers.length > 0, - byElement = elementMatchers.length > 0, - superMatcher = function( seed, context, xml, results, outermost ) { - var elem, j, matcher, - matchedCount = 0, - i = "0", - unmatched = seed && [], - setMatched = [], - contextBackup = outermostContext, - - // We must always have either seed elements or outermost context - elems = seed || byElement && Expr.find[ "TAG" ]( "*", outermost ), - - // Use integer dirruns iff this is the outermost matcher - dirrunsUnique = ( dirruns += contextBackup == null ? 1 : Math.random() || 0.1 ), - len = elems.length; - - if ( outermost ) { - - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. - // eslint-disable-next-line eqeqeq - outermostContext = context == document || context || outermost; - } - - // Add elements passing elementMatchers directly to results - // Support: IE<9, Safari - // Tolerate NodeList properties (IE: "length"; Safari: ) matching elements by id - for ( ; i !== len && ( elem = elems[ i ] ) != null; i++ ) { - if ( byElement && elem ) { - j = 0; - - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. - // eslint-disable-next-line eqeqeq - if ( !context && elem.ownerDocument != document ) { - setDocument( elem ); - xml = !documentIsHTML; - } - while ( ( matcher = elementMatchers[ j++ ] ) ) { - if ( matcher( elem, context || document, xml ) ) { - results.push( elem ); - break; - } - } - if ( outermost ) { - dirruns = dirrunsUnique; - } - } - - // Track unmatched elements for set filters - if ( bySet ) { - - // They will have gone through all possible matchers - if ( ( elem = !matcher && elem ) ) { - matchedCount--; - } - - // Lengthen the array for every element, matched or not - if ( seed ) { - unmatched.push( elem ); - } - } - } - - // `i` is now the count of elements visited above, and adding it to `matchedCount` - // makes the latter nonnegative. - matchedCount += i; - - // Apply set filters to unmatched elements - // NOTE: This can be skipped if there are no unmatched elements (i.e., `matchedCount` - // equals `i`), unless we didn't visit _any_ elements in the above loop because we have - // no element matchers and no seed. - // Incrementing an initially-string "0" `i` allows `i` to remain a string only in that - // case, which will result in a "00" `matchedCount` that differs from `i` but is also - // numerically zero. 
- if ( bySet && i !== matchedCount ) { - j = 0; - while ( ( matcher = setMatchers[ j++ ] ) ) { - matcher( unmatched, setMatched, context, xml ); - } - - if ( seed ) { - - // Reintegrate element matches to eliminate the need for sorting - if ( matchedCount > 0 ) { - while ( i-- ) { - if ( !( unmatched[ i ] || setMatched[ i ] ) ) { - setMatched[ i ] = pop.call( results ); - } - } - } - - // Discard index placeholder values to get only actual matches - setMatched = condense( setMatched ); - } - - // Add matches to results - push.apply( results, setMatched ); - - // Seedless set matches succeeding multiple successful matchers stipulate sorting - if ( outermost && !seed && setMatched.length > 0 && - ( matchedCount + setMatchers.length ) > 1 ) { - - Sizzle.uniqueSort( results ); - } - } - - // Override manipulation of globals by nested matchers - if ( outermost ) { - dirruns = dirrunsUnique; - outermostContext = contextBackup; - } - - return unmatched; - }; - - return bySet ? - markFunction( superMatcher ) : - superMatcher; -} - -compile = Sizzle.compile = function( selector, match /* Internal Use Only */ ) { - var i, - setMatchers = [], - elementMatchers = [], - cached = compilerCache[ selector + " " ]; - - if ( !cached ) { - - // Generate a function of recursive functions that can be used to check each element - if ( !match ) { - match = tokenize( selector ); - } - i = match.length; - while ( i-- ) { - cached = matcherFromTokens( match[ i ] ); - if ( cached[ expando ] ) { - setMatchers.push( cached ); - } else { - elementMatchers.push( cached ); - } - } - - // Cache the compiled function - cached = compilerCache( - selector, - matcherFromGroupMatchers( elementMatchers, setMatchers ) - ); - - // Save selector and tokenization - cached.selector = selector; - } - return cached; -}; - -/** - * A low-level selection function that works with Sizzle's compiled - * selector functions - * @param {String|Function} selector A selector or a pre-compiled - * selector function built with Sizzle.compile - * @param {Element} context - * @param {Array} [results] - * @param {Array} [seed] A set of elements to match against - */ -select = Sizzle.select = function( selector, context, results, seed ) { - var i, tokens, token, type, find, - compiled = typeof selector === "function" && selector, - match = !seed && tokenize( ( selector = compiled.selector || selector ) ); - - results = results || []; - - // Try to minimize operations if there is only one selector in the list and no seed - // (the latter of which guarantees us context) - if ( match.length === 1 ) { - - // Reduce context if the leading compound selector is an ID - tokens = match[ 0 ] = match[ 0 ].slice( 0 ); - if ( tokens.length > 2 && ( token = tokens[ 0 ] ).type === "ID" && - context.nodeType === 9 && documentIsHTML && Expr.relative[ tokens[ 1 ].type ] ) { - - context = ( Expr.find[ "ID" ]( token.matches[ 0 ] - .replace( runescape, funescape ), context ) || [] )[ 0 ]; - if ( !context ) { - return results; - - // Precompiled matchers will still verify ancestry, so step up a level - } else if ( compiled ) { - context = context.parentNode; - } - - selector = selector.slice( tokens.shift().value.length ); - } - - // Fetch a seed set for right-to-left matching - i = matchExpr[ "needsContext" ].test( selector ) ? 
0 : tokens.length; - while ( i-- ) { - token = tokens[ i ]; - - // Abort if we hit a combinator - if ( Expr.relative[ ( type = token.type ) ] ) { - break; - } - if ( ( find = Expr.find[ type ] ) ) { - - // Search, expanding context for leading sibling combinators - if ( ( seed = find( - token.matches[ 0 ].replace( runescape, funescape ), - rsibling.test( tokens[ 0 ].type ) && testContext( context.parentNode ) || - context - ) ) ) { - - // If seed is empty or no tokens remain, we can return early - tokens.splice( i, 1 ); - selector = seed.length && toSelector( tokens ); - if ( !selector ) { - push.apply( results, seed ); - return results; - } - - break; - } - } - } - } - - // Compile and execute a filtering function if one is not provided - // Provide `match` to avoid retokenization if we modified the selector above - ( compiled || compile( selector, match ) )( - seed, - context, - !documentIsHTML, - results, - !context || rsibling.test( selector ) && testContext( context.parentNode ) || context - ); - return results; -}; - -// One-time assignments - -// Sort stability -support.sortStable = expando.split( "" ).sort( sortOrder ).join( "" ) === expando; - -// Support: Chrome 14-35+ -// Always assume duplicates if they aren't passed to the comparison function -support.detectDuplicates = !!hasDuplicate; - -// Initialize against the default document -setDocument(); - -// Support: Webkit<537.32 - Safari 6.0.3/Chrome 25 (fixed in Chrome 27) -// Detached nodes confoundingly follow *each other* -support.sortDetached = assert( function( el ) { - - // Should return 1, but returns 4 (following) - return el.compareDocumentPosition( document.createElement( "fieldset" ) ) & 1; -} ); - -// Support: IE<8 -// Prevent attribute/property "interpolation" -// https://msdn.microsoft.com/en-us/library/ms536429%28VS.85%29.aspx -if ( !assert( function( el ) { - el.innerHTML = ""; - return el.firstChild.getAttribute( "href" ) === "#"; -} ) ) { - addHandle( "type|href|height|width", function( elem, name, isXML ) { - if ( !isXML ) { - return elem.getAttribute( name, name.toLowerCase() === "type" ? 1 : 2 ); - } - } ); -} - -// Support: IE<9 -// Use defaultValue in place of getAttribute("value") -if ( !support.attributes || !assert( function( el ) { - el.innerHTML = ""; - el.firstChild.setAttribute( "value", "" ); - return el.firstChild.getAttribute( "value" ) === ""; -} ) ) { - addHandle( "value", function( elem, _name, isXML ) { - if ( !isXML && elem.nodeName.toLowerCase() === "input" ) { - return elem.defaultValue; - } - } ); -} - -// Support: IE<9 -// Use getAttributeNode to fetch booleans when getAttribute lies -if ( !assert( function( el ) { - return el.getAttribute( "disabled" ) == null; -} ) ) { - addHandle( booleans, function( elem, name, isXML ) { - var val; - if ( !isXML ) { - return elem[ name ] === true ? name.toLowerCase() : - ( val = elem.getAttributeNode( name ) ) && val.specified ? 
- val.value : - null; - } - } ); -} - -return Sizzle; - -} )( window ); - - - -jQuery.find = Sizzle; -jQuery.expr = Sizzle.selectors; - -// Deprecated -jQuery.expr[ ":" ] = jQuery.expr.pseudos; -jQuery.uniqueSort = jQuery.unique = Sizzle.uniqueSort; -jQuery.text = Sizzle.getText; -jQuery.isXMLDoc = Sizzle.isXML; -jQuery.contains = Sizzle.contains; -jQuery.escapeSelector = Sizzle.escape; - - - - -var dir = function( elem, dir, until ) { - var matched = [], - truncate = until !== undefined; - - while ( ( elem = elem[ dir ] ) && elem.nodeType !== 9 ) { - if ( elem.nodeType === 1 ) { - if ( truncate && jQuery( elem ).is( until ) ) { - break; - } - matched.push( elem ); - } - } - return matched; -}; - - -var siblings = function( n, elem ) { - var matched = []; - - for ( ; n; n = n.nextSibling ) { - if ( n.nodeType === 1 && n !== elem ) { - matched.push( n ); - } - } - - return matched; -}; - - -var rneedsContext = jQuery.expr.match.needsContext; - - - -function nodeName( elem, name ) { - - return elem.nodeName && elem.nodeName.toLowerCase() === name.toLowerCase(); - -}; -var rsingleTag = ( /^<([a-z][^\/\0>:\x20\t\r\n\f]*)[\x20\t\r\n\f]*\/?>(?:<\/\1>|)$/i ); - - - -// Implement the identical functionality for filter and not -function winnow( elements, qualifier, not ) { - if ( isFunction( qualifier ) ) { - return jQuery.grep( elements, function( elem, i ) { - return !!qualifier.call( elem, i, elem ) !== not; - } ); - } - - // Single element - if ( qualifier.nodeType ) { - return jQuery.grep( elements, function( elem ) { - return ( elem === qualifier ) !== not; - } ); - } - - // Arraylike of elements (jQuery, arguments, Array) - if ( typeof qualifier !== "string" ) { - return jQuery.grep( elements, function( elem ) { - return ( indexOf.call( qualifier, elem ) > -1 ) !== not; - } ); - } - - // Filtered directly for both simple and complex selectors - return jQuery.filter( qualifier, elements, not ); -} - -jQuery.filter = function( expr, elems, not ) { - var elem = elems[ 0 ]; - - if ( not ) { - expr = ":not(" + expr + ")"; - } - - if ( elems.length === 1 && elem.nodeType === 1 ) { - return jQuery.find.matchesSelector( elem, expr ) ? [ elem ] : []; - } - - return jQuery.find.matches( expr, jQuery.grep( elems, function( elem ) { - return elem.nodeType === 1; - } ) ); -}; - -jQuery.fn.extend( { - find: function( selector ) { - var i, ret, - len = this.length, - self = this; - - if ( typeof selector !== "string" ) { - return this.pushStack( jQuery( selector ).filter( function() { - for ( i = 0; i < len; i++ ) { - if ( jQuery.contains( self[ i ], this ) ) { - return true; - } - } - } ) ); - } - - ret = this.pushStack( [] ); - - for ( i = 0; i < len; i++ ) { - jQuery.find( selector, self[ i ], ret ); - } - - return len > 1 ? jQuery.uniqueSort( ret ) : ret; - }, - filter: function( selector ) { - return this.pushStack( winnow( this, selector || [], false ) ); - }, - not: function( selector ) { - return this.pushStack( winnow( this, selector || [], true ) ); - }, - is: function( selector ) { - return !!winnow( - this, - - // If this is a positional/relative selector, check membership in the returned set - // so $("p:first").is("p:last") won't return true for a doc with two "p". - typeof selector === "string" && rneedsContext.test( selector ) ? 
- jQuery( selector ) : - selector || [], - false - ).length; - } -} ); - - -// Initialize a jQuery object - - -// A central reference to the root jQuery(document) -var rootjQuery, - - // A simple way to check for HTML strings - // Prioritize #id over to avoid XSS via location.hash (#9521) - // Strict HTML recognition (#11290: must start with <) - // Shortcut simple #id case for speed - rquickExpr = /^(?:\s*(<[\w\W]+>)[^>]*|#([\w-]+))$/, - - init = jQuery.fn.init = function( selector, context, root ) { - var match, elem; - - // HANDLE: $(""), $(null), $(undefined), $(false) - if ( !selector ) { - return this; - } - - // Method init() accepts an alternate rootjQuery - // so migrate can support jQuery.sub (gh-2101) - root = root || rootjQuery; - - // Handle HTML strings - if ( typeof selector === "string" ) { - if ( selector[ 0 ] === "<" && - selector[ selector.length - 1 ] === ">" && - selector.length >= 3 ) { - - // Assume that strings that start and end with <> are HTML and skip the regex check - match = [ null, selector, null ]; - - } else { - match = rquickExpr.exec( selector ); - } - - // Match html or make sure no context is specified for #id - if ( match && ( match[ 1 ] || !context ) ) { - - // HANDLE: $(html) -> $(array) - if ( match[ 1 ] ) { - context = context instanceof jQuery ? context[ 0 ] : context; - - // Option to run scripts is true for back-compat - // Intentionally let the error be thrown if parseHTML is not present - jQuery.merge( this, jQuery.parseHTML( - match[ 1 ], - context && context.nodeType ? context.ownerDocument || context : document, - true - ) ); - - // HANDLE: $(html, props) - if ( rsingleTag.test( match[ 1 ] ) && jQuery.isPlainObject( context ) ) { - for ( match in context ) { - - // Properties of context are called as methods if possible - if ( isFunction( this[ match ] ) ) { - this[ match ]( context[ match ] ); - - // ...and otherwise set as attributes - } else { - this.attr( match, context[ match ] ); - } - } - } - - return this; - - // HANDLE: $(#id) - } else { - elem = document.getElementById( match[ 2 ] ); - - if ( elem ) { - - // Inject the element directly into the jQuery object - this[ 0 ] = elem; - this.length = 1; - } - return this; - } - - // HANDLE: $(expr, $(...)) - } else if ( !context || context.jquery ) { - return ( context || root ).find( selector ); - - // HANDLE: $(expr, context) - // (which is just equivalent to: $(context).find(expr) - } else { - return this.constructor( context ).find( selector ); - } - - // HANDLE: $(DOMElement) - } else if ( selector.nodeType ) { - this[ 0 ] = selector; - this.length = 1; - return this; - - // HANDLE: $(function) - // Shortcut for document ready - } else if ( isFunction( selector ) ) { - return root.ready !== undefined ? 
- root.ready( selector ) : - - // Execute immediately if ready is not present - selector( jQuery ); - } - - return jQuery.makeArray( selector, this ); - }; - -// Give the init function the jQuery prototype for later instantiation -init.prototype = jQuery.fn; - -// Initialize central reference -rootjQuery = jQuery( document ); - - -var rparentsprev = /^(?:parents|prev(?:Until|All))/, - - // Methods guaranteed to produce a unique set when starting from a unique set - guaranteedUnique = { - children: true, - contents: true, - next: true, - prev: true - }; - -jQuery.fn.extend( { - has: function( target ) { - var targets = jQuery( target, this ), - l = targets.length; - - return this.filter( function() { - var i = 0; - for ( ; i < l; i++ ) { - if ( jQuery.contains( this, targets[ i ] ) ) { - return true; - } - } - } ); - }, - - closest: function( selectors, context ) { - var cur, - i = 0, - l = this.length, - matched = [], - targets = typeof selectors !== "string" && jQuery( selectors ); - - // Positional selectors never match, since there's no _selection_ context - if ( !rneedsContext.test( selectors ) ) { - for ( ; i < l; i++ ) { - for ( cur = this[ i ]; cur && cur !== context; cur = cur.parentNode ) { - - // Always skip document fragments - if ( cur.nodeType < 11 && ( targets ? - targets.index( cur ) > -1 : - - // Don't pass non-elements to Sizzle - cur.nodeType === 1 && - jQuery.find.matchesSelector( cur, selectors ) ) ) { - - matched.push( cur ); - break; - } - } - } - } - - return this.pushStack( matched.length > 1 ? jQuery.uniqueSort( matched ) : matched ); - }, - - // Determine the position of an element within the set - index: function( elem ) { - - // No argument, return index in parent - if ( !elem ) { - return ( this[ 0 ] && this[ 0 ].parentNode ) ? this.first().prevAll().length : -1; - } - - // Index in selector - if ( typeof elem === "string" ) { - return indexOf.call( jQuery( elem ), this[ 0 ] ); - } - - // Locate the position of the desired element - return indexOf.call( this, - - // If it receives a jQuery object, the first element is used - elem.jquery ? elem[ 0 ] : elem - ); - }, - - add: function( selector, context ) { - return this.pushStack( - jQuery.uniqueSort( - jQuery.merge( this.get(), jQuery( selector, context ) ) - ) - ); - }, - - addBack: function( selector ) { - return this.add( selector == null ? - this.prevObject : this.prevObject.filter( selector ) - ); - } -} ); - -function sibling( cur, dir ) { - while ( ( cur = cur[ dir ] ) && cur.nodeType !== 1 ) {} - return cur; -} - -jQuery.each( { - parent: function( elem ) { - var parent = elem.parentNode; - return parent && parent.nodeType !== 11 ? 
parent : null; - }, - parents: function( elem ) { - return dir( elem, "parentNode" ); - }, - parentsUntil: function( elem, _i, until ) { - return dir( elem, "parentNode", until ); - }, - next: function( elem ) { - return sibling( elem, "nextSibling" ); - }, - prev: function( elem ) { - return sibling( elem, "previousSibling" ); - }, - nextAll: function( elem ) { - return dir( elem, "nextSibling" ); - }, - prevAll: function( elem ) { - return dir( elem, "previousSibling" ); - }, - nextUntil: function( elem, _i, until ) { - return dir( elem, "nextSibling", until ); - }, - prevUntil: function( elem, _i, until ) { - return dir( elem, "previousSibling", until ); - }, - siblings: function( elem ) { - return siblings( ( elem.parentNode || {} ).firstChild, elem ); - }, - children: function( elem ) { - return siblings( elem.firstChild ); - }, - contents: function( elem ) { - if ( elem.contentDocument != null && - - // Support: IE 11+ - // elements with no `data` attribute has an object - // `contentDocument` with a `null` prototype. - getProto( elem.contentDocument ) ) { - - return elem.contentDocument; - } - - // Support: IE 9 - 11 only, iOS 7 only, Android Browser <=4.3 only - // Treat the template element as a regular one in browsers that - // don't support it. - if ( nodeName( elem, "template" ) ) { - elem = elem.content || elem; - } - - return jQuery.merge( [], elem.childNodes ); - } -}, function( name, fn ) { - jQuery.fn[ name ] = function( until, selector ) { - var matched = jQuery.map( this, fn, until ); - - if ( name.slice( -5 ) !== "Until" ) { - selector = until; - } - - if ( selector && typeof selector === "string" ) { - matched = jQuery.filter( selector, matched ); - } - - if ( this.length > 1 ) { - - // Remove duplicates - if ( !guaranteedUnique[ name ] ) { - jQuery.uniqueSort( matched ); - } - - // Reverse order for parents* and prev-derivatives - if ( rparentsprev.test( name ) ) { - matched.reverse(); - } - } - - return this.pushStack( matched ); - }; -} ); -var rnothtmlwhite = ( /[^\x20\t\r\n\f]+/g ); - - - -// Convert String-formatted options into Object-formatted ones -function createOptions( options ) { - var object = {}; - jQuery.each( options.match( rnothtmlwhite ) || [], function( _, flag ) { - object[ flag ] = true; - } ); - return object; -} - -/* - * Create a callback list using the following parameters: - * - * options: an optional list of space-separated options that will change how - * the callback list behaves or a more traditional option object - * - * By default a callback list will act like an event callback list and can be - * "fired" multiple times. - * - * Possible options: - * - * once: will ensure the callback list can only be fired once (like a Deferred) - * - * memory: will keep track of previous values and will call any callback added - * after the list has been fired right away with the latest "memorized" - * values (like a Deferred) - * - * unique: will ensure a callback can only be added once (no duplicate in the list) - * - * stopOnFalse: interrupt callings when a callback returns false - * - */ -jQuery.Callbacks = function( options ) { - - // Convert options from String-formatted to Object-formatted if needed - // (we check in cache first) - options = typeof options === "string" ? 
- createOptions( options ) : - jQuery.extend( {}, options ); - - var // Flag to know if list is currently firing - firing, - - // Last fire value for non-forgettable lists - memory, - - // Flag to know if list was already fired - fired, - - // Flag to prevent firing - locked, - - // Actual callback list - list = [], - - // Queue of execution data for repeatable lists - queue = [], - - // Index of currently firing callback (modified by add/remove as needed) - firingIndex = -1, - - // Fire callbacks - fire = function() { - - // Enforce single-firing - locked = locked || options.once; - - // Execute callbacks for all pending executions, - // respecting firingIndex overrides and runtime changes - fired = firing = true; - for ( ; queue.length; firingIndex = -1 ) { - memory = queue.shift(); - while ( ++firingIndex < list.length ) { - - // Run callback and check for early termination - if ( list[ firingIndex ].apply( memory[ 0 ], memory[ 1 ] ) === false && - options.stopOnFalse ) { - - // Jump to end and forget the data so .add doesn't re-fire - firingIndex = list.length; - memory = false; - } - } - } - - // Forget the data if we're done with it - if ( !options.memory ) { - memory = false; - } - - firing = false; - - // Clean up if we're done firing for good - if ( locked ) { - - // Keep an empty list if we have data for future add calls - if ( memory ) { - list = []; - - // Otherwise, this object is spent - } else { - list = ""; - } - } - }, - - // Actual Callbacks object - self = { - - // Add a callback or a collection of callbacks to the list - add: function() { - if ( list ) { - - // If we have memory from a past run, we should fire after adding - if ( memory && !firing ) { - firingIndex = list.length - 1; - queue.push( memory ); - } - - ( function add( args ) { - jQuery.each( args, function( _, arg ) { - if ( isFunction( arg ) ) { - if ( !options.unique || !self.has( arg ) ) { - list.push( arg ); - } - } else if ( arg && arg.length && toType( arg ) !== "string" ) { - - // Inspect recursively - add( arg ); - } - } ); - } )( arguments ); - - if ( memory && !firing ) { - fire(); - } - } - return this; - }, - - // Remove a callback from the list - remove: function() { - jQuery.each( arguments, function( _, arg ) { - var index; - while ( ( index = jQuery.inArray( arg, list, index ) ) > -1 ) { - list.splice( index, 1 ); - - // Handle firing indexes - if ( index <= firingIndex ) { - firingIndex--; - } - } - } ); - return this; - }, - - // Check if a given callback is in the list. - // If no argument is given, return whether or not list has callbacks attached. - has: function( fn ) { - return fn ? - jQuery.inArray( fn, list ) > -1 : - list.length > 0; - }, - - // Remove all callbacks from the list - empty: function() { - if ( list ) { - list = []; - } - return this; - }, - - // Disable .fire and .add - // Abort any current/pending executions - // Clear all callbacks and values - disable: function() { - locked = queue = []; - list = memory = ""; - return this; - }, - disabled: function() { - return !list; - }, - - // Disable .fire - // Also disable .add unless we have memory (since it would have no effect) - // Abort any pending executions - lock: function() { - locked = queue = []; - if ( !memory && !firing ) { - list = memory = ""; - } - return this; - }, - locked: function() { - return !!locked; - }, - - // Call all callbacks with the given context and arguments - fireWith: function( context, args ) { - if ( !locked ) { - args = args || []; - args = [ context, args.slice ? 
args.slice() : args ]; - queue.push( args ); - if ( !firing ) { - fire(); - } - } - return this; - }, - - // Call all the callbacks with the given arguments - fire: function() { - self.fireWith( this, arguments ); - return this; - }, - - // To know if the callbacks have already been called at least once - fired: function() { - return !!fired; - } - }; - - return self; -}; - - -function Identity( v ) { - return v; -} -function Thrower( ex ) { - throw ex; -} - -function adoptValue( value, resolve, reject, noValue ) { - var method; - - try { - - // Check for promise aspect first to privilege synchronous behavior - if ( value && isFunction( ( method = value.promise ) ) ) { - method.call( value ).done( resolve ).fail( reject ); - - // Other thenables - } else if ( value && isFunction( ( method = value.then ) ) ) { - method.call( value, resolve, reject ); - - // Other non-thenables - } else { - - // Control `resolve` arguments by letting Array#slice cast boolean `noValue` to integer: - // * false: [ value ].slice( 0 ) => resolve( value ) - // * true: [ value ].slice( 1 ) => resolve() - resolve.apply( undefined, [ value ].slice( noValue ) ); - } - - // For Promises/A+, convert exceptions into rejections - // Since jQuery.when doesn't unwrap thenables, we can skip the extra checks appearing in - // Deferred#then to conditionally suppress rejection. - } catch ( value ) { - - // Support: Android 4.0 only - // Strict mode functions invoked without .call/.apply get global-object context - reject.apply( undefined, [ value ] ); - } -} - -jQuery.extend( { - - Deferred: function( func ) { - var tuples = [ - - // action, add listener, callbacks, - // ... .then handlers, argument index, [final state] - [ "notify", "progress", jQuery.Callbacks( "memory" ), - jQuery.Callbacks( "memory" ), 2 ], - [ "resolve", "done", jQuery.Callbacks( "once memory" ), - jQuery.Callbacks( "once memory" ), 0, "resolved" ], - [ "reject", "fail", jQuery.Callbacks( "once memory" ), - jQuery.Callbacks( "once memory" ), 1, "rejected" ] - ], - state = "pending", - promise = { - state: function() { - return state; - }, - always: function() { - deferred.done( arguments ).fail( arguments ); - return this; - }, - "catch": function( fn ) { - return promise.then( null, fn ); - }, - - // Keep pipe for back-compat - pipe: function( /* fnDone, fnFail, fnProgress */ ) { - var fns = arguments; - - return jQuery.Deferred( function( newDefer ) { - jQuery.each( tuples, function( _i, tuple ) { - - // Map tuples (progress, done, fail) to arguments (done, fail, progress) - var fn = isFunction( fns[ tuple[ 4 ] ] ) && fns[ tuple[ 4 ] ]; - - // deferred.progress(function() { bind to newDefer or newDefer.notify }) - // deferred.done(function() { bind to newDefer or newDefer.resolve }) - // deferred.fail(function() { bind to newDefer or newDefer.reject }) - deferred[ tuple[ 1 ] ]( function() { - var returned = fn && fn.apply( this, arguments ); - if ( returned && isFunction( returned.promise ) ) { - returned.promise() - .progress( newDefer.notify ) - .done( newDefer.resolve ) - .fail( newDefer.reject ); - } else { - newDefer[ tuple[ 0 ] + "With" ]( - this, - fn ? 
[ returned ] : arguments - ); - } - } ); - } ); - fns = null; - } ).promise(); - }, - then: function( onFulfilled, onRejected, onProgress ) { - var maxDepth = 0; - function resolve( depth, deferred, handler, special ) { - return function() { - var that = this, - args = arguments, - mightThrow = function() { - var returned, then; - - // Support: Promises/A+ section 2.3.3.3.3 - // https://promisesaplus.com/#point-59 - // Ignore double-resolution attempts - if ( depth < maxDepth ) { - return; - } - - returned = handler.apply( that, args ); - - // Support: Promises/A+ section 2.3.1 - // https://promisesaplus.com/#point-48 - if ( returned === deferred.promise() ) { - throw new TypeError( "Thenable self-resolution" ); - } - - // Support: Promises/A+ sections 2.3.3.1, 3.5 - // https://promisesaplus.com/#point-54 - // https://promisesaplus.com/#point-75 - // Retrieve `then` only once - then = returned && - - // Support: Promises/A+ section 2.3.4 - // https://promisesaplus.com/#point-64 - // Only check objects and functions for thenability - ( typeof returned === "object" || - typeof returned === "function" ) && - returned.then; - - // Handle a returned thenable - if ( isFunction( then ) ) { - - // Special processors (notify) just wait for resolution - if ( special ) { - then.call( - returned, - resolve( maxDepth, deferred, Identity, special ), - resolve( maxDepth, deferred, Thrower, special ) - ); - - // Normal processors (resolve) also hook into progress - } else { - - // ...and disregard older resolution values - maxDepth++; - - then.call( - returned, - resolve( maxDepth, deferred, Identity, special ), - resolve( maxDepth, deferred, Thrower, special ), - resolve( maxDepth, deferred, Identity, - deferred.notifyWith ) - ); - } - - // Handle all other returned values - } else { - - // Only substitute handlers pass on context - // and multiple values (non-spec behavior) - if ( handler !== Identity ) { - that = undefined; - args = [ returned ]; - } - - // Process the value(s) - // Default process is resolve - ( special || deferred.resolveWith )( that, args ); - } - }, - - // Only normal processors (resolve) catch and reject exceptions - process = special ? - mightThrow : - function() { - try { - mightThrow(); - } catch ( e ) { - - if ( jQuery.Deferred.exceptionHook ) { - jQuery.Deferred.exceptionHook( e, - process.stackTrace ); - } - - // Support: Promises/A+ section 2.3.3.3.4.1 - // https://promisesaplus.com/#point-61 - // Ignore post-resolution exceptions - if ( depth + 1 >= maxDepth ) { - - // Only substitute handlers pass on context - // and multiple values (non-spec behavior) - if ( handler !== Thrower ) { - that = undefined; - args = [ e ]; - } - - deferred.rejectWith( that, args ); - } - } - }; - - // Support: Promises/A+ section 2.3.3.3.1 - // https://promisesaplus.com/#point-57 - // Re-resolve promises immediately to dodge false rejection from - // subsequent errors - if ( depth ) { - process(); - } else { - - // Call an optional hook to record the stack, in case of exception - // since it's otherwise lost when execution goes async - if ( jQuery.Deferred.getStackHook ) { - process.stackTrace = jQuery.Deferred.getStackHook(); - } - window.setTimeout( process ); - } - }; - } - - return jQuery.Deferred( function( newDefer ) { - - // progress_handlers.add( ... ) - tuples[ 0 ][ 3 ].add( - resolve( - 0, - newDefer, - isFunction( onProgress ) ? - onProgress : - Identity, - newDefer.notifyWith - ) - ); - - // fulfilled_handlers.add( ... 
) - tuples[ 1 ][ 3 ].add( - resolve( - 0, - newDefer, - isFunction( onFulfilled ) ? - onFulfilled : - Identity - ) - ); - - // rejected_handlers.add( ... ) - tuples[ 2 ][ 3 ].add( - resolve( - 0, - newDefer, - isFunction( onRejected ) ? - onRejected : - Thrower - ) - ); - } ).promise(); - }, - - // Get a promise for this deferred - // If obj is provided, the promise aspect is added to the object - promise: function( obj ) { - return obj != null ? jQuery.extend( obj, promise ) : promise; - } - }, - deferred = {}; - - // Add list-specific methods - jQuery.each( tuples, function( i, tuple ) { - var list = tuple[ 2 ], - stateString = tuple[ 5 ]; - - // promise.progress = list.add - // promise.done = list.add - // promise.fail = list.add - promise[ tuple[ 1 ] ] = list.add; - - // Handle state - if ( stateString ) { - list.add( - function() { - - // state = "resolved" (i.e., fulfilled) - // state = "rejected" - state = stateString; - }, - - // rejected_callbacks.disable - // fulfilled_callbacks.disable - tuples[ 3 - i ][ 2 ].disable, - - // rejected_handlers.disable - // fulfilled_handlers.disable - tuples[ 3 - i ][ 3 ].disable, - - // progress_callbacks.lock - tuples[ 0 ][ 2 ].lock, - - // progress_handlers.lock - tuples[ 0 ][ 3 ].lock - ); - } - - // progress_handlers.fire - // fulfilled_handlers.fire - // rejected_handlers.fire - list.add( tuple[ 3 ].fire ); - - // deferred.notify = function() { deferred.notifyWith(...) } - // deferred.resolve = function() { deferred.resolveWith(...) } - // deferred.reject = function() { deferred.rejectWith(...) } - deferred[ tuple[ 0 ] ] = function() { - deferred[ tuple[ 0 ] + "With" ]( this === deferred ? undefined : this, arguments ); - return this; - }; - - // deferred.notifyWith = list.fireWith - // deferred.resolveWith = list.fireWith - // deferred.rejectWith = list.fireWith - deferred[ tuple[ 0 ] + "With" ] = list.fireWith; - } ); - - // Make the deferred a promise - promise.promise( deferred ); - - // Call given func if any - if ( func ) { - func.call( deferred, deferred ); - } - - // All done! - return deferred; - }, - - // Deferred helper - when: function( singleValue ) { - var - - // count of uncompleted subordinates - remaining = arguments.length, - - // count of unprocessed arguments - i = remaining, - - // subordinate fulfillment data - resolveContexts = Array( i ), - resolveValues = slice.call( arguments ), - - // the master Deferred - master = jQuery.Deferred(), - - // subordinate callback factory - updateFunc = function( i ) { - return function( value ) { - resolveContexts[ i ] = this; - resolveValues[ i ] = arguments.length > 1 ? slice.call( arguments ) : value; - if ( !( --remaining ) ) { - master.resolveWith( resolveContexts, resolveValues ); - } - }; - }; - - // Single- and empty arguments are adopted like Promise.resolve - if ( remaining <= 1 ) { - adoptValue( singleValue, master.done( updateFunc( i ) ).resolve, master.reject, - !remaining ); - - // Use .then() to unwrap secondary thenables (cf. gh-3000) - if ( master.state() === "pending" || - isFunction( resolveValues[ i ] && resolveValues[ i ].then ) ) { - - return master.then(); - } - } - - // Multiple arguments are aggregated like Promise.all array elements - while ( i-- ) { - adoptValue( resolveValues[ i ], updateFunc( i ), master.reject ); - } - - return master.promise(); - } -} ); - - -// These usually indicate a programmer mistake during development, -// warn about them ASAP rather than swallowing them by default. 
-var rerrorNames = /^(Eval|Internal|Range|Reference|Syntax|Type|URI)Error$/; - -jQuery.Deferred.exceptionHook = function( error, stack ) { - - // Support: IE 8 - 9 only - // Console exists when dev tools are open, which can happen at any time - if ( window.console && window.console.warn && error && rerrorNames.test( error.name ) ) { - window.console.warn( "jQuery.Deferred exception: " + error.message, error.stack, stack ); - } -}; - - - - -jQuery.readyException = function( error ) { - window.setTimeout( function() { - throw error; - } ); -}; - - - - -// The deferred used on DOM ready -var readyList = jQuery.Deferred(); - -jQuery.fn.ready = function( fn ) { - - readyList - .then( fn ) - - // Wrap jQuery.readyException in a function so that the lookup - // happens at the time of error handling instead of callback - // registration. - .catch( function( error ) { - jQuery.readyException( error ); - } ); - - return this; -}; - -jQuery.extend( { - - // Is the DOM ready to be used? Set to true once it occurs. - isReady: false, - - // A counter to track how many items to wait for before - // the ready event fires. See #6781 - readyWait: 1, - - // Handle when the DOM is ready - ready: function( wait ) { - - // Abort if there are pending holds or we're already ready - if ( wait === true ? --jQuery.readyWait : jQuery.isReady ) { - return; - } - - // Remember that the DOM is ready - jQuery.isReady = true; - - // If a normal DOM Ready event fired, decrement, and wait if need be - if ( wait !== true && --jQuery.readyWait > 0 ) { - return; - } - - // If there are functions bound, to execute - readyList.resolveWith( document, [ jQuery ] ); - } -} ); - -jQuery.ready.then = readyList.then; - -// The ready event handler and self cleanup method -function completed() { - document.removeEventListener( "DOMContentLoaded", completed ); - window.removeEventListener( "load", completed ); - jQuery.ready(); -} - -// Catch cases where $(document).ready() is called -// after the browser event has already occurred. -// Support: IE <=9 - 10 only -// Older IE sometimes signals "interactive" too soon -if ( document.readyState === "complete" || - ( document.readyState !== "loading" && !document.documentElement.doScroll ) ) { - - // Handle it asynchronously to allow scripts the opportunity to delay ready - window.setTimeout( jQuery.ready ); - -} else { - - // Use the handy event callback - document.addEventListener( "DOMContentLoaded", completed ); - - // A fallback to window.onload, that will always work - window.addEventListener( "load", completed ); -} - - - - -// Multifunctional method to get and set values of a collection -// The value/s can optionally be executed if it's a function -var access = function( elems, fn, key, value, chainable, emptyGet, raw ) { - var i = 0, - len = elems.length, - bulk = key == null; - - // Sets many values - if ( toType( key ) === "object" ) { - chainable = true; - for ( i in key ) { - access( elems, fn, i, key[ i ], true, emptyGet, raw ); - } - - // Sets one value - } else if ( value !== undefined ) { - chainable = true; - - if ( !isFunction( value ) ) { - raw = true; - } - - if ( bulk ) { - - // Bulk operations run against the entire set - if ( raw ) { - fn.call( elems, value ); - fn = null; - - // ...except when executing function values - } else { - bulk = fn; - fn = function( elem, _key, value ) { - return bulk.call( jQuery( elem ), value ); - }; - } - } - - if ( fn ) { - for ( ; i < len; i++ ) { - fn( - elems[ i ], key, raw ? 
- value : - value.call( elems[ i ], i, fn( elems[ i ], key ) ) - ); - } - } - } - - if ( chainable ) { - return elems; - } - - // Gets - if ( bulk ) { - return fn.call( elems ); - } - - return len ? fn( elems[ 0 ], key ) : emptyGet; -}; - - -// Matches dashed string for camelizing -var rmsPrefix = /^-ms-/, - rdashAlpha = /-([a-z])/g; - -// Used by camelCase as callback to replace() -function fcamelCase( _all, letter ) { - return letter.toUpperCase(); -} - -// Convert dashed to camelCase; used by the css and data modules -// Support: IE <=9 - 11, Edge 12 - 15 -// Microsoft forgot to hump their vendor prefix (#9572) -function camelCase( string ) { - return string.replace( rmsPrefix, "ms-" ).replace( rdashAlpha, fcamelCase ); -} -var acceptData = function( owner ) { - - // Accepts only: - // - Node - // - Node.ELEMENT_NODE - // - Node.DOCUMENT_NODE - // - Object - // - Any - return owner.nodeType === 1 || owner.nodeType === 9 || !( +owner.nodeType ); -}; - - - - -function Data() { - this.expando = jQuery.expando + Data.uid++; -} - -Data.uid = 1; - -Data.prototype = { - - cache: function( owner ) { - - // Check if the owner object already has a cache - var value = owner[ this.expando ]; - - // If not, create one - if ( !value ) { - value = {}; - - // We can accept data for non-element nodes in modern browsers, - // but we should not, see #8335. - // Always return an empty object. - if ( acceptData( owner ) ) { - - // If it is a node unlikely to be stringify-ed or looped over - // use plain assignment - if ( owner.nodeType ) { - owner[ this.expando ] = value; - - // Otherwise secure it in a non-enumerable property - // configurable must be true to allow the property to be - // deleted when data is removed - } else { - Object.defineProperty( owner, this.expando, { - value: value, - configurable: true - } ); - } - } - } - - return value; - }, - set: function( owner, data, value ) { - var prop, - cache = this.cache( owner ); - - // Handle: [ owner, key, value ] args - // Always use camelCase key (gh-2257) - if ( typeof data === "string" ) { - cache[ camelCase( data ) ] = value; - - // Handle: [ owner, { properties } ] args - } else { - - // Copy the properties one-by-one to the cache object - for ( prop in data ) { - cache[ camelCase( prop ) ] = data[ prop ]; - } - } - return cache; - }, - get: function( owner, key ) { - return key === undefined ? - this.cache( owner ) : - - // Always use camelCase key (gh-2257) - owner[ this.expando ] && owner[ this.expando ][ camelCase( key ) ]; - }, - access: function( owner, key, value ) { - - // In cases where either: - // - // 1. No key was specified - // 2. A string key was specified, but no value provided - // - // Take the "read" path and allow the get method to determine - // which value to return, respectively either: - // - // 1. The entire cache object - // 2. The data stored at the key - // - if ( key === undefined || - ( ( key && typeof key === "string" ) && value === undefined ) ) { - - return this.get( owner, key ); - } - - // When the key is not a string, or both a key and value - // are specified, set or extend (existing objects) with either: - // - // 1. An object of properties - // 2. A key and value - // - this.set( owner, key, value ); - - // Since the "set" path can have two possible entry points - // return the expected data based on which path was taken[*] - return value !== undefined ? 
value : key; - }, - remove: function( owner, key ) { - var i, - cache = owner[ this.expando ]; - - if ( cache === undefined ) { - return; - } - - if ( key !== undefined ) { - - // Support array or space separated string of keys - if ( Array.isArray( key ) ) { - - // If key is an array of keys... - // We always set camelCase keys, so remove that. - key = key.map( camelCase ); - } else { - key = camelCase( key ); - - // If a key with the spaces exists, use it. - // Otherwise, create an array by matching non-whitespace - key = key in cache ? - [ key ] : - ( key.match( rnothtmlwhite ) || [] ); - } - - i = key.length; - - while ( i-- ) { - delete cache[ key[ i ] ]; - } - } - - // Remove the expando if there's no more data - if ( key === undefined || jQuery.isEmptyObject( cache ) ) { - - // Support: Chrome <=35 - 45 - // Webkit & Blink performance suffers when deleting properties - // from DOM nodes, so set to undefined instead - // https://bugs.chromium.org/p/chromium/issues/detail?id=378607 (bug restricted) - if ( owner.nodeType ) { - owner[ this.expando ] = undefined; - } else { - delete owner[ this.expando ]; - } - } - }, - hasData: function( owner ) { - var cache = owner[ this.expando ]; - return cache !== undefined && !jQuery.isEmptyObject( cache ); - } -}; -var dataPriv = new Data(); - -var dataUser = new Data(); - - - -// Implementation Summary -// -// 1. Enforce API surface and semantic compatibility with 1.9.x branch -// 2. Improve the module's maintainability by reducing the storage -// paths to a single mechanism. -// 3. Use the same single mechanism to support "private" and "user" data. -// 4. _Never_ expose "private" data to user code (TODO: Drop _data, _removeData) -// 5. Avoid exposing implementation details on user objects (eg. expando properties) -// 6. Provide a clear path for implementation upgrade to WeakMap in 2014 - -var rbrace = /^(?:\{[\w\W]*\}|\[[\w\W]*\])$/, - rmultiDash = /[A-Z]/g; - -function getData( data ) { - if ( data === "true" ) { - return true; - } - - if ( data === "false" ) { - return false; - } - - if ( data === "null" ) { - return null; - } - - // Only convert to a number if it doesn't change the string - if ( data === +data + "" ) { - return +data; - } - - if ( rbrace.test( data ) ) { - return JSON.parse( data ); - } - - return data; -} - -function dataAttr( elem, key, data ) { - var name; - - // If nothing was found internally, try to fetch any - // data from the HTML5 data-* attribute - if ( data === undefined && elem.nodeType === 1 ) { - name = "data-" + key.replace( rmultiDash, "-$&" ).toLowerCase(); - data = elem.getAttribute( name ); - - if ( typeof data === "string" ) { - try { - data = getData( data ); - } catch ( e ) {} - - // Make sure we set the data so it isn't changed later - dataUser.set( elem, key, data ); - } else { - data = undefined; - } - } - return data; -} - -jQuery.extend( { - hasData: function( elem ) { - return dataUser.hasData( elem ) || dataPriv.hasData( elem ); - }, - - data: function( elem, name, data ) { - return dataUser.access( elem, name, data ); - }, - - removeData: function( elem, name ) { - dataUser.remove( elem, name ); - }, - - // TODO: Now that all calls to _data and _removeData have been replaced - // with direct calls to dataPriv methods, these can be deprecated. 
- _data: function( elem, name, data ) { - return dataPriv.access( elem, name, data ); - }, - - _removeData: function( elem, name ) { - dataPriv.remove( elem, name ); - } -} ); - -jQuery.fn.extend( { - data: function( key, value ) { - var i, name, data, - elem = this[ 0 ], - attrs = elem && elem.attributes; - - // Gets all values - if ( key === undefined ) { - if ( this.length ) { - data = dataUser.get( elem ); - - if ( elem.nodeType === 1 && !dataPriv.get( elem, "hasDataAttrs" ) ) { - i = attrs.length; - while ( i-- ) { - - // Support: IE 11 only - // The attrs elements can be null (#14894) - if ( attrs[ i ] ) { - name = attrs[ i ].name; - if ( name.indexOf( "data-" ) === 0 ) { - name = camelCase( name.slice( 5 ) ); - dataAttr( elem, name, data[ name ] ); - } - } - } - dataPriv.set( elem, "hasDataAttrs", true ); - } - } - - return data; - } - - // Sets multiple values - if ( typeof key === "object" ) { - return this.each( function() { - dataUser.set( this, key ); - } ); - } - - return access( this, function( value ) { - var data; - - // The calling jQuery object (element matches) is not empty - // (and therefore has an element appears at this[ 0 ]) and the - // `value` parameter was not undefined. An empty jQuery object - // will result in `undefined` for elem = this[ 0 ] which will - // throw an exception if an attempt to read a data cache is made. - if ( elem && value === undefined ) { - - // Attempt to get data from the cache - // The key will always be camelCased in Data - data = dataUser.get( elem, key ); - if ( data !== undefined ) { - return data; - } - - // Attempt to "discover" the data in - // HTML5 custom data-* attrs - data = dataAttr( elem, key ); - if ( data !== undefined ) { - return data; - } - - // We tried really hard, but the data doesn't exist. - return; - } - - // Set the data... 
- this.each( function() { - - // We always store the camelCased key - dataUser.set( this, key, value ); - } ); - }, null, value, arguments.length > 1, null, true ); - }, - - removeData: function( key ) { - return this.each( function() { - dataUser.remove( this, key ); - } ); - } -} ); - - -jQuery.extend( { - queue: function( elem, type, data ) { - var queue; - - if ( elem ) { - type = ( type || "fx" ) + "queue"; - queue = dataPriv.get( elem, type ); - - // Speed up dequeue by getting out quickly if this is just a lookup - if ( data ) { - if ( !queue || Array.isArray( data ) ) { - queue = dataPriv.access( elem, type, jQuery.makeArray( data ) ); - } else { - queue.push( data ); - } - } - return queue || []; - } - }, - - dequeue: function( elem, type ) { - type = type || "fx"; - - var queue = jQuery.queue( elem, type ), - startLength = queue.length, - fn = queue.shift(), - hooks = jQuery._queueHooks( elem, type ), - next = function() { - jQuery.dequeue( elem, type ); - }; - - // If the fx queue is dequeued, always remove the progress sentinel - if ( fn === "inprogress" ) { - fn = queue.shift(); - startLength--; - } - - if ( fn ) { - - // Add a progress sentinel to prevent the fx queue from being - // automatically dequeued - if ( type === "fx" ) { - queue.unshift( "inprogress" ); - } - - // Clear up the last queue stop function - delete hooks.stop; - fn.call( elem, next, hooks ); - } - - if ( !startLength && hooks ) { - hooks.empty.fire(); - } - }, - - // Not public - generate a queueHooks object, or return the current one - _queueHooks: function( elem, type ) { - var key = type + "queueHooks"; - return dataPriv.get( elem, key ) || dataPriv.access( elem, key, { - empty: jQuery.Callbacks( "once memory" ).add( function() { - dataPriv.remove( elem, [ type + "queue", key ] ); - } ) - } ); - } -} ); - -jQuery.fn.extend( { - queue: function( type, data ) { - var setter = 2; - - if ( typeof type !== "string" ) { - data = type; - type = "fx"; - setter--; - } - - if ( arguments.length < setter ) { - return jQuery.queue( this[ 0 ], type ); - } - - return data === undefined ? 
- this : - this.each( function() { - var queue = jQuery.queue( this, type, data ); - - // Ensure a hooks for this queue - jQuery._queueHooks( this, type ); - - if ( type === "fx" && queue[ 0 ] !== "inprogress" ) { - jQuery.dequeue( this, type ); - } - } ); - }, - dequeue: function( type ) { - return this.each( function() { - jQuery.dequeue( this, type ); - } ); - }, - clearQueue: function( type ) { - return this.queue( type || "fx", [] ); - }, - - // Get a promise resolved when queues of a certain type - // are emptied (fx is the type by default) - promise: function( type, obj ) { - var tmp, - count = 1, - defer = jQuery.Deferred(), - elements = this, - i = this.length, - resolve = function() { - if ( !( --count ) ) { - defer.resolveWith( elements, [ elements ] ); - } - }; - - if ( typeof type !== "string" ) { - obj = type; - type = undefined; - } - type = type || "fx"; - - while ( i-- ) { - tmp = dataPriv.get( elements[ i ], type + "queueHooks" ); - if ( tmp && tmp.empty ) { - count++; - tmp.empty.add( resolve ); - } - } - resolve(); - return defer.promise( obj ); - } -} ); -var pnum = ( /[+-]?(?:\d*\.|)\d+(?:[eE][+-]?\d+|)/ ).source; - -var rcssNum = new RegExp( "^(?:([+-])=|)(" + pnum + ")([a-z%]*)$", "i" ); - - -var cssExpand = [ "Top", "Right", "Bottom", "Left" ]; - -var documentElement = document.documentElement; - - - - var isAttached = function( elem ) { - return jQuery.contains( elem.ownerDocument, elem ); - }, - composed = { composed: true }; - - // Support: IE 9 - 11+, Edge 12 - 18+, iOS 10.0 - 10.2 only - // Check attachment across shadow DOM boundaries when possible (gh-3504) - // Support: iOS 10.0-10.2 only - // Early iOS 10 versions support `attachShadow` but not `getRootNode`, - // leading to errors. We need to check for `getRootNode`. - if ( documentElement.getRootNode ) { - isAttached = function( elem ) { - return jQuery.contains( elem.ownerDocument, elem ) || - elem.getRootNode( composed ) === elem.ownerDocument; - }; - } -var isHiddenWithinTree = function( elem, el ) { - - // isHiddenWithinTree might be called from jQuery#filter function; - // in that case, element will be second argument - elem = el || elem; - - // Inline style trumps all - return elem.style.display === "none" || - elem.style.display === "" && - - // Otherwise, check computed style - // Support: Firefox <=43 - 45 - // Disconnected elements can have computed display: none, so first confirm that elem is - // in the document. - isAttached( elem ) && - - jQuery.css( elem, "display" ) === "none"; - }; - - - -function adjustCSS( elem, prop, valueParts, tween ) { - var adjusted, scale, - maxIterations = 20, - currentValue = tween ? - function() { - return tween.cur(); - } : - function() { - return jQuery.css( elem, prop, "" ); - }, - initial = currentValue(), - unit = valueParts && valueParts[ 3 ] || ( jQuery.cssNumber[ prop ] ? 
"" : "px" ), - - // Starting value computation is required for potential unit mismatches - initialInUnit = elem.nodeType && - ( jQuery.cssNumber[ prop ] || unit !== "px" && +initial ) && - rcssNum.exec( jQuery.css( elem, prop ) ); - - if ( initialInUnit && initialInUnit[ 3 ] !== unit ) { - - // Support: Firefox <=54 - // Halve the iteration target value to prevent interference from CSS upper bounds (gh-2144) - initial = initial / 2; - - // Trust units reported by jQuery.css - unit = unit || initialInUnit[ 3 ]; - - // Iteratively approximate from a nonzero starting point - initialInUnit = +initial || 1; - - while ( maxIterations-- ) { - - // Evaluate and update our best guess (doubling guesses that zero out). - // Finish if the scale equals or crosses 1 (making the old*new product non-positive). - jQuery.style( elem, prop, initialInUnit + unit ); - if ( ( 1 - scale ) * ( 1 - ( scale = currentValue() / initial || 0.5 ) ) <= 0 ) { - maxIterations = 0; - } - initialInUnit = initialInUnit / scale; - - } - - initialInUnit = initialInUnit * 2; - jQuery.style( elem, prop, initialInUnit + unit ); - - // Make sure we update the tween properties later on - valueParts = valueParts || []; - } - - if ( valueParts ) { - initialInUnit = +initialInUnit || +initial || 0; - - // Apply relative offset (+=/-=) if specified - adjusted = valueParts[ 1 ] ? - initialInUnit + ( valueParts[ 1 ] + 1 ) * valueParts[ 2 ] : - +valueParts[ 2 ]; - if ( tween ) { - tween.unit = unit; - tween.start = initialInUnit; - tween.end = adjusted; - } - } - return adjusted; -} - - -var defaultDisplayMap = {}; - -function getDefaultDisplay( elem ) { - var temp, - doc = elem.ownerDocument, - nodeName = elem.nodeName, - display = defaultDisplayMap[ nodeName ]; - - if ( display ) { - return display; - } - - temp = doc.body.appendChild( doc.createElement( nodeName ) ); - display = jQuery.css( temp, "display" ); - - temp.parentNode.removeChild( temp ); - - if ( display === "none" ) { - display = "block"; - } - defaultDisplayMap[ nodeName ] = display; - - return display; -} - -function showHide( elements, show ) { - var display, elem, - values = [], - index = 0, - length = elements.length; - - // Determine new display value for elements that need to change - for ( ; index < length; index++ ) { - elem = elements[ index ]; - if ( !elem.style ) { - continue; - } - - display = elem.style.display; - if ( show ) { - - // Since we force visibility upon cascade-hidden elements, an immediate (and slow) - // check is required in this first loop unless we have a nonempty display value (either - // inline or about-to-be-restored) - if ( display === "none" ) { - values[ index ] = dataPriv.get( elem, "display" ) || null; - if ( !values[ index ] ) { - elem.style.display = ""; - } - } - if ( elem.style.display === "" && isHiddenWithinTree( elem ) ) { - values[ index ] = getDefaultDisplay( elem ); - } - } else { - if ( display !== "none" ) { - values[ index ] = "none"; - - // Remember what we're overwriting - dataPriv.set( elem, "display", display ); - } - } - } - - // Set the display of the elements in a second loop to avoid constant reflow - for ( index = 0; index < length; index++ ) { - if ( values[ index ] != null ) { - elements[ index ].style.display = values[ index ]; - } - } - - return elements; -} - -jQuery.fn.extend( { - show: function() { - return showHide( this, true ); - }, - hide: function() { - return showHide( this ); - }, - toggle: function( state ) { - if ( typeof state === "boolean" ) { - return state ? 
this.show() : this.hide(); - } - - return this.each( function() { - if ( isHiddenWithinTree( this ) ) { - jQuery( this ).show(); - } else { - jQuery( this ).hide(); - } - } ); - } -} ); -var rcheckableType = ( /^(?:checkbox|radio)$/i ); - -var rtagName = ( /<([a-z][^\/\0>\x20\t\r\n\f]*)/i ); - -var rscriptType = ( /^$|^module$|\/(?:java|ecma)script/i ); - - - -( function() { - var fragment = document.createDocumentFragment(), - div = fragment.appendChild( document.createElement( "div" ) ), - input = document.createElement( "input" ); - - // Support: Android 4.0 - 4.3 only - // Check state lost if the name is set (#11217) - // Support: Windows Web Apps (WWA) - // `name` and `type` must use .setAttribute for WWA (#14901) - input.setAttribute( "type", "radio" ); - input.setAttribute( "checked", "checked" ); - input.setAttribute( "name", "t" ); - - div.appendChild( input ); - - // Support: Android <=4.1 only - // Older WebKit doesn't clone checked state correctly in fragments - support.checkClone = div.cloneNode( true ).cloneNode( true ).lastChild.checked; - - // Support: IE <=11 only - // Make sure textarea (and checkbox) defaultValue is properly cloned - div.innerHTML = ""; - support.noCloneChecked = !!div.cloneNode( true ).lastChild.defaultValue; - - // Support: IE <=9 only - // IE <=9 replaces "; - support.option = !!div.lastChild; -} )(); - - -// We have to close these tags to support XHTML (#13200) -var wrapMap = { - - // XHTML parsers do not magically insert elements in the - // same way that tag soup parsers do. So we cannot shorten - // this by omitting or other required elements. - thead: [ 1, "", "
" ], - col: [ 2, "", "
" ], - tr: [ 2, "", "
" ], - td: [ 3, "", "
" ], - - _default: [ 0, "", "" ] -}; - -wrapMap.tbody = wrapMap.tfoot = wrapMap.colgroup = wrapMap.caption = wrapMap.thead; -wrapMap.th = wrapMap.td; - -// Support: IE <=9 only -if ( !support.option ) { - wrapMap.optgroup = wrapMap.option = [ 1, "" ]; -} - - -function getAll( context, tag ) { - - // Support: IE <=9 - 11 only - // Use typeof to avoid zero-argument method invocation on host objects (#15151) - var ret; - - if ( typeof context.getElementsByTagName !== "undefined" ) { - ret = context.getElementsByTagName( tag || "*" ); - - } else if ( typeof context.querySelectorAll !== "undefined" ) { - ret = context.querySelectorAll( tag || "*" ); - - } else { - ret = []; - } - - if ( tag === undefined || tag && nodeName( context, tag ) ) { - return jQuery.merge( [ context ], ret ); - } - - return ret; -} - - -// Mark scripts as having already been evaluated -function setGlobalEval( elems, refElements ) { - var i = 0, - l = elems.length; - - for ( ; i < l; i++ ) { - dataPriv.set( - elems[ i ], - "globalEval", - !refElements || dataPriv.get( refElements[ i ], "globalEval" ) - ); - } -} - - -var rhtml = /<|&#?\w+;/; - -function buildFragment( elems, context, scripts, selection, ignored ) { - var elem, tmp, tag, wrap, attached, j, - fragment = context.createDocumentFragment(), - nodes = [], - i = 0, - l = elems.length; - - for ( ; i < l; i++ ) { - elem = elems[ i ]; - - if ( elem || elem === 0 ) { - - // Add nodes directly - if ( toType( elem ) === "object" ) { - - // Support: Android <=4.0 only, PhantomJS 1 only - // push.apply(_, arraylike) throws on ancient WebKit - jQuery.merge( nodes, elem.nodeType ? [ elem ] : elem ); - - // Convert non-html into a text node - } else if ( !rhtml.test( elem ) ) { - nodes.push( context.createTextNode( elem ) ); - - // Convert html into DOM nodes - } else { - tmp = tmp || fragment.appendChild( context.createElement( "div" ) ); - - // Deserialize a standard representation - tag = ( rtagName.exec( elem ) || [ "", "" ] )[ 1 ].toLowerCase(); - wrap = wrapMap[ tag ] || wrapMap._default; - tmp.innerHTML = wrap[ 1 ] + jQuery.htmlPrefilter( elem ) + wrap[ 2 ]; - - // Descend through wrappers to the right content - j = wrap[ 0 ]; - while ( j-- ) { - tmp = tmp.lastChild; - } - - // Support: Android <=4.0 only, PhantomJS 1 only - // push.apply(_, arraylike) throws on ancient WebKit - jQuery.merge( nodes, tmp.childNodes ); - - // Remember the top-level container - tmp = fragment.firstChild; - - // Ensure the created nodes are orphaned (#12392) - tmp.textContent = ""; - } - } - } - - // Remove wrapper from fragment - fragment.textContent = ""; - - i = 0; - while ( ( elem = nodes[ i++ ] ) ) { - - // Skip elements already in the context collection (trac-4087) - if ( selection && jQuery.inArray( elem, selection ) > -1 ) { - if ( ignored ) { - ignored.push( elem ); - } - continue; - } - - attached = isAttached( elem ); - - // Append to fragment - tmp = getAll( fragment.appendChild( elem ), "script" ); - - // Preserve script evaluation history - if ( attached ) { - setGlobalEval( tmp ); - } - - // Capture executables - if ( scripts ) { - j = 0; - while ( ( elem = tmp[ j++ ] ) ) { - if ( rscriptType.test( elem.type || "" ) ) { - scripts.push( elem ); - } - } - } - } - - return fragment; -} - - -var - rkeyEvent = /^key/, - rmouseEvent = /^(?:mouse|pointer|contextmenu|drag|drop)|click/, - rtypenamespace = /^([^.]*)(?:\.(.+)|)/; - -function returnTrue() { - return true; -} - -function returnFalse() { - return false; -} - -// Support: IE <=9 - 11+ -// focus() and blur() are 
asynchronous, except when they are no-op. -// So expect focus to be synchronous when the element is already active, -// and blur to be synchronous when the element is not already active. -// (focus and blur are always synchronous in other supported browsers, -// this just defines when we can count on it). -function expectSync( elem, type ) { - return ( elem === safeActiveElement() ) === ( type === "focus" ); -} - -// Support: IE <=9 only -// Accessing document.activeElement can throw unexpectedly -// https://bugs.jquery.com/ticket/13393 -function safeActiveElement() { - try { - return document.activeElement; - } catch ( err ) { } -} - -function on( elem, types, selector, data, fn, one ) { - var origFn, type; - - // Types can be a map of types/handlers - if ( typeof types === "object" ) { - - // ( types-Object, selector, data ) - if ( typeof selector !== "string" ) { - - // ( types-Object, data ) - data = data || selector; - selector = undefined; - } - for ( type in types ) { - on( elem, type, selector, data, types[ type ], one ); - } - return elem; - } - - if ( data == null && fn == null ) { - - // ( types, fn ) - fn = selector; - data = selector = undefined; - } else if ( fn == null ) { - if ( typeof selector === "string" ) { - - // ( types, selector, fn ) - fn = data; - data = undefined; - } else { - - // ( types, data, fn ) - fn = data; - data = selector; - selector = undefined; - } - } - if ( fn === false ) { - fn = returnFalse; - } else if ( !fn ) { - return elem; - } - - if ( one === 1 ) { - origFn = fn; - fn = function( event ) { - - // Can use an empty set, since event contains the info - jQuery().off( event ); - return origFn.apply( this, arguments ); - }; - - // Use same guid so caller can remove using origFn - fn.guid = origFn.guid || ( origFn.guid = jQuery.guid++ ); - } - return elem.each( function() { - jQuery.event.add( this, types, fn, data, selector ); - } ); -} - -/* - * Helper functions for managing events -- not part of the public interface. - * Props to Dean Edwards' addEvent library for many of the ideas. - */ -jQuery.event = { - - global: {}, - - add: function( elem, types, handler, data, selector ) { - - var handleObjIn, eventHandle, tmp, - events, t, handleObj, - special, handlers, type, namespaces, origType, - elemData = dataPriv.get( elem ); - - // Only attach events to objects that accept data - if ( !acceptData( elem ) ) { - return; - } - - // Caller can pass in an object of custom data in lieu of the handler - if ( handler.handler ) { - handleObjIn = handler; - handler = handleObjIn.handler; - selector = handleObjIn.selector; - } - - // Ensure that invalid selectors throw exceptions at attach time - // Evaluate against documentElement in case elem is a non-element node (e.g., document) - if ( selector ) { - jQuery.find.matchesSelector( documentElement, selector ); - } - - // Make sure that the handler has a unique ID, used to find/remove it later - if ( !handler.guid ) { - handler.guid = jQuery.guid++; - } - - // Init the element's event structure and main handler, if this is the first - if ( !( events = elemData.events ) ) { - events = elemData.events = Object.create( null ); - } - if ( !( eventHandle = elemData.handle ) ) { - eventHandle = elemData.handle = function( e ) { - - // Discard the second event of a jQuery.event.trigger() and - // when an event is called after a page has unloaded - return typeof jQuery !== "undefined" && jQuery.event.triggered !== e.type ? 
- jQuery.event.dispatch.apply( elem, arguments ) : undefined; - }; - } - - // Handle multiple events separated by a space - types = ( types || "" ).match( rnothtmlwhite ) || [ "" ]; - t = types.length; - while ( t-- ) { - tmp = rtypenamespace.exec( types[ t ] ) || []; - type = origType = tmp[ 1 ]; - namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort(); - - // There *must* be a type, no attaching namespace-only handlers - if ( !type ) { - continue; - } - - // If event changes its type, use the special event handlers for the changed type - special = jQuery.event.special[ type ] || {}; - - // If selector defined, determine special event api type, otherwise given type - type = ( selector ? special.delegateType : special.bindType ) || type; - - // Update special based on newly reset type - special = jQuery.event.special[ type ] || {}; - - // handleObj is passed to all event handlers - handleObj = jQuery.extend( { - type: type, - origType: origType, - data: data, - handler: handler, - guid: handler.guid, - selector: selector, - needsContext: selector && jQuery.expr.match.needsContext.test( selector ), - namespace: namespaces.join( "." ) - }, handleObjIn ); - - // Init the event handler queue if we're the first - if ( !( handlers = events[ type ] ) ) { - handlers = events[ type ] = []; - handlers.delegateCount = 0; - - // Only use addEventListener if the special events handler returns false - if ( !special.setup || - special.setup.call( elem, data, namespaces, eventHandle ) === false ) { - - if ( elem.addEventListener ) { - elem.addEventListener( type, eventHandle ); - } - } - } - - if ( special.add ) { - special.add.call( elem, handleObj ); - - if ( !handleObj.handler.guid ) { - handleObj.handler.guid = handler.guid; - } - } - - // Add to the element's handler list, delegates in front - if ( selector ) { - handlers.splice( handlers.delegateCount++, 0, handleObj ); - } else { - handlers.push( handleObj ); - } - - // Keep track of which events have ever been used, for event optimization - jQuery.event.global[ type ] = true; - } - - }, - - // Detach an event or set of events from an element - remove: function( elem, types, handler, selector, mappedTypes ) { - - var j, origCount, tmp, - events, t, handleObj, - special, handlers, type, namespaces, origType, - elemData = dataPriv.hasData( elem ) && dataPriv.get( elem ); - - if ( !elemData || !( events = elemData.events ) ) { - return; - } - - // Once for each type.namespace in types; type may be omitted - types = ( types || "" ).match( rnothtmlwhite ) || [ "" ]; - t = types.length; - while ( t-- ) { - tmp = rtypenamespace.exec( types[ t ] ) || []; - type = origType = tmp[ 1 ]; - namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort(); - - // Unbind all events (on this namespace, if provided) for the element - if ( !type ) { - for ( type in events ) { - jQuery.event.remove( elem, type + types[ t ], handler, selector, true ); - } - continue; - } - - special = jQuery.event.special[ type ] || {}; - type = ( selector ? 
special.delegateType : special.bindType ) || type; - handlers = events[ type ] || []; - tmp = tmp[ 2 ] && - new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" ); - - // Remove matching events - origCount = j = handlers.length; - while ( j-- ) { - handleObj = handlers[ j ]; - - if ( ( mappedTypes || origType === handleObj.origType ) && - ( !handler || handler.guid === handleObj.guid ) && - ( !tmp || tmp.test( handleObj.namespace ) ) && - ( !selector || selector === handleObj.selector || - selector === "**" && handleObj.selector ) ) { - handlers.splice( j, 1 ); - - if ( handleObj.selector ) { - handlers.delegateCount--; - } - if ( special.remove ) { - special.remove.call( elem, handleObj ); - } - } - } - - // Remove generic event handler if we removed something and no more handlers exist - // (avoids potential for endless recursion during removal of special event handlers) - if ( origCount && !handlers.length ) { - if ( !special.teardown || - special.teardown.call( elem, namespaces, elemData.handle ) === false ) { - - jQuery.removeEvent( elem, type, elemData.handle ); - } - - delete events[ type ]; - } - } - - // Remove data and the expando if it's no longer used - if ( jQuery.isEmptyObject( events ) ) { - dataPriv.remove( elem, "handle events" ); - } - }, - - dispatch: function( nativeEvent ) { - - var i, j, ret, matched, handleObj, handlerQueue, - args = new Array( arguments.length ), - - // Make a writable jQuery.Event from the native event object - event = jQuery.event.fix( nativeEvent ), - - handlers = ( - dataPriv.get( this, "events" ) || Object.create( null ) - )[ event.type ] || [], - special = jQuery.event.special[ event.type ] || {}; - - // Use the fix-ed jQuery.Event rather than the (read-only) native event - args[ 0 ] = event; - - for ( i = 1; i < arguments.length; i++ ) { - args[ i ] = arguments[ i ]; - } - - event.delegateTarget = this; - - // Call the preDispatch hook for the mapped type, and let it bail if desired - if ( special.preDispatch && special.preDispatch.call( this, event ) === false ) { - return; - } - - // Determine handlers - handlerQueue = jQuery.event.handlers.call( this, event, handlers ); - - // Run delegates first; they may want to stop propagation beneath us - i = 0; - while ( ( matched = handlerQueue[ i++ ] ) && !event.isPropagationStopped() ) { - event.currentTarget = matched.elem; - - j = 0; - while ( ( handleObj = matched.handlers[ j++ ] ) && - !event.isImmediatePropagationStopped() ) { - - // If the event is namespaced, then each handler is only invoked if it is - // specially universal or its namespaces are a superset of the event's. 
- if ( !event.rnamespace || handleObj.namespace === false || - event.rnamespace.test( handleObj.namespace ) ) { - - event.handleObj = handleObj; - event.data = handleObj.data; - - ret = ( ( jQuery.event.special[ handleObj.origType ] || {} ).handle || - handleObj.handler ).apply( matched.elem, args ); - - if ( ret !== undefined ) { - if ( ( event.result = ret ) === false ) { - event.preventDefault(); - event.stopPropagation(); - } - } - } - } - } - - // Call the postDispatch hook for the mapped type - if ( special.postDispatch ) { - special.postDispatch.call( this, event ); - } - - return event.result; - }, - - handlers: function( event, handlers ) { - var i, handleObj, sel, matchedHandlers, matchedSelectors, - handlerQueue = [], - delegateCount = handlers.delegateCount, - cur = event.target; - - // Find delegate handlers - if ( delegateCount && - - // Support: IE <=9 - // Black-hole SVG instance trees (trac-13180) - cur.nodeType && - - // Support: Firefox <=42 - // Suppress spec-violating clicks indicating a non-primary pointer button (trac-3861) - // https://www.w3.org/TR/DOM-Level-3-Events/#event-type-click - // Support: IE 11 only - // ...but not arrow key "clicks" of radio inputs, which can have `button` -1 (gh-2343) - !( event.type === "click" && event.button >= 1 ) ) { - - for ( ; cur !== this; cur = cur.parentNode || this ) { - - // Don't check non-elements (#13208) - // Don't process clicks on disabled elements (#6911, #8165, #11382, #11764) - if ( cur.nodeType === 1 && !( event.type === "click" && cur.disabled === true ) ) { - matchedHandlers = []; - matchedSelectors = {}; - for ( i = 0; i < delegateCount; i++ ) { - handleObj = handlers[ i ]; - - // Don't conflict with Object.prototype properties (#13203) - sel = handleObj.selector + " "; - - if ( matchedSelectors[ sel ] === undefined ) { - matchedSelectors[ sel ] = handleObj.needsContext ? - jQuery( sel, this ).index( cur ) > -1 : - jQuery.find( sel, this, null, [ cur ] ).length; - } - if ( matchedSelectors[ sel ] ) { - matchedHandlers.push( handleObj ); - } - } - if ( matchedHandlers.length ) { - handlerQueue.push( { elem: cur, handlers: matchedHandlers } ); - } - } - } - } - - // Add the remaining (directly-bound) handlers - cur = this; - if ( delegateCount < handlers.length ) { - handlerQueue.push( { elem: cur, handlers: handlers.slice( delegateCount ) } ); - } - - return handlerQueue; - }, - - addProp: function( name, hook ) { - Object.defineProperty( jQuery.Event.prototype, name, { - enumerable: true, - configurable: true, - - get: isFunction( hook ) ? - function() { - if ( this.originalEvent ) { - return hook( this.originalEvent ); - } - } : - function() { - if ( this.originalEvent ) { - return this.originalEvent[ name ]; - } - }, - - set: function( value ) { - Object.defineProperty( this, name, { - enumerable: true, - configurable: true, - writable: true, - value: value - } ); - } - } ); - }, - - fix: function( originalEvent ) { - return originalEvent[ jQuery.expando ] ? - originalEvent : - new jQuery.Event( originalEvent ); - }, - - special: { - load: { - - // Prevent triggered image.load events from bubbling to window.load - noBubble: true - }, - click: { - - // Utilize native event to ensure correct state for checkable inputs - setup: function( data ) { - - // For mutual compressibility with _default, replace `this` access with a local var. - // `|| data` is dead code meant only to preserve the variable through minification. 
- var el = this || data; - - // Claim the first handler - if ( rcheckableType.test( el.type ) && - el.click && nodeName( el, "input" ) ) { - - // dataPriv.set( el, "click", ... ) - leverageNative( el, "click", returnTrue ); - } - - // Return false to allow normal processing in the caller - return false; - }, - trigger: function( data ) { - - // For mutual compressibility with _default, replace `this` access with a local var. - // `|| data` is dead code meant only to preserve the variable through minification. - var el = this || data; - - // Force setup before triggering a click - if ( rcheckableType.test( el.type ) && - el.click && nodeName( el, "input" ) ) { - - leverageNative( el, "click" ); - } - - // Return non-false to allow normal event-path propagation - return true; - }, - - // For cross-browser consistency, suppress native .click() on links - // Also prevent it if we're currently inside a leveraged native-event stack - _default: function( event ) { - var target = event.target; - return rcheckableType.test( target.type ) && - target.click && nodeName( target, "input" ) && - dataPriv.get( target, "click" ) || - nodeName( target, "a" ); - } - }, - - beforeunload: { - postDispatch: function( event ) { - - // Support: Firefox 20+ - // Firefox doesn't alert if the returnValue field is not set. - if ( event.result !== undefined && event.originalEvent ) { - event.originalEvent.returnValue = event.result; - } - } - } - } -}; - -// Ensure the presence of an event listener that handles manually-triggered -// synthetic events by interrupting progress until reinvoked in response to -// *native* events that it fires directly, ensuring that state changes have -// already occurred before other listeners are invoked. -function leverageNative( el, type, expectSync ) { - - // Missing expectSync indicates a trigger call, which must force setup through jQuery.event.add - if ( !expectSync ) { - if ( dataPriv.get( el, type ) === undefined ) { - jQuery.event.add( el, type, returnTrue ); - } - return; - } - - // Register the controller as a special universal handler for all event namespaces - dataPriv.set( el, type, false ); - jQuery.event.add( el, type, { - namespace: false, - handler: function( event ) { - var notAsync, result, - saved = dataPriv.get( this, type ); - - if ( ( event.isTrigger & 1 ) && this[ type ] ) { - - // Interrupt processing of the outer synthetic .trigger()ed event - // Saved data should be false in such cases, but might be a leftover capture object - // from an async native handler (gh-4350) - if ( !saved.length ) { - - // Store arguments for use when handling the inner native event - // There will always be at least one argument (an event object), so this array - // will not be confused with a leftover capture object. 
- saved = slice.call( arguments ); - dataPriv.set( this, type, saved ); - - // Trigger the native event and capture its result - // Support: IE <=9 - 11+ - // focus() and blur() are asynchronous - notAsync = expectSync( this, type ); - this[ type ](); - result = dataPriv.get( this, type ); - if ( saved !== result || notAsync ) { - dataPriv.set( this, type, false ); - } else { - result = {}; - } - if ( saved !== result ) { - - // Cancel the outer synthetic event - event.stopImmediatePropagation(); - event.preventDefault(); - return result.value; - } - - // If this is an inner synthetic event for an event with a bubbling surrogate - // (focus or blur), assume that the surrogate already propagated from triggering the - // native event and prevent that from happening again here. - // This technically gets the ordering wrong w.r.t. to `.trigger()` (in which the - // bubbling surrogate propagates *after* the non-bubbling base), but that seems - // less bad than duplication. - } else if ( ( jQuery.event.special[ type ] || {} ).delegateType ) { - event.stopPropagation(); - } - - // If this is a native event triggered above, everything is now in order - // Fire an inner synthetic event with the original arguments - } else if ( saved.length ) { - - // ...and capture the result - dataPriv.set( this, type, { - value: jQuery.event.trigger( - - // Support: IE <=9 - 11+ - // Extend with the prototype to reset the above stopImmediatePropagation() - jQuery.extend( saved[ 0 ], jQuery.Event.prototype ), - saved.slice( 1 ), - this - ) - } ); - - // Abort handling of the native event - event.stopImmediatePropagation(); - } - } - } ); -} - -jQuery.removeEvent = function( elem, type, handle ) { - - // This "if" is needed for plain objects - if ( elem.removeEventListener ) { - elem.removeEventListener( type, handle ); - } -}; - -jQuery.Event = function( src, props ) { - - // Allow instantiation without the 'new' keyword - if ( !( this instanceof jQuery.Event ) ) { - return new jQuery.Event( src, props ); - } - - // Event object - if ( src && src.type ) { - this.originalEvent = src; - this.type = src.type; - - // Events bubbling up the document may have been marked as prevented - // by a handler lower down the tree; reflect the correct value. - this.isDefaultPrevented = src.defaultPrevented || - src.defaultPrevented === undefined && - - // Support: Android <=2.3 only - src.returnValue === false ? - returnTrue : - returnFalse; - - // Create target properties - // Support: Safari <=6 - 7 only - // Target should not be a text node (#504, #13143) - this.target = ( src.target && src.target.nodeType === 3 ) ? 
- src.target.parentNode : - src.target; - - this.currentTarget = src.currentTarget; - this.relatedTarget = src.relatedTarget; - - // Event type - } else { - this.type = src; - } - - // Put explicitly provided properties onto the event object - if ( props ) { - jQuery.extend( this, props ); - } - - // Create a timestamp if incoming event doesn't have one - this.timeStamp = src && src.timeStamp || Date.now(); - - // Mark it as fixed - this[ jQuery.expando ] = true; -}; - -// jQuery.Event is based on DOM3 Events as specified by the ECMAScript Language Binding -// https://www.w3.org/TR/2003/WD-DOM-Level-3-Events-20030331/ecma-script-binding.html -jQuery.Event.prototype = { - constructor: jQuery.Event, - isDefaultPrevented: returnFalse, - isPropagationStopped: returnFalse, - isImmediatePropagationStopped: returnFalse, - isSimulated: false, - - preventDefault: function() { - var e = this.originalEvent; - - this.isDefaultPrevented = returnTrue; - - if ( e && !this.isSimulated ) { - e.preventDefault(); - } - }, - stopPropagation: function() { - var e = this.originalEvent; - - this.isPropagationStopped = returnTrue; - - if ( e && !this.isSimulated ) { - e.stopPropagation(); - } - }, - stopImmediatePropagation: function() { - var e = this.originalEvent; - - this.isImmediatePropagationStopped = returnTrue; - - if ( e && !this.isSimulated ) { - e.stopImmediatePropagation(); - } - - this.stopPropagation(); - } -}; - -// Includes all common event props including KeyEvent and MouseEvent specific props -jQuery.each( { - altKey: true, - bubbles: true, - cancelable: true, - changedTouches: true, - ctrlKey: true, - detail: true, - eventPhase: true, - metaKey: true, - pageX: true, - pageY: true, - shiftKey: true, - view: true, - "char": true, - code: true, - charCode: true, - key: true, - keyCode: true, - button: true, - buttons: true, - clientX: true, - clientY: true, - offsetX: true, - offsetY: true, - pointerId: true, - pointerType: true, - screenX: true, - screenY: true, - targetTouches: true, - toElement: true, - touches: true, - - which: function( event ) { - var button = event.button; - - // Add which for key events - if ( event.which == null && rkeyEvent.test( event.type ) ) { - return event.charCode != null ? event.charCode : event.keyCode; - } - - // Add which for click: 1 === left; 2 === middle; 3 === right - if ( !event.which && button !== undefined && rmouseEvent.test( event.type ) ) { - if ( button & 1 ) { - return 1; - } - - if ( button & 2 ) { - return 3; - } - - if ( button & 4 ) { - return 2; - } - - return 0; - } - - return event.which; - } -}, jQuery.event.addProp ); - -jQuery.each( { focus: "focusin", blur: "focusout" }, function( type, delegateType ) { - jQuery.event.special[ type ] = { - - // Utilize native event if possible so blur/focus sequence is correct - setup: function() { - - // Claim the first handler - // dataPriv.set( this, "focus", ... ) - // dataPriv.set( this, "blur", ... ) - leverageNative( this, type, expectSync ); - - // Return false to allow normal processing in the caller - return false; - }, - trigger: function() { - - // Force setup before trigger - leverageNative( this, type ); - - // Return non-false to allow normal event-path propagation - return true; - }, - - delegateType: delegateType - }; -} ); - -// Create mouseenter/leave events using mouseover/out and event-time checks -// so that event delegation works in jQuery. 
-// Do the same for pointerenter/pointerleave and pointerover/pointerout
-//
-// Support: Safari 7 only
-// Safari sends mouseenter too often; see:
-// https://bugs.chromium.org/p/chromium/issues/detail?id=470258
-// for the description of the bug (it existed in older Chrome versions as well).
-jQuery.each( {
- mouseenter: "mouseover",
- mouseleave: "mouseout",
- pointerenter: "pointerover",
- pointerleave: "pointerout"
-}, function( orig, fix ) {
- jQuery.event.special[ orig ] = {
- delegateType: fix,
- bindType: fix,
-
- handle: function( event ) {
- var ret,
- target = this,
- related = event.relatedTarget,
- handleObj = event.handleObj;
-
- // For mouseenter/leave call the handler if related is outside the target.
- // NB: No relatedTarget if the mouse left/entered the browser window
- if ( !related || ( related !== target && !jQuery.contains( target, related ) ) ) {
- event.type = handleObj.origType;
- ret = handleObj.handler.apply( this, arguments );
- event.type = fix;
- }
- return ret;
- }
- };
-} );
-
-jQuery.fn.extend( {
-
- on: function( types, selector, data, fn ) {
- return on( this, types, selector, data, fn );
- },
- one: function( types, selector, data, fn ) {
- return on( this, types, selector, data, fn, 1 );
- },
- off: function( types, selector, fn ) {
- var handleObj, type;
- if ( types && types.preventDefault && types.handleObj ) {
-
- // ( event ) dispatched jQuery.Event
- handleObj = types.handleObj;
- jQuery( types.delegateTarget ).off(
- handleObj.namespace ?
- handleObj.origType + "." + handleObj.namespace :
- handleObj.origType,
- handleObj.selector,
- handleObj.handler
- );
- return this;
- }
- if ( typeof types === "object" ) {
-
- // ( types-object [, selector] )
- for ( type in types ) {
- this.off( type, selector, types[ type ] );
- }
- return this;
- }
- if ( selector === false || typeof selector === "function" ) {
-
- // ( types [, fn] )
- fn = selector;
- selector = undefined;
- }
- if ( fn === false ) {
- fn = returnFalse;
- }
- return this.each( function() {
- jQuery.event.remove( this, types, fn, selector );
- } );
- }
-} );
-
-
-var
-
- // Support: IE <=10 - 11, Edge 12 - 13 only
- // In IE/Edge using regex groups here causes severe slowdowns.
- // See https://connect.microsoft.com/IE/feedback/details/1736512/
- rnoInnerhtml = /<script|<style|<link/i,
-
- // checked="checked" or checked
- rchecked = /checked\s*(?:[^=]|=\s*.checked.)/i,
-
- rcleanScript = /^\s*<!(?:\[CDATA\[|--)|(?:\]\]|--)>\s*$/g;
-
-// Prefer a tbody over its parent table for containing new rows
-function manipulationTarget( elem, content ) {
- if ( nodeName( elem, "table" ) &&
- nodeName( content.nodeType !== 11 ? content : content.firstChild, "tr" ) ) {
-
- return jQuery( elem ).children( "tbody" )[ 0 ] || elem;
- }
-
- return elem;
-}
-
-// Replace/restore the type attribute of script elements for safe DOM manipulation
-function disableScript( elem ) {
- elem.type = ( elem.getAttribute( "type" ) !== null ) + "/" + elem.type;
- return elem;
-}
-function restoreScript( elem ) {
- if ( ( elem.type || "" ).slice( 0, 5 ) === "true/" ) {
- elem.type = elem.type.slice( 5 );
- } else {
- elem.removeAttribute( "type" );
- }
-
- return elem;
-}
-
-function cloneCopyEvent( src, dest ) {
- var i, l, type, pdataOld, udataOld, udataCur, events;
-
- if ( dest.nodeType !== 1 ) {
- return;
- }
-
- // 1. Copy private data: events, handlers, etc.
- if ( dataPriv.hasData( src ) ) { - pdataOld = dataPriv.get( src ); - events = pdataOld.events; - - if ( events ) { - dataPriv.remove( dest, "handle events" ); - - for ( type in events ) { - for ( i = 0, l = events[ type ].length; i < l; i++ ) { - jQuery.event.add( dest, type, events[ type ][ i ] ); - } - } - } - } - - // 2. Copy user data - if ( dataUser.hasData( src ) ) { - udataOld = dataUser.access( src ); - udataCur = jQuery.extend( {}, udataOld ); - - dataUser.set( dest, udataCur ); - } -} - -// Fix IE bugs, see support tests -function fixInput( src, dest ) { - var nodeName = dest.nodeName.toLowerCase(); - - // Fails to persist the checked state of a cloned checkbox or radio button. - if ( nodeName === "input" && rcheckableType.test( src.type ) ) { - dest.checked = src.checked; - - // Fails to return the selected option to the default selected state when cloning options - } else if ( nodeName === "input" || nodeName === "textarea" ) { - dest.defaultValue = src.defaultValue; - } -} - -function domManip( collection, args, callback, ignored ) { - - // Flatten any nested arrays - args = flat( args ); - - var fragment, first, scripts, hasScripts, node, doc, - i = 0, - l = collection.length, - iNoClone = l - 1, - value = args[ 0 ], - valueIsFunction = isFunction( value ); - - // We can't cloneNode fragments that contain checked, in WebKit - if ( valueIsFunction || - ( l > 1 && typeof value === "string" && - !support.checkClone && rchecked.test( value ) ) ) { - return collection.each( function( index ) { - var self = collection.eq( index ); - if ( valueIsFunction ) { - args[ 0 ] = value.call( this, index, self.html() ); - } - domManip( self, args, callback, ignored ); - } ); - } - - if ( l ) { - fragment = buildFragment( args, collection[ 0 ].ownerDocument, false, collection, ignored ); - first = fragment.firstChild; - - if ( fragment.childNodes.length === 1 ) { - fragment = first; - } - - // Require either new content or an interest in ignored elements to invoke the callback - if ( first || ignored ) { - scripts = jQuery.map( getAll( fragment, "script" ), disableScript ); - hasScripts = scripts.length; - - // Use the original fragment for the last item - // instead of the first because it can end up - // being emptied incorrectly in certain situations (#8070). 
- for ( ; i < l; i++ ) { - node = fragment; - - if ( i !== iNoClone ) { - node = jQuery.clone( node, true, true ); - - // Keep references to cloned scripts for later restoration - if ( hasScripts ) { - - // Support: Android <=4.0 only, PhantomJS 1 only - // push.apply(_, arraylike) throws on ancient WebKit - jQuery.merge( scripts, getAll( node, "script" ) ); - } - } - - callback.call( collection[ i ], node, i ); - } - - if ( hasScripts ) { - doc = scripts[ scripts.length - 1 ].ownerDocument; - - // Reenable scripts - jQuery.map( scripts, restoreScript ); - - // Evaluate executable scripts on first document insertion - for ( i = 0; i < hasScripts; i++ ) { - node = scripts[ i ]; - if ( rscriptType.test( node.type || "" ) && - !dataPriv.access( node, "globalEval" ) && - jQuery.contains( doc, node ) ) { - - if ( node.src && ( node.type || "" ).toLowerCase() !== "module" ) { - - // Optional AJAX dependency, but won't run scripts if not present - if ( jQuery._evalUrl && !node.noModule ) { - jQuery._evalUrl( node.src, { - nonce: node.nonce || node.getAttribute( "nonce" ) - }, doc ); - } - } else { - DOMEval( node.textContent.replace( rcleanScript, "" ), node, doc ); - } - } - } - } - } - } - - return collection; -} - -function remove( elem, selector, keepData ) { - var node, - nodes = selector ? jQuery.filter( selector, elem ) : elem, - i = 0; - - for ( ; ( node = nodes[ i ] ) != null; i++ ) { - if ( !keepData && node.nodeType === 1 ) { - jQuery.cleanData( getAll( node ) ); - } - - if ( node.parentNode ) { - if ( keepData && isAttached( node ) ) { - setGlobalEval( getAll( node, "script" ) ); - } - node.parentNode.removeChild( node ); - } - } - - return elem; -} - -jQuery.extend( { - htmlPrefilter: function( html ) { - return html; - }, - - clone: function( elem, dataAndEvents, deepDataAndEvents ) { - var i, l, srcElements, destElements, - clone = elem.cloneNode( true ), - inPage = isAttached( elem ); - - // Fix IE cloning issues - if ( !support.noCloneChecked && ( elem.nodeType === 1 || elem.nodeType === 11 ) && - !jQuery.isXMLDoc( elem ) ) { - - // We eschew Sizzle here for performance reasons: https://jsperf.com/getall-vs-sizzle/2 - destElements = getAll( clone ); - srcElements = getAll( elem ); - - for ( i = 0, l = srcElements.length; i < l; i++ ) { - fixInput( srcElements[ i ], destElements[ i ] ); - } - } - - // Copy the events from the original to the clone - if ( dataAndEvents ) { - if ( deepDataAndEvents ) { - srcElements = srcElements || getAll( elem ); - destElements = destElements || getAll( clone ); - - for ( i = 0, l = srcElements.length; i < l; i++ ) { - cloneCopyEvent( srcElements[ i ], destElements[ i ] ); - } - } else { - cloneCopyEvent( elem, clone ); - } - } - - // Preserve script evaluation history - destElements = getAll( clone, "script" ); - if ( destElements.length > 0 ) { - setGlobalEval( destElements, !inPage && getAll( elem, "script" ) ); - } - - // Return the cloned set - return clone; - }, - - cleanData: function( elems ) { - var data, elem, type, - special = jQuery.event.special, - i = 0; - - for ( ; ( elem = elems[ i ] ) !== undefined; i++ ) { - if ( acceptData( elem ) ) { - if ( ( data = elem[ dataPriv.expando ] ) ) { - if ( data.events ) { - for ( type in data.events ) { - if ( special[ type ] ) { - jQuery.event.remove( elem, type ); - - // This is a shortcut to avoid jQuery.event.remove's overhead - } else { - jQuery.removeEvent( elem, type, data.handle ); - } - } - } - - // Support: Chrome <=35 - 45+ - // Assign undefined instead of using delete, see Data#remove 
- elem[ dataPriv.expando ] = undefined; - } - if ( elem[ dataUser.expando ] ) { - - // Support: Chrome <=35 - 45+ - // Assign undefined instead of using delete, see Data#remove - elem[ dataUser.expando ] = undefined; - } - } - } - } -} ); - -jQuery.fn.extend( { - detach: function( selector ) { - return remove( this, selector, true ); - }, - - remove: function( selector ) { - return remove( this, selector ); - }, - - text: function( value ) { - return access( this, function( value ) { - return value === undefined ? - jQuery.text( this ) : - this.empty().each( function() { - if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { - this.textContent = value; - } - } ); - }, null, value, arguments.length ); - }, - - append: function() { - return domManip( this, arguments, function( elem ) { - if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { - var target = manipulationTarget( this, elem ); - target.appendChild( elem ); - } - } ); - }, - - prepend: function() { - return domManip( this, arguments, function( elem ) { - if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { - var target = manipulationTarget( this, elem ); - target.insertBefore( elem, target.firstChild ); - } - } ); - }, - - before: function() { - return domManip( this, arguments, function( elem ) { - if ( this.parentNode ) { - this.parentNode.insertBefore( elem, this ); - } - } ); - }, - - after: function() { - return domManip( this, arguments, function( elem ) { - if ( this.parentNode ) { - this.parentNode.insertBefore( elem, this.nextSibling ); - } - } ); - }, - - empty: function() { - var elem, - i = 0; - - for ( ; ( elem = this[ i ] ) != null; i++ ) { - if ( elem.nodeType === 1 ) { - - // Prevent memory leaks - jQuery.cleanData( getAll( elem, false ) ); - - // Remove any remaining nodes - elem.textContent = ""; - } - } - - return this; - }, - - clone: function( dataAndEvents, deepDataAndEvents ) { - dataAndEvents = dataAndEvents == null ? false : dataAndEvents; - deepDataAndEvents = deepDataAndEvents == null ? 
dataAndEvents : deepDataAndEvents; - - return this.map( function() { - return jQuery.clone( this, dataAndEvents, deepDataAndEvents ); - } ); - }, - - html: function( value ) { - return access( this, function( value ) { - var elem = this[ 0 ] || {}, - i = 0, - l = this.length; - - if ( value === undefined && elem.nodeType === 1 ) { - return elem.innerHTML; - } - - // See if we can take a shortcut and just use innerHTML - if ( typeof value === "string" && !rnoInnerhtml.test( value ) && - !wrapMap[ ( rtagName.exec( value ) || [ "", "" ] )[ 1 ].toLowerCase() ] ) { - - value = jQuery.htmlPrefilter( value ); - - try { - for ( ; i < l; i++ ) { - elem = this[ i ] || {}; - - // Remove element nodes and prevent memory leaks - if ( elem.nodeType === 1 ) { - jQuery.cleanData( getAll( elem, false ) ); - elem.innerHTML = value; - } - } - - elem = 0; - - // If using innerHTML throws an exception, use the fallback method - } catch ( e ) {} - } - - if ( elem ) { - this.empty().append( value ); - } - }, null, value, arguments.length ); - }, - - replaceWith: function() { - var ignored = []; - - // Make the changes, replacing each non-ignored context element with the new content - return domManip( this, arguments, function( elem ) { - var parent = this.parentNode; - - if ( jQuery.inArray( this, ignored ) < 0 ) { - jQuery.cleanData( getAll( this ) ); - if ( parent ) { - parent.replaceChild( elem, this ); - } - } - - // Force callback invocation - }, ignored ); - } -} ); - -jQuery.each( { - appendTo: "append", - prependTo: "prepend", - insertBefore: "before", - insertAfter: "after", - replaceAll: "replaceWith" -}, function( name, original ) { - jQuery.fn[ name ] = function( selector ) { - var elems, - ret = [], - insert = jQuery( selector ), - last = insert.length - 1, - i = 0; - - for ( ; i <= last; i++ ) { - elems = i === last ? this : this.clone( true ); - jQuery( insert[ i ] )[ original ]( elems ); - - // Support: Android <=4.0 only, PhantomJS 1 only - // .get() because push.apply(_, arraylike) throws on ancient WebKit - push.apply( ret, elems.get() ); - } - - return this.pushStack( ret ); - }; -} ); -var rnumnonpx = new RegExp( "^(" + pnum + ")(?!px)[a-z%]+$", "i" ); - -var getStyles = function( elem ) { - - // Support: IE <=11 only, Firefox <=30 (#15098, #14150) - // IE throws on elements created in popups - // FF meanwhile throws on frame elements through "defaultView.getComputedStyle" - var view = elem.ownerDocument.defaultView; - - if ( !view || !view.opener ) { - view = window; - } - - return view.getComputedStyle( elem ); - }; - -var swap = function( elem, options, callback ) { - var ret, name, - old = {}; - - // Remember the old values, and insert the new ones - for ( name in options ) { - old[ name ] = elem.style[ name ]; - elem.style[ name ] = options[ name ]; - } - - ret = callback.call( elem ); - - // Revert the old values - for ( name in options ) { - elem.style[ name ] = old[ name ]; - } - - return ret; -}; - - -var rboxStyle = new RegExp( cssExpand.join( "|" ), "i" ); - - - -( function() { - - // Executing both pixelPosition & boxSizingReliable tests require only one layout - // so they're executed at the same time to save the second computation. 
- function computeStyleTests() { - - // This is a singleton, we need to execute it only once - if ( !div ) { - return; - } - - container.style.cssText = "position:absolute;left:-11111px;width:60px;" + - "margin-top:1px;padding:0;border:0"; - div.style.cssText = - "position:relative;display:block;box-sizing:border-box;overflow:scroll;" + - "margin:auto;border:1px;padding:1px;" + - "width:60%;top:1%"; - documentElement.appendChild( container ).appendChild( div ); - - var divStyle = window.getComputedStyle( div ); - pixelPositionVal = divStyle.top !== "1%"; - - // Support: Android 4.0 - 4.3 only, Firefox <=3 - 44 - reliableMarginLeftVal = roundPixelMeasures( divStyle.marginLeft ) === 12; - - // Support: Android 4.0 - 4.3 only, Safari <=9.1 - 10.1, iOS <=7.0 - 9.3 - // Some styles come back with percentage values, even though they shouldn't - div.style.right = "60%"; - pixelBoxStylesVal = roundPixelMeasures( divStyle.right ) === 36; - - // Support: IE 9 - 11 only - // Detect misreporting of content dimensions for box-sizing:border-box elements - boxSizingReliableVal = roundPixelMeasures( divStyle.width ) === 36; - - // Support: IE 9 only - // Detect overflow:scroll screwiness (gh-3699) - // Support: Chrome <=64 - // Don't get tricked when zoom affects offsetWidth (gh-4029) - div.style.position = "absolute"; - scrollboxSizeVal = roundPixelMeasures( div.offsetWidth / 3 ) === 12; - - documentElement.removeChild( container ); - - // Nullify the div so it wouldn't be stored in the memory and - // it will also be a sign that checks already performed - div = null; - } - - function roundPixelMeasures( measure ) { - return Math.round( parseFloat( measure ) ); - } - - var pixelPositionVal, boxSizingReliableVal, scrollboxSizeVal, pixelBoxStylesVal, - reliableTrDimensionsVal, reliableMarginLeftVal, - container = document.createElement( "div" ), - div = document.createElement( "div" ); - - // Finish early in limited (non-browser) environments - if ( !div.style ) { - return; - } - - // Support: IE <=9 - 11 only - // Style of cloned element affects source element cloned (#8908) - div.style.backgroundClip = "content-box"; - div.cloneNode( true ).style.backgroundClip = ""; - support.clearCloneStyle = div.style.backgroundClip === "content-box"; - - jQuery.extend( support, { - boxSizingReliable: function() { - computeStyleTests(); - return boxSizingReliableVal; - }, - pixelBoxStyles: function() { - computeStyleTests(); - return pixelBoxStylesVal; - }, - pixelPosition: function() { - computeStyleTests(); - return pixelPositionVal; - }, - reliableMarginLeft: function() { - computeStyleTests(); - return reliableMarginLeftVal; - }, - scrollboxSize: function() { - computeStyleTests(); - return scrollboxSizeVal; - }, - - // Support: IE 9 - 11+, Edge 15 - 18+ - // IE/Edge misreport `getComputedStyle` of table rows with width/height - // set in CSS while `offset*` properties report correct values. - // Behavior in IE 9 is more subtle than in newer versions & it passes - // some versions of this test; make sure not to make it pass there! 
- reliableTrDimensions: function() { - var table, tr, trChild, trStyle; - if ( reliableTrDimensionsVal == null ) { - table = document.createElement( "table" ); - tr = document.createElement( "tr" ); - trChild = document.createElement( "div" ); - - table.style.cssText = "position:absolute;left:-11111px"; - tr.style.height = "1px"; - trChild.style.height = "9px"; - - documentElement - .appendChild( table ) - .appendChild( tr ) - .appendChild( trChild ); - - trStyle = window.getComputedStyle( tr ); - reliableTrDimensionsVal = parseInt( trStyle.height ) > 3; - - documentElement.removeChild( table ); - } - return reliableTrDimensionsVal; - } - } ); -} )(); - - -function curCSS( elem, name, computed ) { - var width, minWidth, maxWidth, ret, - - // Support: Firefox 51+ - // Retrieving style before computed somehow - // fixes an issue with getting wrong values - // on detached elements - style = elem.style; - - computed = computed || getStyles( elem ); - - // getPropertyValue is needed for: - // .css('filter') (IE 9 only, #12537) - // .css('--customProperty) (#3144) - if ( computed ) { - ret = computed.getPropertyValue( name ) || computed[ name ]; - - if ( ret === "" && !isAttached( elem ) ) { - ret = jQuery.style( elem, name ); - } - - // A tribute to the "awesome hack by Dean Edwards" - // Android Browser returns percentage for some values, - // but width seems to be reliably pixels. - // This is against the CSSOM draft spec: - // https://drafts.csswg.org/cssom/#resolved-values - if ( !support.pixelBoxStyles() && rnumnonpx.test( ret ) && rboxStyle.test( name ) ) { - - // Remember the original values - width = style.width; - minWidth = style.minWidth; - maxWidth = style.maxWidth; - - // Put in the new values to get a computed value out - style.minWidth = style.maxWidth = style.width = ret; - ret = computed.width; - - // Revert the changed values - style.width = width; - style.minWidth = minWidth; - style.maxWidth = maxWidth; - } - } - - return ret !== undefined ? - - // Support: IE <=9 - 11 only - // IE returns zIndex value as an integer. - ret + "" : - ret; -} - - -function addGetHookIf( conditionFn, hookFn ) { - - // Define the hook, we'll check on the first run if it's really needed. - return { - get: function() { - if ( conditionFn() ) { - - // Hook not needed (or it's not possible to use it due - // to missing dependency), remove it. - delete this.get; - return; - } - - // Hook needed; redefine it so that the support test is not executed again. 
- return ( this.get = hookFn ).apply( this, arguments ); - } - }; -} - - -var cssPrefixes = [ "Webkit", "Moz", "ms" ], - emptyStyle = document.createElement( "div" ).style, - vendorProps = {}; - -// Return a vendor-prefixed property or undefined -function vendorPropName( name ) { - - // Check for vendor prefixed names - var capName = name[ 0 ].toUpperCase() + name.slice( 1 ), - i = cssPrefixes.length; - - while ( i-- ) { - name = cssPrefixes[ i ] + capName; - if ( name in emptyStyle ) { - return name; - } - } -} - -// Return a potentially-mapped jQuery.cssProps or vendor prefixed property -function finalPropName( name ) { - var final = jQuery.cssProps[ name ] || vendorProps[ name ]; - - if ( final ) { - return final; - } - if ( name in emptyStyle ) { - return name; - } - return vendorProps[ name ] = vendorPropName( name ) || name; -} - - -var - - // Swappable if display is none or starts with table - // except "table", "table-cell", or "table-caption" - // See here for display values: https://developer.mozilla.org/en-US/docs/CSS/display - rdisplayswap = /^(none|table(?!-c[ea]).+)/, - rcustomProp = /^--/, - cssShow = { position: "absolute", visibility: "hidden", display: "block" }, - cssNormalTransform = { - letterSpacing: "0", - fontWeight: "400" - }; - -function setPositiveNumber( _elem, value, subtract ) { - - // Any relative (+/-) values have already been - // normalized at this point - var matches = rcssNum.exec( value ); - return matches ? - - // Guard against undefined "subtract", e.g., when used as in cssHooks - Math.max( 0, matches[ 2 ] - ( subtract || 0 ) ) + ( matches[ 3 ] || "px" ) : - value; -} - -function boxModelAdjustment( elem, dimension, box, isBorderBox, styles, computedVal ) { - var i = dimension === "width" ? 1 : 0, - extra = 0, - delta = 0; - - // Adjustment may not be necessary - if ( box === ( isBorderBox ? 
"border" : "content" ) ) { - return 0; - } - - for ( ; i < 4; i += 2 ) { - - // Both box models exclude margin - if ( box === "margin" ) { - delta += jQuery.css( elem, box + cssExpand[ i ], true, styles ); - } - - // If we get here with a content-box, we're seeking "padding" or "border" or "margin" - if ( !isBorderBox ) { - - // Add padding - delta += jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); - - // For "border" or "margin", add border - if ( box !== "padding" ) { - delta += jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); - - // But still keep track of it otherwise - } else { - extra += jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); - } - - // If we get here with a border-box (content + padding + border), we're seeking "content" or - // "padding" or "margin" - } else { - - // For "content", subtract padding - if ( box === "content" ) { - delta -= jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); - } - - // For "content" or "padding", subtract border - if ( box !== "margin" ) { - delta -= jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); - } - } - } - - // Account for positive content-box scroll gutter when requested by providing computedVal - if ( !isBorderBox && computedVal >= 0 ) { - - // offsetWidth/offsetHeight is a rounded sum of content, padding, scroll gutter, and border - // Assuming integer scroll gutter, subtract the rest and round down - delta += Math.max( 0, Math.ceil( - elem[ "offset" + dimension[ 0 ].toUpperCase() + dimension.slice( 1 ) ] - - computedVal - - delta - - extra - - 0.5 - - // If offsetWidth/offsetHeight is unknown, then we can't determine content-box scroll gutter - // Use an explicit zero to avoid NaN (gh-3964) - ) ) || 0; - } - - return delta; -} - -function getWidthOrHeight( elem, dimension, extra ) { - - // Start with computed style - var styles = getStyles( elem ), - - // To avoid forcing a reflow, only fetch boxSizing if we need it (gh-4322). - // Fake content-box until we know it's needed to know the true value. - boxSizingNeeded = !support.boxSizingReliable() || extra, - isBorderBox = boxSizingNeeded && - jQuery.css( elem, "boxSizing", false, styles ) === "border-box", - valueIsBorderBox = isBorderBox, - - val = curCSS( elem, dimension, styles ), - offsetProp = "offset" + dimension[ 0 ].toUpperCase() + dimension.slice( 1 ); - - // Support: Firefox <=54 - // Return a confounding non-pixel value or feign ignorance, as appropriate. - if ( rnumnonpx.test( val ) ) { - if ( !extra ) { - return val; - } - val = "auto"; - } - - - // Support: IE 9 - 11 only - // Use offsetWidth/offsetHeight for when box sizing is unreliable. - // In those cases, the computed value can be trusted to be border-box. - if ( ( !support.boxSizingReliable() && isBorderBox || - - // Support: IE 10 - 11+, Edge 15 - 18+ - // IE/Edge misreport `getComputedStyle` of table rows with width/height - // set in CSS while `offset*` properties report correct values. - // Interestingly, in some cases IE 9 doesn't suffer from this issue. 
- !support.reliableTrDimensions() && nodeName( elem, "tr" ) || - - // Fall back to offsetWidth/offsetHeight when value is "auto" - // This happens for inline elements with no explicit setting (gh-3571) - val === "auto" || - - // Support: Android <=4.1 - 4.3 only - // Also use offsetWidth/offsetHeight for misreported inline dimensions (gh-3602) - !parseFloat( val ) && jQuery.css( elem, "display", false, styles ) === "inline" ) && - - // Make sure the element is visible & connected - elem.getClientRects().length ) { - - isBorderBox = jQuery.css( elem, "boxSizing", false, styles ) === "border-box"; - - // Where available, offsetWidth/offsetHeight approximate border box dimensions. - // Where not available (e.g., SVG), assume unreliable box-sizing and interpret the - // retrieved value as a content box dimension. - valueIsBorderBox = offsetProp in elem; - if ( valueIsBorderBox ) { - val = elem[ offsetProp ]; - } - } - - // Normalize "" and auto - val = parseFloat( val ) || 0; - - // Adjust for the element's box model - return ( val + - boxModelAdjustment( - elem, - dimension, - extra || ( isBorderBox ? "border" : "content" ), - valueIsBorderBox, - styles, - - // Provide the current computed size to request scroll gutter calculation (gh-3589) - val - ) - ) + "px"; -} - -jQuery.extend( { - - // Add in style property hooks for overriding the default - // behavior of getting and setting a style property - cssHooks: { - opacity: { - get: function( elem, computed ) { - if ( computed ) { - - // We should always get a number back from opacity - var ret = curCSS( elem, "opacity" ); - return ret === "" ? "1" : ret; - } - } - } - }, - - // Don't automatically add "px" to these possibly-unitless properties - cssNumber: { - "animationIterationCount": true, - "columnCount": true, - "fillOpacity": true, - "flexGrow": true, - "flexShrink": true, - "fontWeight": true, - "gridArea": true, - "gridColumn": true, - "gridColumnEnd": true, - "gridColumnStart": true, - "gridRow": true, - "gridRowEnd": true, - "gridRowStart": true, - "lineHeight": true, - "opacity": true, - "order": true, - "orphans": true, - "widows": true, - "zIndex": true, - "zoom": true - }, - - // Add in properties whose names you wish to fix before - // setting or getting the value - cssProps: {}, - - // Get and set the style property on a DOM Node - style: function( elem, name, value, extra ) { - - // Don't set styles on text and comment nodes - if ( !elem || elem.nodeType === 3 || elem.nodeType === 8 || !elem.style ) { - return; - } - - // Make sure that we're working with the right name - var ret, type, hooks, - origName = camelCase( name ), - isCustomProp = rcustomProp.test( name ), - style = elem.style; - - // Make sure that we're working with the right name. We don't - // want to query the value if it is a CSS custom property - // since they are user-defined. 
- if ( !isCustomProp ) { - name = finalPropName( origName ); - } - - // Gets hook for the prefixed version, then unprefixed version - hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; - - // Check if we're setting a value - if ( value !== undefined ) { - type = typeof value; - - // Convert "+=" or "-=" to relative numbers (#7345) - if ( type === "string" && ( ret = rcssNum.exec( value ) ) && ret[ 1 ] ) { - value = adjustCSS( elem, name, ret ); - - // Fixes bug #9237 - type = "number"; - } - - // Make sure that null and NaN values aren't set (#7116) - if ( value == null || value !== value ) { - return; - } - - // If a number was passed in, add the unit (except for certain CSS properties) - // The isCustomProp check can be removed in jQuery 4.0 when we only auto-append - // "px" to a few hardcoded values. - if ( type === "number" && !isCustomProp ) { - value += ret && ret[ 3 ] || ( jQuery.cssNumber[ origName ] ? "" : "px" ); - } - - // background-* props affect original clone's values - if ( !support.clearCloneStyle && value === "" && name.indexOf( "background" ) === 0 ) { - style[ name ] = "inherit"; - } - - // If a hook was provided, use that value, otherwise just set the specified value - if ( !hooks || !( "set" in hooks ) || - ( value = hooks.set( elem, value, extra ) ) !== undefined ) { - - if ( isCustomProp ) { - style.setProperty( name, value ); - } else { - style[ name ] = value; - } - } - - } else { - - // If a hook was provided get the non-computed value from there - if ( hooks && "get" in hooks && - ( ret = hooks.get( elem, false, extra ) ) !== undefined ) { - - return ret; - } - - // Otherwise just get the value from the style object - return style[ name ]; - } - }, - - css: function( elem, name, extra, styles ) { - var val, num, hooks, - origName = camelCase( name ), - isCustomProp = rcustomProp.test( name ); - - // Make sure that we're working with the right name. We don't - // want to modify the value if it is a CSS custom property - // since they are user-defined. - if ( !isCustomProp ) { - name = finalPropName( origName ); - } - - // Try prefixed name followed by the unprefixed name - hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; - - // If a hook was provided get the computed value from there - if ( hooks && "get" in hooks ) { - val = hooks.get( elem, true, extra ); - } - - // Otherwise, if a way to get the computed value exists, use that - if ( val === undefined ) { - val = curCSS( elem, name, styles ); - } - - // Convert "normal" to computed value - if ( val === "normal" && name in cssNormalTransform ) { - val = cssNormalTransform[ name ]; - } - - // Make numeric if forced or a qualifier was provided and val looks numeric - if ( extra === "" || extra ) { - num = parseFloat( val ); - return extra === true || isFinite( num ) ? num || 0 : val; - } - - return val; - } -} ); - -jQuery.each( [ "height", "width" ], function( _i, dimension ) { - jQuery.cssHooks[ dimension ] = { - get: function( elem, computed, extra ) { - if ( computed ) { - - // Certain elements can have dimension info if we invisibly show them - // but it must have a current display style that would benefit - return rdisplayswap.test( jQuery.css( elem, "display" ) ) && - - // Support: Safari 8+ - // Table columns in Safari have non-zero offsetWidth & zero - // getBoundingClientRect().width unless display is changed. - // Support: IE <=11 only - // Running getBoundingClientRect on a disconnected node - // in IE throws an error. 
- ( !elem.getClientRects().length || !elem.getBoundingClientRect().width ) ? - swap( elem, cssShow, function() { - return getWidthOrHeight( elem, dimension, extra ); - } ) : - getWidthOrHeight( elem, dimension, extra ); - } - }, - - set: function( elem, value, extra ) { - var matches, - styles = getStyles( elem ), - - // Only read styles.position if the test has a chance to fail - // to avoid forcing a reflow. - scrollboxSizeBuggy = !support.scrollboxSize() && - styles.position === "absolute", - - // To avoid forcing a reflow, only fetch boxSizing if we need it (gh-3991) - boxSizingNeeded = scrollboxSizeBuggy || extra, - isBorderBox = boxSizingNeeded && - jQuery.css( elem, "boxSizing", false, styles ) === "border-box", - subtract = extra ? - boxModelAdjustment( - elem, - dimension, - extra, - isBorderBox, - styles - ) : - 0; - - // Account for unreliable border-box dimensions by comparing offset* to computed and - // faking a content-box to get border and padding (gh-3699) - if ( isBorderBox && scrollboxSizeBuggy ) { - subtract -= Math.ceil( - elem[ "offset" + dimension[ 0 ].toUpperCase() + dimension.slice( 1 ) ] - - parseFloat( styles[ dimension ] ) - - boxModelAdjustment( elem, dimension, "border", false, styles ) - - 0.5 - ); - } - - // Convert to pixels if value adjustment is needed - if ( subtract && ( matches = rcssNum.exec( value ) ) && - ( matches[ 3 ] || "px" ) !== "px" ) { - - elem.style[ dimension ] = value; - value = jQuery.css( elem, dimension ); - } - - return setPositiveNumber( elem, value, subtract ); - } - }; -} ); - -jQuery.cssHooks.marginLeft = addGetHookIf( support.reliableMarginLeft, - function( elem, computed ) { - if ( computed ) { - return ( parseFloat( curCSS( elem, "marginLeft" ) ) || - elem.getBoundingClientRect().left - - swap( elem, { marginLeft: 0 }, function() { - return elem.getBoundingClientRect().left; - } ) - ) + "px"; - } - } -); - -// These hooks are used by animate to expand properties -jQuery.each( { - margin: "", - padding: "", - border: "Width" -}, function( prefix, suffix ) { - jQuery.cssHooks[ prefix + suffix ] = { - expand: function( value ) { - var i = 0, - expanded = {}, - - // Assumes a single number if not a string - parts = typeof value === "string" ? value.split( " " ) : [ value ]; - - for ( ; i < 4; i++ ) { - expanded[ prefix + cssExpand[ i ] + suffix ] = - parts[ i ] || parts[ i - 2 ] || parts[ 0 ]; - } - - return expanded; - } - }; - - if ( prefix !== "margin" ) { - jQuery.cssHooks[ prefix + suffix ].set = setPositiveNumber; - } -} ); - -jQuery.fn.extend( { - css: function( name, value ) { - return access( this, function( elem, name, value ) { - var styles, len, - map = {}, - i = 0; - - if ( Array.isArray( name ) ) { - styles = getStyles( elem ); - len = name.length; - - for ( ; i < len; i++ ) { - map[ name[ i ] ] = jQuery.css( elem, name[ i ], false, styles ); - } - - return map; - } - - return value !== undefined ? - jQuery.style( elem, name, value ) : - jQuery.css( elem, name ); - }, name, value, arguments.length > 1 ); - } -} ); - - -function Tween( elem, options, prop, end, easing ) { - return new Tween.prototype.init( elem, options, prop, end, easing ); -} -jQuery.Tween = Tween; - -Tween.prototype = { - constructor: Tween, - init: function( elem, options, prop, end, easing, unit ) { - this.elem = elem; - this.prop = prop; - this.easing = easing || jQuery.easing._default; - this.options = options; - this.start = this.now = this.cur(); - this.end = end; - this.unit = unit || ( jQuery.cssNumber[ prop ] ? 
"" : "px" ); - }, - cur: function() { - var hooks = Tween.propHooks[ this.prop ]; - - return hooks && hooks.get ? - hooks.get( this ) : - Tween.propHooks._default.get( this ); - }, - run: function( percent ) { - var eased, - hooks = Tween.propHooks[ this.prop ]; - - if ( this.options.duration ) { - this.pos = eased = jQuery.easing[ this.easing ]( - percent, this.options.duration * percent, 0, 1, this.options.duration - ); - } else { - this.pos = eased = percent; - } - this.now = ( this.end - this.start ) * eased + this.start; - - if ( this.options.step ) { - this.options.step.call( this.elem, this.now, this ); - } - - if ( hooks && hooks.set ) { - hooks.set( this ); - } else { - Tween.propHooks._default.set( this ); - } - return this; - } -}; - -Tween.prototype.init.prototype = Tween.prototype; - -Tween.propHooks = { - _default: { - get: function( tween ) { - var result; - - // Use a property on the element directly when it is not a DOM element, - // or when there is no matching style property that exists. - if ( tween.elem.nodeType !== 1 || - tween.elem[ tween.prop ] != null && tween.elem.style[ tween.prop ] == null ) { - return tween.elem[ tween.prop ]; - } - - // Passing an empty string as a 3rd parameter to .css will automatically - // attempt a parseFloat and fallback to a string if the parse fails. - // Simple values such as "10px" are parsed to Float; - // complex values such as "rotate(1rad)" are returned as-is. - result = jQuery.css( tween.elem, tween.prop, "" ); - - // Empty strings, null, undefined and "auto" are converted to 0. - return !result || result === "auto" ? 0 : result; - }, - set: function( tween ) { - - // Use step hook for back compat. - // Use cssHook if its there. - // Use .style if available and use plain properties where available. - if ( jQuery.fx.step[ tween.prop ] ) { - jQuery.fx.step[ tween.prop ]( tween ); - } else if ( tween.elem.nodeType === 1 && ( - jQuery.cssHooks[ tween.prop ] || - tween.elem.style[ finalPropName( tween.prop ) ] != null ) ) { - jQuery.style( tween.elem, tween.prop, tween.now + tween.unit ); - } else { - tween.elem[ tween.prop ] = tween.now; - } - } - } -}; - -// Support: IE <=9 only -// Panic based approach to setting things on disconnected nodes -Tween.propHooks.scrollTop = Tween.propHooks.scrollLeft = { - set: function( tween ) { - if ( tween.elem.nodeType && tween.elem.parentNode ) { - tween.elem[ tween.prop ] = tween.now; - } - } -}; - -jQuery.easing = { - linear: function( p ) { - return p; - }, - swing: function( p ) { - return 0.5 - Math.cos( p * Math.PI ) / 2; - }, - _default: "swing" -}; - -jQuery.fx = Tween.prototype.init; - -// Back compat <1.8 extension point -jQuery.fx.step = {}; - - - - -var - fxNow, inProgress, - rfxtypes = /^(?:toggle|show|hide)$/, - rrun = /queueHooks$/; - -function schedule() { - if ( inProgress ) { - if ( document.hidden === false && window.requestAnimationFrame ) { - window.requestAnimationFrame( schedule ); - } else { - window.setTimeout( schedule, jQuery.fx.interval ); - } - - jQuery.fx.tick(); - } -} - -// Animations created synchronously will run synchronously -function createFxNow() { - window.setTimeout( function() { - fxNow = undefined; - } ); - return ( fxNow = Date.now() ); -} - -// Generate parameters to create a standard animation -function genFx( type, includeWidth ) { - var which, - i = 0, - attrs = { height: type }; - - // If we include width, step value is 1 to do all cssExpand values, - // otherwise step value is 2 to skip over Left and Right - includeWidth = includeWidth ? 
1 : 0; - for ( ; i < 4; i += 2 - includeWidth ) { - which = cssExpand[ i ]; - attrs[ "margin" + which ] = attrs[ "padding" + which ] = type; - } - - if ( includeWidth ) { - attrs.opacity = attrs.width = type; - } - - return attrs; -} - -function createTween( value, prop, animation ) { - var tween, - collection = ( Animation.tweeners[ prop ] || [] ).concat( Animation.tweeners[ "*" ] ), - index = 0, - length = collection.length; - for ( ; index < length; index++ ) { - if ( ( tween = collection[ index ].call( animation, prop, value ) ) ) { - - // We're done with this property - return tween; - } - } -} - -function defaultPrefilter( elem, props, opts ) { - var prop, value, toggle, hooks, oldfire, propTween, restoreDisplay, display, - isBox = "width" in props || "height" in props, - anim = this, - orig = {}, - style = elem.style, - hidden = elem.nodeType && isHiddenWithinTree( elem ), - dataShow = dataPriv.get( elem, "fxshow" ); - - // Queue-skipping animations hijack the fx hooks - if ( !opts.queue ) { - hooks = jQuery._queueHooks( elem, "fx" ); - if ( hooks.unqueued == null ) { - hooks.unqueued = 0; - oldfire = hooks.empty.fire; - hooks.empty.fire = function() { - if ( !hooks.unqueued ) { - oldfire(); - } - }; - } - hooks.unqueued++; - - anim.always( function() { - - // Ensure the complete handler is called before this completes - anim.always( function() { - hooks.unqueued--; - if ( !jQuery.queue( elem, "fx" ).length ) { - hooks.empty.fire(); - } - } ); - } ); - } - - // Detect show/hide animations - for ( prop in props ) { - value = props[ prop ]; - if ( rfxtypes.test( value ) ) { - delete props[ prop ]; - toggle = toggle || value === "toggle"; - if ( value === ( hidden ? "hide" : "show" ) ) { - - // Pretend to be hidden if this is a "show" and - // there is still data from a stopped show/hide - if ( value === "show" && dataShow && dataShow[ prop ] !== undefined ) { - hidden = true; - - // Ignore all other no-op show/hide data - } else { - continue; - } - } - orig[ prop ] = dataShow && dataShow[ prop ] || jQuery.style( elem, prop ); - } - } - - // Bail out if this is a no-op like .hide().hide() - propTween = !jQuery.isEmptyObject( props ); - if ( !propTween && jQuery.isEmptyObject( orig ) ) { - return; - } - - // Restrict "overflow" and "display" styles during box animations - if ( isBox && elem.nodeType === 1 ) { - - // Support: IE <=9 - 11, Edge 12 - 15 - // Record all 3 overflow attributes because IE does not infer the shorthand - // from identically-valued overflowX and overflowY and Edge just mirrors - // the overflowX value there. 
- opts.overflow = [ style.overflow, style.overflowX, style.overflowY ]; - - // Identify a display type, preferring old show/hide data over the CSS cascade - restoreDisplay = dataShow && dataShow.display; - if ( restoreDisplay == null ) { - restoreDisplay = dataPriv.get( elem, "display" ); - } - display = jQuery.css( elem, "display" ); - if ( display === "none" ) { - if ( restoreDisplay ) { - display = restoreDisplay; - } else { - - // Get nonempty value(s) by temporarily forcing visibility - showHide( [ elem ], true ); - restoreDisplay = elem.style.display || restoreDisplay; - display = jQuery.css( elem, "display" ); - showHide( [ elem ] ); - } - } - - // Animate inline elements as inline-block - if ( display === "inline" || display === "inline-block" && restoreDisplay != null ) { - if ( jQuery.css( elem, "float" ) === "none" ) { - - // Restore the original display value at the end of pure show/hide animations - if ( !propTween ) { - anim.done( function() { - style.display = restoreDisplay; - } ); - if ( restoreDisplay == null ) { - display = style.display; - restoreDisplay = display === "none" ? "" : display; - } - } - style.display = "inline-block"; - } - } - } - - if ( opts.overflow ) { - style.overflow = "hidden"; - anim.always( function() { - style.overflow = opts.overflow[ 0 ]; - style.overflowX = opts.overflow[ 1 ]; - style.overflowY = opts.overflow[ 2 ]; - } ); - } - - // Implement show/hide animations - propTween = false; - for ( prop in orig ) { - - // General show/hide setup for this element animation - if ( !propTween ) { - if ( dataShow ) { - if ( "hidden" in dataShow ) { - hidden = dataShow.hidden; - } - } else { - dataShow = dataPriv.access( elem, "fxshow", { display: restoreDisplay } ); - } - - // Store hidden/visible for toggle so `.stop().toggle()` "reverses" - if ( toggle ) { - dataShow.hidden = !hidden; - } - - // Show elements before animating them - if ( hidden ) { - showHide( [ elem ], true ); - } - - /* eslint-disable no-loop-func */ - - anim.done( function() { - - /* eslint-enable no-loop-func */ - - // The final step of a "hide" animation is actually hiding the element - if ( !hidden ) { - showHide( [ elem ] ); - } - dataPriv.remove( elem, "fxshow" ); - for ( prop in orig ) { - jQuery.style( elem, prop, orig[ prop ] ); - } - } ); - } - - // Per-property setup - propTween = createTween( hidden ? dataShow[ prop ] : 0, prop, anim ); - if ( !( prop in dataShow ) ) { - dataShow[ prop ] = propTween.start; - if ( hidden ) { - propTween.end = propTween.start; - propTween.start = 0; - } - } - } -} - -function propFilter( props, specialEasing ) { - var index, name, easing, value, hooks; - - // camelCase, specialEasing and expand cssHook pass - for ( index in props ) { - name = camelCase( index ); - easing = specialEasing[ name ]; - value = props[ index ]; - if ( Array.isArray( value ) ) { - easing = value[ 1 ]; - value = props[ index ] = value[ 0 ]; - } - - if ( index !== name ) { - props[ name ] = value; - delete props[ index ]; - } - - hooks = jQuery.cssHooks[ name ]; - if ( hooks && "expand" in hooks ) { - value = hooks.expand( value ); - delete props[ name ]; - - // Not quite $.extend, this won't overwrite existing keys. 
- // Reusing 'index' because we have the correct "name" - for ( index in value ) { - if ( !( index in props ) ) { - props[ index ] = value[ index ]; - specialEasing[ index ] = easing; - } - } - } else { - specialEasing[ name ] = easing; - } - } -} - -function Animation( elem, properties, options ) { - var result, - stopped, - index = 0, - length = Animation.prefilters.length, - deferred = jQuery.Deferred().always( function() { - - // Don't match elem in the :animated selector - delete tick.elem; - } ), - tick = function() { - if ( stopped ) { - return false; - } - var currentTime = fxNow || createFxNow(), - remaining = Math.max( 0, animation.startTime + animation.duration - currentTime ), - - // Support: Android 2.3 only - // Archaic crash bug won't allow us to use `1 - ( 0.5 || 0 )` (#12497) - temp = remaining / animation.duration || 0, - percent = 1 - temp, - index = 0, - length = animation.tweens.length; - - for ( ; index < length; index++ ) { - animation.tweens[ index ].run( percent ); - } - - deferred.notifyWith( elem, [ animation, percent, remaining ] ); - - // If there's more to do, yield - if ( percent < 1 && length ) { - return remaining; - } - - // If this was an empty animation, synthesize a final progress notification - if ( !length ) { - deferred.notifyWith( elem, [ animation, 1, 0 ] ); - } - - // Resolve the animation and report its conclusion - deferred.resolveWith( elem, [ animation ] ); - return false; - }, - animation = deferred.promise( { - elem: elem, - props: jQuery.extend( {}, properties ), - opts: jQuery.extend( true, { - specialEasing: {}, - easing: jQuery.easing._default - }, options ), - originalProperties: properties, - originalOptions: options, - startTime: fxNow || createFxNow(), - duration: options.duration, - tweens: [], - createTween: function( prop, end ) { - var tween = jQuery.Tween( elem, animation.opts, prop, end, - animation.opts.specialEasing[ prop ] || animation.opts.easing ); - animation.tweens.push( tween ); - return tween; - }, - stop: function( gotoEnd ) { - var index = 0, - - // If we are going to the end, we want to run all the tweens - // otherwise we skip this part - length = gotoEnd ? 
animation.tweens.length : 0; - if ( stopped ) { - return this; - } - stopped = true; - for ( ; index < length; index++ ) { - animation.tweens[ index ].run( 1 ); - } - - // Resolve when we played the last frame; otherwise, reject - if ( gotoEnd ) { - deferred.notifyWith( elem, [ animation, 1, 0 ] ); - deferred.resolveWith( elem, [ animation, gotoEnd ] ); - } else { - deferred.rejectWith( elem, [ animation, gotoEnd ] ); - } - return this; - } - } ), - props = animation.props; - - propFilter( props, animation.opts.specialEasing ); - - for ( ; index < length; index++ ) { - result = Animation.prefilters[ index ].call( animation, elem, props, animation.opts ); - if ( result ) { - if ( isFunction( result.stop ) ) { - jQuery._queueHooks( animation.elem, animation.opts.queue ).stop = - result.stop.bind( result ); - } - return result; - } - } - - jQuery.map( props, createTween, animation ); - - if ( isFunction( animation.opts.start ) ) { - animation.opts.start.call( elem, animation ); - } - - // Attach callbacks from options - animation - .progress( animation.opts.progress ) - .done( animation.opts.done, animation.opts.complete ) - .fail( animation.opts.fail ) - .always( animation.opts.always ); - - jQuery.fx.timer( - jQuery.extend( tick, { - elem: elem, - anim: animation, - queue: animation.opts.queue - } ) - ); - - return animation; -} - -jQuery.Animation = jQuery.extend( Animation, { - - tweeners: { - "*": [ function( prop, value ) { - var tween = this.createTween( prop, value ); - adjustCSS( tween.elem, prop, rcssNum.exec( value ), tween ); - return tween; - } ] - }, - - tweener: function( props, callback ) { - if ( isFunction( props ) ) { - callback = props; - props = [ "*" ]; - } else { - props = props.match( rnothtmlwhite ); - } - - var prop, - index = 0, - length = props.length; - - for ( ; index < length; index++ ) { - prop = props[ index ]; - Animation.tweeners[ prop ] = Animation.tweeners[ prop ] || []; - Animation.tweeners[ prop ].unshift( callback ); - } - }, - - prefilters: [ defaultPrefilter ], - - prefilter: function( callback, prepend ) { - if ( prepend ) { - Animation.prefilters.unshift( callback ); - } else { - Animation.prefilters.push( callback ); - } - } -} ); - -jQuery.speed = function( speed, easing, fn ) { - var opt = speed && typeof speed === "object" ? 
jQuery.extend( {}, speed ) : { - complete: fn || !fn && easing || - isFunction( speed ) && speed, - duration: speed, - easing: fn && easing || easing && !isFunction( easing ) && easing - }; - - // Go to the end state if fx are off - if ( jQuery.fx.off ) { - opt.duration = 0; - - } else { - if ( typeof opt.duration !== "number" ) { - if ( opt.duration in jQuery.fx.speeds ) { - opt.duration = jQuery.fx.speeds[ opt.duration ]; - - } else { - opt.duration = jQuery.fx.speeds._default; - } - } - } - - // Normalize opt.queue - true/undefined/null -> "fx" - if ( opt.queue == null || opt.queue === true ) { - opt.queue = "fx"; - } - - // Queueing - opt.old = opt.complete; - - opt.complete = function() { - if ( isFunction( opt.old ) ) { - opt.old.call( this ); - } - - if ( opt.queue ) { - jQuery.dequeue( this, opt.queue ); - } - }; - - return opt; -}; - -jQuery.fn.extend( { - fadeTo: function( speed, to, easing, callback ) { - - // Show any hidden elements after setting opacity to 0 - return this.filter( isHiddenWithinTree ).css( "opacity", 0 ).show() - - // Animate to the value specified - .end().animate( { opacity: to }, speed, easing, callback ); - }, - animate: function( prop, speed, easing, callback ) { - var empty = jQuery.isEmptyObject( prop ), - optall = jQuery.speed( speed, easing, callback ), - doAnimation = function() { - - // Operate on a copy of prop so per-property easing won't be lost - var anim = Animation( this, jQuery.extend( {}, prop ), optall ); - - // Empty animations, or finishing resolves immediately - if ( empty || dataPriv.get( this, "finish" ) ) { - anim.stop( true ); - } - }; - doAnimation.finish = doAnimation; - - return empty || optall.queue === false ? - this.each( doAnimation ) : - this.queue( optall.queue, doAnimation ); - }, - stop: function( type, clearQueue, gotoEnd ) { - var stopQueue = function( hooks ) { - var stop = hooks.stop; - delete hooks.stop; - stop( gotoEnd ); - }; - - if ( typeof type !== "string" ) { - gotoEnd = clearQueue; - clearQueue = type; - type = undefined; - } - if ( clearQueue ) { - this.queue( type || "fx", [] ); - } - - return this.each( function() { - var dequeue = true, - index = type != null && type + "queueHooks", - timers = jQuery.timers, - data = dataPriv.get( this ); - - if ( index ) { - if ( data[ index ] && data[ index ].stop ) { - stopQueue( data[ index ] ); - } - } else { - for ( index in data ) { - if ( data[ index ] && data[ index ].stop && rrun.test( index ) ) { - stopQueue( data[ index ] ); - } - } - } - - for ( index = timers.length; index--; ) { - if ( timers[ index ].elem === this && - ( type == null || timers[ index ].queue === type ) ) { - - timers[ index ].anim.stop( gotoEnd ); - dequeue = false; - timers.splice( index, 1 ); - } - } - - // Start the next in the queue if the last step wasn't forced. - // Timers currently will call their complete callbacks, which - // will dequeue but only if they were gotoEnd. - if ( dequeue || !gotoEnd ) { - jQuery.dequeue( this, type ); - } - } ); - }, - finish: function( type ) { - if ( type !== false ) { - type = type || "fx"; - } - return this.each( function() { - var index, - data = dataPriv.get( this ), - queue = data[ type + "queue" ], - hooks = data[ type + "queueHooks" ], - timers = jQuery.timers, - length = queue ? 
queue.length : 0; - - // Enable finishing flag on private data - data.finish = true; - - // Empty the queue first - jQuery.queue( this, type, [] ); - - if ( hooks && hooks.stop ) { - hooks.stop.call( this, true ); - } - - // Look for any active animations, and finish them - for ( index = timers.length; index--; ) { - if ( timers[ index ].elem === this && timers[ index ].queue === type ) { - timers[ index ].anim.stop( true ); - timers.splice( index, 1 ); - } - } - - // Look for any animations in the old queue and finish them - for ( index = 0; index < length; index++ ) { - if ( queue[ index ] && queue[ index ].finish ) { - queue[ index ].finish.call( this ); - } - } - - // Turn off finishing flag - delete data.finish; - } ); - } -} ); - -jQuery.each( [ "toggle", "show", "hide" ], function( _i, name ) { - var cssFn = jQuery.fn[ name ]; - jQuery.fn[ name ] = function( speed, easing, callback ) { - return speed == null || typeof speed === "boolean" ? - cssFn.apply( this, arguments ) : - this.animate( genFx( name, true ), speed, easing, callback ); - }; -} ); - -// Generate shortcuts for custom animations -jQuery.each( { - slideDown: genFx( "show" ), - slideUp: genFx( "hide" ), - slideToggle: genFx( "toggle" ), - fadeIn: { opacity: "show" }, - fadeOut: { opacity: "hide" }, - fadeToggle: { opacity: "toggle" } -}, function( name, props ) { - jQuery.fn[ name ] = function( speed, easing, callback ) { - return this.animate( props, speed, easing, callback ); - }; -} ); - -jQuery.timers = []; -jQuery.fx.tick = function() { - var timer, - i = 0, - timers = jQuery.timers; - - fxNow = Date.now(); - - for ( ; i < timers.length; i++ ) { - timer = timers[ i ]; - - // Run the timer and safely remove it when done (allowing for external removal) - if ( !timer() && timers[ i ] === timer ) { - timers.splice( i--, 1 ); - } - } - - if ( !timers.length ) { - jQuery.fx.stop(); - } - fxNow = undefined; -}; - -jQuery.fx.timer = function( timer ) { - jQuery.timers.push( timer ); - jQuery.fx.start(); -}; - -jQuery.fx.interval = 13; -jQuery.fx.start = function() { - if ( inProgress ) { - return; - } - - inProgress = true; - schedule(); -}; - -jQuery.fx.stop = function() { - inProgress = null; -}; - -jQuery.fx.speeds = { - slow: 600, - fast: 200, - - // Default speed - _default: 400 -}; - - -// Based off of the plugin by Clint Helfers, with permission. -// https://web.archive.org/web/20100324014747/http://blindsignals.com/index.php/2009/07/jquery-delay/ -jQuery.fn.delay = function( time, type ) { - time = jQuery.fx ? 
jQuery.fx.speeds[ time ] || time : time; - type = type || "fx"; - - return this.queue( type, function( next, hooks ) { - var timeout = window.setTimeout( next, time ); - hooks.stop = function() { - window.clearTimeout( timeout ); - }; - } ); -}; - - -( function() { - var input = document.createElement( "input" ), - select = document.createElement( "select" ), - opt = select.appendChild( document.createElement( "option" ) ); - - input.type = "checkbox"; - - // Support: Android <=4.3 only - // Default value for a checkbox should be "on" - support.checkOn = input.value !== ""; - - // Support: IE <=11 only - // Must access selectedIndex to make default options select - support.optSelected = opt.selected; - - // Support: IE <=11 only - // An input loses its value after becoming a radio - input = document.createElement( "input" ); - input.value = "t"; - input.type = "radio"; - support.radioValue = input.value === "t"; -} )(); - - -var boolHook, - attrHandle = jQuery.expr.attrHandle; - -jQuery.fn.extend( { - attr: function( name, value ) { - return access( this, jQuery.attr, name, value, arguments.length > 1 ); - }, - - removeAttr: function( name ) { - return this.each( function() { - jQuery.removeAttr( this, name ); - } ); - } -} ); - -jQuery.extend( { - attr: function( elem, name, value ) { - var ret, hooks, - nType = elem.nodeType; - - // Don't get/set attributes on text, comment and attribute nodes - if ( nType === 3 || nType === 8 || nType === 2 ) { - return; - } - - // Fallback to prop when attributes are not supported - if ( typeof elem.getAttribute === "undefined" ) { - return jQuery.prop( elem, name, value ); - } - - // Attribute hooks are determined by the lowercase version - // Grab necessary hook if one is defined - if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) { - hooks = jQuery.attrHooks[ name.toLowerCase() ] || - ( jQuery.expr.match.bool.test( name ) ? boolHook : undefined ); - } - - if ( value !== undefined ) { - if ( value === null ) { - jQuery.removeAttr( elem, name ); - return; - } - - if ( hooks && "set" in hooks && - ( ret = hooks.set( elem, value, name ) ) !== undefined ) { - return ret; - } - - elem.setAttribute( name, value + "" ); - return value; - } - - if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) { - return ret; - } - - ret = jQuery.find.attr( elem, name ); - - // Non-existent attributes return null, we normalize to undefined - return ret == null ? 
undefined : ret; - }, - - attrHooks: { - type: { - set: function( elem, value ) { - if ( !support.radioValue && value === "radio" && - nodeName( elem, "input" ) ) { - var val = elem.value; - elem.setAttribute( "type", value ); - if ( val ) { - elem.value = val; - } - return value; - } - } - } - }, - - removeAttr: function( elem, value ) { - var name, - i = 0, - - // Attribute names can contain non-HTML whitespace characters - // https://html.spec.whatwg.org/multipage/syntax.html#attributes-2 - attrNames = value && value.match( rnothtmlwhite ); - - if ( attrNames && elem.nodeType === 1 ) { - while ( ( name = attrNames[ i++ ] ) ) { - elem.removeAttribute( name ); - } - } - } -} ); - -// Hooks for boolean attributes -boolHook = { - set: function( elem, value, name ) { - if ( value === false ) { - - // Remove boolean attributes when set to false - jQuery.removeAttr( elem, name ); - } else { - elem.setAttribute( name, name ); - } - return name; - } -}; - -jQuery.each( jQuery.expr.match.bool.source.match( /\w+/g ), function( _i, name ) { - var getter = attrHandle[ name ] || jQuery.find.attr; - - attrHandle[ name ] = function( elem, name, isXML ) { - var ret, handle, - lowercaseName = name.toLowerCase(); - - if ( !isXML ) { - - // Avoid an infinite loop by temporarily removing this function from the getter - handle = attrHandle[ lowercaseName ]; - attrHandle[ lowercaseName ] = ret; - ret = getter( elem, name, isXML ) != null ? - lowercaseName : - null; - attrHandle[ lowercaseName ] = handle; - } - return ret; - }; -} ); - - - - -var rfocusable = /^(?:input|select|textarea|button)$/i, - rclickable = /^(?:a|area)$/i; - -jQuery.fn.extend( { - prop: function( name, value ) { - return access( this, jQuery.prop, name, value, arguments.length > 1 ); - }, - - removeProp: function( name ) { - return this.each( function() { - delete this[ jQuery.propFix[ name ] || name ]; - } ); - } -} ); - -jQuery.extend( { - prop: function( elem, name, value ) { - var ret, hooks, - nType = elem.nodeType; - - // Don't get/set properties on text, comment and attribute nodes - if ( nType === 3 || nType === 8 || nType === 2 ) { - return; - } - - if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) { - - // Fix name and attach hooks - name = jQuery.propFix[ name ] || name; - hooks = jQuery.propHooks[ name ]; - } - - if ( value !== undefined ) { - if ( hooks && "set" in hooks && - ( ret = hooks.set( elem, value, name ) ) !== undefined ) { - return ret; - } - - return ( elem[ name ] = value ); - } - - if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) { - return ret; - } - - return elem[ name ]; - }, - - propHooks: { - tabIndex: { - get: function( elem ) { - - // Support: IE <=9 - 11 only - // elem.tabIndex doesn't always return the - // correct value when it hasn't been explicitly set - // https://web.archive.org/web/20141116233347/http://fluidproject.org/blog/2008/01/09/getting-setting-and-removing-tabindex-values-with-javascript/ - // Use proper attribute retrieval(#12072) - var tabindex = jQuery.find.attr( elem, "tabindex" ); - - if ( tabindex ) { - return parseInt( tabindex, 10 ); - } - - if ( - rfocusable.test( elem.nodeName ) || - rclickable.test( elem.nodeName ) && - elem.href - ) { - return 0; - } - - return -1; - } - } - }, - - propFix: { - "for": "htmlFor", - "class": "className" - } -} ); - -// Support: IE <=11 only -// Accessing the selectedIndex property -// forces the browser to respect setting selected -// on the option -// The getter ensures a default option is selected -// when in an 
optgroup -// eslint rule "no-unused-expressions" is disabled for this code -// since it considers such accessions noop -if ( !support.optSelected ) { - jQuery.propHooks.selected = { - get: function( elem ) { - - /* eslint no-unused-expressions: "off" */ - - var parent = elem.parentNode; - if ( parent && parent.parentNode ) { - parent.parentNode.selectedIndex; - } - return null; - }, - set: function( elem ) { - - /* eslint no-unused-expressions: "off" */ - - var parent = elem.parentNode; - if ( parent ) { - parent.selectedIndex; - - if ( parent.parentNode ) { - parent.parentNode.selectedIndex; - } - } - } - }; -} - -jQuery.each( [ - "tabIndex", - "readOnly", - "maxLength", - "cellSpacing", - "cellPadding", - "rowSpan", - "colSpan", - "useMap", - "frameBorder", - "contentEditable" -], function() { - jQuery.propFix[ this.toLowerCase() ] = this; -} ); - - - - - // Strip and collapse whitespace according to HTML spec - // https://infra.spec.whatwg.org/#strip-and-collapse-ascii-whitespace - function stripAndCollapse( value ) { - var tokens = value.match( rnothtmlwhite ) || []; - return tokens.join( " " ); - } - - -function getClass( elem ) { - return elem.getAttribute && elem.getAttribute( "class" ) || ""; -} - -function classesToArray( value ) { - if ( Array.isArray( value ) ) { - return value; - } - if ( typeof value === "string" ) { - return value.match( rnothtmlwhite ) || []; - } - return []; -} - -jQuery.fn.extend( { - addClass: function( value ) { - var classes, elem, cur, curValue, clazz, j, finalValue, - i = 0; - - if ( isFunction( value ) ) { - return this.each( function( j ) { - jQuery( this ).addClass( value.call( this, j, getClass( this ) ) ); - } ); - } - - classes = classesToArray( value ); - - if ( classes.length ) { - while ( ( elem = this[ i++ ] ) ) { - curValue = getClass( elem ); - cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " ); - - if ( cur ) { - j = 0; - while ( ( clazz = classes[ j++ ] ) ) { - if ( cur.indexOf( " " + clazz + " " ) < 0 ) { - cur += clazz + " "; - } - } - - // Only assign if different to avoid unneeded rendering. - finalValue = stripAndCollapse( cur ); - if ( curValue !== finalValue ) { - elem.setAttribute( "class", finalValue ); - } - } - } - } - - return this; - }, - - removeClass: function( value ) { - var classes, elem, cur, curValue, clazz, j, finalValue, - i = 0; - - if ( isFunction( value ) ) { - return this.each( function( j ) { - jQuery( this ).removeClass( value.call( this, j, getClass( this ) ) ); - } ); - } - - if ( !arguments.length ) { - return this.attr( "class", "" ); - } - - classes = classesToArray( value ); - - if ( classes.length ) { - while ( ( elem = this[ i++ ] ) ) { - curValue = getClass( elem ); - - // This expression is here for better compressibility (see addClass) - cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " ); - - if ( cur ) { - j = 0; - while ( ( clazz = classes[ j++ ] ) ) { - - // Remove *all* instances - while ( cur.indexOf( " " + clazz + " " ) > -1 ) { - cur = cur.replace( " " + clazz + " ", " " ); - } - } - - // Only assign if different to avoid unneeded rendering. - finalValue = stripAndCollapse( cur ); - if ( curValue !== finalValue ) { - elem.setAttribute( "class", finalValue ); - } - } - } - } - - return this; - }, - - toggleClass: function( value, stateVal ) { - var type = typeof value, - isValidValue = type === "string" || Array.isArray( value ); - - if ( typeof stateVal === "boolean" && isValidValue ) { - return stateVal ? 
this.addClass( value ) : this.removeClass( value ); - } - - if ( isFunction( value ) ) { - return this.each( function( i ) { - jQuery( this ).toggleClass( - value.call( this, i, getClass( this ), stateVal ), - stateVal - ); - } ); - } - - return this.each( function() { - var className, i, self, classNames; - - if ( isValidValue ) { - - // Toggle individual class names - i = 0; - self = jQuery( this ); - classNames = classesToArray( value ); - - while ( ( className = classNames[ i++ ] ) ) { - - // Check each className given, space separated list - if ( self.hasClass( className ) ) { - self.removeClass( className ); - } else { - self.addClass( className ); - } - } - - // Toggle whole class name - } else if ( value === undefined || type === "boolean" ) { - className = getClass( this ); - if ( className ) { - - // Store className if set - dataPriv.set( this, "__className__", className ); - } - - // If the element has a class name or if we're passed `false`, - // then remove the whole classname (if there was one, the above saved it). - // Otherwise bring back whatever was previously saved (if anything), - // falling back to the empty string if nothing was stored. - if ( this.setAttribute ) { - this.setAttribute( "class", - className || value === false ? - "" : - dataPriv.get( this, "__className__" ) || "" - ); - } - } - } ); - }, - - hasClass: function( selector ) { - var className, elem, - i = 0; - - className = " " + selector + " "; - while ( ( elem = this[ i++ ] ) ) { - if ( elem.nodeType === 1 && - ( " " + stripAndCollapse( getClass( elem ) ) + " " ).indexOf( className ) > -1 ) { - return true; - } - } - - return false; - } -} ); - - - - -var rreturn = /\r/g; - -jQuery.fn.extend( { - val: function( value ) { - var hooks, ret, valueIsFunction, - elem = this[ 0 ]; - - if ( !arguments.length ) { - if ( elem ) { - hooks = jQuery.valHooks[ elem.type ] || - jQuery.valHooks[ elem.nodeName.toLowerCase() ]; - - if ( hooks && - "get" in hooks && - ( ret = hooks.get( elem, "value" ) ) !== undefined - ) { - return ret; - } - - ret = elem.value; - - // Handle most common string cases - if ( typeof ret === "string" ) { - return ret.replace( rreturn, "" ); - } - - // Handle cases where value is null/undef or number - return ret == null ? "" : ret; - } - - return; - } - - valueIsFunction = isFunction( value ); - - return this.each( function( i ) { - var val; - - if ( this.nodeType !== 1 ) { - return; - } - - if ( valueIsFunction ) { - val = value.call( this, i, jQuery( this ).val() ); - } else { - val = value; - } - - // Treat null/undefined as ""; convert numbers to string - if ( val == null ) { - val = ""; - - } else if ( typeof val === "number" ) { - val += ""; - - } else if ( Array.isArray( val ) ) { - val = jQuery.map( val, function( value ) { - return value == null ? "" : value + ""; - } ); - } - - hooks = jQuery.valHooks[ this.type ] || jQuery.valHooks[ this.nodeName.toLowerCase() ]; - - // If set returns undefined, fall back to normal setting - if ( !hooks || !( "set" in hooks ) || hooks.set( this, val, "value" ) === undefined ) { - this.value = val; - } - } ); - } -} ); - -jQuery.extend( { - valHooks: { - option: { - get: function( elem ) { - - var val = jQuery.find.attr( elem, "value" ); - return val != null ? 
- val : - - // Support: IE <=10 - 11 only - // option.text throws exceptions (#14686, #14858) - // Strip and collapse whitespace - // https://html.spec.whatwg.org/#strip-and-collapse-whitespace - stripAndCollapse( jQuery.text( elem ) ); - } - }, - select: { - get: function( elem ) { - var value, option, i, - options = elem.options, - index = elem.selectedIndex, - one = elem.type === "select-one", - values = one ? null : [], - max = one ? index + 1 : options.length; - - if ( index < 0 ) { - i = max; - - } else { - i = one ? index : 0; - } - - // Loop through all the selected options - for ( ; i < max; i++ ) { - option = options[ i ]; - - // Support: IE <=9 only - // IE8-9 doesn't update selected after form reset (#2551) - if ( ( option.selected || i === index ) && - - // Don't return options that are disabled or in a disabled optgroup - !option.disabled && - ( !option.parentNode.disabled || - !nodeName( option.parentNode, "optgroup" ) ) ) { - - // Get the specific value for the option - value = jQuery( option ).val(); - - // We don't need an array for one selects - if ( one ) { - return value; - } - - // Multi-Selects return an array - values.push( value ); - } - } - - return values; - }, - - set: function( elem, value ) { - var optionSet, option, - options = elem.options, - values = jQuery.makeArray( value ), - i = options.length; - - while ( i-- ) { - option = options[ i ]; - - /* eslint-disable no-cond-assign */ - - if ( option.selected = - jQuery.inArray( jQuery.valHooks.option.get( option ), values ) > -1 - ) { - optionSet = true; - } - - /* eslint-enable no-cond-assign */ - } - - // Force browsers to behave consistently when non-matching value is set - if ( !optionSet ) { - elem.selectedIndex = -1; - } - return values; - } - } - } -} ); - -// Radios and checkboxes getter/setter -jQuery.each( [ "radio", "checkbox" ], function() { - jQuery.valHooks[ this ] = { - set: function( elem, value ) { - if ( Array.isArray( value ) ) { - return ( elem.checked = jQuery.inArray( jQuery( elem ).val(), value ) > -1 ); - } - } - }; - if ( !support.checkOn ) { - jQuery.valHooks[ this ].get = function( elem ) { - return elem.getAttribute( "value" ) === null ? "on" : elem.value; - }; - } -} ); - - - - -// Return jQuery for attributes-only inclusion - - -support.focusin = "onfocusin" in window; - - -var rfocusMorph = /^(?:focusinfocus|focusoutblur)$/, - stopPropagationCallback = function( e ) { - e.stopPropagation(); - }; - -jQuery.extend( jQuery.event, { - - trigger: function( event, data, elem, onlyHandlers ) { - - var i, cur, tmp, bubbleType, ontype, handle, special, lastElement, - eventPath = [ elem || document ], - type = hasOwn.call( event, "type" ) ? event.type : event, - namespaces = hasOwn.call( event, "namespace" ) ? event.namespace.split( "." ) : []; - - cur = lastElement = tmp = elem = elem || document; - - // Don't do events on text and comment nodes - if ( elem.nodeType === 3 || elem.nodeType === 8 ) { - return; - } - - // focus/blur morphs to focusin/out; ensure we're not firing them right now - if ( rfocusMorph.test( type + jQuery.event.triggered ) ) { - return; - } - - if ( type.indexOf( "." ) > -1 ) { - - // Namespaced trigger; create a regexp to match event type in handle() - namespaces = type.split( "." ); - type = namespaces.shift(); - namespaces.sort(); - } - ontype = type.indexOf( ":" ) < 0 && "on" + type; - - // Caller can pass in a jQuery.Event object, Object, or just an event type string - event = event[ jQuery.expando ] ? 
- event : - new jQuery.Event( type, typeof event === "object" && event ); - - // Trigger bitmask: & 1 for native handlers; & 2 for jQuery (always true) - event.isTrigger = onlyHandlers ? 2 : 3; - event.namespace = namespaces.join( "." ); - event.rnamespace = event.namespace ? - new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" ) : - null; - - // Clean up the event in case it is being reused - event.result = undefined; - if ( !event.target ) { - event.target = elem; - } - - // Clone any incoming data and prepend the event, creating the handler arg list - data = data == null ? - [ event ] : - jQuery.makeArray( data, [ event ] ); - - // Allow special events to draw outside the lines - special = jQuery.event.special[ type ] || {}; - if ( !onlyHandlers && special.trigger && special.trigger.apply( elem, data ) === false ) { - return; - } - - // Determine event propagation path in advance, per W3C events spec (#9951) - // Bubble up to document, then to window; watch for a global ownerDocument var (#9724) - if ( !onlyHandlers && !special.noBubble && !isWindow( elem ) ) { - - bubbleType = special.delegateType || type; - if ( !rfocusMorph.test( bubbleType + type ) ) { - cur = cur.parentNode; - } - for ( ; cur; cur = cur.parentNode ) { - eventPath.push( cur ); - tmp = cur; - } - - // Only add window if we got to document (e.g., not plain obj or detached DOM) - if ( tmp === ( elem.ownerDocument || document ) ) { - eventPath.push( tmp.defaultView || tmp.parentWindow || window ); - } - } - - // Fire handlers on the event path - i = 0; - while ( ( cur = eventPath[ i++ ] ) && !event.isPropagationStopped() ) { - lastElement = cur; - event.type = i > 1 ? - bubbleType : - special.bindType || type; - - // jQuery handler - handle = ( - dataPriv.get( cur, "events" ) || Object.create( null ) - )[ event.type ] && - dataPriv.get( cur, "handle" ); - if ( handle ) { - handle.apply( cur, data ); - } - - // Native handler - handle = ontype && cur[ ontype ]; - if ( handle && handle.apply && acceptData( cur ) ) { - event.result = handle.apply( cur, data ); - if ( event.result === false ) { - event.preventDefault(); - } - } - } - event.type = type; - - // If nobody prevented the default action, do it now - if ( !onlyHandlers && !event.isDefaultPrevented() ) { - - if ( ( !special._default || - special._default.apply( eventPath.pop(), data ) === false ) && - acceptData( elem ) ) { - - // Call a native DOM method on the target with the same name as the event. 
- // Don't do default actions on window, that's where global variables be (#6170) - if ( ontype && isFunction( elem[ type ] ) && !isWindow( elem ) ) { - - // Don't re-trigger an onFOO event when we call its FOO() method - tmp = elem[ ontype ]; - - if ( tmp ) { - elem[ ontype ] = null; - } - - // Prevent re-triggering of the same event, since we already bubbled it above - jQuery.event.triggered = type; - - if ( event.isPropagationStopped() ) { - lastElement.addEventListener( type, stopPropagationCallback ); - } - - elem[ type ](); - - if ( event.isPropagationStopped() ) { - lastElement.removeEventListener( type, stopPropagationCallback ); - } - - jQuery.event.triggered = undefined; - - if ( tmp ) { - elem[ ontype ] = tmp; - } - } - } - } - - return event.result; - }, - - // Piggyback on a donor event to simulate a different one - // Used only for `focus(in | out)` events - simulate: function( type, elem, event ) { - var e = jQuery.extend( - new jQuery.Event(), - event, - { - type: type, - isSimulated: true - } - ); - - jQuery.event.trigger( e, null, elem ); - } - -} ); - -jQuery.fn.extend( { - - trigger: function( type, data ) { - return this.each( function() { - jQuery.event.trigger( type, data, this ); - } ); - }, - triggerHandler: function( type, data ) { - var elem = this[ 0 ]; - if ( elem ) { - return jQuery.event.trigger( type, data, elem, true ); - } - } -} ); - - -// Support: Firefox <=44 -// Firefox doesn't have focus(in | out) events -// Related ticket - https://bugzilla.mozilla.org/show_bug.cgi?id=687787 -// -// Support: Chrome <=48 - 49, Safari <=9.0 - 9.1 -// focus(in | out) events fire after focus & blur events, -// which is spec violation - http://www.w3.org/TR/DOM-Level-3-Events/#events-focusevent-event-order -// Related ticket - https://bugs.chromium.org/p/chromium/issues/detail?id=449857 -if ( !support.focusin ) { - jQuery.each( { focus: "focusin", blur: "focusout" }, function( orig, fix ) { - - // Attach a single capturing handler on the document while someone wants focusin/focusout - var handler = function( event ) { - jQuery.event.simulate( fix, event.target, jQuery.event.fix( event ) ); - }; - - jQuery.event.special[ fix ] = { - setup: function() { - - // Handle: regular nodes (via `this.ownerDocument`), window - // (via `this.document`) & document (via `this`). - var doc = this.ownerDocument || this.document || this, - attaches = dataPriv.access( doc, fix ); - - if ( !attaches ) { - doc.addEventListener( orig, handler, true ); - } - dataPriv.access( doc, fix, ( attaches || 0 ) + 1 ); - }, - teardown: function() { - var doc = this.ownerDocument || this.document || this, - attaches = dataPriv.access( doc, fix ) - 1; - - if ( !attaches ) { - doc.removeEventListener( orig, handler, true ); - dataPriv.remove( doc, fix ); - - } else { - dataPriv.access( doc, fix, attaches ); - } - } - }; - } ); -} -var location = window.location; - -var nonce = { guid: Date.now() }; - -var rquery = ( /\?/ ); - - - -// Cross-browser xml parsing -jQuery.parseXML = function( data ) { - var xml; - if ( !data || typeof data !== "string" ) { - return null; - } - - // Support: IE 9 - 11 only - // IE throws on parseFromString with invalid input. 
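// A minimal usage sketch of the .trigger()/.triggerHandler() wrappers defined
// above (assumes jQuery is loaded; the "#save" selector and the "app:saved"
// event name are illustrative placeholders).
jQuery( "#save" ).on( "app:saved.audit", function( event, payload ) {
    // "payload" is the extra data passed to .trigger(); the ".audit" suffix is
    // an event namespace, handled by the namespace matching implemented above.
    console.log( event.type, payload.id );
} );

// .trigger() fires matching jQuery handlers and bubbles the event up the DOM:
jQuery( "#save" ).trigger( "app:saved", { id: 42 } );

// .triggerHandler() runs handlers on the first element only, without bubbling,
// and returns the last handler's return value (undefined here):
var result = jQuery( "#save" ).triggerHandler( "app:saved", { id: 42 } );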
- try { - xml = ( new window.DOMParser() ).parseFromString( data, "text/xml" ); - } catch ( e ) { - xml = undefined; - } - - if ( !xml || xml.getElementsByTagName( "parsererror" ).length ) { - jQuery.error( "Invalid XML: " + data ); - } - return xml; -}; - - -var - rbracket = /\[\]$/, - rCRLF = /\r?\n/g, - rsubmitterTypes = /^(?:submit|button|image|reset|file)$/i, - rsubmittable = /^(?:input|select|textarea|keygen)/i; - -function buildParams( prefix, obj, traditional, add ) { - var name; - - if ( Array.isArray( obj ) ) { - - // Serialize array item. - jQuery.each( obj, function( i, v ) { - if ( traditional || rbracket.test( prefix ) ) { - - // Treat each array item as a scalar. - add( prefix, v ); - - } else { - - // Item is non-scalar (array or object), encode its numeric index. - buildParams( - prefix + "[" + ( typeof v === "object" && v != null ? i : "" ) + "]", - v, - traditional, - add - ); - } - } ); - - } else if ( !traditional && toType( obj ) === "object" ) { - - // Serialize object item. - for ( name in obj ) { - buildParams( prefix + "[" + name + "]", obj[ name ], traditional, add ); - } - - } else { - - // Serialize scalar item. - add( prefix, obj ); - } -} - -// Serialize an array of form elements or a set of -// key/values into a query string -jQuery.param = function( a, traditional ) { - var prefix, - s = [], - add = function( key, valueOrFunction ) { - - // If value is a function, invoke it and use its return value - var value = isFunction( valueOrFunction ) ? - valueOrFunction() : - valueOrFunction; - - s[ s.length ] = encodeURIComponent( key ) + "=" + - encodeURIComponent( value == null ? "" : value ); - }; - - if ( a == null ) { - return ""; - } - - // If an array was passed in, assume that it is an array of form elements. - if ( Array.isArray( a ) || ( a.jquery && !jQuery.isPlainObject( a ) ) ) { - - // Serialize the form elements - jQuery.each( a, function() { - add( this.name, this.value ); - } ); - - } else { - - // If traditional, encode the "old" way (the way 1.3.2 or older - // did it), otherwise encode params recursively. - for ( prefix in a ) { - buildParams( prefix, a[ prefix ], traditional, add ); - } - } - - // Return the resulting serialization - return s.join( "&" ); -}; - -jQuery.fn.extend( { - serialize: function() { - return jQuery.param( this.serializeArray() ); - }, - serializeArray: function() { - return this.map( function() { - - // Can add propHook for "elements" to filter or add form elements - var elements = jQuery.prop( this, "elements" ); - return elements ? 
jQuery.makeArray( elements ) : this; - } ) - .filter( function() { - var type = this.type; - - // Use .is( ":disabled" ) so that fieldset[disabled] works - return this.name && !jQuery( this ).is( ":disabled" ) && - rsubmittable.test( this.nodeName ) && !rsubmitterTypes.test( type ) && - ( this.checked || !rcheckableType.test( type ) ); - } ) - .map( function( _i, elem ) { - var val = jQuery( this ).val(); - - if ( val == null ) { - return null; - } - - if ( Array.isArray( val ) ) { - return jQuery.map( val, function( val ) { - return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; - } ); - } - - return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; - } ).get(); - } -} ); - - -var - r20 = /%20/g, - rhash = /#.*$/, - rantiCache = /([?&])_=[^&]*/, - rheaders = /^(.*?):[ \t]*([^\r\n]*)$/mg, - - // #7653, #8125, #8152: local protocol detection - rlocalProtocol = /^(?:about|app|app-storage|.+-extension|file|res|widget):$/, - rnoContent = /^(?:GET|HEAD)$/, - rprotocol = /^\/\//, - - /* Prefilters - * 1) They are useful to introduce custom dataTypes (see ajax/jsonp.js for an example) - * 2) These are called: - * - BEFORE asking for a transport - * - AFTER param serialization (s.data is a string if s.processData is true) - * 3) key is the dataType - * 4) the catchall symbol "*" can be used - * 5) execution will start with transport dataType and THEN continue down to "*" if needed - */ - prefilters = {}, - - /* Transports bindings - * 1) key is the dataType - * 2) the catchall symbol "*" can be used - * 3) selection will start with transport dataType and THEN go to "*" if needed - */ - transports = {}, - - // Avoid comment-prolog char sequence (#10098); must appease lint and evade compression - allTypes = "*/".concat( "*" ), - - // Anchor tag for parsing the document origin - originAnchor = document.createElement( "a" ); - originAnchor.href = location.href; - -// Base "constructor" for jQuery.ajaxPrefilter and jQuery.ajaxTransport -function addToPrefiltersOrTransports( structure ) { - - // dataTypeExpression is optional and defaults to "*" - return function( dataTypeExpression, func ) { - - if ( typeof dataTypeExpression !== "string" ) { - func = dataTypeExpression; - dataTypeExpression = "*"; - } - - var dataType, - i = 0, - dataTypes = dataTypeExpression.toLowerCase().match( rnothtmlwhite ) || []; - - if ( isFunction( func ) ) { - - // For each dataType in the dataTypeExpression - while ( ( dataType = dataTypes[ i++ ] ) ) { - - // Prepend if requested - if ( dataType[ 0 ] === "+" ) { - dataType = dataType.slice( 1 ) || "*"; - ( structure[ dataType ] = structure[ dataType ] || [] ).unshift( func ); - - // Otherwise append - } else { - ( structure[ dataType ] = structure[ dataType ] || [] ).push( func ); - } - } - } - }; -} - -// Base inspection function for prefilters and transports -function inspectPrefiltersOrTransports( structure, options, originalOptions, jqXHR ) { - - var inspected = {}, - seekingTransport = ( structure === transports ); - - function inspect( dataType ) { - var selected; - inspected[ dataType ] = true; - jQuery.each( structure[ dataType ] || [], function( _, prefilterOrFactory ) { - var dataTypeOrTransport = prefilterOrFactory( options, originalOptions, jqXHR ); - if ( typeof dataTypeOrTransport === "string" && - !seekingTransport && !inspected[ dataTypeOrTransport ] ) { - - options.dataTypes.unshift( dataTypeOrTransport ); - inspect( dataTypeOrTransport ); - return false; - } else if ( seekingTransport ) { - return !( selected = dataTypeOrTransport ); - } 
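// A minimal usage sketch of the serialization helpers defined above (assumes
// jQuery is loaded; the "#login" form selector is an illustrative placeholder).
var query = jQuery.param( { user: "ada", roles: [ "admin", "dev" ] } );
// Arrays and nested objects are encoded with bracketed keys, e.g. roles[]=admin
// (percent-encoded); passing "traditional" as true falls back to roles=admin&roles=dev:
var legacy = jQuery.param( { roles: [ "admin", "dev" ] }, true );

// .serialize() and .serializeArray() apply the same encoding to successful
// form controls (named, enabled, submittable, and checked where applicable):
var asString = jQuery( "#login" ).serialize();
var asPairs = jQuery( "#login" ).serializeArray(); // [ { name: "...", value: "..." }, ... ]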
- } ); - return selected; - } - - return inspect( options.dataTypes[ 0 ] ) || !inspected[ "*" ] && inspect( "*" ); -} - -// A special extend for ajax options -// that takes "flat" options (not to be deep extended) -// Fixes #9887 -function ajaxExtend( target, src ) { - var key, deep, - flatOptions = jQuery.ajaxSettings.flatOptions || {}; - - for ( key in src ) { - if ( src[ key ] !== undefined ) { - ( flatOptions[ key ] ? target : ( deep || ( deep = {} ) ) )[ key ] = src[ key ]; - } - } - if ( deep ) { - jQuery.extend( true, target, deep ); - } - - return target; -} - -/* Handles responses to an ajax request: - * - finds the right dataType (mediates between content-type and expected dataType) - * - returns the corresponding response - */ -function ajaxHandleResponses( s, jqXHR, responses ) { - - var ct, type, finalDataType, firstDataType, - contents = s.contents, - dataTypes = s.dataTypes; - - // Remove auto dataType and get content-type in the process - while ( dataTypes[ 0 ] === "*" ) { - dataTypes.shift(); - if ( ct === undefined ) { - ct = s.mimeType || jqXHR.getResponseHeader( "Content-Type" ); - } - } - - // Check if we're dealing with a known content-type - if ( ct ) { - for ( type in contents ) { - if ( contents[ type ] && contents[ type ].test( ct ) ) { - dataTypes.unshift( type ); - break; - } - } - } - - // Check to see if we have a response for the expected dataType - if ( dataTypes[ 0 ] in responses ) { - finalDataType = dataTypes[ 0 ]; - } else { - - // Try convertible dataTypes - for ( type in responses ) { - if ( !dataTypes[ 0 ] || s.converters[ type + " " + dataTypes[ 0 ] ] ) { - finalDataType = type; - break; - } - if ( !firstDataType ) { - firstDataType = type; - } - } - - // Or just use first one - finalDataType = finalDataType || firstDataType; - } - - // If we found a dataType - // We add the dataType to the list if needed - // and return the corresponding response - if ( finalDataType ) { - if ( finalDataType !== dataTypes[ 0 ] ) { - dataTypes.unshift( finalDataType ); - } - return responses[ finalDataType ]; - } -} - -/* Chain conversions given the request and the original response - * Also sets the responseXXX fields on the jqXHR instance - */ -function ajaxConvert( s, response, jqXHR, isSuccess ) { - var conv2, current, conv, tmp, prev, - converters = {}, - - // Work with a copy of dataTypes in case we need to modify it for conversion - dataTypes = s.dataTypes.slice(); - - // Create converters map with lowercased keys - if ( dataTypes[ 1 ] ) { - for ( conv in s.converters ) { - converters[ conv.toLowerCase() ] = s.converters[ conv ]; - } - } - - current = dataTypes.shift(); - - // Convert to each sequential dataType - while ( current ) { - - if ( s.responseFields[ current ] ) { - jqXHR[ s.responseFields[ current ] ] = response; - } - - // Apply the dataFilter if provided - if ( !prev && isSuccess && s.dataFilter ) { - response = s.dataFilter( response, s.dataType ); - } - - prev = current; - current = dataTypes.shift(); - - if ( current ) { - - // There's only work to do if current dataType is non-auto - if ( current === "*" ) { - - current = prev; - - // Convert response if prev dataType is non-auto and differs from current - } else if ( prev !== "*" && prev !== current ) { - - // Seek a direct converter - conv = converters[ prev + " " + current ] || converters[ "* " + current ]; - - // If none found, seek a pair - if ( !conv ) { - for ( conv2 in converters ) { - - // If conv2 outputs current - tmp = conv2.split( " " ); - if ( tmp[ 1 ] === current ) { - - // If 
prev can be converted to accepted input - conv = converters[ prev + " " + tmp[ 0 ] ] || - converters[ "* " + tmp[ 0 ] ]; - if ( conv ) { - - // Condense equivalence converters - if ( conv === true ) { - conv = converters[ conv2 ]; - - // Otherwise, insert the intermediate dataType - } else if ( converters[ conv2 ] !== true ) { - current = tmp[ 0 ]; - dataTypes.unshift( tmp[ 1 ] ); - } - break; - } - } - } - } - - // Apply converter (if not an equivalence) - if ( conv !== true ) { - - // Unless errors are allowed to bubble, catch and return them - if ( conv && s.throws ) { - response = conv( response ); - } else { - try { - response = conv( response ); - } catch ( e ) { - return { - state: "parsererror", - error: conv ? e : "No conversion from " + prev + " to " + current - }; - } - } - } - } - } - } - - return { state: "success", data: response }; -} - -jQuery.extend( { - - // Counter for holding the number of active queries - active: 0, - - // Last-Modified header cache for next request - lastModified: {}, - etag: {}, - - ajaxSettings: { - url: location.href, - type: "GET", - isLocal: rlocalProtocol.test( location.protocol ), - global: true, - processData: true, - async: true, - contentType: "application/x-www-form-urlencoded; charset=UTF-8", - - /* - timeout: 0, - data: null, - dataType: null, - username: null, - password: null, - cache: null, - throws: false, - traditional: false, - headers: {}, - */ - - accepts: { - "*": allTypes, - text: "text/plain", - html: "text/html", - xml: "application/xml, text/xml", - json: "application/json, text/javascript" - }, - - contents: { - xml: /\bxml\b/, - html: /\bhtml/, - json: /\bjson\b/ - }, - - responseFields: { - xml: "responseXML", - text: "responseText", - json: "responseJSON" - }, - - // Data converters - // Keys separate source (or catchall "*") and destination types with a single space - converters: { - - // Convert anything to text - "* text": String, - - // Text to html (true = no transformation) - "text html": true, - - // Evaluate text as a json expression - "text json": JSON.parse, - - // Parse text as xml - "text xml": jQuery.parseXML - }, - - // For options that shouldn't be deep extended: - // you can add your own custom options here if - // and when you create one that shouldn't be - // deep extended (see ajaxExtend) - flatOptions: { - url: true, - context: true - } - }, - - // Creates a full fledged settings object into target - // with both ajaxSettings and settings fields. - // If target is omitted, writes into ajaxSettings. - ajaxSetup: function( target, settings ) { - return settings ? 
- - // Building a settings object - ajaxExtend( ajaxExtend( target, jQuery.ajaxSettings ), settings ) : - - // Extending ajaxSettings - ajaxExtend( jQuery.ajaxSettings, target ); - }, - - ajaxPrefilter: addToPrefiltersOrTransports( prefilters ), - ajaxTransport: addToPrefiltersOrTransports( transports ), - - // Main method - ajax: function( url, options ) { - - // If url is an object, simulate pre-1.5 signature - if ( typeof url === "object" ) { - options = url; - url = undefined; - } - - // Force options to be an object - options = options || {}; - - var transport, - - // URL without anti-cache param - cacheURL, - - // Response headers - responseHeadersString, - responseHeaders, - - // timeout handle - timeoutTimer, - - // Url cleanup var - urlAnchor, - - // Request state (becomes false upon send and true upon completion) - completed, - - // To know if global events are to be dispatched - fireGlobals, - - // Loop variable - i, - - // uncached part of the url - uncached, - - // Create the final options object - s = jQuery.ajaxSetup( {}, options ), - - // Callbacks context - callbackContext = s.context || s, - - // Context for global events is callbackContext if it is a DOM node or jQuery collection - globalEventContext = s.context && - ( callbackContext.nodeType || callbackContext.jquery ) ? - jQuery( callbackContext ) : - jQuery.event, - - // Deferreds - deferred = jQuery.Deferred(), - completeDeferred = jQuery.Callbacks( "once memory" ), - - // Status-dependent callbacks - statusCode = s.statusCode || {}, - - // Headers (they are sent all at once) - requestHeaders = {}, - requestHeadersNames = {}, - - // Default abort message - strAbort = "canceled", - - // Fake xhr - jqXHR = { - readyState: 0, - - // Builds headers hashtable if needed - getResponseHeader: function( key ) { - var match; - if ( completed ) { - if ( !responseHeaders ) { - responseHeaders = {}; - while ( ( match = rheaders.exec( responseHeadersString ) ) ) { - responseHeaders[ match[ 1 ].toLowerCase() + " " ] = - ( responseHeaders[ match[ 1 ].toLowerCase() + " " ] || [] ) - .concat( match[ 2 ] ); - } - } - match = responseHeaders[ key.toLowerCase() + " " ]; - } - return match == null ? null : match.join( ", " ); - }, - - // Raw string - getAllResponseHeaders: function() { - return completed ? 
responseHeadersString : null; - }, - - // Caches the header - setRequestHeader: function( name, value ) { - if ( completed == null ) { - name = requestHeadersNames[ name.toLowerCase() ] = - requestHeadersNames[ name.toLowerCase() ] || name; - requestHeaders[ name ] = value; - } - return this; - }, - - // Overrides response content-type header - overrideMimeType: function( type ) { - if ( completed == null ) { - s.mimeType = type; - } - return this; - }, - - // Status-dependent callbacks - statusCode: function( map ) { - var code; - if ( map ) { - if ( completed ) { - - // Execute the appropriate callbacks - jqXHR.always( map[ jqXHR.status ] ); - } else { - - // Lazy-add the new callbacks in a way that preserves old ones - for ( code in map ) { - statusCode[ code ] = [ statusCode[ code ], map[ code ] ]; - } - } - } - return this; - }, - - // Cancel the request - abort: function( statusText ) { - var finalText = statusText || strAbort; - if ( transport ) { - transport.abort( finalText ); - } - done( 0, finalText ); - return this; - } - }; - - // Attach deferreds - deferred.promise( jqXHR ); - - // Add protocol if not provided (prefilters might expect it) - // Handle falsy url in the settings object (#10093: consistency with old signature) - // We also use the url parameter if available - s.url = ( ( url || s.url || location.href ) + "" ) - .replace( rprotocol, location.protocol + "//" ); - - // Alias method option to type as per ticket #12004 - s.type = options.method || options.type || s.method || s.type; - - // Extract dataTypes list - s.dataTypes = ( s.dataType || "*" ).toLowerCase().match( rnothtmlwhite ) || [ "" ]; - - // A cross-domain request is in order when the origin doesn't match the current origin. - if ( s.crossDomain == null ) { - urlAnchor = document.createElement( "a" ); - - // Support: IE <=8 - 11, Edge 12 - 15 - // IE throws exception on accessing the href property if url is malformed, - // e.g. 
http://example.com:80x/ - try { - urlAnchor.href = s.url; - - // Support: IE <=8 - 11 only - // Anchor's host property isn't correctly set when s.url is relative - urlAnchor.href = urlAnchor.href; - s.crossDomain = originAnchor.protocol + "//" + originAnchor.host !== - urlAnchor.protocol + "//" + urlAnchor.host; - } catch ( e ) { - - // If there is an error parsing the URL, assume it is crossDomain, - // it can be rejected by the transport if it is invalid - s.crossDomain = true; - } - } - - // Convert data if not already a string - if ( s.data && s.processData && typeof s.data !== "string" ) { - s.data = jQuery.param( s.data, s.traditional ); - } - - // Apply prefilters - inspectPrefiltersOrTransports( prefilters, s, options, jqXHR ); - - // If request was aborted inside a prefilter, stop there - if ( completed ) { - return jqXHR; - } - - // We can fire global events as of now if asked to - // Don't fire events if jQuery.event is undefined in an AMD-usage scenario (#15118) - fireGlobals = jQuery.event && s.global; - - // Watch for a new set of requests - if ( fireGlobals && jQuery.active++ === 0 ) { - jQuery.event.trigger( "ajaxStart" ); - } - - // Uppercase the type - s.type = s.type.toUpperCase(); - - // Determine if request has content - s.hasContent = !rnoContent.test( s.type ); - - // Save the URL in case we're toying with the If-Modified-Since - // and/or If-None-Match header later on - // Remove hash to simplify url manipulation - cacheURL = s.url.replace( rhash, "" ); - - // More options handling for requests with no content - if ( !s.hasContent ) { - - // Remember the hash so we can put it back - uncached = s.url.slice( cacheURL.length ); - - // If data is available and should be processed, append data to url - if ( s.data && ( s.processData || typeof s.data === "string" ) ) { - cacheURL += ( rquery.test( cacheURL ) ? "&" : "?" ) + s.data; - - // #9682: remove data so that it's not used in an eventual retry - delete s.data; - } - - // Add or update anti-cache param if needed - if ( s.cache === false ) { - cacheURL = cacheURL.replace( rantiCache, "$1" ); - uncached = ( rquery.test( cacheURL ) ? "&" : "?" ) + "_=" + ( nonce.guid++ ) + - uncached; - } - - // Put hash and anti-cache on the URL that will be requested (gh-1732) - s.url = cacheURL + uncached; - - // Change '%20' to '+' if this is encoded form body content (gh-2658) - } else if ( s.data && s.processData && - ( s.contentType || "" ).indexOf( "application/x-www-form-urlencoded" ) === 0 ) { - s.data = s.data.replace( r20, "+" ); - } - - // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. - if ( s.ifModified ) { - if ( jQuery.lastModified[ cacheURL ] ) { - jqXHR.setRequestHeader( "If-Modified-Since", jQuery.lastModified[ cacheURL ] ); - } - if ( jQuery.etag[ cacheURL ] ) { - jqXHR.setRequestHeader( "If-None-Match", jQuery.etag[ cacheURL ] ); - } - } - - // Set the correct header, if data is being sent - if ( s.data && s.hasContent && s.contentType !== false || options.contentType ) { - jqXHR.setRequestHeader( "Content-Type", s.contentType ); - } - - // Set the Accepts header for the server, depending on the dataType - jqXHR.setRequestHeader( - "Accept", - s.dataTypes[ 0 ] && s.accepts[ s.dataTypes[ 0 ] ] ? - s.accepts[ s.dataTypes[ 0 ] ] + - ( s.dataTypes[ 0 ] !== "*" ? 
", " + allTypes + "; q=0.01" : "" ) : - s.accepts[ "*" ] - ); - - // Check for headers option - for ( i in s.headers ) { - jqXHR.setRequestHeader( i, s.headers[ i ] ); - } - - // Allow custom headers/mimetypes and early abort - if ( s.beforeSend && - ( s.beforeSend.call( callbackContext, jqXHR, s ) === false || completed ) ) { - - // Abort if not done already and return - return jqXHR.abort(); - } - - // Aborting is no longer a cancellation - strAbort = "abort"; - - // Install callbacks on deferreds - completeDeferred.add( s.complete ); - jqXHR.done( s.success ); - jqXHR.fail( s.error ); - - // Get transport - transport = inspectPrefiltersOrTransports( transports, s, options, jqXHR ); - - // If no transport, we auto-abort - if ( !transport ) { - done( -1, "No Transport" ); - } else { - jqXHR.readyState = 1; - - // Send global event - if ( fireGlobals ) { - globalEventContext.trigger( "ajaxSend", [ jqXHR, s ] ); - } - - // If request was aborted inside ajaxSend, stop there - if ( completed ) { - return jqXHR; - } - - // Timeout - if ( s.async && s.timeout > 0 ) { - timeoutTimer = window.setTimeout( function() { - jqXHR.abort( "timeout" ); - }, s.timeout ); - } - - try { - completed = false; - transport.send( requestHeaders, done ); - } catch ( e ) { - - // Rethrow post-completion exceptions - if ( completed ) { - throw e; - } - - // Propagate others as results - done( -1, e ); - } - } - - // Callback for when everything is done - function done( status, nativeStatusText, responses, headers ) { - var isSuccess, success, error, response, modified, - statusText = nativeStatusText; - - // Ignore repeat invocations - if ( completed ) { - return; - } - - completed = true; - - // Clear timeout if it exists - if ( timeoutTimer ) { - window.clearTimeout( timeoutTimer ); - } - - // Dereference transport for early garbage collection - // (no matter how long the jqXHR object will be used) - transport = undefined; - - // Cache response headers - responseHeadersString = headers || ""; - - // Set readyState - jqXHR.readyState = status > 0 ? 4 : 0; - - // Determine if successful - isSuccess = status >= 200 && status < 300 || status === 304; - - // Get response data - if ( responses ) { - response = ajaxHandleResponses( s, jqXHR, responses ); - } - - // Use a noop converter for missing script - if ( !isSuccess && jQuery.inArray( "script", s.dataTypes ) > -1 ) { - s.converters[ "text script" ] = function() {}; - } - - // Convert no matter what (that way responseXXX fields are always set) - response = ajaxConvert( s, response, jqXHR, isSuccess ); - - // If successful, handle type chaining - if ( isSuccess ) { - - // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. 
- if ( s.ifModified ) { - modified = jqXHR.getResponseHeader( "Last-Modified" ); - if ( modified ) { - jQuery.lastModified[ cacheURL ] = modified; - } - modified = jqXHR.getResponseHeader( "etag" ); - if ( modified ) { - jQuery.etag[ cacheURL ] = modified; - } - } - - // if no content - if ( status === 204 || s.type === "HEAD" ) { - statusText = "nocontent"; - - // if not modified - } else if ( status === 304 ) { - statusText = "notmodified"; - - // If we have data, let's convert it - } else { - statusText = response.state; - success = response.data; - error = response.error; - isSuccess = !error; - } - } else { - - // Extract error from statusText and normalize for non-aborts - error = statusText; - if ( status || !statusText ) { - statusText = "error"; - if ( status < 0 ) { - status = 0; - } - } - } - - // Set data for the fake xhr object - jqXHR.status = status; - jqXHR.statusText = ( nativeStatusText || statusText ) + ""; - - // Success/Error - if ( isSuccess ) { - deferred.resolveWith( callbackContext, [ success, statusText, jqXHR ] ); - } else { - deferred.rejectWith( callbackContext, [ jqXHR, statusText, error ] ); - } - - // Status-dependent callbacks - jqXHR.statusCode( statusCode ); - statusCode = undefined; - - if ( fireGlobals ) { - globalEventContext.trigger( isSuccess ? "ajaxSuccess" : "ajaxError", - [ jqXHR, s, isSuccess ? success : error ] ); - } - - // Complete - completeDeferred.fireWith( callbackContext, [ jqXHR, statusText ] ); - - if ( fireGlobals ) { - globalEventContext.trigger( "ajaxComplete", [ jqXHR, s ] ); - - // Handle the global AJAX counter - if ( !( --jQuery.active ) ) { - jQuery.event.trigger( "ajaxStop" ); - } - } - } - - return jqXHR; - }, - - getJSON: function( url, data, callback ) { - return jQuery.get( url, data, callback, "json" ); - }, - - getScript: function( url, callback ) { - return jQuery.get( url, undefined, callback, "script" ); - } -} ); - -jQuery.each( [ "get", "post" ], function( _i, method ) { - jQuery[ method ] = function( url, data, callback, type ) { - - // Shift arguments if data argument was omitted - if ( isFunction( data ) ) { - type = type || callback; - callback = data; - data = undefined; - } - - // The url can be an options object (which then must have .url) - return jQuery.ajax( jQuery.extend( { - url: url, - type: method, - dataType: type, - data: data, - success: callback - }, jQuery.isPlainObject( url ) && url ) ); - }; -} ); - -jQuery.ajaxPrefilter( function( s ) { - var i; - for ( i in s.headers ) { - if ( i.toLowerCase() === "content-type" ) { - s.contentType = s.headers[ i ] || ""; - } - } -} ); - - -jQuery._evalUrl = function( url, options, doc ) { - return jQuery.ajax( { - url: url, - - // Make this explicit, since user can override this through ajaxSetup (#11264) - type: "GET", - dataType: "script", - cache: true, - async: false, - global: false, - - // Only evaluate the response if it is successful (gh-4126) - // dataFilter is not invoked for failure responses, so using it instead - // of the default converter is kludgy but it works. 
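// A minimal usage sketch of the shorthand helpers defined above, all of which
// delegate to jQuery.ajax() (assumes jQuery is loaded; URLs are illustrative
// placeholders).
jQuery.getJSON( "/api/users", { page: 1 }, function( users ) {
    console.log( users.length );
} );

// jQuery.get()/jQuery.post() shift their arguments when "data" is omitted,
// exactly as implemented above:
jQuery.post( "/api/users", { name: "ada" }, function( created ) {
    console.log( created );
}, "json" );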
- converters: { - "text script": function() {} - }, - dataFilter: function( response ) { - jQuery.globalEval( response, options, doc ); - } - } ); -}; - - -jQuery.fn.extend( { - wrapAll: function( html ) { - var wrap; - - if ( this[ 0 ] ) { - if ( isFunction( html ) ) { - html = html.call( this[ 0 ] ); - } - - // The elements to wrap the target around - wrap = jQuery( html, this[ 0 ].ownerDocument ).eq( 0 ).clone( true ); - - if ( this[ 0 ].parentNode ) { - wrap.insertBefore( this[ 0 ] ); - } - - wrap.map( function() { - var elem = this; - - while ( elem.firstElementChild ) { - elem = elem.firstElementChild; - } - - return elem; - } ).append( this ); - } - - return this; - }, - - wrapInner: function( html ) { - if ( isFunction( html ) ) { - return this.each( function( i ) { - jQuery( this ).wrapInner( html.call( this, i ) ); - } ); - } - - return this.each( function() { - var self = jQuery( this ), - contents = self.contents(); - - if ( contents.length ) { - contents.wrapAll( html ); - - } else { - self.append( html ); - } - } ); - }, - - wrap: function( html ) { - var htmlIsFunction = isFunction( html ); - - return this.each( function( i ) { - jQuery( this ).wrapAll( htmlIsFunction ? html.call( this, i ) : html ); - } ); - }, - - unwrap: function( selector ) { - this.parent( selector ).not( "body" ).each( function() { - jQuery( this ).replaceWith( this.childNodes ); - } ); - return this; - } -} ); - - -jQuery.expr.pseudos.hidden = function( elem ) { - return !jQuery.expr.pseudos.visible( elem ); -}; -jQuery.expr.pseudos.visible = function( elem ) { - return !!( elem.offsetWidth || elem.offsetHeight || elem.getClientRects().length ); -}; - - - - -jQuery.ajaxSettings.xhr = function() { - try { - return new window.XMLHttpRequest(); - } catch ( e ) {} -}; - -var xhrSuccessStatus = { - - // File protocol always yields status code 0, assume 200 - 0: 200, - - // Support: IE <=9 only - // #1450: sometimes IE returns 1223 when it should be 204 - 1223: 204 - }, - xhrSupported = jQuery.ajaxSettings.xhr(); - -support.cors = !!xhrSupported && ( "withCredentials" in xhrSupported ); -support.ajax = xhrSupported = !!xhrSupported; - -jQuery.ajaxTransport( function( options ) { - var callback, errorCallback; - - // Cross domain only allowed if supported through XMLHttpRequest - if ( support.cors || xhrSupported && !options.crossDomain ) { - return { - send: function( headers, complete ) { - var i, - xhr = options.xhr(); - - xhr.open( - options.type, - options.url, - options.async, - options.username, - options.password - ); - - // Apply custom fields if provided - if ( options.xhrFields ) { - for ( i in options.xhrFields ) { - xhr[ i ] = options.xhrFields[ i ]; - } - } - - // Override mime type if needed - if ( options.mimeType && xhr.overrideMimeType ) { - xhr.overrideMimeType( options.mimeType ); - } - - // X-Requested-With header - // For cross-domain requests, seeing as conditions for a preflight are - // akin to a jigsaw puzzle, we simply never set it to be sure. - // (it can always be set on a per-request basis or even using ajaxSetup) - // For same-domain requests, won't change header if already provided. 
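// A minimal usage sketch of the wrapping helpers defined above (assumes jQuery
// is loaded; the ".note" selector and class names are illustrative placeholders).
jQuery( ".note" ).wrap( "<div class='callout'></div>" );          // one wrapper per matched element
jQuery( ".note" ).wrapAll( "<section class='notes'></section>" ); // one shared wrapper around all of them
jQuery( ".note" ).wrapInner( "<em></em>" );                        // wraps each element's contents
jQuery( ".callout .note" ).unwrap();                               // removes the parent, but never <body>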
- if ( !options.crossDomain && !headers[ "X-Requested-With" ] ) { - headers[ "X-Requested-With" ] = "XMLHttpRequest"; - } - - // Set headers - for ( i in headers ) { - xhr.setRequestHeader( i, headers[ i ] ); - } - - // Callback - callback = function( type ) { - return function() { - if ( callback ) { - callback = errorCallback = xhr.onload = - xhr.onerror = xhr.onabort = xhr.ontimeout = - xhr.onreadystatechange = null; - - if ( type === "abort" ) { - xhr.abort(); - } else if ( type === "error" ) { - - // Support: IE <=9 only - // On a manual native abort, IE9 throws - // errors on any property access that is not readyState - if ( typeof xhr.status !== "number" ) { - complete( 0, "error" ); - } else { - complete( - - // File: protocol always yields status 0; see #8605, #14207 - xhr.status, - xhr.statusText - ); - } - } else { - complete( - xhrSuccessStatus[ xhr.status ] || xhr.status, - xhr.statusText, - - // Support: IE <=9 only - // IE9 has no XHR2 but throws on binary (trac-11426) - // For XHR2 non-text, let the caller handle it (gh-2498) - ( xhr.responseType || "text" ) !== "text" || - typeof xhr.responseText !== "string" ? - { binary: xhr.response } : - { text: xhr.responseText }, - xhr.getAllResponseHeaders() - ); - } - } - }; - }; - - // Listen to events - xhr.onload = callback(); - errorCallback = xhr.onerror = xhr.ontimeout = callback( "error" ); - - // Support: IE 9 only - // Use onreadystatechange to replace onabort - // to handle uncaught aborts - if ( xhr.onabort !== undefined ) { - xhr.onabort = errorCallback; - } else { - xhr.onreadystatechange = function() { - - // Check readyState before timeout as it changes - if ( xhr.readyState === 4 ) { - - // Allow onerror to be called first, - // but that will not handle a native abort - // Also, save errorCallback to a variable - // as xhr.onerror cannot be accessed - window.setTimeout( function() { - if ( callback ) { - errorCallback(); - } - } ); - } - }; - } - - // Create the abort callback - callback = callback( "abort" ); - - try { - - // Do send the request (this may raise an exception) - xhr.send( options.hasContent && options.data || null ); - } catch ( e ) { - - // #14683: Only rethrow if this hasn't been notified as an error yet - if ( callback ) { - throw e; - } - } - }, - - abort: function() { - if ( callback ) { - callback(); - } - } - }; - } -} ); - - - - -// Prevent auto-execution of scripts when no explicit dataType was provided (See gh-2432) -jQuery.ajaxPrefilter( function( s ) { - if ( s.crossDomain ) { - s.contents.script = false; - } -} ); - -// Install script dataType -jQuery.ajaxSetup( { - accepts: { - script: "text/javascript, application/javascript, " + - "application/ecmascript, application/x-ecmascript" - }, - contents: { - script: /\b(?:java|ecma)script\b/ - }, - converters: { - "text script": function( text ) { - jQuery.globalEval( text ); - return text; - } - } -} ); - -// Handle cache's special case and crossDomain -jQuery.ajaxPrefilter( "script", function( s ) { - if ( s.cache === undefined ) { - s.cache = false; - } - if ( s.crossDomain ) { - s.type = "GET"; - } -} ); - -// Bind script tag hack transport -jQuery.ajaxTransport( "script", function( s ) { - - // This transport only deals with cross domain or forced-by-attrs requests - if ( s.crossDomain || s.scriptAttrs ) { - var script, callback; - return { - send: function( _, complete ) { - script = jQuery( "\r\n"; - -// inject VBScript -document.write(IEBinaryToArray_ByteStr_Script); - -global.JSZipUtils._getBinaryFromXHR = function (xhr) { - var 
binary = xhr.responseBody; - var byteMapping = {}; - for ( var i = 0; i < 256; i++ ) { - for ( var j = 0; j < 256; j++ ) { - byteMapping[ String.fromCharCode( i + (j << 8) ) ] = - String.fromCharCode(i) + String.fromCharCode(j); - } - } - var rawBytes = IEBinaryToArray_ByteStr(binary); - var lastChr = IEBinaryToArray_ByteStr_Last(binary); - return rawBytes.replace(/[\s\S]/g, function( match ) { - return byteMapping[match]; - }) + lastChr; -}; - -// enforcing Stuk's coding style -// vim: set shiftwidth=4 softtabstop=4: - -},{}]},{},[1]) -; diff --git a/docs/jdocs/jquery/jszip-utils/dist/jszip-utils-ie.min.js b/docs/jdocs/jquery/jszip-utils/dist/jszip-utils-ie.min.js deleted file mode 100644 index 93d8bc8ef..000000000 --- a/docs/jdocs/jquery/jszip-utils/dist/jszip-utils-ie.min.js +++ /dev/null @@ -1,10 +0,0 @@ -/*! - -JSZipUtils - A collection of cross-browser utilities to go along with JSZip. - - -(c) 2014 Stuart Knightley, David Duponchel -Dual licenced under the MIT license or GPLv3. See https://raw.github.com/Stuk/jszip-utils/master/LICENSE.markdown. - -*/ -!function a(b,c,d){function e(g,h){if(!c[g]){if(!b[g]){var i="function"==typeof require&&require;if(!h&&i)return i(g,!0);if(f)return f(g,!0);throw new Error("Cannot find module '"+g+"'")}var j=c[g]={exports:{}};b[g][0].call(j.exports,function(a){var c=b[g][1][a];return e(c?c:a)},j,j.exports,a,b,c,d)}return c[g].exports}for(var f="function"==typeof require&&require,g=0;g\r\n";document.write(b),a.JSZipUtils._getBinaryFromXHR=function(a){for(var b=a.responseBody,c={},d=0;256>d;d++)for(var e=0;256>e;e++)c[String.fromCharCode(d+(e<<8))]=String.fromCharCode(d)+String.fromCharCode(e);var f=IEBinaryToArray_ByteStr(b),g=IEBinaryToArray_ByteStr_Last(b);return f.replace(/[\s\S]/g,function(a){return c[a]})+g}},{}]},{},[1]); diff --git a/docs/jdocs/jquery/jszip-utils/dist/jszip-utils.js b/docs/jdocs/jquery/jszip-utils/dist/jszip-utils.js deleted file mode 100644 index 775895ec9..000000000 --- a/docs/jdocs/jquery/jszip-utils/dist/jszip-utils.js +++ /dev/null @@ -1,118 +0,0 @@ -/*! - -JSZipUtils - A collection of cross-browser utilities to go along with JSZip. - - -(c) 2014 Stuart Knightley, David Duponchel -Dual licenced under the MIT license or GPLv3. See https://raw.github.com/Stuk/jszip-utils/master/LICENSE.markdown. - -*/ -!function(e){"object"==typeof exports?module.exports=e():"function"==typeof define&&define.amd?define(e):"undefined"!=typeof window?window.JSZipUtils=e():"undefined"!=typeof global?global.JSZipUtils=e():"undefined"!=typeof self&&(self.JSZipUtils=e())}(function(){var define,module,exports;return (function e(t,n,r){function s(o,u){if(!n[o]){if(!t[o]){var a=typeof require=="function"&&require;if(!u&&a)return a(o,!0);if(i)return i(o,!0);throw new Error("Cannot find module '"+o+"'")}var f=n[o]={exports:{}};t[o][0].call(f.exports,function(e){var n=t[o][1][e];return s(n?n:e)},f,f.exports,e,t,n,r)}return n[o].exports}var i=typeof require=="function"&&require;for(var o=0;o - -(c) 2014 Stuart Knightley, David Duponchel -Dual licenced under the MIT license or GPLv3. See https://raw.github.com/Stuk/jszip-utils/master/LICENSE.markdown. 
- -*/ -!function(a){"object"==typeof exports?module.exports=a():"function"==typeof define&&define.amd?define(a):"undefined"!=typeof window?window.JSZipUtils=a():"undefined"!=typeof global?global.JSZipUtils=a():"undefined"!=typeof self&&(self.JSZipUtils=a())}(function(){return function a(b,c,d){function e(g,h){if(!c[g]){if(!b[g]){var i="function"==typeof require&&require;if(!h&&i)return i(g,!0);if(f)return f(g,!0);throw new Error("Cannot find module '"+g+"'")}var j=c[g]={exports:{}};b[g][0].call(j.exports,function(a){var c=b[g][1][a];return e(c?c:a)},j,j.exports,a,b,c,d)}return c[g].exports}for(var f="function"==typeof require&&require,g=0;g - -(c) 2009-2016 Stuart Knightley -Dual licenced under the MIT license or GPLv3. See https://raw.github.com/Stuk/jszip/master/LICENSE.markdown. - -JSZip uses the library pako released under the MIT license : -https://github.com/nodeca/pako/blob/master/LICENSE -*/ - -(function(f){if(typeof exports==="object"&&typeof module!=="undefined"){module.exports=f()}else if(typeof define==="function"&&define.amd){define([],f)}else{var g;if(typeof window!=="undefined"){g=window}else if(typeof global!=="undefined"){g=global}else if(typeof self!=="undefined"){g=self}else{g=this}g.JSZip = f()}})(function(){var define,module,exports;return (function e(t,n,r){function s(o,u){if(!n[o]){if(!t[o]){var a=typeof require=="function"&&require;if(!u&&a)return a(o,!0);if(i)return i(o,!0);var f=new Error("Cannot find module '"+o+"'");throw f.code="MODULE_NOT_FOUND",f}var l=n[o]={exports:{}};t[o][0].call(l.exports,function(e){var n=t[o][1][e];return s(n?n:e)},l,l.exports,e,t,n,r)}return n[o].exports}var i=typeof require=="function"&&require;for(var o=0;o> 2; - enc2 = ((chr1 & 3) << 4) | (chr2 >> 4); - enc3 = remainingBytes > 1 ? (((chr2 & 15) << 2) | (chr3 >> 6)) : 64; - enc4 = remainingBytes > 2 ? (chr3 & 63) : 64; - - output.push(_keyStr.charAt(enc1) + _keyStr.charAt(enc2) + _keyStr.charAt(enc3) + _keyStr.charAt(enc4)); - - } - - return output.join(""); -}; - -// public method for decoding -exports.decode = function(input) { - var chr1, chr2, chr3; - var enc1, enc2, enc3, enc4; - var i = 0, resultIndex = 0; - - var dataUrlPrefix = "data:"; - - if (input.substr(0, dataUrlPrefix.length) === dataUrlPrefix) { - // This is a common error: people give a data url - // (data:image/png;base64,iVBOR...) with a {base64: true} and - // wonders why things don't work. - // We can detect that the string input looks like a data url but we - // *can't* be sure it is one: removing everything up to the comma would - // be too dangerous. - throw new Error("Invalid base64 input, it looks like a data url."); - } - - input = input.replace(/[^A-Za-z0-9\+\/\=]/g, ""); - - var totalLength = input.length * 3 / 4; - if(input.charAt(input.length - 1) === _keyStr.charAt(64)) { - totalLength--; - } - if(input.charAt(input.length - 2) === _keyStr.charAt(64)) { - totalLength--; - } - if (totalLength % 1 !== 0) { - // totalLength is not an integer, the length does not match a valid - // base64 content. 
That can happen if: - // - the input is not a base64 content - // - the input is *almost* a base64 content, with a extra chars at the - // beginning or at the end - // - the input uses a base64 variant (base64url for example) - throw new Error("Invalid base64 input, bad content length."); - } - var output; - if (support.uint8array) { - output = new Uint8Array(totalLength|0); - } else { - output = new Array(totalLength|0); - } - - while (i < input.length) { - - enc1 = _keyStr.indexOf(input.charAt(i++)); - enc2 = _keyStr.indexOf(input.charAt(i++)); - enc3 = _keyStr.indexOf(input.charAt(i++)); - enc4 = _keyStr.indexOf(input.charAt(i++)); - - chr1 = (enc1 << 2) | (enc2 >> 4); - chr2 = ((enc2 & 15) << 4) | (enc3 >> 2); - chr3 = ((enc3 & 3) << 6) | enc4; - - output[resultIndex++] = chr1; - - if (enc3 !== 64) { - output[resultIndex++] = chr2; - } - if (enc4 !== 64) { - output[resultIndex++] = chr3; - } - - } - - return output; -}; - -},{"./support":30,"./utils":32}],2:[function(require,module,exports){ -'use strict'; - -var external = require("./external"); -var DataWorker = require('./stream/DataWorker'); -var DataLengthProbe = require('./stream/DataLengthProbe'); -var Crc32Probe = require('./stream/Crc32Probe'); -var DataLengthProbe = require('./stream/DataLengthProbe'); - -/** - * Represent a compressed object, with everything needed to decompress it. - * @constructor - * @param {number} compressedSize the size of the data compressed. - * @param {number} uncompressedSize the size of the data after decompression. - * @param {number} crc32 the crc32 of the decompressed file. - * @param {object} compression the type of compression, see lib/compressions.js. - * @param {String|ArrayBuffer|Uint8Array|Buffer} data the compressed data. - */ -function CompressedObject(compressedSize, uncompressedSize, crc32, compression, data) { - this.compressedSize = compressedSize; - this.uncompressedSize = uncompressedSize; - this.crc32 = crc32; - this.compression = compression; - this.compressedContent = data; -} - -CompressedObject.prototype = { - /** - * Create a worker to get the uncompressed content. - * @return {GenericWorker} the worker. - */ - getContentWorker : function () { - var worker = new DataWorker(external.Promise.resolve(this.compressedContent)) - .pipe(this.compression.uncompressWorker()) - .pipe(new DataLengthProbe("data_length")); - - var that = this; - worker.on("end", function () { - if(this.streamInfo['data_length'] !== that.uncompressedSize) { - throw new Error("Bug : uncompressed data size mismatch"); - } - }); - return worker; - }, - /** - * Create a worker to get the compressed content. - * @return {GenericWorker} the worker. - */ - getCompressedWorker : function () { - return new DataWorker(external.Promise.resolve(this.compressedContent)) - .withStreamInfo("compressedSize", this.compressedSize) - .withStreamInfo("uncompressedSize", this.uncompressedSize) - .withStreamInfo("crc32", this.crc32) - .withStreamInfo("compression", this.compression) - ; - } -}; - -/** - * Chain the given worker with other workers to compress the content with the - * given compresion. - * @param {GenericWorker} uncompressedWorker the worker to pipe. - * @param {Object} compression the compression object. - * @param {Object} compressionOptions the options to use when compressing. - * @return {GenericWorker} the new worker compressing the content. 
- */ -CompressedObject.createWorkerFrom = function (uncompressedWorker, compression, compressionOptions) { - return uncompressedWorker - .pipe(new Crc32Probe()) - .pipe(new DataLengthProbe("uncompressedSize")) - .pipe(compression.compressWorker(compressionOptions)) - .pipe(new DataLengthProbe("compressedSize")) - .withStreamInfo("compression", compression); -}; - -module.exports = CompressedObject; - -},{"./external":6,"./stream/Crc32Probe":25,"./stream/DataLengthProbe":26,"./stream/DataWorker":27}],3:[function(require,module,exports){ -'use strict'; - -var GenericWorker = require("./stream/GenericWorker"); - -exports.STORE = { - magic: "\x00\x00", - compressWorker : function (compressionOptions) { - return new GenericWorker("STORE compression"); - }, - uncompressWorker : function () { - return new GenericWorker("STORE decompression"); - } -}; -exports.DEFLATE = require('./flate'); - -},{"./flate":7,"./stream/GenericWorker":28}],4:[function(require,module,exports){ -'use strict'; - -var utils = require('./utils'); - -/** - * The following functions come from pako, from pako/lib/zlib/crc32.js - * released under the MIT license, see pako https://github.com/nodeca/pako/ - */ - -// Use ordinary array, since untyped makes no boost here -function makeTable() { - var c, table = []; - - for(var n =0; n < 256; n++){ - c = n; - for(var k =0; k < 8; k++){ - c = ((c&1) ? (0xEDB88320 ^ (c >>> 1)) : (c >>> 1)); - } - table[n] = c; - } - - return table; -} - -// Create table on load. Just 255 signed longs. Not a problem. -var crcTable = makeTable(); - - -function crc32(crc, buf, len, pos) { - var t = crcTable, end = pos + len; - - crc = crc ^ (-1); - - for (var i = pos; i < end; i++ ) { - crc = (crc >>> 8) ^ t[(crc ^ buf[i]) & 0xFF]; - } - - return (crc ^ (-1)); // >>> 0; -} - -// That's all for the pako functions. - -/** - * Compute the crc32 of a string. - * This is almost the same as the function crc32, but for strings. Using the - * same function for the two use cases leads to horrible performances. - * @param {Number} crc the starting value of the crc. - * @param {String} str the string to use. - * @param {Number} len the length of the string. - * @param {Number} pos the starting position for the crc32 computation. - * @return {Number} the computed crc32. 
- */ -function crc32str(crc, str, len, pos) { - var t = crcTable, end = pos + len; - - crc = crc ^ (-1); - - for (var i = pos; i < end; i++ ) { - crc = (crc >>> 8) ^ t[(crc ^ str.charCodeAt(i)) & 0xFF]; - } - - return (crc ^ (-1)); // >>> 0; -} - -module.exports = function crc32wrapper(input, crc) { - if (typeof input === "undefined" || !input.length) { - return 0; - } - - var isArray = utils.getTypeOf(input) !== "string"; - - if(isArray) { - return crc32(crc|0, input, input.length, 0); - } else { - return crc32str(crc|0, input, input.length, 0); - } -}; - -},{"./utils":32}],5:[function(require,module,exports){ -'use strict'; -exports.base64 = false; -exports.binary = false; -exports.dir = false; -exports.createFolders = true; -exports.date = null; -exports.compression = null; -exports.compressionOptions = null; -exports.comment = null; -exports.unixPermissions = null; -exports.dosPermissions = null; - -},{}],6:[function(require,module,exports){ -/* global Promise */ -'use strict'; - -// load the global object first: -// - it should be better integrated in the system (unhandledRejection in node) -// - the environment may have a custom Promise implementation (see zone.js) -var ES6Promise = null; -if (typeof Promise !== "undefined") { - ES6Promise = Promise; -} else { - ES6Promise = require("lie"); -} - -/** - * Let the user use/change some implementations. - */ -module.exports = { - Promise: ES6Promise -}; - -},{"lie":37}],7:[function(require,module,exports){ -'use strict'; -var USE_TYPEDARRAY = (typeof Uint8Array !== 'undefined') && (typeof Uint16Array !== 'undefined') && (typeof Uint32Array !== 'undefined'); - -var pako = require("pako"); -var utils = require("./utils"); -var GenericWorker = require("./stream/GenericWorker"); - -var ARRAY_TYPE = USE_TYPEDARRAY ? "uint8array" : "array"; - -exports.magic = "\x08\x00"; - -/** - * Create a worker that uses pako to inflate/deflate. - * @constructor - * @param {String} action the name of the pako function to call : either "Deflate" or "Inflate". - * @param {Object} options the options to use when (de)compressing. - */ -function FlateWorker(action, options) { - GenericWorker.call(this, "FlateWorker/" + action); - - this._pako = null; - this._pakoAction = action; - this._pakoOptions = options; - // the `meta` object from the last chunk received - // this allow this worker to pass around metadata - this.meta = {}; -} - -utils.inherits(FlateWorker, GenericWorker); - -/** - * @see GenericWorker.processChunk - */ -FlateWorker.prototype.processChunk = function (chunk) { - this.meta = chunk.meta; - if (this._pako === null) { - this._createPako(); - } - this._pako.push(utils.transformTo(ARRAY_TYPE, chunk.data), false); -}; - -/** - * @see GenericWorker.flush - */ -FlateWorker.prototype.flush = function () { - GenericWorker.prototype.flush.call(this); - if (this._pako === null) { - this._createPako(); - } - this._pako.push([], true); -}; -/** - * @see GenericWorker.cleanUp - */ -FlateWorker.prototype.cleanUp = function () { - GenericWorker.prototype.cleanUp.call(this); - this._pako = null; -}; - -/** - * Create the _pako object. - * TODO: lazy-loading this object isn't the best solution but it's the - * quickest. The best solution is to lazy-load the worker list. See also the - * issue #446. 
- */ -FlateWorker.prototype._createPako = function () { - this._pako = new pako[this._pakoAction]({ - raw: true, - level: this._pakoOptions.level || -1 // default compression - }); - var self = this; - this._pako.onData = function(data) { - self.push({ - data : data, - meta : self.meta - }); - }; -}; - -exports.compressWorker = function (compressionOptions) { - return new FlateWorker("Deflate", compressionOptions); -}; -exports.uncompressWorker = function () { - return new FlateWorker("Inflate", {}); -}; - -},{"./stream/GenericWorker":28,"./utils":32,"pako":38}],8:[function(require,module,exports){ -'use strict'; - -var utils = require('../utils'); -var GenericWorker = require('../stream/GenericWorker'); -var utf8 = require('../utf8'); -var crc32 = require('../crc32'); -var signature = require('../signature'); - -/** - * Transform an integer into a string in hexadecimal. - * @private - * @param {number} dec the number to convert. - * @param {number} bytes the number of bytes to generate. - * @returns {string} the result. - */ -var decToHex = function(dec, bytes) { - var hex = "", i; - for (i = 0; i < bytes; i++) { - hex += String.fromCharCode(dec & 0xff); - dec = dec >>> 8; - } - return hex; -}; - -/** - * Generate the UNIX part of the external file attributes. - * @param {Object} unixPermissions the unix permissions or null. - * @param {Boolean} isDir true if the entry is a directory, false otherwise. - * @return {Number} a 32 bit integer. - * - * adapted from http://unix.stackexchange.com/questions/14705/the-zip-formats-external-file-attribute : - * - * TTTTsstrwxrwxrwx0000000000ADVSHR - * ^^^^____________________________ file type, see zipinfo.c (UNX_*) - * ^^^_________________________ setuid, setgid, sticky - * ^^^^^^^^^________________ permissions - * ^^^^^^^^^^______ not used ? - * ^^^^^^ DOS attribute bits : Archive, Directory, Volume label, System file, Hidden, Read only - */ -var generateUnixExternalFileAttr = function (unixPermissions, isDir) { - - var result = unixPermissions; - if (!unixPermissions) { - // I can't use octal values in strict mode, hence the hexa. - // 040775 => 0x41fd - // 0100664 => 0x81b4 - result = isDir ? 0x41fd : 0x81b4; - } - return (result & 0xFFFF) << 16; -}; - -/** - * Generate the DOS part of the external file attributes. - * @param {Object} dosPermissions the dos permissions or null. - * @param {Boolean} isDir true if the entry is a directory, false otherwise. - * @return {Number} a 32 bit integer. - * - * Bit 0 Read-Only - * Bit 1 Hidden - * Bit 2 System - * Bit 3 Volume Label - * Bit 4 Directory - * Bit 5 Archive - */ -var generateDosExternalFileAttr = function (dosPermissions, isDir) { - - // the dir flag is already set for compatibility - return (dosPermissions || 0) & 0x3F; -}; - -/** - * Generate the various parts used in the construction of the final zip file. - * @param {Object} streamInfo the hash with informations about the compressed file. - * @param {Boolean} streamedContent is the content streamed ? - * @param {Boolean} streamingEnded is the stream finished ? - * @param {number} offset the current offset from the start of the zip file. - * @param {String} platform let's pretend we are this platform (change platform dependents fields) - * @param {Function} encodeFileName the function to encode the file name / comment. - * @return {Object} the zip parts. 
- */ -var generateZipParts = function(streamInfo, streamedContent, streamingEnded, offset, platform, encodeFileName) { - var file = streamInfo['file'], - compression = streamInfo['compression'], - useCustomEncoding = encodeFileName !== utf8.utf8encode, - encodedFileName = utils.transformTo("string", encodeFileName(file.name)), - utfEncodedFileName = utils.transformTo("string", utf8.utf8encode(file.name)), - comment = file.comment, - encodedComment = utils.transformTo("string", encodeFileName(comment)), - utfEncodedComment = utils.transformTo("string", utf8.utf8encode(comment)), - useUTF8ForFileName = utfEncodedFileName.length !== file.name.length, - useUTF8ForComment = utfEncodedComment.length !== comment.length, - dosTime, - dosDate, - extraFields = "", - unicodePathExtraField = "", - unicodeCommentExtraField = "", - dir = file.dir, - date = file.date; - - - var dataInfo = { - crc32 : 0, - compressedSize : 0, - uncompressedSize : 0 - }; - - // if the content is streamed, the sizes/crc32 are only available AFTER - // the end of the stream. - if (!streamedContent || streamingEnded) { - dataInfo.crc32 = streamInfo['crc32']; - dataInfo.compressedSize = streamInfo['compressedSize']; - dataInfo.uncompressedSize = streamInfo['uncompressedSize']; - } - - var bitflag = 0; - if (streamedContent) { - // Bit 3: the sizes/crc32 are set to zero in the local header. - // The correct values are put in the data descriptor immediately - // following the compressed data. - bitflag |= 0x0008; - } - if (!useCustomEncoding && (useUTF8ForFileName || useUTF8ForComment)) { - // Bit 11: Language encoding flag (EFS). - bitflag |= 0x0800; - } - - - var extFileAttr = 0; - var versionMadeBy = 0; - if (dir) { - // dos or unix, we set the dos dir flag - extFileAttr |= 0x00010; - } - if(platform === "UNIX") { - versionMadeBy = 0x031E; // UNIX, version 3.0 - extFileAttr |= generateUnixExternalFileAttr(file.unixPermissions, dir); - } else { // DOS or other, fallback to DOS - versionMadeBy = 0x0014; // DOS, version 2.0 - extFileAttr |= generateDosExternalFileAttr(file.dosPermissions, dir); - } - - // date - // @see http://www.delorie.com/djgpp/doc/rbinter/it/52/13.html - // @see http://www.delorie.com/djgpp/doc/rbinter/it/65/16.html - // @see http://www.delorie.com/djgpp/doc/rbinter/it/66/16.html - - dosTime = date.getUTCHours(); - dosTime = dosTime << 6; - dosTime = dosTime | date.getUTCMinutes(); - dosTime = dosTime << 5; - dosTime = dosTime | date.getUTCSeconds() / 2; - - dosDate = date.getUTCFullYear() - 1980; - dosDate = dosDate << 4; - dosDate = dosDate | (date.getUTCMonth() + 1); - dosDate = dosDate << 5; - dosDate = dosDate | date.getUTCDate(); - - if (useUTF8ForFileName) { - // set the unicode path extra field. unzip needs at least one extra - // field to correctly handle unicode path, so using the path is as good - // as any other information. This could improve the situation with - // other archive managers too. - // This field is usually used without the utf8 flag, with a non - // unicode path in the header (winrar, winzip). This helps (a bit) - // with the messy Windows' default compressed folders feature but - // breaks on p7zip which doesn't seek the unicode path extra field. - // So for now, UTF-8 everywhere ! 
- unicodePathExtraField = - // Version - decToHex(1, 1) + - // NameCRC32 - decToHex(crc32(encodedFileName), 4) + - // UnicodeName - utfEncodedFileName; - - extraFields += - // Info-ZIP Unicode Path Extra Field - "\x75\x70" + - // size - decToHex(unicodePathExtraField.length, 2) + - // content - unicodePathExtraField; - } - - if(useUTF8ForComment) { - - unicodeCommentExtraField = - // Version - decToHex(1, 1) + - // CommentCRC32 - decToHex(crc32(encodedComment), 4) + - // UnicodeName - utfEncodedComment; - - extraFields += - // Info-ZIP Unicode Path Extra Field - "\x75\x63" + - // size - decToHex(unicodeCommentExtraField.length, 2) + - // content - unicodeCommentExtraField; - } - - var header = ""; - - // version needed to extract - header += "\x0A\x00"; - // general purpose bit flag - header += decToHex(bitflag, 2); - // compression method - header += compression.magic; - // last mod file time - header += decToHex(dosTime, 2); - // last mod file date - header += decToHex(dosDate, 2); - // crc-32 - header += decToHex(dataInfo.crc32, 4); - // compressed size - header += decToHex(dataInfo.compressedSize, 4); - // uncompressed size - header += decToHex(dataInfo.uncompressedSize, 4); - // file name length - header += decToHex(encodedFileName.length, 2); - // extra field length - header += decToHex(extraFields.length, 2); - - - var fileRecord = signature.LOCAL_FILE_HEADER + header + encodedFileName + extraFields; - - var dirRecord = signature.CENTRAL_FILE_HEADER + - // version made by (00: DOS) - decToHex(versionMadeBy, 2) + - // file header (common to file and central directory) - header + - // file comment length - decToHex(encodedComment.length, 2) + - // disk number start - "\x00\x00" + - // internal file attributes TODO - "\x00\x00" + - // external file attributes - decToHex(extFileAttr, 4) + - // relative offset of local header - decToHex(offset, 4) + - // file name - encodedFileName + - // extra field - extraFields + - // file comment - encodedComment; - - return { - fileRecord: fileRecord, - dirRecord: dirRecord - }; -}; - -/** - * Generate the EOCD record. - * @param {Number} entriesCount the number of entries in the zip file. - * @param {Number} centralDirLength the length (in bytes) of the central dir. - * @param {Number} localDirLength the length (in bytes) of the local dir. - * @param {String} comment the zip file comment as a binary string. - * @param {Function} encodeFileName the function to encode the comment. - * @return {String} the EOCD record. - */ -var generateCentralDirectoryEnd = function (entriesCount, centralDirLength, localDirLength, comment, encodeFileName) { - var dirEnd = ""; - var encodedComment = utils.transformTo("string", encodeFileName(comment)); - - // end of central dir signature - dirEnd = signature.CENTRAL_DIRECTORY_END + - // number of this disk - "\x00\x00" + - // number of the disk with the start of the central directory - "\x00\x00" + - // total number of entries in the central directory on this disk - decToHex(entriesCount, 2) + - // total number of entries in the central directory - decToHex(entriesCount, 2) + - // size of the central directory 4 bytes - decToHex(centralDirLength, 4) + - // offset of start of central directory with respect to the starting disk number - decToHex(localDirLength, 4) + - // .ZIP file comment length - decToHex(encodedComment.length, 2) + - // .ZIP file comment - encodedComment; - - return dirEnd; -}; - -/** - * Generate data descriptors for a file entry. 
- * @param {Object} streamInfo the hash generated by a worker, containing informations - * on the file entry. - * @return {String} the data descriptors. - */ -var generateDataDescriptors = function (streamInfo) { - var descriptor = ""; - descriptor = signature.DATA_DESCRIPTOR + - // crc-32 4 bytes - decToHex(streamInfo['crc32'], 4) + - // compressed size 4 bytes - decToHex(streamInfo['compressedSize'], 4) + - // uncompressed size 4 bytes - decToHex(streamInfo['uncompressedSize'], 4); - - return descriptor; -}; - - -/** - * A worker to concatenate other workers to create a zip file. - * @param {Boolean} streamFiles `true` to stream the content of the files, - * `false` to accumulate it. - * @param {String} comment the comment to use. - * @param {String} platform the platform to use, "UNIX" or "DOS". - * @param {Function} encodeFileName the function to encode file names and comments. - */ -function ZipFileWorker(streamFiles, comment, platform, encodeFileName) { - GenericWorker.call(this, "ZipFileWorker"); - // The number of bytes written so far. This doesn't count accumulated chunks. - this.bytesWritten = 0; - // The comment of the zip file - this.zipComment = comment; - // The platform "generating" the zip file. - this.zipPlatform = platform; - // the function to encode file names and comments. - this.encodeFileName = encodeFileName; - // Should we stream the content of the files ? - this.streamFiles = streamFiles; - // If `streamFiles` is false, we will need to accumulate the content of the - // files to calculate sizes / crc32 (and write them *before* the content). - // This boolean indicates if we are accumulating chunks (it will change a lot - // during the lifetime of this worker). - this.accumulate = false; - // The buffer receiving chunks when accumulating content. - this.contentBuffer = []; - // The list of generated directory records. - this.dirRecords = []; - // The offset (in bytes) from the beginning of the zip file for the current source. - this.currentSourceOffset = 0; - // The total number of entries in this zip file. - this.entriesCount = 0; - // the name of the file currently being added, null when handling the end of the zip file. - // Used for the emited metadata. - this.currentFile = null; - - - - this._sources = []; -} -utils.inherits(ZipFileWorker, GenericWorker); - -/** - * @see GenericWorker.push - */ -ZipFileWorker.prototype.push = function (chunk) { - - var currentFilePercent = chunk.meta.percent || 0; - var entriesCount = this.entriesCount; - var remainingFiles = this._sources.length; - - if(this.accumulate) { - this.contentBuffer.push(chunk); - } else { - this.bytesWritten += chunk.data.length; - - GenericWorker.prototype.push.call(this, { - data : chunk.data, - meta : { - currentFile : this.currentFile, - percent : entriesCount ? (currentFilePercent + 100 * (entriesCount - remainingFiles - 1)) / entriesCount : 100 - } - }); - } -}; - -/** - * The worker started a new source (an other worker). - * @param {Object} streamInfo the streamInfo object from the new source. 
- */ -ZipFileWorker.prototype.openedSource = function (streamInfo) { - this.currentSourceOffset = this.bytesWritten; - this.currentFile = streamInfo['file'].name; - - var streamedContent = this.streamFiles && !streamInfo['file'].dir; - - // don't stream folders (because they don't have any content) - if(streamedContent) { - var record = generateZipParts(streamInfo, streamedContent, false, this.currentSourceOffset, this.zipPlatform, this.encodeFileName); - this.push({ - data : record.fileRecord, - meta : {percent:0} - }); - } else { - // we need to wait for the whole file before pushing anything - this.accumulate = true; - } -}; - -/** - * The worker finished a source (an other worker). - * @param {Object} streamInfo the streamInfo object from the finished source. - */ -ZipFileWorker.prototype.closedSource = function (streamInfo) { - this.accumulate = false; - var streamedContent = this.streamFiles && !streamInfo['file'].dir; - var record = generateZipParts(streamInfo, streamedContent, true, this.currentSourceOffset, this.zipPlatform, this.encodeFileName); - - this.dirRecords.push(record.dirRecord); - if(streamedContent) { - // after the streamed file, we put data descriptors - this.push({ - data : generateDataDescriptors(streamInfo), - meta : {percent:100} - }); - } else { - // the content wasn't streamed, we need to push everything now - // first the file record, then the content - this.push({ - data : record.fileRecord, - meta : {percent:0} - }); - while(this.contentBuffer.length) { - this.push(this.contentBuffer.shift()); - } - } - this.currentFile = null; -}; - -/** - * @see GenericWorker.flush - */ -ZipFileWorker.prototype.flush = function () { - - var localDirLength = this.bytesWritten; - for(var i = 0; i < this.dirRecords.length; i++) { - this.push({ - data : this.dirRecords[i], - meta : {percent:100} - }); - } - var centralDirLength = this.bytesWritten - localDirLength; - - var dirEnd = generateCentralDirectoryEnd(this.dirRecords.length, centralDirLength, localDirLength, this.zipComment, this.encodeFileName); - - this.push({ - data : dirEnd, - meta : {percent:100} - }); -}; - -/** - * Prepare the next source to be read. 
- */ -ZipFileWorker.prototype.prepareNextSource = function () { - this.previous = this._sources.shift(); - this.openedSource(this.previous.streamInfo); - if (this.isPaused) { - this.previous.pause(); - } else { - this.previous.resume(); - } -}; - -/** - * @see GenericWorker.registerPrevious - */ -ZipFileWorker.prototype.registerPrevious = function (previous) { - this._sources.push(previous); - var self = this; - - previous.on('data', function (chunk) { - self.processChunk(chunk); - }); - previous.on('end', function () { - self.closedSource(self.previous.streamInfo); - if(self._sources.length) { - self.prepareNextSource(); - } else { - self.end(); - } - }); - previous.on('error', function (e) { - self.error(e); - }); - return this; -}; - -/** - * @see GenericWorker.resume - */ -ZipFileWorker.prototype.resume = function () { - if(!GenericWorker.prototype.resume.call(this)) { - return false; - } - - if (!this.previous && this._sources.length) { - this.prepareNextSource(); - return true; - } - if (!this.previous && !this._sources.length && !this.generatedError) { - this.end(); - return true; - } -}; - -/** - * @see GenericWorker.error - */ -ZipFileWorker.prototype.error = function (e) { - var sources = this._sources; - if(!GenericWorker.prototype.error.call(this, e)) { - return false; - } - for(var i = 0; i < sources.length; i++) { - try { - sources[i].error(e); - } catch(e) { - // the `error` exploded, nothing to do - } - } - return true; -}; - -/** - * @see GenericWorker.lock - */ -ZipFileWorker.prototype.lock = function () { - GenericWorker.prototype.lock.call(this); - var sources = this._sources; - for(var i = 0; i < sources.length; i++) { - sources[i].lock(); - } -}; - -module.exports = ZipFileWorker; - -},{"../crc32":4,"../signature":23,"../stream/GenericWorker":28,"../utf8":31,"../utils":32}],9:[function(require,module,exports){ -'use strict'; - -var compressions = require('../compressions'); -var ZipFileWorker = require('./ZipFileWorker'); - -/** - * Find the compression to use. - * @param {String} fileCompression the compression defined at the file level, if any. - * @param {String} zipCompression the compression defined at the load() level. - * @return {Object} the compression object to use. - */ -var getCompression = function (fileCompression, zipCompression) { - - var compressionName = fileCompression || zipCompression; - var compression = compressions[compressionName]; - if (!compression) { - throw new Error(compressionName + " is not a valid compression method !"); - } - return compression; -}; - -/** - * Create a worker to generate a zip file. - * @param {JSZip} zip the JSZip instance at the right root level. - * @param {Object} options to generate the zip file. - * @param {String} comment the comment to use. 
- */ -exports.generateWorker = function (zip, options, comment) { - - var zipFileWorker = new ZipFileWorker(options.streamFiles, comment, options.platform, options.encodeFileName); - var entriesCount = 0; - try { - - zip.forEach(function (relativePath, file) { - entriesCount++; - var compression = getCompression(file.options.compression, options.compression); - var compressionOptions = file.options.compressionOptions || options.compressionOptions || {}; - var dir = file.dir, date = file.date; - - file._compressWorker(compression, compressionOptions) - .withStreamInfo("file", { - name : relativePath, - dir : dir, - date : date, - comment : file.comment || "", - unixPermissions : file.unixPermissions, - dosPermissions : file.dosPermissions - }) - .pipe(zipFileWorker); - }); - zipFileWorker.entriesCount = entriesCount; - } catch (e) { - zipFileWorker.error(e); - } - - return zipFileWorker; -}; - -},{"../compressions":3,"./ZipFileWorker":8}],10:[function(require,module,exports){ -'use strict'; - -/** - * Representation a of zip file in js - * @constructor - */ -function JSZip() { - // if this constructor is used without `new`, it adds `new` before itself: - if(!(this instanceof JSZip)) { - return new JSZip(); - } - - if(arguments.length) { - throw new Error("The constructor with parameters has been removed in JSZip 3.0, please check the upgrade guide."); - } - - // object containing the files : - // { - // "folder/" : {...}, - // "folder/data.txt" : {...} - // } - this.files = {}; - - this.comment = null; - - // Where we are in the hierarchy - this.root = ""; - this.clone = function() { - var newObj = new JSZip(); - for (var i in this) { - if (typeof this[i] !== "function") { - newObj[i] = this[i]; - } - } - return newObj; - }; -} -JSZip.prototype = require('./object'); -JSZip.prototype.loadAsync = require('./load'); -JSZip.support = require('./support'); -JSZip.defaults = require('./defaults'); - -// TODO find a better way to handle this version, -// a require('package.json').version doesn't work with webpack, see #327 -JSZip.version = "3.2.0"; - -JSZip.loadAsync = function (content, options) { - return new JSZip().loadAsync(content, options); -}; - -JSZip.external = require("./external"); -module.exports = JSZip; - -},{"./defaults":5,"./external":6,"./load":11,"./object":15,"./support":30}],11:[function(require,module,exports){ -'use strict'; -var utils = require('./utils'); -var external = require("./external"); -var utf8 = require('./utf8'); -var utils = require('./utils'); -var ZipEntries = require('./zipEntries'); -var Crc32Probe = require('./stream/Crc32Probe'); -var nodejsUtils = require("./nodejsUtils"); - -/** - * Check the CRC32 of an entry. - * @param {ZipEntry} zipEntry the zip entry to check. - * @return {Promise} the result. 
- */ -function checkEntryCRC32(zipEntry) { - return new external.Promise(function (resolve, reject) { - var worker = zipEntry.decompressed.getContentWorker().pipe(new Crc32Probe()); - worker.on("error", function (e) { - reject(e); - }) - .on("end", function () { - if (worker.streamInfo.crc32 !== zipEntry.decompressed.crc32) { - reject(new Error("Corrupted zip : CRC32 mismatch")); - } else { - resolve(); - } - }) - .resume(); - }); -} - -module.exports = function(data, options) { - var zip = this; - options = utils.extend(options || {}, { - base64: false, - checkCRC32: false, - optimizedBinaryString: false, - createFolders: false, - decodeFileName: utf8.utf8decode - }); - - if (nodejsUtils.isNode && nodejsUtils.isStream(data)) { - return external.Promise.reject(new Error("JSZip can't accept a stream when loading a zip file.")); - } - - return utils.prepareContent("the loaded zip file", data, true, options.optimizedBinaryString, options.base64) - .then(function(data) { - var zipEntries = new ZipEntries(options); - zipEntries.load(data); - return zipEntries; - }).then(function checkCRC32(zipEntries) { - var promises = [external.Promise.resolve(zipEntries)]; - var files = zipEntries.files; - if (options.checkCRC32) { - for (var i = 0; i < files.length; i++) { - promises.push(checkEntryCRC32(files[i])); - } - } - return external.Promise.all(promises); - }).then(function addFiles(results) { - var zipEntries = results.shift(); - var files = zipEntries.files; - for (var i = 0; i < files.length; i++) { - var input = files[i]; - zip.file(input.fileNameStr, input.decompressed, { - binary: true, - optimizedBinaryString: true, - date: input.date, - dir: input.dir, - comment : input.fileCommentStr.length ? input.fileCommentStr : null, - unixPermissions : input.unixPermissions, - dosPermissions : input.dosPermissions, - createFolders: options.createFolders - }); - } - if (zipEntries.zipComment.length) { - zip.comment = zipEntries.zipComment; - } - - return zip; - }); -}; - -},{"./external":6,"./nodejsUtils":14,"./stream/Crc32Probe":25,"./utf8":31,"./utils":32,"./zipEntries":33}],12:[function(require,module,exports){ -"use strict"; - -var utils = require('../utils'); -var GenericWorker = require('../stream/GenericWorker'); - -/** - * A worker that use a nodejs stream as source. - * @constructor - * @param {String} filename the name of the file entry for this stream. - * @param {Readable} stream the nodejs stream. - */ -function NodejsStreamInputAdapter(filename, stream) { - GenericWorker.call(this, "Nodejs stream input adapter for " + filename); - this._upstreamEnded = false; - this._bindStream(stream); -} - -utils.inherits(NodejsStreamInputAdapter, GenericWorker); - -/** - * Prepare the stream and bind the callbacks on it. - * Do this ASAP on node 0.10 ! A lazy binding doesn't always work. - * @param {Stream} stream the nodejs stream to use. 
- */ -NodejsStreamInputAdapter.prototype._bindStream = function (stream) { - var self = this; - this._stream = stream; - stream.pause(); - stream - .on("data", function (chunk) { - self.push({ - data: chunk, - meta : { - percent : 0 - } - }); - }) - .on("error", function (e) { - if(self.isPaused) { - this.generatedError = e; - } else { - self.error(e); - } - }) - .on("end", function () { - if(self.isPaused) { - self._upstreamEnded = true; - } else { - self.end(); - } - }); -}; -NodejsStreamInputAdapter.prototype.pause = function () { - if(!GenericWorker.prototype.pause.call(this)) { - return false; - } - this._stream.pause(); - return true; -}; -NodejsStreamInputAdapter.prototype.resume = function () { - if(!GenericWorker.prototype.resume.call(this)) { - return false; - } - - if(this._upstreamEnded) { - this.end(); - } else { - this._stream.resume(); - } - - return true; -}; - -module.exports = NodejsStreamInputAdapter; - -},{"../stream/GenericWorker":28,"../utils":32}],13:[function(require,module,exports){ -'use strict'; - -var Readable = require('readable-stream').Readable; - -var utils = require('../utils'); -utils.inherits(NodejsStreamOutputAdapter, Readable); - -/** -* A nodejs stream using a worker as source. -* @see the SourceWrapper in http://nodejs.org/api/stream.html -* @constructor -* @param {StreamHelper} helper the helper wrapping the worker -* @param {Object} options the nodejs stream options -* @param {Function} updateCb the update callback. -*/ -function NodejsStreamOutputAdapter(helper, options, updateCb) { - Readable.call(this, options); - this._helper = helper; - - var self = this; - helper.on("data", function (data, meta) { - if (!self.push(data)) { - self._helper.pause(); - } - if(updateCb) { - updateCb(meta); - } - }) - .on("error", function(e) { - self.emit('error', e); - }) - .on("end", function () { - self.push(null); - }); -} - - -NodejsStreamOutputAdapter.prototype._read = function() { - this._helper.resume(); -}; - -module.exports = NodejsStreamOutputAdapter; - -},{"../utils":32,"readable-stream":16}],14:[function(require,module,exports){ -'use strict'; - -module.exports = { - /** - * True if this is running in Nodejs, will be undefined in a browser. - * In a browser, browserify won't include this file and the whole module - * will be resolved an empty object. - */ - isNode : typeof Buffer !== "undefined", - /** - * Create a new nodejs Buffer from an existing content. - * @param {Object} data the data to pass to the constructor. - * @param {String} encoding the encoding to use. - * @return {Buffer} a new Buffer. - */ - newBufferFrom: function(data, encoding) { - if (Buffer.from && Buffer.from !== Uint8Array.from) { - return Buffer.from(data, encoding); - } else { - if (typeof data === "number") { - // Safeguard for old Node.js versions. On newer versions, - // Buffer.from(number) / Buffer(number, encoding) already throw. - throw new Error("The \"data\" argument must not be a number"); - } - return new Buffer(data, encoding); - } - }, - /** - * Create a new nodejs Buffer with the specified size. - * @param {Integer} size the size of the buffer. - * @return {Buffer} a new Buffer. - */ - allocBuffer: function (size) { - if (Buffer.alloc) { - return Buffer.alloc(size); - } else { - var buf = new Buffer(size); - buf.fill(0); - return buf; - } - }, - /** - * Find out if an object is a Buffer. - * @param {Object} b the object to test. - * @return {Boolean} true if the object is a Buffer, false otherwise. 
- */ - isBuffer : function(b){ - return Buffer.isBuffer(b); - }, - - isStream : function (obj) { - return obj && - typeof obj.on === "function" && - typeof obj.pause === "function" && - typeof obj.resume === "function"; - } -}; - -},{}],15:[function(require,module,exports){ -'use strict'; -var utf8 = require('./utf8'); -var utils = require('./utils'); -var GenericWorker = require('./stream/GenericWorker'); -var StreamHelper = require('./stream/StreamHelper'); -var defaults = require('./defaults'); -var CompressedObject = require('./compressedObject'); -var ZipObject = require('./zipObject'); -var generate = require("./generate"); -var nodejsUtils = require("./nodejsUtils"); -var NodejsStreamInputAdapter = require("./nodejs/NodejsStreamInputAdapter"); - - -/** - * Add a file in the current folder. - * @private - * @param {string} name the name of the file - * @param {String|ArrayBuffer|Uint8Array|Buffer} data the data of the file - * @param {Object} originalOptions the options of the file - * @return {Object} the new file. - */ -var fileAdd = function(name, data, originalOptions) { - // be sure sub folders exist - var dataType = utils.getTypeOf(data), - parent; - - - /* - * Correct options. - */ - - var o = utils.extend(originalOptions || {}, defaults); - o.date = o.date || new Date(); - if (o.compression !== null) { - o.compression = o.compression.toUpperCase(); - } - - if (typeof o.unixPermissions === "string") { - o.unixPermissions = parseInt(o.unixPermissions, 8); - } - - // UNX_IFDIR 0040000 see zipinfo.c - if (o.unixPermissions && (o.unixPermissions & 0x4000)) { - o.dir = true; - } - // Bit 4 Directory - if (o.dosPermissions && (o.dosPermissions & 0x0010)) { - o.dir = true; - } - - if (o.dir) { - name = forceTrailingSlash(name); - } - if (o.createFolders && (parent = parentFolder(name))) { - folderAdd.call(this, parent, true); - } - - var isUnicodeString = dataType === "string" && o.binary === false && o.base64 === false; - if (!originalOptions || typeof originalOptions.binary === "undefined") { - o.binary = !isUnicodeString; - } - - - var isCompressedEmpty = (data instanceof CompressedObject) && data.uncompressedSize === 0; - - if (isCompressedEmpty || o.dir || !data || data.length === 0) { - o.base64 = false; - o.binary = true; - data = ""; - o.compression = "STORE"; - dataType = "string"; - } - - /* - * Convert content to fit. - */ - - var zipObjectContent = null; - if (data instanceof CompressedObject || data instanceof GenericWorker) { - zipObjectContent = data; - } else if (nodejsUtils.isNode && nodejsUtils.isStream(data)) { - zipObjectContent = new NodejsStreamInputAdapter(name, data); - } else { - zipObjectContent = utils.prepareContent(name, data, o.binary, o.optimizedBinaryString, o.base64); - } - - var object = new ZipObject(name, zipObjectContent, o); - this.files[name] = object; - /* - TODO: we can't throw an exception because we have async promises - (we can have a promise of a Date() for example) but returning a - promise is useless because file(name, data) returns the JSZip - object for chaining. Should we break that to allow the user - to catch the error ? - - return external.Promise.resolve(zipObjectContent) - .then(function () { - return object; - }); - */ -}; - -/** - * Find the parent folder of the path. 
- * @private - * @param {string} path the path to use - * @return {string} the parent folder, or "" - */ -var parentFolder = function (path) { - if (path.slice(-1) === '/') { - path = path.substring(0, path.length - 1); - } - var lastSlash = path.lastIndexOf('/'); - return (lastSlash > 0) ? path.substring(0, lastSlash) : ""; -}; - -/** - * Returns the path with a slash at the end. - * @private - * @param {String} path the path to check. - * @return {String} the path with a trailing slash. - */ -var forceTrailingSlash = function(path) { - // Check the name ends with a / - if (path.slice(-1) !== "/") { - path += "/"; // IE doesn't like substr(-1) - } - return path; -}; - -/** - * Add a (sub) folder in the current folder. - * @private - * @param {string} name the folder's name - * @param {boolean=} [createFolders] If true, automatically create sub - * folders. Defaults to false. - * @return {Object} the new folder. - */ -var folderAdd = function(name, createFolders) { - createFolders = (typeof createFolders !== 'undefined') ? createFolders : defaults.createFolders; - - name = forceTrailingSlash(name); - - // Does this folder already exist? - if (!this.files[name]) { - fileAdd.call(this, name, null, { - dir: true, - createFolders: createFolders - }); - } - return this.files[name]; -}; - -/** -* Cross-window, cross-Node-context regular expression detection -* @param {Object} object Anything -* @return {Boolean} true if the object is a regular expression, -* false otherwise -*/ -function isRegExp(object) { - return Object.prototype.toString.call(object) === "[object RegExp]"; -} - -// return the actual prototype of JSZip -var out = { - /** - * @see loadAsync - */ - load: function() { - throw new Error("This method has been removed in JSZip 3.0, please check the upgrade guide."); - }, - - - /** - * Call a callback function for each entry at this folder level. - * @param {Function} cb the callback function: - * function (relativePath, file) {...} - * It takes 2 arguments : the relative path and the file. - */ - forEach: function(cb) { - var filename, relativePath, file; - for (filename in this.files) { - if (!this.files.hasOwnProperty(filename)) { - continue; - } - file = this.files[filename]; - relativePath = filename.slice(this.root.length, filename.length); - if (relativePath && filename.slice(0, this.root.length) === this.root) { // the file is in the current root - cb(relativePath, file); // TODO reverse the parameters ? need to be clean AND consistent with the filter search fn... - } - } - }, - - /** - * Filter nested files/folders with the specified function. - * @param {Function} search the predicate to use : - * function (relativePath, file) {...} - * It takes 2 arguments : the relative path and the file. - * @return {Array} An array of matching elements. - */ - filter: function(search) { - var result = []; - this.forEach(function (relativePath, entry) { - if (search(relativePath, entry)) { // the file matches the function - result.push(entry); - } - - }); - return result; - }, - - /** - * Add a file to the zip file, or search a file. - * @param {string|RegExp} name The name of the file to add (if data is defined), - * the name of the file to find (if no data) or a regex to match files. - * @param {String|ArrayBuffer|Uint8Array|Buffer} data The file data, either raw or base64 encoded - * @param {Object} o File options - * @return {JSZip|Object|Array} this JSZip object (when adding a file), - * a file (when searching by string) or an array of files (when searching by regex). 
- */ - file: function(name, data, o) { - if (arguments.length === 1) { - if (isRegExp(name)) { - var regexp = name; - return this.filter(function(relativePath, file) { - return !file.dir && regexp.test(relativePath); - }); - } - else { // text - var obj = this.files[this.root + name]; - if (obj && !obj.dir) { - return obj; - } else { - return null; - } - } - } - else { // more than one argument : we have data ! - name = this.root + name; - fileAdd.call(this, name, data, o); - } - return this; - }, - - /** - * Add a directory to the zip file, or search. - * @param {String|RegExp} arg The name of the directory to add, or a regex to search folders. - * @return {JSZip} an object with the new directory as the root, or an array containing matching folders. - */ - folder: function(arg) { - if (!arg) { - return this; - } - - if (isRegExp(arg)) { - return this.filter(function(relativePath, file) { - return file.dir && arg.test(relativePath); - }); - } - - // else, name is a new folder - var name = this.root + arg; - var newFolder = folderAdd.call(this, name); - - // Allow chaining by returning a new object with this folder as the root - var ret = this.clone(); - ret.root = newFolder.name; - return ret; - }, - - /** - * Delete a file, or a directory and all sub-files, from the zip - * @param {string} name the name of the file to delete - * @return {JSZip} this JSZip object - */ - remove: function(name) { - name = this.root + name; - var file = this.files[name]; - if (!file) { - // Look for any folders - if (name.slice(-1) !== "/") { - name += "/"; - } - file = this.files[name]; - } - - if (file && !file.dir) { - // file - delete this.files[name]; - } else { - // maybe a folder, delete recursively - var kids = this.filter(function(relativePath, file) { - return file.name.slice(0, name.length) === name; - }); - for (var i = 0; i < kids.length; i++) { - delete this.files[kids[i].name]; - } - } - - return this; - }, - - /** - * Generate the complete zip file - * @param {Object} options the options to generate the zip file : - * - compression, "STORE" by default. - * - type, "base64" by default. Values are : string, base64, uint8array, arraybuffer, blob. - * @return {String|Uint8Array|ArrayBuffer|Buffer|Blob} the zip file - */ - generate: function(options) { - throw new Error("This method has been removed in JSZip 3.0, please check the upgrade guide."); - }, - - /** - * Generate the complete zip file as an internal stream. - * @param {Object} options the options to generate the zip file : - * - compression, "STORE" by default. - * - type, "base64" by default. Values are : string, base64, uint8array, arraybuffer, blob. - * @return {StreamHelper} the streamed zip file. - */ - generateInternalStream: function(options) { - var worker, opts = {}; - try { - opts = utils.extend(options || {}, { - streamFiles: false, - compression: "STORE", - compressionOptions : null, - type: "", - platform: "DOS", - comment: null, - mimeType: 'application/zip', - encodeFileName: utf8.utf8encode - }); - - opts.type = opts.type.toLowerCase(); - opts.compression = opts.compression.toUpperCase(); - - // "binarystring" is prefered but the internals use "string". 
- if(opts.type === "binarystring") { - opts.type = "string"; - } - - if (!opts.type) { - throw new Error("No output type specified."); - } - - utils.checkSupport(opts.type); - - // accept nodejs `process.platform` - if( - opts.platform === 'darwin' || - opts.platform === 'freebsd' || - opts.platform === 'linux' || - opts.platform === 'sunos' - ) { - opts.platform = "UNIX"; - } - if (opts.platform === 'win32') { - opts.platform = "DOS"; - } - - var comment = opts.comment || this.comment || ""; - worker = generate.generateWorker(this, opts, comment); - } catch (e) { - worker = new GenericWorker("error"); - worker.error(e); - } - return new StreamHelper(worker, opts.type || "string", opts.mimeType); - }, - /** - * Generate the complete zip file asynchronously. - * @see generateInternalStream - */ - generateAsync: function(options, onUpdate) { - return this.generateInternalStream(options).accumulate(onUpdate); - }, - /** - * Generate the complete zip file asynchronously. - * @see generateInternalStream - */ - generateNodeStream: function(options, onUpdate) { - options = options || {}; - if (!options.type) { - options.type = "nodebuffer"; - } - return this.generateInternalStream(options).toNodejsStream(onUpdate); - } -}; -module.exports = out; - -},{"./compressedObject":2,"./defaults":5,"./generate":9,"./nodejs/NodejsStreamInputAdapter":12,"./nodejsUtils":14,"./stream/GenericWorker":28,"./stream/StreamHelper":29,"./utf8":31,"./utils":32,"./zipObject":35}],16:[function(require,module,exports){ -/* - * This file is used by module bundlers (browserify/webpack/etc) when - * including a stream implementation. We use "readable-stream" to get a - * consistent behavior between nodejs versions but bundlers often have a shim - * for "stream". Using this shim greatly improve the compatibility and greatly - * reduce the final size of the bundle (only one stream implementation, not - * two). 
- */ -module.exports = require("stream"); - -},{"stream":undefined}],17:[function(require,module,exports){ -'use strict'; -var DataReader = require('./DataReader'); -var utils = require('../utils'); - -function ArrayReader(data) { - DataReader.call(this, data); - for(var i = 0; i < this.data.length; i++) { - data[i] = data[i] & 0xFF; - } -} -utils.inherits(ArrayReader, DataReader); -/** - * @see DataReader.byteAt - */ -ArrayReader.prototype.byteAt = function(i) { - return this.data[this.zero + i]; -}; -/** - * @see DataReader.lastIndexOfSignature - */ -ArrayReader.prototype.lastIndexOfSignature = function(sig) { - var sig0 = sig.charCodeAt(0), - sig1 = sig.charCodeAt(1), - sig2 = sig.charCodeAt(2), - sig3 = sig.charCodeAt(3); - for (var i = this.length - 4; i >= 0; --i) { - if (this.data[i] === sig0 && this.data[i + 1] === sig1 && this.data[i + 2] === sig2 && this.data[i + 3] === sig3) { - return i - this.zero; - } - } - - return -1; -}; -/** - * @see DataReader.readAndCheckSignature - */ -ArrayReader.prototype.readAndCheckSignature = function (sig) { - var sig0 = sig.charCodeAt(0), - sig1 = sig.charCodeAt(1), - sig2 = sig.charCodeAt(2), - sig3 = sig.charCodeAt(3), - data = this.readData(4); - return sig0 === data[0] && sig1 === data[1] && sig2 === data[2] && sig3 === data[3]; -}; -/** - * @see DataReader.readData - */ -ArrayReader.prototype.readData = function(size) { - this.checkOffset(size); - if(size === 0) { - return []; - } - var result = this.data.slice(this.zero + this.index, this.zero + this.index + size); - this.index += size; - return result; -}; -module.exports = ArrayReader; - -},{"../utils":32,"./DataReader":18}],18:[function(require,module,exports){ -'use strict'; -var utils = require('../utils'); - -function DataReader(data) { - this.data = data; // type : see implementation - this.length = data.length; - this.index = 0; - this.zero = 0; -} -DataReader.prototype = { - /** - * Check that the offset will not go too far. - * @param {string} offset the additional offset to check. - * @throws {Error} an Error if the offset is out of bounds. - */ - checkOffset: function(offset) { - this.checkIndex(this.index + offset); - }, - /** - * Check that the specified index will not be too far. - * @param {string} newIndex the index to check. - * @throws {Error} an Error if the index is out of bounds. - */ - checkIndex: function(newIndex) { - if (this.length < this.zero + newIndex || newIndex < 0) { - throw new Error("End of data reached (data length = " + this.length + ", asked index = " + (newIndex) + "). Corrupted zip ?"); - } - }, - /** - * Change the index. - * @param {number} newIndex The new index. - * @throws {Error} if the new index is out of the data. - */ - setIndex: function(newIndex) { - this.checkIndex(newIndex); - this.index = newIndex; - }, - /** - * Skip the next n bytes. - * @param {number} n the number of bytes to skip. - * @throws {Error} if the new index is out of the data. - */ - skip: function(n) { - this.setIndex(this.index + n); - }, - /** - * Get the byte at the specified index. - * @param {number} i the index to use. - * @return {number} a byte. - */ - byteAt: function(i) { - // see implementations - }, - /** - * Get the next number with a given byte size. - * @param {number} size the number of bytes to read. - * @return {number} the corresponding number. 
- */ - readInt: function(size) { - var result = 0, - i; - this.checkOffset(size); - for (i = this.index + size - 1; i >= this.index; i--) { - result = (result << 8) + this.byteAt(i); - } - this.index += size; - return result; - }, - /** - * Get the next string with a given byte size. - * @param {number} size the number of bytes to read. - * @return {string} the corresponding string. - */ - readString: function(size) { - return utils.transformTo("string", this.readData(size)); - }, - /** - * Get raw data without conversion, bytes. - * @param {number} size the number of bytes to read. - * @return {Object} the raw data, implementation specific. - */ - readData: function(size) { - // see implementations - }, - /** - * Find the last occurence of a zip signature (4 bytes). - * @param {string} sig the signature to find. - * @return {number} the index of the last occurence, -1 if not found. - */ - lastIndexOfSignature: function(sig) { - // see implementations - }, - /** - * Read the signature (4 bytes) at the current position and compare it with sig. - * @param {string} sig the expected signature - * @return {boolean} true if the signature matches, false otherwise. - */ - readAndCheckSignature: function(sig) { - // see implementations - }, - /** - * Get the next date. - * @return {Date} the date. - */ - readDate: function() { - var dostime = this.readInt(4); - return new Date(Date.UTC( - ((dostime >> 25) & 0x7f) + 1980, // year - ((dostime >> 21) & 0x0f) - 1, // month - (dostime >> 16) & 0x1f, // day - (dostime >> 11) & 0x1f, // hour - (dostime >> 5) & 0x3f, // minute - (dostime & 0x1f) << 1)); // second - } -}; -module.exports = DataReader; - -},{"../utils":32}],19:[function(require,module,exports){ -'use strict'; -var Uint8ArrayReader = require('./Uint8ArrayReader'); -var utils = require('../utils'); - -function NodeBufferReader(data) { - Uint8ArrayReader.call(this, data); -} -utils.inherits(NodeBufferReader, Uint8ArrayReader); - -/** - * @see DataReader.readData - */ -NodeBufferReader.prototype.readData = function(size) { - this.checkOffset(size); - var result = this.data.slice(this.zero + this.index, this.zero + this.index + size); - this.index += size; - return result; -}; -module.exports = NodeBufferReader; - -},{"../utils":32,"./Uint8ArrayReader":21}],20:[function(require,module,exports){ -'use strict'; -var DataReader = require('./DataReader'); -var utils = require('../utils'); - -function StringReader(data) { - DataReader.call(this, data); -} -utils.inherits(StringReader, DataReader); -/** - * @see DataReader.byteAt - */ -StringReader.prototype.byteAt = function(i) { - return this.data.charCodeAt(this.zero + i); -}; -/** - * @see DataReader.lastIndexOfSignature - */ -StringReader.prototype.lastIndexOfSignature = function(sig) { - return this.data.lastIndexOf(sig) - this.zero; -}; -/** - * @see DataReader.readAndCheckSignature - */ -StringReader.prototype.readAndCheckSignature = function (sig) { - var data = this.readData(4); - return sig === data; -}; -/** - * @see DataReader.readData - */ -StringReader.prototype.readData = function(size) { - this.checkOffset(size); - // this will work because the constructor applied the "& 0xff" mask. 
- var result = this.data.slice(this.zero + this.index, this.zero + this.index + size); - this.index += size; - return result; -}; -module.exports = StringReader; - -},{"../utils":32,"./DataReader":18}],21:[function(require,module,exports){ -'use strict'; -var ArrayReader = require('./ArrayReader'); -var utils = require('../utils'); - -function Uint8ArrayReader(data) { - ArrayReader.call(this, data); -} -utils.inherits(Uint8ArrayReader, ArrayReader); -/** - * @see DataReader.readData - */ -Uint8ArrayReader.prototype.readData = function(size) { - this.checkOffset(size); - if(size === 0) { - // in IE10, when using subarray(idx, idx), we get the array [0x00] instead of []. - return new Uint8Array(0); - } - var result = this.data.subarray(this.zero + this.index, this.zero + this.index + size); - this.index += size; - return result; -}; -module.exports = Uint8ArrayReader; - -},{"../utils":32,"./ArrayReader":17}],22:[function(require,module,exports){ -'use strict'; - -var utils = require('../utils'); -var support = require('../support'); -var ArrayReader = require('./ArrayReader'); -var StringReader = require('./StringReader'); -var NodeBufferReader = require('./NodeBufferReader'); -var Uint8ArrayReader = require('./Uint8ArrayReader'); - -/** - * Create a reader adapted to the data. - * @param {String|ArrayBuffer|Uint8Array|Buffer} data the data to read. - * @return {DataReader} the data reader. - */ -module.exports = function (data) { - var type = utils.getTypeOf(data); - utils.checkSupport(type); - if (type === "string" && !support.uint8array) { - return new StringReader(data); - } - if (type === "nodebuffer") { - return new NodeBufferReader(data); - } - if (support.uint8array) { - return new Uint8ArrayReader(utils.transformTo("uint8array", data)); - } - return new ArrayReader(utils.transformTo("array", data)); -}; - -},{"../support":30,"../utils":32,"./ArrayReader":17,"./NodeBufferReader":19,"./StringReader":20,"./Uint8ArrayReader":21}],23:[function(require,module,exports){ -'use strict'; -exports.LOCAL_FILE_HEADER = "PK\x03\x04"; -exports.CENTRAL_FILE_HEADER = "PK\x01\x02"; -exports.CENTRAL_DIRECTORY_END = "PK\x05\x06"; -exports.ZIP64_CENTRAL_DIRECTORY_LOCATOR = "PK\x06\x07"; -exports.ZIP64_CENTRAL_DIRECTORY_END = "PK\x06\x06"; -exports.DATA_DESCRIPTOR = "PK\x07\x08"; - -},{}],24:[function(require,module,exports){ -'use strict'; - -var GenericWorker = require('./GenericWorker'); -var utils = require('../utils'); - -/** - * A worker which convert chunks to a specified type. - * @constructor - * @param {String} destType the destination type. - */ -function ConvertWorker(destType) { - GenericWorker.call(this, "ConvertWorker to " + destType); - this.destType = destType; -} -utils.inherits(ConvertWorker, GenericWorker); - -/** - * @see GenericWorker.processChunk - */ -ConvertWorker.prototype.processChunk = function (chunk) { - this.push({ - data : utils.transformTo(this.destType, chunk.data), - meta : chunk.meta - }); -}; -module.exports = ConvertWorker; - -},{"../utils":32,"./GenericWorker":28}],25:[function(require,module,exports){ -'use strict'; - -var GenericWorker = require('./GenericWorker'); -var crc32 = require('../crc32'); -var utils = require('../utils'); - -/** - * A worker which calculate the crc32 of the data flowing through. 
- * @constructor - */ -function Crc32Probe() { - GenericWorker.call(this, "Crc32Probe"); - this.withStreamInfo("crc32", 0); -} -utils.inherits(Crc32Probe, GenericWorker); - -/** - * @see GenericWorker.processChunk - */ -Crc32Probe.prototype.processChunk = function (chunk) { - this.streamInfo.crc32 = crc32(chunk.data, this.streamInfo.crc32 || 0); - this.push(chunk); -}; -module.exports = Crc32Probe; - -},{"../crc32":4,"../utils":32,"./GenericWorker":28}],26:[function(require,module,exports){ -'use strict'; - -var utils = require('../utils'); -var GenericWorker = require('./GenericWorker'); - -/** - * A worker which calculate the total length of the data flowing through. - * @constructor - * @param {String} propName the name used to expose the length - */ -function DataLengthProbe(propName) { - GenericWorker.call(this, "DataLengthProbe for " + propName); - this.propName = propName; - this.withStreamInfo(propName, 0); -} -utils.inherits(DataLengthProbe, GenericWorker); - -/** - * @see GenericWorker.processChunk - */ -DataLengthProbe.prototype.processChunk = function (chunk) { - if(chunk) { - var length = this.streamInfo[this.propName] || 0; - this.streamInfo[this.propName] = length + chunk.data.length; - } - GenericWorker.prototype.processChunk.call(this, chunk); -}; -module.exports = DataLengthProbe; - - -},{"../utils":32,"./GenericWorker":28}],27:[function(require,module,exports){ -'use strict'; - -var utils = require('../utils'); -var GenericWorker = require('./GenericWorker'); - -// the size of the generated chunks -// TODO expose this as a public variable -var DEFAULT_BLOCK_SIZE = 16 * 1024; - -/** - * A worker that reads a content and emits chunks. - * @constructor - * @param {Promise} dataP the promise of the data to split - */ -function DataWorker(dataP) { - GenericWorker.call(this, "DataWorker"); - var self = this; - this.dataIsReady = false; - this.index = 0; - this.max = 0; - this.data = null; - this.type = ""; - - this._tickScheduled = false; - - dataP.then(function (data) { - self.dataIsReady = true; - self.data = data; - self.max = data && data.length || 0; - self.type = utils.getTypeOf(data); - if(!self.isPaused) { - self._tickAndRepeat(); - } - }, function (e) { - self.error(e); - }); -} - -utils.inherits(DataWorker, GenericWorker); - -/** - * @see GenericWorker.cleanUp - */ -DataWorker.prototype.cleanUp = function () { - GenericWorker.prototype.cleanUp.call(this); - this.data = null; -}; - -/** - * @see GenericWorker.resume - */ -DataWorker.prototype.resume = function () { - if(!GenericWorker.prototype.resume.call(this)) { - return false; - } - - if (!this._tickScheduled && this.dataIsReady) { - this._tickScheduled = true; - utils.delay(this._tickAndRepeat, [], this); - } - return true; -}; - -/** - * Trigger a tick a schedule an other call to this function. - */ -DataWorker.prototype._tickAndRepeat = function() { - this._tickScheduled = false; - if(this.isPaused || this.isFinished) { - return; - } - this._tick(); - if(!this.isFinished) { - utils.delay(this._tickAndRepeat, [], this); - this._tickScheduled = true; - } -}; - -/** - * Read and push a chunk. 
- */ -DataWorker.prototype._tick = function() { - - if(this.isPaused || this.isFinished) { - return false; - } - - var size = DEFAULT_BLOCK_SIZE; - var data = null, nextIndex = Math.min(this.max, this.index + size); - if (this.index >= this.max) { - // EOF - return this.end(); - } else { - switch(this.type) { - case "string": - data = this.data.substring(this.index, nextIndex); - break; - case "uint8array": - data = this.data.subarray(this.index, nextIndex); - break; - case "array": - case "nodebuffer": - data = this.data.slice(this.index, nextIndex); - break; - } - this.index = nextIndex; - return this.push({ - data : data, - meta : { - percent : this.max ? this.index / this.max * 100 : 0 - } - }); - } -}; - -module.exports = DataWorker; - -},{"../utils":32,"./GenericWorker":28}],28:[function(require,module,exports){ -'use strict'; - -/** - * A worker that does nothing but passing chunks to the next one. This is like - * a nodejs stream but with some differences. On the good side : - * - it works on IE 6-9 without any issue / polyfill - * - it weights less than the full dependencies bundled with browserify - * - it forwards errors (no need to declare an error handler EVERYWHERE) - * - * A chunk is an object with 2 attributes : `meta` and `data`. The former is an - * object containing anything (`percent` for example), see each worker for more - * details. The latter is the real data (String, Uint8Array, etc). - * - * @constructor - * @param {String} name the name of the stream (mainly used for debugging purposes) - */ -function GenericWorker(name) { - // the name of the worker - this.name = name || "default"; - // an object containing metadata about the workers chain - this.streamInfo = {}; - // an error which happened when the worker was paused - this.generatedError = null; - // an object containing metadata to be merged by this worker into the general metadata - this.extraStreamInfo = {}; - // true if the stream is paused (and should not do anything), false otherwise - this.isPaused = true; - // true if the stream is finished (and should not do anything), false otherwise - this.isFinished = false; - // true if the stream is locked to prevent further structure updates (pipe), false otherwise - this.isLocked = false; - // the event listeners - this._listeners = { - 'data':[], - 'end':[], - 'error':[] - }; - // the previous worker, if any - this.previous = null; -} - -GenericWorker.prototype = { - /** - * Push a chunk to the next workers. - * @param {Object} chunk the chunk to push - */ - push : function (chunk) { - this.emit("data", chunk); - }, - /** - * End the stream. - * @return {Boolean} true if this call ended the worker, false otherwise. - */ - end : function () { - if (this.isFinished) { - return false; - } - - this.flush(); - try { - this.emit("end"); - this.cleanUp(); - this.isFinished = true; - } catch (e) { - this.emit("error", e); - } - return true; - }, - /** - * End the stream with an error. - * @param {Error} e the error which caused the premature end. - * @return {Boolean} true if this call ended the worker with an error, false otherwise. - */ - error : function (e) { - if (this.isFinished) { - return false; - } - - if(this.isPaused) { - this.generatedError = e; - } else { - this.isFinished = true; - - this.emit("error", e); - - // in the workers chain exploded in the middle of the chain, - // the error event will go downward but we also need to notify - // workers upward that there has been an error. 
- if(this.previous) { - this.previous.error(e); - } - - this.cleanUp(); - } - return true; - }, - /** - * Add a callback on an event. - * @param {String} name the name of the event (data, end, error) - * @param {Function} listener the function to call when the event is triggered - * @return {GenericWorker} the current object for chainability - */ - on : function (name, listener) { - this._listeners[name].push(listener); - return this; - }, - /** - * Clean any references when a worker is ending. - */ - cleanUp : function () { - this.streamInfo = this.generatedError = this.extraStreamInfo = null; - this._listeners = []; - }, - /** - * Trigger an event. This will call registered callback with the provided arg. - * @param {String} name the name of the event (data, end, error) - * @param {Object} arg the argument to call the callback with. - */ - emit : function (name, arg) { - if (this._listeners[name]) { - for(var i = 0; i < this._listeners[name].length; i++) { - this._listeners[name][i].call(this, arg); - } - } - }, - /** - * Chain a worker with an other. - * @param {Worker} next the worker receiving events from the current one. - * @return {worker} the next worker for chainability - */ - pipe : function (next) { - return next.registerPrevious(this); - }, - /** - * Same as `pipe` in the other direction. - * Using an API with `pipe(next)` is very easy. - * Implementing the API with the point of view of the next one registering - * a source is easier, see the ZipFileWorker. - * @param {Worker} previous the previous worker, sending events to this one - * @return {Worker} the current worker for chainability - */ - registerPrevious : function (previous) { - if (this.isLocked) { - throw new Error("The stream '" + this + "' has already been used."); - } - - // sharing the streamInfo... - this.streamInfo = previous.streamInfo; - // ... and adding our own bits - this.mergeStreamInfo(); - this.previous = previous; - var self = this; - previous.on('data', function (chunk) { - self.processChunk(chunk); - }); - previous.on('end', function () { - self.end(); - }); - previous.on('error', function (e) { - self.error(e); - }); - return this; - }, - /** - * Pause the stream so it doesn't send events anymore. - * @return {Boolean} true if this call paused the worker, false otherwise. - */ - pause : function () { - if(this.isPaused || this.isFinished) { - return false; - } - this.isPaused = true; - - if(this.previous) { - this.previous.pause(); - } - return true; - }, - /** - * Resume a paused stream. - * @return {Boolean} true if this call resumed the worker, false otherwise. - */ - resume : function () { - if(!this.isPaused || this.isFinished) { - return false; - } - this.isPaused = false; - - // if true, the worker tried to resume but failed - var withError = false; - if(this.generatedError) { - this.error(this.generatedError); - withError = true; - } - if(this.previous) { - this.previous.resume(); - } - - return !withError; - }, - /** - * Flush any remaining bytes as the stream is ending. - */ - flush : function () {}, - /** - * Process a chunk. This is usually the method overridden. - * @param {Object} chunk the chunk to process. - */ - processChunk : function(chunk) { - this.push(chunk); - }, - /** - * Add a key/value to be added in the workers chain streamInfo once activated. 
- * @param {String} key the key to use - * @param {Object} value the associated value - * @return {Worker} the current worker for chainability - */ - withStreamInfo : function (key, value) { - this.extraStreamInfo[key] = value; - this.mergeStreamInfo(); - return this; - }, - /** - * Merge this worker's streamInfo into the chain's streamInfo. - */ - mergeStreamInfo : function () { - for(var key in this.extraStreamInfo) { - if (!this.extraStreamInfo.hasOwnProperty(key)) { - continue; - } - this.streamInfo[key] = this.extraStreamInfo[key]; - } - }, - - /** - * Lock the stream to prevent further updates on the workers chain. - * After calling this method, all calls to pipe will fail. - */ - lock: function () { - if (this.isLocked) { - throw new Error("The stream '" + this + "' has already been used."); - } - this.isLocked = true; - if (this.previous) { - this.previous.lock(); - } - }, - - /** - * - * Pretty print the workers chain. - */ - toString : function () { - var me = "Worker " + this.name; - if (this.previous) { - return this.previous + " -> " + me; - } else { - return me; - } - } -}; - -module.exports = GenericWorker; - -},{}],29:[function(require,module,exports){ -'use strict'; - -var utils = require('../utils'); -var ConvertWorker = require('./ConvertWorker'); -var GenericWorker = require('./GenericWorker'); -var base64 = require('../base64'); -var support = require("../support"); -var external = require("../external"); - -var NodejsStreamOutputAdapter = null; -if (support.nodestream) { - try { - NodejsStreamOutputAdapter = require('../nodejs/NodejsStreamOutputAdapter'); - } catch(e) {} -} - -/** - * Apply the final transformation of the data. If the user wants a Blob for - * example, it's easier to work with an U8intArray and finally do the - * ArrayBuffer/Blob conversion. - * @param {String} type the name of the final type - * @param {String|Uint8Array|Buffer} content the content to transform - * @param {String} mimeType the mime type of the content, if applicable. - * @return {String|Uint8Array|ArrayBuffer|Buffer|Blob} the content in the right format. - */ -function transformZipOutput(type, content, mimeType) { - switch(type) { - case "blob" : - return utils.newBlob(utils.transformTo("arraybuffer", content), mimeType); - case "base64" : - return base64.encode(content); - default : - return utils.transformTo(type, content); - } -} - -/** - * Concatenate an array of data of the given type. - * @param {String} type the type of the data in the given array. - * @param {Array} dataArray the array containing the data chunks to concatenate - * @return {String|Uint8Array|Buffer} the concatenated data - * @throws Error if the asked type is unsupported - */ -function concat (type, dataArray) { - var i, index = 0, res = null, totalLength = 0; - for(i = 0; i < dataArray.length; i++) { - totalLength += dataArray[i].length; - } - switch(type) { - case "string": - return dataArray.join(""); - case "array": - return Array.prototype.concat.apply([], dataArray); - case "uint8array": - res = new Uint8Array(totalLength); - for(i = 0; i < dataArray.length; i++) { - res.set(dataArray[i], index); - index += dataArray[i].length; - } - return res; - case "nodebuffer": - return Buffer.concat(dataArray); - default: - throw new Error("concat : unsupported type '" + type + "'"); - } -} - -/** - * Listen a StreamHelper, accumulate its content and concatenate it into a - * complete block. - * @param {StreamHelper} helper the helper to use. - * @param {Function} updateCallback a callback called on each update. 
Called - * with one arg : - * - the metadata linked to the update received. - * @return Promise the promise for the accumulation. - */ -function accumulate(helper, updateCallback) { - return new external.Promise(function (resolve, reject){ - var dataArray = []; - var chunkType = helper._internalType, - resultType = helper._outputType, - mimeType = helper._mimeType; - helper - .on('data', function (data, meta) { - dataArray.push(data); - if(updateCallback) { - updateCallback(meta); - } - }) - .on('error', function(err) { - dataArray = []; - reject(err); - }) - .on('end', function (){ - try { - var result = transformZipOutput(resultType, concat(chunkType, dataArray), mimeType); - resolve(result); - } catch (e) { - reject(e); - } - dataArray = []; - }) - .resume(); - }); -} - -/** - * An helper to easily use workers outside of JSZip. - * @constructor - * @param {Worker} worker the worker to wrap - * @param {String} outputType the type of data expected by the use - * @param {String} mimeType the mime type of the content, if applicable. - */ -function StreamHelper(worker, outputType, mimeType) { - var internalType = outputType; - switch(outputType) { - case "blob": - case "arraybuffer": - internalType = "uint8array"; - break; - case "base64": - internalType = "string"; - break; - } - - try { - // the type used internally - this._internalType = internalType; - // the type used to output results - this._outputType = outputType; - // the mime type - this._mimeType = mimeType; - utils.checkSupport(internalType); - this._worker = worker.pipe(new ConvertWorker(internalType)); - // the last workers can be rewired without issues but we need to - // prevent any updates on previous workers. - worker.lock(); - } catch(e) { - this._worker = new GenericWorker("error"); - this._worker.error(e); - } -} - -StreamHelper.prototype = { - /** - * Listen a StreamHelper, accumulate its content and concatenate it into a - * complete block. - * @param {Function} updateCb the update callback. - * @return Promise the promise for the accumulation. - */ - accumulate : function (updateCb) { - return accumulate(this, updateCb); - }, - /** - * Add a listener on an event triggered on a stream. - * @param {String} evt the name of the event - * @param {Function} fn the listener - * @return {StreamHelper} the current helper. - */ - on : function (evt, fn) { - var self = this; - - if(evt === "data") { - this._worker.on(evt, function (chunk) { - fn.call(self, chunk.data, chunk.meta); - }); - } else { - this._worker.on(evt, function () { - utils.delay(fn, arguments, self); - }); - } - return this; - }, - /** - * Resume the flow of chunks. - * @return {StreamHelper} the current helper. - */ - resume : function () { - utils.delay(this._worker.resume, [], this._worker); - return this; - }, - /** - * Pause the flow of chunks. - * @return {StreamHelper} the current helper. - */ - pause : function () { - this._worker.pause(); - return this; - }, - /** - * Return a nodejs stream for this helper. - * @param {Function} updateCb the update callback. - * @return {NodejsStreamOutputAdapter} the nodejs stream. - */ - toNodejsStream : function (updateCb) { - utils.checkSupport("nodestream"); - if (this._outputType !== "nodebuffer") { - // an object stream containing blob/arraybuffer/uint8array/string - // is strange and I don't know if it would be useful. - // I you find this comment and have a good usecase, please open a - // bug report ! 
- throw new Error(this._outputType + " is not supported by this method"); - } - - return new NodejsStreamOutputAdapter(this, { - objectMode : this._outputType !== "nodebuffer" - }, updateCb); - } -}; - - -module.exports = StreamHelper; - -},{"../base64":1,"../external":6,"../nodejs/NodejsStreamOutputAdapter":13,"../support":30,"../utils":32,"./ConvertWorker":24,"./GenericWorker":28}],30:[function(require,module,exports){ -'use strict'; - -exports.base64 = true; -exports.array = true; -exports.string = true; -exports.arraybuffer = typeof ArrayBuffer !== "undefined" && typeof Uint8Array !== "undefined"; -exports.nodebuffer = typeof Buffer !== "undefined"; -// contains true if JSZip can read/generate Uint8Array, false otherwise. -exports.uint8array = typeof Uint8Array !== "undefined"; - -if (typeof ArrayBuffer === "undefined") { - exports.blob = false; -} -else { - var buffer = new ArrayBuffer(0); - try { - exports.blob = new Blob([buffer], { - type: "application/zip" - }).size === 0; - } - catch (e) { - try { - var Builder = self.BlobBuilder || self.WebKitBlobBuilder || self.MozBlobBuilder || self.MSBlobBuilder; - var builder = new Builder(); - builder.append(buffer); - exports.blob = builder.getBlob('application/zip').size === 0; - } - catch (e) { - exports.blob = false; - } - } -} - -try { - exports.nodestream = !!require('readable-stream').Readable; -} catch(e) { - exports.nodestream = false; -} - -},{"readable-stream":16}],31:[function(require,module,exports){ -'use strict'; - -var utils = require('./utils'); -var support = require('./support'); -var nodejsUtils = require('./nodejsUtils'); -var GenericWorker = require('./stream/GenericWorker'); - -/** - * The following functions come from pako, from pako/lib/utils/strings - * released under the MIT license, see pako https://github.com/nodeca/pako/ - */ - -// Table with utf8 lengths (calculated by first byte of sequence) -// Note, that 5 & 6-byte values and some 4-byte values can not be represented in JS, -// because max possible codepoint is 0x10ffff -var _utf8len = new Array(256); -for (var i=0; i<256; i++) { - _utf8len[i] = (i >= 252 ? 6 : i >= 248 ? 5 : i >= 240 ? 4 : i >= 224 ? 3 : i >= 192 ? 2 : 1); -} -_utf8len[254]=_utf8len[254]=1; // Invalid sequence start - -// convert string to array (typed, when possible) -var string2buf = function (str) { - var buf, c, c2, m_pos, i, str_len = str.length, buf_len = 0; - - // count binary size - for (m_pos = 0; m_pos < str_len; m_pos++) { - c = str.charCodeAt(m_pos); - if ((c & 0xfc00) === 0xd800 && (m_pos+1 < str_len)) { - c2 = str.charCodeAt(m_pos+1); - if ((c2 & 0xfc00) === 0xdc00) { - c = 0x10000 + ((c - 0xd800) << 10) + (c2 - 0xdc00); - m_pos++; - } - } - buf_len += c < 0x80 ? 1 : c < 0x800 ? 2 : c < 0x10000 ? 
3 : 4;
-    }
-
-    // allocate buffer
-    if (support.uint8array) {
-        buf = new Uint8Array(buf_len);
-    } else {
-        buf = new Array(buf_len);
-    }
-
-    // convert
-    for (i=0, m_pos = 0; i < buf_len; m_pos++) {
-        c = str.charCodeAt(m_pos);
-        if ((c & 0xfc00) === 0xd800 && (m_pos+1 < str_len)) {
-            c2 = str.charCodeAt(m_pos+1);
-            if ((c2 & 0xfc00) === 0xdc00) {
-                c = 0x10000 + ((c - 0xd800) << 10) + (c2 - 0xdc00);
-                m_pos++;
-            }
-        }
-        if (c < 0x80) {
-            /* one byte */
-            buf[i++] = c;
-        } else if (c < 0x800) {
-            /* two bytes */
-            buf[i++] = 0xC0 | (c >>> 6);
-            buf[i++] = 0x80 | (c & 0x3f);
-        } else if (c < 0x10000) {
-            /* three bytes */
-            buf[i++] = 0xE0 | (c >>> 12);
-            buf[i++] = 0x80 | (c >>> 6 & 0x3f);
-            buf[i++] = 0x80 | (c & 0x3f);
-        } else {
-            /* four bytes */
-            buf[i++] = 0xf0 | (c >>> 18);
-            buf[i++] = 0x80 | (c >>> 12 & 0x3f);
-            buf[i++] = 0x80 | (c >>> 6 & 0x3f);
-            buf[i++] = 0x80 | (c & 0x3f);
-        }
-    }
-
-    return buf;
-};
-
-// Calculate max possible position in utf8 buffer,
-// that will not break sequence. If that's not possible
-// - (very small limits) return max size as is.
-//
-// buf[] - utf8 bytes array
-// max - length limit (mandatory);
-var utf8border = function(buf, max) {
-    var pos;
-
-    max = max || buf.length;
-    if (max > buf.length) { max = buf.length; }
-
-    // go back from last position, until start of sequence found
-    pos = max-1;
-    while (pos >= 0 && (buf[pos] & 0xC0) === 0x80) { pos--; }
-
-    // Fuckup - very small and broken sequence,
-    // return max, because we should return something anyway.
-    if (pos < 0) { return max; }
-
-    // If we came to start of buffer - that means buffer is too small,
-    // return max too.
-    if (pos === 0) { return max; }
-
-    return (pos + _utf8len[buf[pos]] > max) ? pos : max;
-};
-
-// convert array to string
-var buf2string = function (buf) {
-    var str, i, out, c, c_len;
-    var len = buf.length;
-
-    // Reserve max possible length (2 words per char)
-    // NB: for unknown reasons, Array is significantly faster for
-    //     String.fromCharCode.apply than Uint16Array.
-    var utf16buf = new Array(len*2);
-
-    for (out=0, i=0; i<len;) {
-        c = buf[i++];
-        // quick process ascii
-        if (c < 0x80) { utf16buf[out++] = c; continue; }
-
-        c_len = _utf8len[c];
-        // skip 5 & 6 byte codes
-        if (c_len > 4) { utf16buf[out++] = 0xfffd; i += c_len-1; continue; }
-
-        // apply mask on first byte
-        c &= c_len === 2 ? 0x1f : c_len === 3 ? 0x0f : 0x07;
-        // join the rest
-        while (c_len > 1 && i < len) {
-            c = (c << 6) | (buf[i++] & 0x3f);
-            c_len--;
-        }
-
-        // terminated by end of string?
-        if (c_len > 1) { utf16buf[out++] = 0xfffd; continue; }
-
-        if (c < 0x10000) {
-            utf16buf[out++] = c;
-        } else {
-            c -= 0x10000;
-            utf16buf[out++] = 0xd800 | ((c >> 10) & 0x3ff);
-            utf16buf[out++] = 0xdc00 | (c & 0x3ff);
-        }
-    }
-
-    // shrinkBuf(utf16buf, out)
-    if (utf16buf.length !== out) {
-        if(utf16buf.subarray) {
-            utf16buf = utf16buf.subarray(0, out);
-        } else {
-            utf16buf.length = out;
-        }
-    }
-
-    // return String.fromCharCode.apply(null, utf16buf);
-    return utils.applyFromCharCode(utf16buf);
-};
-
-
-// That's all for the pako functions.
-
-
-/**
- * Transform a javascript string into an array (typed if possible) of bytes,
- * UTF-8 encoded.
- * @param {String} str the string to encode
- * @return {Array|Uint8Array|Buffer} the UTF-8 encoded string.
- */
-exports.utf8encode = function utf8encode(str) {
-    if (support.nodebuffer) {
-        return nodejsUtils.newBufferFrom(str, "utf-8");
-    }
-
-    return string2buf(str);
-};
-
-
-/**
- * Transform a bytes array (or a representation) representing an UTF-8 encoded
- * string into a javascript string.
- * @param {Array|Uint8Array|Buffer} buf the data to decode
- * @return {String} the decoded string.
- */ -exports.utf8decode = function utf8decode(buf) { - if (support.nodebuffer) { - return utils.transformTo("nodebuffer", buf).toString("utf-8"); - } - - buf = utils.transformTo(support.uint8array ? "uint8array" : "array", buf); - - return buf2string(buf); -}; - -/** - * A worker to decode utf8 encoded binary chunks into string chunks. - * @constructor - */ -function Utf8DecodeWorker() { - GenericWorker.call(this, "utf-8 decode"); - // the last bytes if a chunk didn't end with a complete codepoint. - this.leftOver = null; -} -utils.inherits(Utf8DecodeWorker, GenericWorker); - -/** - * @see GenericWorker.processChunk - */ -Utf8DecodeWorker.prototype.processChunk = function (chunk) { - - var data = utils.transformTo(support.uint8array ? "uint8array" : "array", chunk.data); - - // 1st step, re-use what's left of the previous chunk - if (this.leftOver && this.leftOver.length) { - if(support.uint8array) { - var previousData = data; - data = new Uint8Array(previousData.length + this.leftOver.length); - data.set(this.leftOver, 0); - data.set(previousData, this.leftOver.length); - } else { - data = this.leftOver.concat(data); - } - this.leftOver = null; - } - - var nextBoundary = utf8border(data); - var usableData = data; - if (nextBoundary !== data.length) { - if (support.uint8array) { - usableData = data.subarray(0, nextBoundary); - this.leftOver = data.subarray(nextBoundary, data.length); - } else { - usableData = data.slice(0, nextBoundary); - this.leftOver = data.slice(nextBoundary, data.length); - } - } - - this.push({ - data : exports.utf8decode(usableData), - meta : chunk.meta - }); -}; - -/** - * @see GenericWorker.flush - */ -Utf8DecodeWorker.prototype.flush = function () { - if(this.leftOver && this.leftOver.length) { - this.push({ - data : exports.utf8decode(this.leftOver), - meta : {} - }); - this.leftOver = null; - } -}; -exports.Utf8DecodeWorker = Utf8DecodeWorker; - -/** - * A worker to endcode string chunks into utf8 encoded binary chunks. - * @constructor - */ -function Utf8EncodeWorker() { - GenericWorker.call(this, "utf-8 encode"); -} -utils.inherits(Utf8EncodeWorker, GenericWorker); - -/** - * @see GenericWorker.processChunk - */ -Utf8EncodeWorker.prototype.processChunk = function (chunk) { - this.push({ - data : exports.utf8encode(chunk.data), - meta : chunk.meta - }); -}; -exports.Utf8EncodeWorker = Utf8EncodeWorker; - -},{"./nodejsUtils":14,"./stream/GenericWorker":28,"./support":30,"./utils":32}],32:[function(require,module,exports){ -'use strict'; - -var support = require('./support'); -var base64 = require('./base64'); -var nodejsUtils = require('./nodejsUtils'); -var setImmediate = require('set-immediate-shim'); -var external = require("./external"); - - -/** - * Convert a string that pass as a "binary string": it should represent a byte - * array but may have > 255 char codes. Be sure to take only the first byte - * and returns the byte array. - * @param {String} str the string to transform. - * @return {Array|Uint8Array} the string in a binary format. - */ -function string2binary(str) { - var result = null; - if (support.uint8array) { - result = new Uint8Array(str.length); - } else { - result = new Array(str.length); - } - return stringToArrayLike(str, result); -} - -/** - * Create a new blob with the given content and the given type. - * @param {String|ArrayBuffer} part the content to put in the blob. DO NOT use - * an Uint8Array because the stock browser of android 4 won't accept it (it - * will be silently converted to a string, "[object Uint8Array]"). 
- * - * Use only ONE part to build the blob to avoid a memory leak in IE11 / Edge: - * when a large amount of Array is used to create the Blob, the amount of - * memory consumed is nearly 100 times the original data amount. - * - * @param {String} type the mime type of the blob. - * @return {Blob} the created blob. - */ -exports.newBlob = function(part, type) { - exports.checkSupport("blob"); - - try { - // Blob constructor - return new Blob([part], { - type: type - }); - } - catch (e) { - - try { - // deprecated, browser only, old way - var Builder = self.BlobBuilder || self.WebKitBlobBuilder || self.MozBlobBuilder || self.MSBlobBuilder; - var builder = new Builder(); - builder.append(part); - return builder.getBlob(type); - } - catch (e) { - - // well, fuck ?! - throw new Error("Bug : can't construct the Blob."); - } - } - - -}; -/** - * The identity function. - * @param {Object} input the input. - * @return {Object} the same input. - */ -function identity(input) { - return input; -} - -/** - * Fill in an array with a string. - * @param {String} str the string to use. - * @param {Array|ArrayBuffer|Uint8Array|Buffer} array the array to fill in (will be mutated). - * @return {Array|ArrayBuffer|Uint8Array|Buffer} the updated array. - */ -function stringToArrayLike(str, array) { - for (var i = 0; i < str.length; ++i) { - array[i] = str.charCodeAt(i) & 0xFF; - } - return array; -} - -/** - * An helper for the function arrayLikeToString. - * This contains static informations and functions that - * can be optimized by the browser JIT compiler. - */ -var arrayToStringHelper = { - /** - * Transform an array of int into a string, chunk by chunk. - * See the performances notes on arrayLikeToString. - * @param {Array|ArrayBuffer|Uint8Array|Buffer} array the array to transform. - * @param {String} type the type of the array. - * @param {Integer} chunk the chunk size. - * @return {String} the resulting string. - * @throws Error if the chunk is too big for the stack. - */ - stringifyByChunk: function(array, type, chunk) { - var result = [], k = 0, len = array.length; - // shortcut - if (len <= chunk) { - return String.fromCharCode.apply(null, array); - } - while (k < len) { - if (type === "array" || type === "nodebuffer") { - result.push(String.fromCharCode.apply(null, array.slice(k, Math.min(k + chunk, len)))); - } - else { - result.push(String.fromCharCode.apply(null, array.subarray(k, Math.min(k + chunk, len)))); - } - k += chunk; - } - return result.join(""); - }, - /** - * Call String.fromCharCode on every item in the array. - * This is the naive implementation, which generate A LOT of intermediate string. - * This should be used when everything else fail. - * @param {Array|ArrayBuffer|Uint8Array|Buffer} array the array to transform. - * @return {String} the result. - */ - stringifyByChar: function(array){ - var resultStr = ""; - for(var i = 0; i < array.length; i++) { - resultStr += String.fromCharCode(array[i]); - } - return resultStr; - }, - applyCanBeUsed : { - /** - * true if the browser accepts to use String.fromCharCode on Uint8Array - */ - uint8array : (function () { - try { - return support.uint8array && String.fromCharCode.apply(null, new Uint8Array(1)).length === 1; - } catch (e) { - return false; - } - })(), - /** - * true if the browser accepts to use String.fromCharCode on nodejs Buffer. 
- */ - nodebuffer : (function () { - try { - return support.nodebuffer && String.fromCharCode.apply(null, nodejsUtils.allocBuffer(1)).length === 1; - } catch (e) { - return false; - } - })() - } -}; - -/** - * Transform an array-like object to a string. - * @param {Array|ArrayBuffer|Uint8Array|Buffer} array the array to transform. - * @return {String} the result. - */ -function arrayLikeToString(array) { - // Performances notes : - // -------------------- - // String.fromCharCode.apply(null, array) is the fastest, see - // see http://jsperf.com/converting-a-uint8array-to-a-string/2 - // but the stack is limited (and we can get huge arrays !). - // - // result += String.fromCharCode(array[i]); generate too many strings ! - // - // This code is inspired by http://jsperf.com/arraybuffer-to-string-apply-performance/2 - // TODO : we now have workers that split the work. Do we still need that ? - var chunk = 65536, - type = exports.getTypeOf(array), - canUseApply = true; - if (type === "uint8array") { - canUseApply = arrayToStringHelper.applyCanBeUsed.uint8array; - } else if (type === "nodebuffer") { - canUseApply = arrayToStringHelper.applyCanBeUsed.nodebuffer; - } - - if (canUseApply) { - while (chunk > 1) { - try { - return arrayToStringHelper.stringifyByChunk(array, type, chunk); - } catch (e) { - chunk = Math.floor(chunk / 2); - } - } - } - - // no apply or chunk error : slow and painful algorithm - // default browser on android 4.* - return arrayToStringHelper.stringifyByChar(array); -} - -exports.applyFromCharCode = arrayLikeToString; - - -/** - * Copy the data from an array-like to an other array-like. - * @param {Array|ArrayBuffer|Uint8Array|Buffer} arrayFrom the origin array. - * @param {Array|ArrayBuffer|Uint8Array|Buffer} arrayTo the destination array which will be mutated. - * @return {Array|ArrayBuffer|Uint8Array|Buffer} the updated destination array. - */ -function arrayLikeToArrayLike(arrayFrom, arrayTo) { - for (var i = 0; i < arrayFrom.length; i++) { - arrayTo[i] = arrayFrom[i]; - } - return arrayTo; -} - -// a matrix containing functions to transform everything into everything. -var transform = {}; - -// string to ? -transform["string"] = { - "string": identity, - "array": function(input) { - return stringToArrayLike(input, new Array(input.length)); - }, - "arraybuffer": function(input) { - return transform["string"]["uint8array"](input).buffer; - }, - "uint8array": function(input) { - return stringToArrayLike(input, new Uint8Array(input.length)); - }, - "nodebuffer": function(input) { - return stringToArrayLike(input, nodejsUtils.allocBuffer(input.length)); - } -}; - -// array to ? -transform["array"] = { - "string": arrayLikeToString, - "array": identity, - "arraybuffer": function(input) { - return (new Uint8Array(input)).buffer; - }, - "uint8array": function(input) { - return new Uint8Array(input); - }, - "nodebuffer": function(input) { - return nodejsUtils.newBufferFrom(input); - } -}; - -// arraybuffer to ? -transform["arraybuffer"] = { - "string": function(input) { - return arrayLikeToString(new Uint8Array(input)); - }, - "array": function(input) { - return arrayLikeToArrayLike(new Uint8Array(input), new Array(input.byteLength)); - }, - "arraybuffer": identity, - "uint8array": function(input) { - return new Uint8Array(input); - }, - "nodebuffer": function(input) { - return nodejsUtils.newBufferFrom(new Uint8Array(input)); - } -}; - -// uint8array to ? 
-transform["uint8array"] = { - "string": arrayLikeToString, - "array": function(input) { - return arrayLikeToArrayLike(input, new Array(input.length)); - }, - "arraybuffer": function(input) { - return input.buffer; - }, - "uint8array": identity, - "nodebuffer": function(input) { - return nodejsUtils.newBufferFrom(input); - } -}; - -// nodebuffer to ? -transform["nodebuffer"] = { - "string": arrayLikeToString, - "array": function(input) { - return arrayLikeToArrayLike(input, new Array(input.length)); - }, - "arraybuffer": function(input) { - return transform["nodebuffer"]["uint8array"](input).buffer; - }, - "uint8array": function(input) { - return arrayLikeToArrayLike(input, new Uint8Array(input.length)); - }, - "nodebuffer": identity -}; - -/** - * Transform an input into any type. - * The supported output type are : string, array, uint8array, arraybuffer, nodebuffer. - * If no output type is specified, the unmodified input will be returned. - * @param {String} outputType the output type. - * @param {String|Array|ArrayBuffer|Uint8Array|Buffer} input the input to convert. - * @throws {Error} an Error if the browser doesn't support the requested output type. - */ -exports.transformTo = function(outputType, input) { - if (!input) { - // undefined, null, etc - // an empty string won't harm. - input = ""; - } - if (!outputType) { - return input; - } - exports.checkSupport(outputType); - var inputType = exports.getTypeOf(input); - var result = transform[inputType][outputType](input); - return result; -}; - -/** - * Return the type of the input. - * The type will be in a format valid for JSZip.utils.transformTo : string, array, uint8array, arraybuffer. - * @param {Object} input the input to identify. - * @return {String} the (lowercase) type of the input. - */ -exports.getTypeOf = function(input) { - if (typeof input === "string") { - return "string"; - } - if (Object.prototype.toString.call(input) === "[object Array]") { - return "array"; - } - if (support.nodebuffer && nodejsUtils.isBuffer(input)) { - return "nodebuffer"; - } - if (support.uint8array && input instanceof Uint8Array) { - return "uint8array"; - } - if (support.arraybuffer && input instanceof ArrayBuffer) { - return "arraybuffer"; - } -}; - -/** - * Throw an exception if the type is not supported. - * @param {String} type the type to check. - * @throws {Error} an Error if the browser doesn't support the requested type. - */ -exports.checkSupport = function(type) { - var supported = support[type.toLowerCase()]; - if (!supported) { - throw new Error(type + " is not supported by this platform"); - } -}; - -exports.MAX_VALUE_16BITS = 65535; -exports.MAX_VALUE_32BITS = -1; // well, "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF" is parsed as -1 - -/** - * Prettify a string read as binary. - * @param {string} str the string to prettify. - * @return {string} a pretty string. - */ -exports.pretty = function(str) { - var res = '', - code, i; - for (i = 0; i < (str || "").length; i++) { - code = str.charCodeAt(i); - res += '\\x' + (code < 16 ? "0" : "") + code.toString(16).toUpperCase(); - } - return res; -}; - -/** - * Defer the call of a function. - * @param {Function} callback the function to call asynchronously. - * @param {Array} args the arguments to give to the callback. - */ -exports.delay = function(callback, args, self) { - setImmediate(function () { - callback.apply(self || null, args || []); - }); -}; - -/** - * Extends a prototype with an other, without calling a constructor with - * side effects. 
Inspired by nodejs' `utils.inherits` - * @param {Function} ctor the constructor to augment - * @param {Function} superCtor the parent constructor to use - */ -exports.inherits = function (ctor, superCtor) { - var Obj = function() {}; - Obj.prototype = superCtor.prototype; - ctor.prototype = new Obj(); -}; - -/** - * Merge the objects passed as parameters into a new one. - * @private - * @param {...Object} var_args All objects to merge. - * @return {Object} a new object with the data of the others. - */ -exports.extend = function() { - var result = {}, i, attr; - for (i = 0; i < arguments.length; i++) { // arguments is not enumerable in some browsers - for (attr in arguments[i]) { - if (arguments[i].hasOwnProperty(attr) && typeof result[attr] === "undefined") { - result[attr] = arguments[i][attr]; - } - } - } - return result; -}; - -/** - * Transform arbitrary content into a Promise. - * @param {String} name a name for the content being processed. - * @param {Object} inputData the content to process. - * @param {Boolean} isBinary true if the content is not an unicode string - * @param {Boolean} isOptimizedBinaryString true if the string content only has one byte per character. - * @param {Boolean} isBase64 true if the string content is encoded with base64. - * @return {Promise} a promise in a format usable by JSZip. - */ -exports.prepareContent = function(name, inputData, isBinary, isOptimizedBinaryString, isBase64) { - - // if inputData is already a promise, this flatten it. - var promise = external.Promise.resolve(inputData).then(function(data) { - - - var isBlob = support.blob && (data instanceof Blob || ['[object File]', '[object Blob]'].indexOf(Object.prototype.toString.call(data)) !== -1); - - if (isBlob && typeof FileReader !== "undefined") { - return new external.Promise(function (resolve, reject) { - var reader = new FileReader(); - - reader.onload = function(e) { - resolve(e.target.result); - }; - reader.onerror = function(e) { - reject(e.target.error); - }; - reader.readAsArrayBuffer(data); - }); - } else { - return data; - } - }); - - return promise.then(function(data) { - var dataType = exports.getTypeOf(data); - - if (!dataType) { - return external.Promise.reject( - new Error("Can't read the data of '" + name + "'. Is it " + - "in a supported JavaScript type (String, Blob, ArrayBuffer, etc) ?") - ); - } - // special case : it's way easier to work with Uint8Array than with ArrayBuffer - if (dataType === "arraybuffer") { - data = exports.transformTo("uint8array", data); - } else if (dataType === "string") { - if (isBase64) { - data = base64.decode(data); - } - else if (isBinary) { - // optimizedBinaryString === true means that the file has already been filtered with a 0xFF mask - if (isOptimizedBinaryString !== true) { - // this is a string, not in a base64 format. - // Be sure that this is a correct "binary string" - data = string2binary(data); - } - } - } - return data; - }); -}; - -},{"./base64":1,"./external":6,"./nodejsUtils":14,"./support":30,"set-immediate-shim":54}],33:[function(require,module,exports){ -'use strict'; -var readerFor = require('./reader/readerFor'); -var utils = require('./utils'); -var sig = require('./signature'); -var ZipEntry = require('./zipEntry'); -var utf8 = require('./utf8'); -var support = require('./support'); -// class ZipEntries {{{ -/** - * All the entries in the zip file. - * @constructor - * @param {Object} loadOptions Options for loading the stream. 
- */ -function ZipEntries(loadOptions) { - this.files = []; - this.loadOptions = loadOptions; -} -ZipEntries.prototype = { - /** - * Check that the reader is on the specified signature. - * @param {string} expectedSignature the expected signature. - * @throws {Error} if it is an other signature. - */ - checkSignature: function(expectedSignature) { - if (!this.reader.readAndCheckSignature(expectedSignature)) { - this.reader.index -= 4; - var signature = this.reader.readString(4); - throw new Error("Corrupted zip or bug: unexpected signature " + "(" + utils.pretty(signature) + ", expected " + utils.pretty(expectedSignature) + ")"); - } - }, - /** - * Check if the given signature is at the given index. - * @param {number} askedIndex the index to check. - * @param {string} expectedSignature the signature to expect. - * @return {boolean} true if the signature is here, false otherwise. - */ - isSignature: function(askedIndex, expectedSignature) { - var currentIndex = this.reader.index; - this.reader.setIndex(askedIndex); - var signature = this.reader.readString(4); - var result = signature === expectedSignature; - this.reader.setIndex(currentIndex); - return result; - }, - /** - * Read the end of the central directory. - */ - readBlockEndOfCentral: function() { - this.diskNumber = this.reader.readInt(2); - this.diskWithCentralDirStart = this.reader.readInt(2); - this.centralDirRecordsOnThisDisk = this.reader.readInt(2); - this.centralDirRecords = this.reader.readInt(2); - this.centralDirSize = this.reader.readInt(4); - this.centralDirOffset = this.reader.readInt(4); - - this.zipCommentLength = this.reader.readInt(2); - // warning : the encoding depends of the system locale - // On a linux machine with LANG=en_US.utf8, this field is utf8 encoded. - // On a windows machine, this field is encoded with the localized windows code page. - var zipComment = this.reader.readData(this.zipCommentLength); - var decodeParamType = support.uint8array ? "uint8array" : "array"; - // To get consistent behavior with the generation part, we will assume that - // this is utf8 encoded unless specified otherwise. - var decodeContent = utils.transformTo(decodeParamType, zipComment); - this.zipComment = this.loadOptions.decodeFileName(decodeContent); - }, - /** - * Read the end of the Zip 64 central directory. - * Not merged with the method readEndOfCentral : - * The end of central can coexist with its Zip64 brother, - * I don't want to read the wrong number of bytes ! 
- */ - readBlockZip64EndOfCentral: function() { - this.zip64EndOfCentralSize = this.reader.readInt(8); - this.reader.skip(4); - // this.versionMadeBy = this.reader.readString(2); - // this.versionNeeded = this.reader.readInt(2); - this.diskNumber = this.reader.readInt(4); - this.diskWithCentralDirStart = this.reader.readInt(4); - this.centralDirRecordsOnThisDisk = this.reader.readInt(8); - this.centralDirRecords = this.reader.readInt(8); - this.centralDirSize = this.reader.readInt(8); - this.centralDirOffset = this.reader.readInt(8); - - this.zip64ExtensibleData = {}; - var extraDataSize = this.zip64EndOfCentralSize - 44, - index = 0, - extraFieldId, - extraFieldLength, - extraFieldValue; - while (index < extraDataSize) { - extraFieldId = this.reader.readInt(2); - extraFieldLength = this.reader.readInt(4); - extraFieldValue = this.reader.readData(extraFieldLength); - this.zip64ExtensibleData[extraFieldId] = { - id: extraFieldId, - length: extraFieldLength, - value: extraFieldValue - }; - } - }, - /** - * Read the end of the Zip 64 central directory locator. - */ - readBlockZip64EndOfCentralLocator: function() { - this.diskWithZip64CentralDirStart = this.reader.readInt(4); - this.relativeOffsetEndOfZip64CentralDir = this.reader.readInt(8); - this.disksCount = this.reader.readInt(4); - if (this.disksCount > 1) { - throw new Error("Multi-volumes zip are not supported"); - } - }, - /** - * Read the local files, based on the offset read in the central part. - */ - readLocalFiles: function() { - var i, file; - for (i = 0; i < this.files.length; i++) { - file = this.files[i]; - this.reader.setIndex(file.localHeaderOffset); - this.checkSignature(sig.LOCAL_FILE_HEADER); - file.readLocalPart(this.reader); - file.handleUTF8(); - file.processAttributes(); - } - }, - /** - * Read the central directory. - */ - readCentralDir: function() { - var file; - - this.reader.setIndex(this.centralDirOffset); - while (this.reader.readAndCheckSignature(sig.CENTRAL_FILE_HEADER)) { - file = new ZipEntry({ - zip64: this.zip64 - }, this.loadOptions); - file.readCentralPart(this.reader); - this.files.push(file); - } - - if (this.centralDirRecords !== this.files.length) { - if (this.centralDirRecords !== 0 && this.files.length === 0) { - // We expected some records but couldn't find ANY. - // This is really suspicious, as if something went wrong. - throw new Error("Corrupted zip or bug: expected " + this.centralDirRecords + " records in central dir, got " + this.files.length); - } else { - // We found some records but not all. - // Something is wrong but we got something for the user: no error here. - // console.warn("expected", this.centralDirRecords, "records in central dir, got", this.files.length); - } - } - }, - /** - * Read the end of central directory. - */ - readEndOfCentral: function() { - var offset = this.reader.lastIndexOfSignature(sig.CENTRAL_DIRECTORY_END); - if (offset < 0) { - // Check if the content is a truncated zip or complete garbage. - // A "LOCAL_FILE_HEADER" is not required at the beginning (auto - // extractible zip for example) but it can give a good hint. - // If an ajax request was used without responseType, we will also - // get unreadable data. - var isGarbage = !this.isSignature(0, sig.LOCAL_FILE_HEADER); - - if (isGarbage) { - throw new Error("Can't find end of central directory : is this a zip file ? 
" + - "If it is, see https://stuk.github.io/jszip/documentation/howto/read_zip.html"); - } else { - throw new Error("Corrupted zip: can't find end of central directory"); - } - - } - this.reader.setIndex(offset); - var endOfCentralDirOffset = offset; - this.checkSignature(sig.CENTRAL_DIRECTORY_END); - this.readBlockEndOfCentral(); - - - /* extract from the zip spec : - 4) If one of the fields in the end of central directory - record is too small to hold required data, the field - should be set to -1 (0xFFFF or 0xFFFFFFFF) and the - ZIP64 format record should be created. - 5) The end of central directory record and the - Zip64 end of central directory locator record must - reside on the same disk when splitting or spanning - an archive. - */ - if (this.diskNumber === utils.MAX_VALUE_16BITS || this.diskWithCentralDirStart === utils.MAX_VALUE_16BITS || this.centralDirRecordsOnThisDisk === utils.MAX_VALUE_16BITS || this.centralDirRecords === utils.MAX_VALUE_16BITS || this.centralDirSize === utils.MAX_VALUE_32BITS || this.centralDirOffset === utils.MAX_VALUE_32BITS) { - this.zip64 = true; - - /* - Warning : the zip64 extension is supported, but ONLY if the 64bits integer read from - the zip file can fit into a 32bits integer. This cannot be solved : JavaScript represents - all numbers as 64-bit double precision IEEE 754 floating point numbers. - So, we have 53bits for integers and bitwise operations treat everything as 32bits. - see https://developer.mozilla.org/en-US/docs/JavaScript/Reference/Operators/Bitwise_Operators - and http://www.ecma-international.org/publications/files/ECMA-ST/ECMA-262.pdf section 8.5 - */ - - // should look for a zip64 EOCD locator - offset = this.reader.lastIndexOfSignature(sig.ZIP64_CENTRAL_DIRECTORY_LOCATOR); - if (offset < 0) { - throw new Error("Corrupted zip: can't find the ZIP64 end of central directory locator"); - } - this.reader.setIndex(offset); - this.checkSignature(sig.ZIP64_CENTRAL_DIRECTORY_LOCATOR); - this.readBlockZip64EndOfCentralLocator(); - - // now the zip64 EOCD record - if (!this.isSignature(this.relativeOffsetEndOfZip64CentralDir, sig.ZIP64_CENTRAL_DIRECTORY_END)) { - // console.warn("ZIP64 end of central directory not where expected."); - this.relativeOffsetEndOfZip64CentralDir = this.reader.lastIndexOfSignature(sig.ZIP64_CENTRAL_DIRECTORY_END); - if (this.relativeOffsetEndOfZip64CentralDir < 0) { - throw new Error("Corrupted zip: can't find the ZIP64 end of central directory"); - } - } - this.reader.setIndex(this.relativeOffsetEndOfZip64CentralDir); - this.checkSignature(sig.ZIP64_CENTRAL_DIRECTORY_END); - this.readBlockZip64EndOfCentral(); - } - - var expectedEndOfCentralDirOffset = this.centralDirOffset + this.centralDirSize; - if (this.zip64) { - expectedEndOfCentralDirOffset += 20; // end of central dir 64 locator - expectedEndOfCentralDirOffset += 12 /* should not include the leading 12 bytes */ + this.zip64EndOfCentralSize; - } - - var extraBytes = endOfCentralDirOffset - expectedEndOfCentralDirOffset; - - if (extraBytes > 0) { - // console.warn(extraBytes, "extra bytes at beginning or within zipfile"); - if (this.isSignature(endOfCentralDirOffset, sig.CENTRAL_FILE_HEADER)) { - // The offsets seem wrong, but we have something at the specified offset. - // So… we keep it. 
- } else { - // the offset is wrong, update the "zero" of the reader - // this happens if data has been prepended (crx files for example) - this.reader.zero = extraBytes; - } - } else if (extraBytes < 0) { - throw new Error("Corrupted zip: missing " + Math.abs(extraBytes) + " bytes."); - } - }, - prepareReader: function(data) { - this.reader = readerFor(data); - }, - /** - * Read a zip file and create ZipEntries. - * @param {String|ArrayBuffer|Uint8Array|Buffer} data the binary string representing a zip file. - */ - load: function(data) { - this.prepareReader(data); - this.readEndOfCentral(); - this.readCentralDir(); - this.readLocalFiles(); - } -}; -// }}} end of ZipEntries -module.exports = ZipEntries; - -},{"./reader/readerFor":22,"./signature":23,"./support":30,"./utf8":31,"./utils":32,"./zipEntry":34}],34:[function(require,module,exports){ -'use strict'; -var readerFor = require('./reader/readerFor'); -var utils = require('./utils'); -var CompressedObject = require('./compressedObject'); -var crc32fn = require('./crc32'); -var utf8 = require('./utf8'); -var compressions = require('./compressions'); -var support = require('./support'); - -var MADE_BY_DOS = 0x00; -var MADE_BY_UNIX = 0x03; - -/** - * Find a compression registered in JSZip. - * @param {string} compressionMethod the method magic to find. - * @return {Object|null} the JSZip compression object, null if none found. - */ -var findCompression = function(compressionMethod) { - for (var method in compressions) { - if (!compressions.hasOwnProperty(method)) { - continue; - } - if (compressions[method].magic === compressionMethod) { - return compressions[method]; - } - } - return null; -}; - -// class ZipEntry {{{ -/** - * An entry in the zip file. - * @constructor - * @param {Object} options Options of the current file. - * @param {Object} loadOptions Options for loading the stream. - */ -function ZipEntry(options, loadOptions) { - this.options = options; - this.loadOptions = loadOptions; -} -ZipEntry.prototype = { - /** - * say if the file is encrypted. - * @return {boolean} true if the file is encrypted, false otherwise. - */ - isEncrypted: function() { - // bit 1 is set - return (this.bitFlag & 0x0001) === 0x0001; - }, - /** - * say if the file has utf-8 filename/comment. - * @return {boolean} true if the filename/comment is in utf-8, false otherwise. - */ - useUTF8: function() { - // bit 11 is set - return (this.bitFlag & 0x0800) === 0x0800; - }, - /** - * Read the local part of a zip file and add the info in this object. - * @param {DataReader} reader the reader to use. - */ - readLocalPart: function(reader) { - var compression, localExtraFieldsLength; - - // we already know everything from the central dir ! - // If the central dir data are false, we are doomed. - // On the bright side, the local part is scary : zip64, data descriptors, both, etc. - // The less data we get here, the more reliable this should be. - // Let's skip the whole header and dash to the data ! - reader.skip(22); - // in some zip created on windows, the filename stored in the central dir contains \ instead of /. - // Strangely, the filename here is OK. - // I would love to treat these zip files as corrupted (see http://www.info-zip.org/FAQ.html#backslashes - // or APPNOTE#4.4.17.1, "All slashes MUST be forward slashes '/'") but there are a lot of bad zip generators... - // Search "unzip mismatching "local" filename continuing with "central" filename version" on - // the internet. 
- // - // I think I see the logic here : the central directory is used to display - // content and the local directory is used to extract the files. Mixing / and \ - // may be used to display \ to windows users and use / when extracting the files. - // Unfortunately, this lead also to some issues : http://seclists.org/fulldisclosure/2009/Sep/394 - this.fileNameLength = reader.readInt(2); - localExtraFieldsLength = reader.readInt(2); // can't be sure this will be the same as the central dir - // the fileName is stored as binary data, the handleUTF8 method will take care of the encoding. - this.fileName = reader.readData(this.fileNameLength); - reader.skip(localExtraFieldsLength); - - if (this.compressedSize === -1 || this.uncompressedSize === -1) { - throw new Error("Bug or corrupted zip : didn't get enough informations from the central directory " + "(compressedSize === -1 || uncompressedSize === -1)"); - } - - compression = findCompression(this.compressionMethod); - if (compression === null) { // no compression found - throw new Error("Corrupted zip : compression " + utils.pretty(this.compressionMethod) + " unknown (inner file : " + utils.transformTo("string", this.fileName) + ")"); - } - this.decompressed = new CompressedObject(this.compressedSize, this.uncompressedSize, this.crc32, compression, reader.readData(this.compressedSize)); - }, - - /** - * Read the central part of a zip file and add the info in this object. - * @param {DataReader} reader the reader to use. - */ - readCentralPart: function(reader) { - this.versionMadeBy = reader.readInt(2); - reader.skip(2); - // this.versionNeeded = reader.readInt(2); - this.bitFlag = reader.readInt(2); - this.compressionMethod = reader.readString(2); - this.date = reader.readDate(); - this.crc32 = reader.readInt(4); - this.compressedSize = reader.readInt(4); - this.uncompressedSize = reader.readInt(4); - var fileNameLength = reader.readInt(2); - this.extraFieldsLength = reader.readInt(2); - this.fileCommentLength = reader.readInt(2); - this.diskNumberStart = reader.readInt(2); - this.internalFileAttributes = reader.readInt(2); - this.externalFileAttributes = reader.readInt(4); - this.localHeaderOffset = reader.readInt(4); - - if (this.isEncrypted()) { - throw new Error("Encrypted zip are not supported"); - } - - // will be read in the local part, see the comments there - reader.skip(fileNameLength); - this.readExtraFields(reader); - this.parseZIP64ExtraField(reader); - this.fileComment = reader.readData(this.fileCommentLength); - }, - - /** - * Parse the external file attributes and get the unix/dos permissions. - */ - processAttributes: function () { - this.unixPermissions = null; - this.dosPermissions = null; - var madeBy = this.versionMadeBy >> 8; - - // Check if we have the DOS directory flag set. - // We look for it in the DOS and UNIX permissions - // but some unknown platform could set it as a compatibility flag. - this.dir = this.externalFileAttributes & 0x0010 ? true : false; - - if(madeBy === MADE_BY_DOS) { - // first 6 bits (0 to 5) - this.dosPermissions = this.externalFileAttributes & 0x3F; - } - - if(madeBy === MADE_BY_UNIX) { - this.unixPermissions = (this.externalFileAttributes >> 16) & 0xFFFF; - // the octal permissions are in (this.unixPermissions & 0x01FF).toString(8); - } - - // fail safe : if the name ends with a / it probably means a folder - if (!this.dir && this.fileNameStr.slice(-1) === '/') { - this.dir = true; - } - }, - - /** - * Parse the ZIP64 extra field and merge the info in the current ZipEntry. 
- * @param {DataReader} reader the reader to use. - */ - parseZIP64ExtraField: function(reader) { - - if (!this.extraFields[0x0001]) { - return; - } - - // should be something, preparing the extra reader - var extraReader = readerFor(this.extraFields[0x0001].value); - - // I really hope that these 64bits integer can fit in 32 bits integer, because js - // won't let us have more. - if (this.uncompressedSize === utils.MAX_VALUE_32BITS) { - this.uncompressedSize = extraReader.readInt(8); - } - if (this.compressedSize === utils.MAX_VALUE_32BITS) { - this.compressedSize = extraReader.readInt(8); - } - if (this.localHeaderOffset === utils.MAX_VALUE_32BITS) { - this.localHeaderOffset = extraReader.readInt(8); - } - if (this.diskNumberStart === utils.MAX_VALUE_32BITS) { - this.diskNumberStart = extraReader.readInt(4); - } - }, - /** - * Read the central part of a zip file and add the info in this object. - * @param {DataReader} reader the reader to use. - */ - readExtraFields: function(reader) { - var end = reader.index + this.extraFieldsLength, - extraFieldId, - extraFieldLength, - extraFieldValue; - - if (!this.extraFields) { - this.extraFields = {}; - } - - while (reader.index < end) { - extraFieldId = reader.readInt(2); - extraFieldLength = reader.readInt(2); - extraFieldValue = reader.readData(extraFieldLength); - - this.extraFields[extraFieldId] = { - id: extraFieldId, - length: extraFieldLength, - value: extraFieldValue - }; - } - }, - /** - * Apply an UTF8 transformation if needed. - */ - handleUTF8: function() { - var decodeParamType = support.uint8array ? "uint8array" : "array"; - if (this.useUTF8()) { - this.fileNameStr = utf8.utf8decode(this.fileName); - this.fileCommentStr = utf8.utf8decode(this.fileComment); - } else { - var upath = this.findExtraFieldUnicodePath(); - if (upath !== null) { - this.fileNameStr = upath; - } else { - // ASCII text or unsupported code page - var fileNameByteArray = utils.transformTo(decodeParamType, this.fileName); - this.fileNameStr = this.loadOptions.decodeFileName(fileNameByteArray); - } - - var ucomment = this.findExtraFieldUnicodeComment(); - if (ucomment !== null) { - this.fileCommentStr = ucomment; - } else { - // ASCII text or unsupported code page - var commentByteArray = utils.transformTo(decodeParamType, this.fileComment); - this.fileCommentStr = this.loadOptions.decodeFileName(commentByteArray); - } - } - }, - - /** - * Find the unicode path declared in the extra field, if any. - * @return {String} the unicode path, null otherwise. - */ - findExtraFieldUnicodePath: function() { - var upathField = this.extraFields[0x7075]; - if (upathField) { - var extraReader = readerFor(upathField.value); - - // wrong version - if (extraReader.readInt(1) !== 1) { - return null; - } - - // the crc of the filename changed, this field is out of date. - if (crc32fn(this.fileName) !== extraReader.readInt(4)) { - return null; - } - - return utf8.utf8decode(extraReader.readData(upathField.length - 5)); - } - return null; - }, - - /** - * Find the unicode comment declared in the extra field, if any. - * @return {String} the unicode comment, null otherwise. - */ - findExtraFieldUnicodeComment: function() { - var ucommentField = this.extraFields[0x6375]; - if (ucommentField) { - var extraReader = readerFor(ucommentField.value); - - // wrong version - if (extraReader.readInt(1) !== 1) { - return null; - } - - // the crc of the comment changed, this field is out of date. 
- if (crc32fn(this.fileComment) !== extraReader.readInt(4)) { - return null; - } - - return utf8.utf8decode(extraReader.readData(ucommentField.length - 5)); - } - return null; - } -}; -module.exports = ZipEntry; - -},{"./compressedObject":2,"./compressions":3,"./crc32":4,"./reader/readerFor":22,"./support":30,"./utf8":31,"./utils":32}],35:[function(require,module,exports){ -'use strict'; - -var StreamHelper = require('./stream/StreamHelper'); -var DataWorker = require('./stream/DataWorker'); -var utf8 = require('./utf8'); -var CompressedObject = require('./compressedObject'); -var GenericWorker = require('./stream/GenericWorker'); - -/** - * A simple object representing a file in the zip file. - * @constructor - * @param {string} name the name of the file - * @param {String|ArrayBuffer|Uint8Array|Buffer} data the data - * @param {Object} options the options of the file - */ -var ZipObject = function(name, data, options) { - this.name = name; - this.dir = options.dir; - this.date = options.date; - this.comment = options.comment; - this.unixPermissions = options.unixPermissions; - this.dosPermissions = options.dosPermissions; - - this._data = data; - this._dataBinary = options.binary; - // keep only the compression - this.options = { - compression : options.compression, - compressionOptions : options.compressionOptions - }; -}; - -ZipObject.prototype = { - /** - * Create an internal stream for the content of this object. - * @param {String} type the type of each chunk. - * @return StreamHelper the stream. - */ - internalStream: function (type) { - var result = null, outputType = "string"; - try { - if (!type) { - throw new Error("No output type specified."); - } - outputType = type.toLowerCase(); - var askUnicodeString = outputType === "string" || outputType === "text"; - if (outputType === "binarystring" || outputType === "text") { - outputType = "string"; - } - result = this._decompressWorker(); - - var isUnicodeString = !this._dataBinary; - - if (isUnicodeString && !askUnicodeString) { - result = result.pipe(new utf8.Utf8EncodeWorker()); - } - if (!isUnicodeString && askUnicodeString) { - result = result.pipe(new utf8.Utf8DecodeWorker()); - } - } catch (e) { - result = new GenericWorker("error"); - result.error(e); - } - - return new StreamHelper(result, outputType, ""); - }, - - /** - * Prepare the content in the asked type. - * @param {String} type the type of the result. - * @param {Function} onUpdate a function to call on each internal update. - * @return Promise the promise of the result. - */ - async: function (type, onUpdate) { - return this.internalStream(type).accumulate(onUpdate); - }, - - /** - * Prepare the content as a nodejs stream. - * @param {String} type the type of each chunk. - * @param {Function} onUpdate a function to call on each internal update. - * @return Stream the stream. - */ - nodeStream: function (type, onUpdate) { - return this.internalStream(type || "nodebuffer").toNodejsStream(onUpdate); - }, - - /** - * Return a worker for the compressed content. - * @private - * @param {Object} compression the compression object to use. - * @param {Object} compressionOptions the options to use when compressing. - * @return Worker the worker. 
- */ - _compressWorker: function (compression, compressionOptions) { - if ( - this._data instanceof CompressedObject && - this._data.compression.magic === compression.magic - ) { - return this._data.getCompressedWorker(); - } else { - var result = this._decompressWorker(); - if(!this._dataBinary) { - result = result.pipe(new utf8.Utf8EncodeWorker()); - } - return CompressedObject.createWorkerFrom(result, compression, compressionOptions); - } - }, - /** - * Return a worker for the decompressed content. - * @private - * @return Worker the worker. - */ - _decompressWorker : function () { - if (this._data instanceof CompressedObject) { - return this._data.getContentWorker(); - } else if (this._data instanceof GenericWorker) { - return this._data; - } else { - return new DataWorker(this._data); - } - } -}; - -var removedMethods = ["asText", "asBinary", "asNodeBuffer", "asUint8Array", "asArrayBuffer"]; -var removedFn = function () { - throw new Error("This method has been removed in JSZip 3.0, please check the upgrade guide."); -}; - -for(var i = 0; i < removedMethods.length; i++) { - ZipObject.prototype[removedMethods[i]] = removedFn; -} -module.exports = ZipObject; - -},{"./compressedObject":2,"./stream/DataWorker":27,"./stream/GenericWorker":28,"./stream/StreamHelper":29,"./utf8":31}],36:[function(require,module,exports){ -(function (global){ -'use strict'; -var Mutation = global.MutationObserver || global.WebKitMutationObserver; - -var scheduleDrain; - -{ - if (Mutation) { - var called = 0; - var observer = new Mutation(nextTick); - var element = global.document.createTextNode(''); - observer.observe(element, { - characterData: true - }); - scheduleDrain = function () { - element.data = (called = ++called % 2); - }; - } else if (!global.setImmediate && typeof global.MessageChannel !== 'undefined') { - var channel = new global.MessageChannel(); - channel.port1.onmessage = nextTick; - scheduleDrain = function () { - channel.port2.postMessage(0); - }; - } else if ('document' in global && 'onreadystatechange' in global.document.createElement('script')) { - scheduleDrain = function () { - - // Create a - - - - + + -
- -
-
-
Package neureka
-

Interface Data<V>

+
neureka
+

Interface Data<V>

-
-
-
Type Parameters:
+
+
+
    +
  • +
    +
    Type Parameters:
    V - The type of the data array.
    -
    +
    All Known Subinterfaces:
    -
    DeviceData<V>
    +
    DeviceData<V>
    -
    +
    All Known Implementing Classes:
    -
    AbstractDeviceData
    +
    AbstractDeviceData

    -
    public interface Data<V>
    +
    +
    public interface Data<V>
    A wrapper type for the raw data array of a tensor/nd-array, - which is typically provided by implementations of the Device interface. - Every tensor/nd-array has a Data object which it uses to access its raw data. + which is typically provided by implementations of the Device interface. + Every tensor/nd-array has a Data object which it uses to access its raw data. Use this to access the raw data of a nd-array and to check where it currently resides. But be careful as this exposes mutable state as well as backend specific implementations and types (e.g. OpenCL / JVM arrays).
    -
-
-
    + +
+
+
+
    +
  • -
  • -
    -

    Method Summary

    -
    -
    -
    -
    -
    Modifier and Type
    -
    Method
    -
    Description
    -
    default <D> D
    -
    as(Class<D> dataType)
    -
    +
      +
    • + + +

      Method Summary

      + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
      All Methods Static Methods Instance Methods Abstract Methods Default Methods 
      Modifier and TypeMethod and Description
      default <D> Das(java.lang.Class<D> dataType)
      This returns the underlying raw data object of a nd-array or tensor.
      - - - -
       
      -
      default Object
      -
      get()
      -
      +
      DataType<V>dataType() 
      default java.lang.Objectget()
      This returns the underlying raw data object of a nd-array or tensor of a backend specific type (e.g.
      - - - -
      +
      java.lang.ObjectgetOrNull()
      This returns the underlying raw data object of a nd-array or tensor of a backend specific type (e.g.
      - -
      static Data<Boolean>
      -
      of(boolean... items)
      -
       
      -
      static Data<Byte>
      -
      of(byte... items)
      -
       
      -
      static Data<Character>
      -
      of(char... items)
      -
       
      -
      static Data<Double>
      -
      of(double... items)
      -
       
      -
      static Data<Float>
      -
      of(float... items)
      -
       
      -
      static Data<Integer>
      -
      of(int... items)
      -
       
      -
      static Data<Long>
      -
      of(long... items)
      -
       
      -
      static Data<Short>
      -
      of(short... items)
      -
       
      -
      static <V> Data<V>
      -
      of(Class<V> type, - V... data)
      -
       
      -
      static Data<String>
      -
      of(String... items)
      -
       
      - - -
       
      -
      int
      - -
      +
      static Data<java.lang.Void>none() +
      This is a static factory method which returns a Data object + which does not contain any data.
      +
      static Data<java.lang.Boolean>of(boolean... items) 
      static Data<java.lang.Byte>of(byte... items) 
      static Data<java.lang.Character>of(char... items) 
      static <V> Data<V>of(java.lang.Class<V> type, + V... data) 
      static Data<java.lang.Double>of(double... items) 
      static Data<java.lang.Float>of(float... items) 
      static Data<java.lang.Integer>of(int... items) 
      static Data<java.lang.Long>of(long... items) 
      static Data<java.lang.Short>of(short... items) 
      static Data<java.lang.String>of(java.lang.String... items) 
      Device<V>owner() 
      intusages()
      This method returns the number of times this data object is currently in use by a nd-array, meaning that the number of usages is also the number of nd-arrays which are currently referencing this data object.
      - - - - - +
      +
    • +
- -
-
    +
+
+
    +
  • -
  • -
    -

    Method Details

    -
      -
    • -
      -

      of

      -
      static <V> Data<V> of(Class<V> type, - V... data)
      -
      +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          none

          +
          static Data<java.lang.Void> none()
          +
          This is a static factory method which returns a Data object + which does not contain any data. It is a sort of no-operation null object + which can be used to represent the absence of data. + A deleted tensor will typically have a Data object which does not contain any data.
          +
          +
          Returns:
          +
          A Data object which does not contain any data.
          +
        • -
        • -
          -

          of

          -
          static Data<Float> of(float... items)
          -
          +
        + + + + + +
          +
        • +

          of

          +
          static <V> Data<V> of(java.lang.Class<V> type,
          +                      V... data)
        • -
        • -
          -

          of

          -
          static Data<Double> of(double... items)
          -
          +
        + + + +
          +
        • +

          of

          +
          static Data<java.lang.Float> of(float... items)
        • -
        • -
          -

          of

          -
          static Data<Integer> of(int... items)
          -
          +
        + + + +
          +
        • +

          of

          +
          static Data<java.lang.Double> of(double... items)
        • -
        • -
          -

          of

          -
          static Data<Long> of(long... items)
          -
          +
        + + + +
          +
        • +

          of

          +
          static Data<java.lang.Integer> of(int... items)
        • -
        • -
          -

          of

          -
          static Data<Byte> of(byte... items)
          -
          +
        + + + +
          +
        • +

          of

          +
          static Data<java.lang.Long> of(long... items)
        • -
        • -
          -

          of

          -
          static Data<Short> of(short... items)
          -
          +
        + + + +
          +
        • +

          of

          +
          static Data<java.lang.Byte> of(byte... items)
        • -
        • -
          -

          of

          -
          static Data<Boolean> of(boolean... items)
          -
          +
        + + + +
          +
        • +

          of

          +
          static Data<java.lang.Short> of(short... items)
        • -
        • -
          -

          of

          -
          static Data<Character> of(char... items)
          -
          +
        + + + +
          +
        • +

          of

          +
          static Data<java.lang.Boolean> of(boolean... items)
          +
        • +
        + + + +
          +
        • +

          of

          +
          static Data<java.lang.Character> of(char... items)
        • -
        • -
          -

          of

          -
          static Data<String> of(String... items)
          -
          +
        + + + +
          +
        • +

          of

          +
          static Data<java.lang.String> of(java.lang.String... items)
        • -
        • -
          -

          owner

          -
          Device<V> owner()
          -
          -
          Returns:
          +
        + + + +
          +
        • +

          owner

          +
          Device<V> owner()
          +
          +
          Returns:
          The owner of this data array wrapper (the device which allocated the memory).
          -
  • -
  • -
    -

    getOrNull

    -
    Object getOrNull()
    +
+ + + +
    +
  • +

    getOrNull

    +
    java.lang.Object getOrNull()
    This returns the underlying raw data object of a nd-array or tensor of a backend specific type (e.g. OpenCL memory object or JVM array). - Contrary to the Nda.getItems() ()} method, this will + Contrary to the Nda.getItems() ()} method, this will return an unbiased view on the raw data of this tensor. Be careful using this, as it exposes mutable state!
    -
    -
    Returns:
    +
    +
    Returns:
    The raw data object underlying a nd-array/tensor, or null if the data is not present.
    -
  • -
  • -
    -

    get

    -
    default Object get()
    +
+ + + +
    +
  • +

    get

    +
    default java.lang.Object get()
    This returns the underlying raw data object of a nd-array or tensor of a backend specific type (e.g. OpenCL memory object or JVM array). - Contrary to the Nda.getItems() ()} method, this will + Contrary to the Nda.getItems() ()} method, this will return an unbiased view on the raw data of this tensor. Be careful using this, as it exposes mutable state!
    -
    -
    Returns:
    +
    +
    Returns:
    The raw data object underlying a nd-array/tensor.
    -
    Throws:
    -
    NullPointerException - if the data reference is null.
    +
    Throws:
    +
    java.lang.NullPointerException - if the data reference is null.
    -
  • -
  • -
    -

    as

    -
    default <D> D as(Class<D> dataType)
    +
+ + + +
    +
  • +

    as

    +
    default <D> D as(java.lang.Class<D> dataType)
    This returns the underlying raw data object of a nd-array or tensor. - Contrary to the Nda.getItems() ()} method, this will + Contrary to the Nda.getItems() ()} method, this will return an unbiased view on the raw data of this tensor. Be careful using this, as it exposes mutable state!
    -
    -
    Parameters:
    +
    +
    Parameters:
    dataType - The type the underlying reference object is expected to have (this may be a JVM array or something device specific).
    -
    Returns:
    +
    Returns:
    The raw data object underlying a nd-array/tensor.
    -
  • -
  • -
    -

    dataType

    -
    DataType<V> dataType()
    -
    -
    Returns:
    +
+ + + +
    +
  • +

    dataType

    +
    DataType<V> dataType()
    +
    +
    Returns:
    The data type of the raw data array.
    -
  • -
  • -
    -

    usages

    -
    int usages()
    +
+ + + +
    +
  • +

    usages

    +
    int usages()
    This method returns the number of times this data object is currently in use by a nd-array, meaning that the number of usages is also the number of nd-arrays which are currently referencing this data object.
    The reason why this can be greater than one is because of the existence of sliced, transposed and reshaped nd-arrays which all share the same data object as their parent nd-array.
    -
    -
    Returns:
    +
    +
    Returns:
    The number of times this data object is currently in use by a nd-array.
    -
- - + + +
+
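 The Data interface documented in the page above can be summarized with a short usage sketch. This is a minimal, hedged example assuming the neureka 1.0.x API exactly as listed in the method summary (Data.of, as, get, dataType, usages); that as(float[].class) yields a plain float[] is an assumption which holds for JVM-backed data but not necessarily for device-specific (e.g. OpenCL) data. import neureka.Data; public class DataSketch { public static void main(String[] args) { // Wrap a plain JVM float array in a Data object (Data.of(float...) from the summary above). Data<Float> data = Data.of(1f, 2f, 3f); // as(Class) returns the raw backing object; on the JVM backend this is assumed to be a float[]. float[] raw = data.as(float[].class); // get() returns the same unbiased view but throws a NullPointerException if the reference is null. Object view = data.get(); // usages() reports how many nd-arrays currently reference this data object (presumably 0 here, // since this wrapper was not created by a tensor). System.out.println(raw.length + " items, " + data.usages() + " usages, type " + data.dataType()); } } 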
- + + + + diff --git a/docs/jdocs/neureka/MutateNda.Item.html b/docs/jdocs/neureka/MutateNda.Item.html index bb3e6dc31..cc4545966 100644 --- a/docs/jdocs/neureka/MutateNda.Item.html +++ b/docs/jdocs/neureka/MutateNda.Item.html @@ -1,149 +1,249 @@ - + + - -MutateNda.Item (neureka 1.0.0 API) - - - - + +MutateNda.Item (neureka 1.0.1 API) - - - - - - + + -
- -
-
-
Package neureka
-

Interface MutateNda.Item<V>

+
neureka
+

Interface MutateNda.Item<V>

-
-
-
Type Parameters:
+
+
+
    +
  • +
    +
    Type Parameters:
    V - The type of the items of this nd-array.
    -
    +
    All Superinterfaces:
    -
    Nda.Item<V>
    +
    Nda.Item<V>
    -
    +
    Enclosing interface:
    -
    MutateNda<T>
    +
    MutateNda<T>

    -
    public static interface MutateNda.Item<V> -extends Nda.Item<V>
    -
    Instances of this are being returned by the Nda.at(int...) method, +
    +
    public static interface MutateNda.Item<V>
    +extends Nda.Item<V>
    +
    Instances of this are being returned by the Nda.at(int...) method, and they allow you to get or set individual nd-array items
    -
-
-
    + +
+
+
+ - -
-
    +
+
+
    +
  • -
  • -
    -

    Method Details

    -
      -
    • -
      -

      set

      -
      void set(V value)
      +
        +
      • + + +

        Method Detail

        + + + + + +
          +
        • +

          set

          +
          void set(V value)
          Set the value at the targeted position.
          -
          -
          Parameters:
          +
          +
          Parameters:
          value - The value to be set at the targeted position.
          -
    -
- + + +
+
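 As a hedged illustration of the Item contract documented above: a MutateNda.Item handle is obtained through MutateNda.at(int...), and set(V) writes through it. How the MutateNda<Double> handle itself is obtained (typically from a tensor's mutation API) is not part of this page and is assumed here. import neureka.MutateNda; public class ItemSketch { // Assumes the caller already holds a MutateNda handle for a 2D nd-array of doubles. static void writeItem(MutateNda<Double> mut) { // at(int...) exposes the MutateNda.Item documented above for the given nd-position. MutateNda.Item<Double> item = mut.at(0, 1); item.set(42.0); // set(V value): place the value at the targeted position } } 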
- + + + + diff --git a/docs/jdocs/neureka/MutateNda.html b/docs/jdocs/neureka/MutateNda.html index 4dce1ab70..d6d8777d9 100644 --- a/docs/jdocs/neureka/MutateNda.html +++ b/docs/jdocs/neureka/MutateNda.html @@ -1,269 +1,328 @@ - + + - -MutateNda (neureka 1.0.0 API) - - - - + +MutateNda (neureka 1.0.1 API) - - - - - - + + -
- -
-
-
Package neureka
-

Interface MutateNda<T>

+
neureka
+

Interface MutateNda<T>

-
-
+
+
+
    +
  • +
    All Known Subinterfaces:
    -
    MutateTensor<T>
    +
    MutateTensor<T>

    -
    public interface MutateNda<T>
    +
    +
    public interface MutateNda<T>
    Nd-arrays should be used as immutable data structures mostly, however sometimes it is important to mutate their state for performance reasons. This interface exposes several methods for mutating the state of this nd-array. The usage of methods exposed by this API is generally discouraged because the exposed state can easily lead to broken nd-arrays and exceptions...

    -
-
-
    + +
+
+
+
    +
  • -
  • -
    -

    Nested Class Summary

    -
    Nested Classes
    -
    -
    Modifier and Type
    -
    Interface
    -
    Description
    -
    static interface 
    - -
    -
    Instances of this are being returned by the Nda.at(int...) method, +
      +
    • + + +

      Nested Class Summary

      + + + + + + + + + + +
      Nested Classes 
      Modifier and TypeInterface and Description
      static interface MutateNda.Item<V> +
      Instances of this are being returned by the Nda.at(int...) method, and they allow you to get or set individual nd-array items
      - - - +
    • +
    -
  • -
    -

    Method Summary

    -
    -
    -
    -
    -
    Modifier and Type
    -
    Method
    -
    Description
    - -
    assign(Nda<T> other)
    -
    +
      +
    • + + +

      Method Summary

      + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
      All Methods Instance Methods Abstract Methods Default Methods 
      Modifier and TypeMethod and Description
      Nda<T>assign(Nda<T> other)
      Use this to assign the provided nd-array to this nd-array! This method is an inline operation which changes the underlying data of the nd-array.
      - - -
      assign(T other)
      -
      +
      Nda<T>assign(T other)
      Use this to assign the provided item to all elements of this nd-array! This method is an inline operation which changes the underlying data of the nd-array.
      - - -
      at(int... indices)
      -
      -
      Exposes the MutateNda.Item interface which allows you to get or set individual nd-array items.
      -
      - - -
      -
      At the heart of every tensor is the Data object, which holds the actual data array, +
      MutateNda.Item<T>at(int... indices) +
      Exposes the MutateNda.Item interface which allows you to get or set individual nd-array items.
      +
      Data<T>getData() +
      At the heart of every tensor is the Data object, which holds the actual data array, a sequence of values of the same type.
      - -
      <A> A
      -
      getDataAs(Class<A> arrayTypeClass)
      -
      +
      <A> AgetDataAs(java.lang.Class<A> arrayTypeClass)
      This method returns the data of this nd-array as a Java array of the specified type.
      - - -
      label(String label)
      -
      +
      Nda<T>label(java.lang.String label)
      Sets the label of this nd-array.
      - - -
      labelAxes(String[]... labels)
      -
      -
      This method receives a nested String array which - ought to contain a label for the index of this tensor.
      -
      - - -
      -
      This method receives a nested String list which +
      Nda<T>labelAxes(java.util.List<java.util.List<java.lang.Object>> labels) +
      This method receives a nested String list which ought to contain a label for the index of this tensor.
      - - - -
      +
      Nda<T>labelAxes(java.util.Map<java.lang.Object,java.util.List<java.lang.Object>> labels)
      This method provides the ability to label not only the indices of the shape of this tensor, but also the dimension of the shape.
      - - -
      putAt(int[] indices, - T value)
      -
      +
      Nda<T>labelAxes(java.lang.String[]... labels) +
      This method receives a nested String array which + ought to contain a label for the index of this tensor.
      +
      Nda<T>putAt(int[] indices, + T value)
      Use this to put a single item at a particular position within this nd-array.
      - - -
      putAt(int index, - T value)
      -
      +
      Nda<T>putAt(int index, + T value)
      Individual entries for value items in this nd-array can be set via this method.
      - - -
      putAt(List<?> key, - Nda<T> value)
      -
      +
      Nda<T>putAt(java.util.List<?> key, + Nda<T> value)
      This method enables injecting slices of nd-array to be assigned into this nd-array! It takes a key of various types which is used to configure a slice nd-array sharing the same underlying data as the original nd-array.
      - - -
      putAt(List<?> indices, - T value)
      -
      +
      Nda<T>putAt(java.util.List<?> indices, + T value)
      Use this to place a single item at a particular position within this nd-array!
      - - -
      putAt(Map<?,Integer> key, - Nda<T> value)
      -
      +
      Nda<T>putAt(java.util.Map<?,java.lang.Integer> key, + Nda<T> value)
      This method enables assigning a provided nd-array to be a subset/slice of this nd-array! It takes a key which is used to configure a slice sharing the same underlying data as the original nd-array.
      - - -
      set(int[] indices, - T value)
      -
      +
      Nda<T>set(int[] indices, + T value)
      Use this to place a single item at a particular position within this nd-array!
      - -
      default Nda<T>
      -
      set(int i0, - int i1, - int i2, - T value)
      -
       
      -
      default Nda<T>
      -
      set(int i0, - int i1, - T value)
      -
       
      - -
      set(int index, - T value)
      -
      +
      default Nda<T>set(int i0, + int i1, + int i2, + T value) 
      default Nda<T>set(int i0, + int i1, + T value) 
      Nda<T>set(int index, + T value)
      Individual entries for value items in this nd-array can be set via this method.
      - - -
      setItemAt(int i, - T o)
      -
      +
      Nda<T>setItemAt(int i, + T o)
      An NDArray implementation ought to have some way to selectively modify its underlying value.
      - - - -
      +
      Nda<T>setItems(java.lang.Object value)
      This method will receive an object an try to interpret it or its contents to be set as value for this nd-array.
      - -
      <V> Nda<V>
      -
      toType(Class<V> typeClass)
      -
      +
      <V> Nda<V>toType(java.lang.Class<V> typeClass)
      This method is an inline operation which changes the underlying data of this tensor.
      - - - - - +
    -
    -
    -
      + +
    +
  • +
    +
      +
    • -
    • -
      -

      Method Details

      -
        -
      • -
        -

        putAt

        -
        Nda<T> putAt(Map<?,Integer> key, - Nda<T> value)
        +
          +
        • + + +

          Method Detail

          + + + +
            +
          • +

            putAt

            +
            Nda<T> putAt(java.util.Map<?,java.lang.Integer> key,
            +             Nda<T> value)
            This method enables assigning a provided nd-array to be a subset/slice of this nd-array! It takes a key which is used to configure a slice sharing the same underlying data as the original nd-array. @@ -278,170 +337,219 @@

            putAt

            i... start indexAlias.
            j... end indexAlias. (inclusive!)
            k... step size.
            -
            -
            Parameters:
            +
            +
            Parameters:
            key - This object is a map defining a step and a targeted index or range of indices...
            value - The nd-array which ought to be assigned into a slice of this nd-array.
            -
            Returns:
            +
            Returns:
            A slice nd-array or scalar value.
            -
      • -
      • -
        -

        putAt

        -
        Nda<T> putAt(int[] indices, - T value)
        +
      + + + + + +
        +
      • +

        putAt

        +
        Nda<T> putAt(int[] indices,
        +             T value)
        Use this to put a single item at a particular position within this nd-array.
        -
        -
        Parameters:
        +
        +
        Parameters:
        indices - The indices of the nd-position where the provided item should be placed.
        value - The item which should be placed at the position defined by the provided indices.
        -
        Returns:
        +
        Returns:
        This nd-array itself.
        -
    • -
    • -
      -

      set

      -
      Nda<T> set(int[] indices, - T value)
      +
    + + + + + +
      +
    • +

      set

      +
      Nda<T> set(int[] indices,
      +           T value)
      Use this to place a single item at a particular position within this nd-array!
      -
      -
      Parameters:
      +
      +
      Parameters:
      indices - An array of indices targeting a particular position in this nd-array...
      value - the value which ought to be placed at the targeted position.
      -
      Returns:
      +
      Returns:
      This very nd-array in order to enable method chaining...
      -
  • -
  • -
    -

    set

    -
    default Nda<T> set(int i0, - int i1, - T value)
    -
    +
+ + + + + +
    +
  • +

    set

    +
    default Nda<T> set(int i0,
    +                   int i1,
    +                   T value)
  • -
  • -
    -

    set

    -
    default Nda<T> set(int i0, - int i1, - int i2, - T value)
    -
    +
+ + + + + +
    +
  • +

    set

    +
    default Nda<T> set(int i0,
    +                   int i1,
    +                   int i2,
    +                   T value)
  • -
  • -
    -

    putAt

    -
    Nda<T> putAt(int index, - T value)
    +
+ + + + + +
    +
  • +

    putAt

    +
    Nda<T> putAt(int index,
    +             T value)
    Individual entries for value items in this nd-array can be set via this method.
    -
    -
    Parameters:
    +
    +
    Parameters:
    index - The scalar index targeting a specific value position within this nd-array which ought to be replaced by the one provided by the second parameter of this method.
    value - The item which ought to be placed at the targeted position.
    -
    Returns:
    +
    Returns:
    This very nd-array in order to enable method chaining...
    -
  • -
  • -
    -

    set

    -
    Nda<T> set(int index, - T value)
    +
+ + + + + +
    +
  • +

    set

    +
    Nda<T> set(int index,
    +           T value)
    Individual entries for value items in this nd-array can be set via this method.
    -
    -
    Parameters:
    +
    +
    Parameters:
    index - The scalar index targeting a specific value position within this nd-array which ought to be replaced by the one provided by the second parameter of this method.
    value - The item which ought to be placed at the targeted position.
    -
    Returns:
    +
    Returns:
    This very nd-array in order to enable method chaining...
    -
  • -
  • -
    -

    putAt

    -
    Nda<T> putAt(List<?> key, - Nda<T> value)
    +
+ + + +
    +
  • +

    putAt

    +
    Nda<T> putAt(java.util.List<?> key,
    +             Nda<T> value)
    This method enables injecting slices of nd-array to be assigned into this nd-array! It takes a key of various types which is used to configure a slice nd-array sharing the same underlying data as the original nd-array. This slice is then used to assign the second argument to it, namely the "value" argument.
    -
    -
    Parameters:
    +
    +
    Parameters:
    key - This object is a list defining a targeted index or range of indices...
    value - the nd-array which ought to be assigned to a slice of this nd-array.
    -
    Returns:
    +
    Returns:
    This very nd-array in order to enable method chaining...
    -
  • -
  • -
    -

    putAt

    -
    Nda<T> putAt(List<?> indices, - T value)
    +
+ + + + + +
    +
  • +

    putAt

    +
    Nda<T> putAt(java.util.List<?> indices,
    +             T value)
    Use this to place a single item at a particular position within this nd-array!
    -
    -
    Parameters:
    +
    +
    Parameters:
    indices - A list of indices targeting a particular position in this nd-array...
    value - the value which ought to be placed at the targeted position.
    -
    Returns:
    +
    Returns:
    This very nd-array in order to enable method chaining...
    -
  • -
  • -
    -

    setItemAt

    -
    Nda<T> setItemAt(int i, - T o)
    +
+ + + + + +
    +
  • +

    setItemAt

    +
    Nda<T> setItemAt(int i,
    +                 T o)
    An NDArray implementation ought to have some way to selectively modify its underlying value. This method simply overrides an element within this data array sitting at position "i".
    -
    -
    Parameters:
    +
    +
    Parameters:
    i - The index of the value array entry which ought to be addressed.
    o - The object which ought to be placed at the requested position.
    -
    Returns:
    +
    Returns:
    This very nd-array in order to enable method chaining.
    -
  • -
  • -
    -

    setItems

    -
    Nda<T> setItems(Object value)
    +
+ + + +
    +
  • +

    setItems

    +
    Nda<T> setItems(java.lang.Object value)
    This method will receive an object an try to interpret it or its contents to be set as value for this nd-array. It will not necessarily replace the underlying data array object of this nd-array itself, but also try to convert and copy the provided value into the data array of this nd-array.
    -
    -
    Parameters:
    +
    +
    Parameters:
    value - The value which may be a scalar or array and will be used to populate this nd-array.
    -
    Returns:
    +
    Returns:
    This very nd-array to enable method chaining.
    -
  • -
  • -
    -

    toType

    -
    <V> Nda<V> toType(Class<V> typeClass)
    +
+ + + +
    +
  • +

    toType

    +
    <V> Nda<V> toType(java.lang.Class<V> typeClass)
    This method is an inline operation which changes the underlying data of this tensor. It converts the data types of the elements of this tensor to the specified type!

    @@ -457,21 +565,24 @@

    toType

    Therefore, there might be unexpected performance penalties or side effects associated with this method.

    -
    -
    Type Parameters:
    +
    +
    Type Parameters:
    V - The type parameter for the returned tensor.
    -
    Parameters:
    +
    Parameters:
    typeClass - The target type class for elements of this tensor.
    -
    Returns:
    +
    Returns:
    The same tensor instance whose data has been converted to hold a different type.
    -
  • -
  • -
    -

    getData

    -
    Data<T> getData()
    -
    At the heart of every tensor is the Data object, which holds the actual data array, +
+ + + +
    +
  • +

    getData

    +
    Data<T> getData()
    +
    At the heart of every tensor is the Data object, which holds the actual data array, a sequence of values of the same type. This method returns the data object of this nd-array.

    @@ -479,74 +590,91 @@

    getData

    It should be used for reading the data array of this nd-array. Modifying the data array of this nd-array can have side effects on other tensors because they might share the same data array (the might be slices).
    -
    -
    Returns:
    +
    +
    Returns:
    The data object of this nd-array.
    -
  • -
  • -
    -

    getDataAs

    -
    <A> A getDataAs(Class<A> arrayTypeClass)
    +
+ + + +
    +
  • +

    getDataAs

    +
    <A> A getDataAs(java.lang.Class<A> arrayTypeClass)
    This method returns the data of this nd-array as a Java array of the specified type.

    Warning: This method is not intended to be used for modifying the data array of this nd-array!

    -
    -
    Returns:
    +
    +
    Returns:
    The data of this nd-array as a Java array of the specified type.
    -
  • -
  • -
    -

    assign

    -
    Nda<T> assign(T other)
    +
+ + + + + +
    +
  • +

    assign

    +
    Nda<T> assign(T other)
    Use this to assign the provided item to all elements of this nd-array! This method is an inline operation which changes the underlying data of the nd-array.
    -
    -
    Parameters:
    +
    +
    Parameters:
    other - The item which ought to be assigned to all elements of this nd-array.
    -
    Returns:
    +
    Returns:
    This very nd-array to enable method chaining.
    -
  • -
  • -
    -

    assign

    -
    Nda<T> assign(Nda<T> other)
    +
+ + + +
    +
  • +

    assign

    +
    Nda<T> assign(Nda<T> other)
    Use this to assign the provided nd-array to this nd-array! This method is an inline operation which changes the underlying data of the nd-array.
    -
    -
    Parameters:
    +
    +
    Parameters:
    other - The nd-array which ought to be assigned to this nd-array.
    -
    Returns:
    +
    Returns:
    This very nd-array to enable method chaining.
    -
  • -
  • -
    -

    label

    -
    Nda<T> label(String label)
    +
+ + + +
    +
  • +

    label

    +
    Nda<T> label(java.lang.String label)
    Sets the label of this nd-array. The label is a human-readable string which can be used to identify this nd-array for example in a pretty-printed output.
    -
    -
    Parameters:
    +
    +
    Parameters:
    label - The label of this nd-array.
    -
    Returns:
    +
    Returns:
    This very nd-array to enable method chaining.
    -
  • -
  • -
    -

    labelAxes

    -
    Nda<T> labelAxes(String[]... labels)
    -
    This method receives a nested String array which +
+ + + +
    +
  • +

    labelAxes

    +
    Nda<T> labelAxes(java.lang.String[]... labels)
    +
    This method receives a nested String array which ought to contain a label for the index of this tensor. The index for a single element of this tensor would be an array of numbers as long as the rank where every number is @@ -558,19 +686,22 @@

    labelAxes

    dim 0 : ["A", "B"]
    dim 1 : ["1", "2", "3"]

    -
    -
    Parameters:
    +
    +
    Parameters:
    labels - A nested String array containing labels for indexes of the tensor dimensions.
    -
    Returns:
    +
    Returns:
    This tensor (method chaining).
    -
  • -
  • -
    -

    labelAxes

    -
    Nda<T> labelAxes(List<List<Object>> labels)
    -
    This method receives a nested String list which +
+ + + +
    +
  • +

    labelAxes

    +
    Nda<T> labelAxes(java.util.List<java.util.List<java.lang.Object>> labels)
    +
    This method receives a nested String list which ought to contain a label for the index of this tensor. The index for a single element of this tensor would be an array of numbers as long as the rank where every number is @@ -582,18 +713,21 @@

    labelAxes

    dim 0 : ["A", "B"]
    dim 1 : ["1", "2", "3"]

    -
    -
    Parameters:
    +
    +
    Parameters:
    labels - A nested String list containing labels for indexes of the tensor dimensions.
    -
    Returns:
    +
    Returns:
    This tensor (method chaining).
    -
  • -
  • -
    -

    labelAxes

    -
    Nda<T> labelAxes(Map<Object,List<Object>> labels)
    +
+ + + +
    +
  • +

    labelAxes

    +
    Nda<T> labelAxes(java.util.Map<java.lang.Object,java.util.List<java.lang.Object>> labels)
    This method provides the ability to label not only the indices of the shape of this tensor, but also the dimension of the shape. @@ -606,35 +740,97 @@

    labelAxes

    "dim 1" : ["1", "2", "3"]
    ]

    -
    -
    Parameters:
    +
    +
    Parameters:
    labels - A map in which the keys are dimension labels and the values are lists of index labels for the dimension.
    -
    Returns:
    +
    Returns:
    This tensor (method chaining).
    -
  • -
  • -
    -

    at

    -
    MutateNda.Item<T> at(int... indices)
    -
    Exposes the MutateNda.Item interface which allows you to get or set individual nd-array items.
    -
    -
    Parameters:
    +
+ + + +
    +
  • +

    at

    +
    MutateNda.Item<T> at(int... indices)
    +
    Exposes the MutateNda.Item interface which allows you to get or set individual nd-array items.
    +
    +
    Parameters:
    indices - The indices of the item to be returned.
    -
    Returns:
    +
    Returns:
    The item at the specified indices.
    -
- - + + +
+
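 A compact, hedged sketch of the mutation methods listed above (assign, putAt, set, label, labelAxes), assuming a MutateNda<Float> handle for a 2x3 nd-array obtained elsewhere; the axis labels mirror the example given in the labelAxes documentation. Only the calls themselves are taken from this page's method summary. import neureka.MutateNda; public class MutateSketch { // The MutateNda handle is assumed to come from an existing 2x3 nd-array. static void fillAndLabel(MutateNda<Float> mut) { mut.assign(0f); // assign one item to all elements (inline operation) mut.putAt(new int[]{0, 1}, 42f); // put a single item at an nd-position mut.set(1, 2, 7f); // convenience overload for two indices plus a value mut.label("example"); // human-readable label used in pretty-printed output mut.labelAxes(new String[]{"A", "B"}, // per-axis index labels, as in the labelAxes example new String[]{"1", "2", "3"}); } } 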
- + + + + diff --git a/docs/jdocs/neureka/MutateTensor.html b/docs/jdocs/neureka/MutateTensor.html index 5b4507256..adfbe64d2 100644 --- a/docs/jdocs/neureka/MutateTensor.html +++ b/docs/jdocs/neureka/MutateTensor.html @@ -1,342 +1,423 @@ - + + - -MutateTensor (neureka 1.0.0 API) - - - - + +MutateTensor (neureka 1.0.1 API) - - - - - - + + -
- -
-
-
Package neureka
-

Interface MutateTensor<T>

+
neureka
+

Interface MutateTensor<T>

-
-
+
+
+
    +
  • +
    All Superinterfaces:
    -
    MutateNda<T>
    +
    MutateNda<T>

    -
    public interface MutateTensor<T> -extends MutateNda<T>
    +
    +
    public interface MutateTensor<T>
    +extends MutateNda<T>
    Tensors should be considered immutable, however sometimes it is important to mutate their state for performance reasons. This interface exposes several methods for mutating the state of a tensor. The usage of methods exposed by this API is generally discouraged because the exposed state can easily lead to broken tensors and exceptions...

    -
-
-
    + +
+
+
+ - -
-
    +
+
+
    +
  • -
  • -
    -

    Method Details

    -
      -
    • -
      -

      putAt

      -
      Tensor<T> putAt(Map<?,Integer> key, - Nda<T> value)
      +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          putAt

          +
          Tensor<T> putAt(java.util.Map<?,java.lang.Integer> key,
          +                Nda<T> value)
          This method enables assigning a provided nd-array to be a subset/slice of this nd-array! It takes a key which is used to configure a slice sharing the same underlying data as the original nd-array. @@ -351,224 +432,279 @@

          putAt

          i... start indexAlias.
          j... end indexAlias. (inclusive!)
          k... step size.
          -
          -
          Specified by:
          -
          putAt in interface MutateNda<T>
          -
          Parameters:
          +
          +
          Specified by:
          +
          putAt in interface MutateNda<T>
          +
          Parameters:
          key - This object is a map defining a step and a targeted index or range of indices...
          value - The nd-array which ought to be assigned into a slice of this nd-array.
          -
          Returns:
          +
          Returns:
          A slice nd-array or scalar value.
          -
    • -
    • -
      -

      putAt

      -
      Tensor<T> putAt(int[] indices, - T value)
      +
    + + + + + +
      +
    • +

      putAt

      +
      Tensor<T> putAt(int[] indices,
      +                T value)
      Use this to put a single item at a particular position within this nd-array.
      -
      -
      Specified by:
      -
      putAt in interface MutateNda<T>
      -
      Parameters:
      +
      +
      Specified by:
      +
      putAt in interface MutateNda<T>
      +
      Parameters:
      indices - The indices of the nd-position where the provided item should be placed.
      value - The item which should be placed at the position defined by the provided indices.
      -
      Returns:
      +
      Returns:
      This nd-array itself.
      -
  • -
  • -
    -

    set

    -
    default Tensor<T> set(int[] indices, - T value)
    +
+ + + + + +
    +
  • +

    set

    +
    default Tensor<T> set(int[] indices,
    +                      T value)
    Use this to place a single item at a particular position within this nd-array!
    -
    -
    Specified by:
    -
    set in interface MutateNda<T>
    -
    Parameters:
    +
    +
    Specified by:
    +
    set in interface MutateNda<T>
    +
    Parameters:
    indices - An array of indices targeting a particular position in this nd-array...
    value - the value which ought to be placed at the targeted position.
    -
    Returns:
    +
    Returns:
    This very nd-array in order to enable method chaining...
    - -
  • -
  • -
    -

    set

    -
    default Tensor<T> set(int i0, - int i1, - T value)
    -
    -
    Specified by:
    -
    set in interface MutateNda<T>
    -
    -
    -
  • -
  • -
    -

    set

    -
    default Tensor<T> set(int i0, - int i1, - int i2, - T value)
    -
    -
    Specified by:
    -
    set in interface MutateNda<T>
    -
    -
    -
  • -
  • -
    -

    putAt

    -
    Tensor<T> putAt(int index, - T value)
    +
  • +
+ + + + + +
    +
  • +

    set

    +
    default Tensor<T> set(int i0,
    +                      int i1,
    +                      T value)
    +
    +
    Specified by:
    +
    set in interface MutateNda<T>
    +
    +
  • +
+ + + + + +
    +
  • +

    set

    +
    default Tensor<T> set(int i0,
    +                      int i1,
    +                      int i2,
    +                      T value)
    +
    +
    Specified by:
    +
    set in interface MutateNda<T>
    +
    +
  • +
+ + + + + +
    +
  • +

    putAt

    +
    Tensor<T> putAt(int index,
    +                T value)
    Individual entries for value items in this nd-array can be set via this method.
    -
    -
    Specified by:
    -
    putAt in interface MutateNda<T>
    -
    Parameters:
    +
    +
    Specified by:
    +
    putAt in interface MutateNda<T>
    +
    Parameters:
    index - The scalar index targeting a specific value position within this nd-array which ought to be replaced by the one provided by the second parameter of this method.
    value - The item which ought to be placed at the targeted position.
    -
    Returns:
    +
    Returns:
    This very nd-array in order to enable method chaining...
    -
  • -
  • -
    -

    set

    -
    default Tensor<T> set(int index, - T value)
    +
+ + + + + +
    +
  • +

    set

    +
    default Tensor<T> set(int index,
    +                      T value)
    Individual entries for value items in this nd-array can be set via this method.
    -
    -
    Specified by:
    -
    set in interface MutateNda<T>
    -
    Parameters:
    +
    +
    Specified by:
    +
    set in interface MutateNda<T>
    +
    Parameters:
    index - The scalar index targeting a specific value position within this nd-array which ought to be replaced by the one provided by the second parameter of this method.
    value - The item which ought to be placed at the targeted position.
    -
    Returns:
    +
    Returns:
    This very nd-array in order to enable method chaining...
    -
  • -
  • -
    -

    putAt

    -
    Tensor<T> putAt(List<?> key, - Nda<T> value)
    +
+ + + +
    +
  • +

    putAt

    +
    Tensor<T> putAt(java.util.List<?> key,
    +                Nda<T> value)
    This method enables injecting slices of nd-array to be assigned into this nd-array! It takes a key of various types which is used to configure a slice nd-array sharing the same underlying data as the original nd-array. This slice is then used to assign the second argument to it, namely the "value" argument.
    -
    -
    Specified by:
    -
    putAt in interface MutateNda<T>
    -
    Parameters:
    +
    +
    Specified by:
    +
    putAt in interface MutateNda<T>
    +
    Parameters:
    key - This object is a list defining a targeted index or range of indices...
    value - the nd-array which ought to be assigned to a slice of this nd-array.
    -
    Returns:
    +
    Returns:
    This very nd-array in order to enable method chaining...
    -
  • -
  • -
    -

    putAt

    -
    Tensor<T> putAt(List<?> indices, - T value)
    +
+ + + + + +
    +
  • +

    putAt

    +
    Tensor<T> putAt(java.util.List<?> indices,
    +                T value)
    Use this to place a single item at a particular position within this nd-array!
    -
    -
    Specified by:
    -
    putAt in interface MutateNda<T>
    -
    Parameters:
    +
    +
    Specified by:
    +
    putAt in interface MutateNda<T>
    +
    Parameters:
    indices - A list of indices targeting a particular position in this nd-array...
    value - the value which ought to be placed at the targeted position.
    -
    Returns:
    +
    Returns:
    This very nd-array in order to enable method chaining...
    -
  • -
  • -
    -

    setItemAt

    -
    Tensor<T> setItemAt(int i, - T o)
    +
+ + + + + +
    +
  • +

    setItemAt

    +
    Tensor<T> setItemAt(int i,
    +                    T o)
    An NDArray implementation ought to have some way to selectively modify its underlying value. This method simply overrides an element within this data array sitting at position "i".
    -
    -
    Specified by:
    -
    setItemAt in interface MutateNda<T>
    -
    Parameters:
    +
    +
    Specified by:
    +
    setItemAt in interface MutateNda<T>
    +
    Parameters:
    i - The index of the value array entry which ought to be addressed.
    o - The object which ought to be placed at the requested position.
    -
    Returns:
    +
    Returns:
    This very nd-array in order to enable method chaining.
    -
  • -
  • -
    -

    setItems

    -
    Tensor<T> setItems(Object value)
    +
+ + + +
    +
  • +

    setItems

    +
    Tensor<T> setItems(java.lang.Object value)
    This method will receive an object an try to interpret it or its contents to be set as value for this nd-array. It will not necessarily replace the underlying data array object of this nd-array itself, but also try to convert and copy the provided value into the data array of this nd-array.
    -
    -
    Specified by:
    -
    setItems in interface MutateNda<T>
    -
    Parameters:
    +
    +
    Specified by:
    +
    setItems in interface MutateNda<T>
    +
    Parameters:
    value - The value which may be a scalar or array and will be used to populate this nd-array.
    -
    Returns:
    +
    Returns:
    This very nd-array to enable method chaining.
    -
  • -
  • -
    -

    addToGradient

    -
    Tensor<T> addToGradient(Tensor<T> error)
    -
    This method takes the provided Tensor instance and adds its - contents to the contents of the Tensor which is set as gradient of this very Tensor.
    -
    -
    Parameters:
    +
+ + + +
    +
  • +

    addToGradient

    +
    Tensor<T> addToGradient(Tensor<T> error)
    +
    This method takes the provided Tensor instance and adds its + contents to the contents of the Tensor which is set as gradient of this very Tensor.
    +
    +
    Parameters:
    error - The error gradient which ought to be added to the gradient of this tensor.
    -
    Returns:
    +
    Returns:
    This very tensor instance to enable method chaining.
    -
  • -
  • -
    -

    setNDConf

    -
    Tensor<T> setNDConf(NDConfiguration configuration)
    +
+ + + +
    +
  • +

    setNDConf

    +
    Tensor<T> setNDConf(NDConfiguration configuration)
    This method sets the NDConfiguration of this NDArray. Therefore, it should not be used lightly as it can cause major internal inconsistencies.
    -
    -
    Parameters:
    +
    +
    Parameters:
    configuration - The new NDConfiguration instance which ought to be set.
    -
    Returns:
    +
    Returns:
    The final instance type of this class which enables method chaining.
    -
  • -
  • -
    -

    toType

    -
    <V> Tensor<V> toType(Class<V> typeClass)
    +
+ + + +
    +
  • +

    toType

    +
    <V> Tensor<V> toType(java.lang.Class<V> typeClass)
    This method is an inline operation which changes the underlying data of this tensor. It converts the data types of the elements of this tensor to the specified type!

    @@ -584,123 +720,146 @@

    toType

    Therefore, there might be unexpected performance penalties or side effects associated with this method.

    -
    -
    Specified by:
    -
    toType in interface MutateNda<T>
    -
    Type Parameters:
    +
    +
    Specified by:
    +
    toType in interface MutateNda<T>
    +
    Type Parameters:
    V - The type parameter for the returned tensor.
    -
    Parameters:
    +
    Parameters:
    typeClass - The target type class for elements of this tensor.
    -
    Returns:
    +
    Returns:
    The same tensor instance whose data has been converted to hold a different type.
    -
  • -
  • -
    -

    upcast

    -
    <U> Tensor<U> upcast(Class<U> superType)
    +
+ + + +
    +
  • +

    upcast

    +
    <U> Tensor<U> upcast(java.lang.Class<U> superType)
    Use this to do a runtime checked upcast of the type parameter of the tensor. - This is unsafe because it is in conflict with the Nda.itemType() + This is unsafe because it is in conflict with the Nda.itemType() method.
    -
    -
    Type Parameters:
    +
    +
    Type Parameters:
    U - The super type parameter of the value type of the tensor.
    -
    Parameters:
    +
    Parameters:
    superType - The class of the super type of the tensor's value type.
    -
    Returns:
    +
    Returns:
    A tensor whose type parameter is upcast.
    -
  • -
  • -
    -

    toLayout

    -
    Tensor<T> toLayout(NDConfiguration.Layout layout)
    +
+ + + +
    +
  • +

    toLayout

    +
    Tensor<T> toLayout(NDConfiguration.Layout layout)
    This method allows you to modify the data-layout of this AbstractNda. Warning! The method should not be used unless absolutely necessary. This is because it can cause unpredictable side effects especially for certain operations expecting a particular data layout (like for example matrix multiplication).
    -
    -
    Parameters:
    +
    +
    Parameters:
    layout - The layout of the data array (row or column major).
    -
    Returns:
    +
    Returns:
    The final instance type of this class which enables method chaining.
    -
  • -
  • -
    -

    incrementVersion

    -
    Tensor<T> incrementVersion(ExecutionCall<?> call)
    +
+ + + +
    +
  • +

    incrementVersion

    +
    Tensor<T> incrementVersion(ExecutionCall<?> call)
    This method is responsible for incrementing the "_version" field variable which represents the version of the data of this tensor. Meaning : Every time the underlying data (_value) changes this version ought to increment alongside. The method is called during the execution procedure.
    -
    -
    Parameters:
    +
    +
    Parameters:
    call - The context object containing all relevant information that defines a call for tensor execution.
    -
    Returns:
    +
    Returns:
    This very tensor instance. (factory pattern)
    -
  • -
  • -
    -

    setIsIntermediate

    -
    Tensor<T> setIsIntermediate(boolean isIntermediate)
    +
+ + + +
    +
  • +

    setIsIntermediate

    +
    Tensor<T> setIsIntermediate(boolean isIntermediate)
    Intermediate tensors are internal non-user tensors which may be eligible - for deletion when further consumed by a Function. + for deletion when further consumed by a Function. For the casual user of Neureka, this flag should always be false!
    -
    -
    Parameters:
    +
    +
    Parameters:
    isIntermediate - The truth value determining if this tensor is not a user tensor but an internal - tensor which may be eligible for deletion by Functions consuming it.
    -
    Returns:
    + tensor which may be eligible for deletion by Functions consuming it. +
    Returns:
    The tensor to which this unsafe API belongs.
    -
  • -
  • -
    -

    delete

    -
    Tensor<T> delete()
    +
+ + + +
    +
  • +

    delete

    +
    Tensor<T> delete()
Although tensors will be garbage collected when they are not strongly referenced, there is also the option to manually free up the tensor and its associated data in a native environment. - This is especially useful when tensors are stored on a device like the OpenCLDevice. + This is especially useful when tensors are stored on a device like the OpenCLDevice. In that case calling this method will free the memory reserved for this tensor on the device. This manual memory freeing through this method can be faster than waiting for the garbage collector to kick in at a later point in time...

    -
    -
    Returns:
    +
    +
    Returns:
The tensor to which this unsafe API belongs, to allow for method chaining.
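A hedged sketch of manual deletion. Device storage is only hinted at above, and the builder and mut() accessor used here are assumptions:

    Tensor<Float> t = Tensor.of(Float.class).withShape(1024, 1024).all(0f); // assumed builder
    // ... use the tensor ...
    t.mut().delete(); // frees the tensor (and its device memory, if it lives on a device) without waiting for GC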
    -
  • -
  • -
    -

    setDataAt

    -
    Tensor<T> setDataAt(int i, - T o)
    +
+ + + + + +
    +
  • +

    setDataAt

    +
    Tensor<T> setDataAt(int i,
    +                    T o)
    A tensor ought to have some way to selectively modify its underlying data array. This method simply overrides an element within this data array sitting at position "i".
    -
    -
    Parameters:
    +
    +
    Parameters:
    i - The index of the data array entry which ought to be addressed.
    o - The object which ought to be placed at the requested position.
    -
    Returns:
    +
    Returns:
    This very tensor in order to enable method chaining.
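A hedged sketch of setDataAt. Note that the index addresses a position in the raw data array, not an nd-index; the factory and mut() accessor are assumptions:

    Tensor<Double> t = Tensor.of(1d, 2d, 3d, 4d); // assumed factory
    t.mut().setDataAt(2, 9d);                     // overwrites the third entry of the underlying data array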
    -
  • -
  • -
    -

    setData

    -
    Tensor<T> setData(Data<T> data)
    -
    At the heart of every tensor is the Data object, which holds the actual data array, +
+ + + +
    +
  • +

    setData

    +
    Tensor<T> setData(Data<T> data)
    +
    At the heart of every tensor is the Data object, which holds the actual data array, a sequence of values of the same type. This method allows you to set the data of this tensor to a new data object. Changing the data object of a tensor will not change the shape of the tensor and how @@ -709,173 +868,218 @@

    setData

    Warning! This method should not be used unless absolutely necessary. This is because it can cause unpredictable side effects especially for certain operations expecting a particular data layout (like for example matrix multiplication).
    -
    -
    Parameters:
    +
    +
    Parameters:
    data - The new data object which ought to be set.
    -
    Returns:
    +
    Returns:
    The tensor in question, to allow for method chaining.
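A hedged sketch of setData, assuming a Data<T> object can be obtained from another tensor of the same type through an accessor such as getData() (that accessor is an assumption, it is not part of this excerpt):

    Tensor<Double> a = Tensor.of(1d, 2d, 3d); // assumed factory
    Tensor<Double> b = Tensor.of(4d, 5d, 6d);
    a.mut().setData(b.getData());              // 'a' now reads its items from b's data object (assumed getter)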
    -
  • -
  • -
    -

    getDataForWriting

    -
    default <A> A getDataForWriting(Class<A> arrayTypeClass)
    +
+ + + +
    +
  • +

    getDataForWriting

    +
    default <A> A getDataForWriting(java.lang.Class<A> arrayTypeClass)
    Use this to access the underlying writable data of this tensor if you want to modify it. This method will ensure that you receive an instance of whatever array type you provide or throw descriptive exceptions to make sure that any unwanted behaviour does not spread further in the backend.
    -
    -
    Type Parameters:
    +
    +
    Type Parameters:
    A - The type parameter of the provided type class.
    -
    Parameters:
    +
    Parameters:
    arrayTypeClass - The expected array type underlying the tensor.
    -
    Returns:
    +
    Returns:
    The underlying data array of this tensor.
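A hedged sketch of getDataForWriting. The array class passed in must match the tensor's actual backing array type (a double[] is assumed for a Double tensor here; factory and mut() accessor are also assumptions):

    Tensor<Double> t = Tensor.of(1d, 2d, 3d);                  // assumed factory
    double[] raw = t.mut().getDataForWriting(double[].class);  // writable view of the backing array
    raw[0] = 42d;                                              // direct, unchecked modification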
    -
  • -
  • -
    -

    detach

    -
    Tensor<T> detach()
    +
+ + + +
    +
  • +

    detach

    +
    Tensor<T> detach()
    This method detaches this tensor from its underlying computation-graph or simply does nothing if no graph is present.
    - Nodes within a computation graph are instances of the "GraphNode" class which are also + Nodes within a computation graph are instances of the "GraphNode" class which are also simple components of the tensors they represent in the graph.
    - Therefore, "detaching" this tensor from the graph simply means removing its GraphNode component.
    -
    -
    Returns:
    + Therefore, "detaching" this tensor from the graph simply means removing its GraphNode component.
+
+
Returns:
This very instance in order to allow for a more streamlined usage of this method.
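A hedged sketch of detach, assuming the inputs were recorded into a computation graph, for example because gradients were requested via something like setRqsGradient(true); that flag, the times(...) operator and the mut() accessor are all assumptions not documented in this excerpt:

    Tensor<Double> x = Tensor.of(2d).setRqsGradient(true); // assumed factory and assumed autograd flag
    Tensor<Double> y = x.times(x);                          // assumed operator method producing a graph node
    y.mut().detach();                                       // removes the GraphNode component from 'y'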
- -
  • -
    -

    timesAssign

    -
    Tensor<T> timesAssign(Tensor<T> other)
    -
    -
    Parameters:
    + + + + +
      +
    • +

      timesAssign

      +
      Tensor<T> timesAssign(Tensor<T> other)
      +
      +
      Parameters:
      other - The tensor whose elements ought to be multiplied and assigned to elements in this tensor.
      -
      Returns:
      +
      Returns:
      This instance where each value element was multiplied by the corresponding element in the provided tensor.
      -
  • -
  • -
    -

    timesAssign

    -
    Tensor<T> timesAssign(T other)
    -
    -
    Parameters:
    + + + + + + +
      +
    • +

      timesAssign

      +
      Tensor<T> timesAssign(T other)
      +
      +
      Parameters:
      other - The value which ought to be multiplied and assigned to each element in this tensor.
      -
      Returns:
      +
      Returns:
      This instance where each value element was multiplied by the provided element.
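A hedged sketch of the two timesAssign overloads (factory and mut() accessor assumed):

    Tensor<Double> a = Tensor.of(1d, 2d, 3d); // assumed factory
    Tensor<Double> b = Tensor.of(4d, 5d, 6d);
    a.mut().timesAssign(b);   // a becomes [4, 10, 18], elementwise and in place
    a.mut().timesAssign(2d);  // a becomes [8, 20, 36]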
      -
  • -
  • -
    -

    divAssign

    -
    Tensor<T> divAssign(Tensor<T> other)
    -
    + + + + + + + + + + + + +
      +
    • +

      plusAssign

      +
      Tensor<T> plusAssign(Tensor<T> other)
      Performs an addition of the passed tensor to this tensor. The result of the addition will be stored in this tensor (inline operation).
      -
      -
      Parameters:
      +
      +
      Parameters:
      other - The tensor which ought to be added to this tensor.
      -
      Returns:
      +
      Returns:
      This tensor.
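A hedged sketch of plusAssign (same assumptions as above regarding construction and the mut() accessor):

    Tensor<Double> a = Tensor.of(1d, 2d, 3d);
    Tensor<Double> b = Tensor.of(0.5d, 0.5d, 0.5d);
    a.mut().plusAssign(b); // a becomes [1.5, 2.5, 3.5]; the result is stored in 'a' itself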
      -
    • -
    • -
      -

      minusAssign

      -
      Tensor<T> minusAssign(Tensor<T> other)
      -
      +
    + + + +
      +
    • +

      minusAssign

      +
      Tensor<T> minusAssign(Tensor<T> other)
    • -
    • -
      -

      minusAssign

      -
      Tensor<T> minusAssign(T other)
      -
      -
      Parameters:
      +
    + + + + + +
      +
    • +

      minusAssign

      +
      Tensor<T> minusAssign(T other)
      +
      +
      Parameters:
      other - The scalar value which should be subtracted from the values of this tensor.
      -
      Returns:
      +
      Returns:
      This tensor after the minus-assign inline operation was applied.
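A hedged sketch of the scalar minusAssign overload, analogous to plusAssign above (construction and mut() accessor assumed):

    Tensor<Double> a = Tensor.of(1d, 2d, 3d);
    a.mut().minusAssign(1d); // a becomes [0, 1, 2] through an inline subtraction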
      -
    • -
    • -
      -

      assign

      -
      Tensor<T> assign(T other)
      -
      Description copied from interface: MutateNda
      +
    + + + + + +
      +
    • +

      assign

      +
      Tensor<T> assign(T other)
      +
      Description copied from interface: MutateNda
      Use this to assign the provided item to all elements of this nd-array! This method is an inline operation which changes the underlying data of the nd-array.
      -
      -
      Specified by:
      -
      assign in interface MutateNda<T>
      -
      Parameters:
      +
      +
      Specified by:
      +
      assign in interface MutateNda<T>
      +
      Parameters:
      other - The item which ought to be assigned to all elements of this nd-array.
      -
      Returns:
      +
      Returns:
      This very nd-array to enable method chaining.
      -
    • -
    • -
      -

      assign

      -
      Tensor<T> assign(Nda<T> other)
      -
      Description copied from interface: MutateNda
      +
    + + + +
      +
    • +

      assign

      +
      Tensor<T> assign(Nda<T> other)
      +
      Description copied from interface: MutateNda
      Use this to assign the provided nd-array to this nd-array! This method is an inline operation which changes the underlying data of the nd-array.
      -
      -
      Specified by:
      -
      assign in interface MutateNda<T>
      -
      Parameters:
      +
      +
      Specified by:
      +
      assign in interface MutateNda<T>
      +
      Parameters:
      other - The nd-array which ought to be assigned to this nd-array.
      -
      Returns:
      +
      Returns:
      This very nd-array to enable method chaining.
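A hedged sketch covering both assign overloads (construction and mut() accessor assumed):

    Tensor<Double> a = Tensor.of(1d, 2d, 3d);
    a.mut().assign(0d);                    // every item of 'a' is now 0.0
    a.mut().assign(Tensor.of(7d, 8d, 9d)); // copies the provided nd-array's items into 'a'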
      -
    • -
    • -
      -

      label

      -
      Tensor<T> label(String label)
      +
    + + + +
      +
    • +

      label

      +
      Tensor<T> label(java.lang.String label)
      Sets the label of this nd-array. The label is a human-readable string which can be used to identify this nd-array for example in a pretty-printed output.
      -
      -
      Specified by:
      -
      label in interface MutateNda<T>
      -
      Parameters:
      +
      +
      Specified by:
      +
      label in interface MutateNda<T>
      +
      Parameters:
      label - The label of this nd-array.
      -
      Returns:
      +
      Returns:
      This very nd-array to enable method chaining.
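A hedged sketch of label (construction and mut() accessor assumed):

    Tensor<Double> w = Tensor.of(0.1d, 0.2d);
    w.mut().label("weights"); // the name shows up in pretty-printed output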
      -
    • -
    • -
      -

      labelAxes

      -
      Tensor<T> labelAxes(String[]... labels)
      +
    + + + +
      +
    • +

      labelAxes

      +
      Tensor<T> labelAxes(java.lang.String[]... labels)
      This method receives a label for this tensor and a - nested String array which ought to contain a + nested String array which ought to contain a label for the index of this tensor. The index for a single element of this tensor would be an array of numbers as long as the rank where every number is @@ -887,21 +1091,24 @@

      labelAxes

      dim 0 : ["A", "B"]
      dim 1 : ["1", "2", "3"]

      -
      -
      Specified by:
      -
      labelAxes in interface MutateNda<T>
      -
      Parameters:
      +
      +
      Specified by:
      +
      labelAxes in interface MutateNda<T>
      +
      Parameters:
      labels - A nested String array containing labels for indexes of the tensor dimensions.
      -
      Returns:
      +
      Returns:
      This tensor (method chaining).
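A hedged sketch of the varargs labelAxes overload for a tensor of shape (2,3), mirroring the example above (construction via an assumed fluent builder, mut() accessor assumed):

    Tensor<Double> t = Tensor.of(Double.class).withShape(2, 3).andFill(1d, 2d, 3d, 4d, 5d, 6d);
    t.mut().labelAxes(
        new String[]{"A", "B"},     // labels for dim 0
        new String[]{"1", "2", "3"} // labels for dim 1
    );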
      -
    • -
    • -
      -

      labelAxes

      -
      Tensor<T> labelAxes(List<List<Object>> labels)
      -
      This method receives a nested String list which +
    + + + +
      +
    • +

      labelAxes

      +
      Tensor<T> labelAxes(java.util.List<java.util.List<java.lang.Object>> labels)
      +
      This method receives a nested String list which ought to contain a label for the index of this tensor. The index for a single element of this tensor would be an array of numbers as long as the rank where every number is @@ -913,20 +1120,23 @@

      labelAxes

      dim 0 : ["A", "B"]
      dim 1 : ["1", "2", "3"]

      -
      -
      Specified by:
      -
      labelAxes in interface MutateNda<T>
      -
      Parameters:
      +
      +
      Specified by:
      +
      labelAxes in interface MutateNda<T>
      +
      Parameters:
      labels - A nested String list containing labels for indexes of the tensor dimensions.
      -
      Returns:
      +
      Returns:
      This tensor (method chaining).
      -
    • -
    • -
      -

      labelAxes

      -
      Tensor<T> labelAxes(Map<Object,List<Object>> labels)
      +
    + + + +
      +
    • +

      labelAxes

      +
      Tensor<T> labelAxes(java.util.Map<java.lang.Object,java.util.List<java.lang.Object>> labels)
      This method provides the ability to label not only the indices of the shape of this tensor, but also the dimension of the shape. @@ -939,20 +1149,23 @@

      labelAxes

      "dim 1" : ["1", "2", "3"]
      ]

      -
      -
      Specified by:
      -
      labelAxes in interface MutateNda<T>
      -
      Parameters:
      +
      +
      Specified by:
      +
      labelAxes in interface MutateNda<T>
      +
      Parameters:
      labels - A map in which the keys are dimension labels and the values are lists of index labels for the dimension.
      -
      Returns:
      +
      Returns:
      This tensor (method chaining).
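A hedged sketch of the Map based labelAxes overload, which also names the dimensions themselves (same construction assumptions as above; java.util imports are assumed):

    Tensor<Double> t = Tensor.of(Double.class).withShape(2, 3).andFill(1d, 2d, 3d, 4d, 5d, 6d);
    Map<Object, List<Object>> labels = new LinkedHashMap<>();
    labels.put("rows",    Arrays.<Object>asList("A", "B"));
    labels.put("columns", Arrays.<Object>asList("1", "2", "3"));
    t.mut().labelAxes(labels); // dimension labels are the map keys, index labels are the list entries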
      -
    • -
    • -
      -

      setIsVirtual

      -
      Tensor<T> setIsVirtual(boolean isVirtual)
      +
    + + + +
      +
    • +

      setIsVirtual

      +
      Tensor<T> setIsVirtual(boolean isVirtual)
      Virtualizing is the opposite to actualizing a tensor. A tensor is virtual if the size of the underlying data is not actually equal to the number of elements which the tensor claims to store, aka its size. @@ -968,23 +1181,82 @@

      setIsVirtual

      This only makes sense for homogeneously populated tensors. Passing false to this method will "actualize" a "virtual" tensor. Meaning the underlying data array will at least become as large as the size of the tensor - as is defined by NDimensional.size().
      -
      -
      Parameters:
      + as is defined by NDimensional.size().
  • +
    +
    Parameters:
    isVirtual - The truth value determining if this tensor should be "virtual" or "actual".
    -
    Returns:
    +
    Returns:
    This concrete instance, to allow for method chaining.
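A hedged sketch of the virtual/actual distinction (the homogeneous-fill builder and the mut() accessor are assumptions):

    Tensor<Double> t = Tensor.of(Double.class).withShape(1000, 1000).all(0d); // likely created as "virtual"
    t.mut().setIsVirtual(false); // "actualize": the backing array grows to at least NDimensional.size() items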
    - - - + + + + - + + + + diff --git a/docs/jdocs/neureka/MutateTsr.html b/docs/jdocs/neureka/MutateTsr.html deleted file mode 100644 index 51d5a71e1..000000000 --- a/docs/jdocs/neureka/MutateTsr.html +++ /dev/null @@ -1,1327 +0,0 @@ - - - - - -MutateTsr (neureka 0.21.0 API) - - - - - - - - - - - - - -
    - -
    - -
    -
    -
    Package neureka
    -

    Interface MutateTsr<T>

    -
    -
    -
    -
      -
    • -
      -
      All Superinterfaces:
      -
      MutateNda<T>
      -
      -
      -
      public interface MutateTsr<T>
      -extends MutateNda<T>
      -
      Tensors should be considered immutable, however sometimes it - is important to mutate their state for performance reasons. - This interface exposes several methods for mutating the state of a tensor. - The usage of methods exposed by this API is generally discouraged - because the exposed state can easily lead to broken tensors and exceptions...
      -
      -
    • -
    -
    -
    -
      -
    • - -
      - -
      - -
      -
        -
      • - - -

        Method Summary

        - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        All Methods Instance Methods Abstract Methods Default Methods 
        Modifier and TypeMethodDescription
        Tsr<T>addToGradient​(Tsr<T> error) -
        This method takes the provided Tsr instance and adds its - contents to the contents of the Tsr which is set as gradient of this very Tsr.
        -
        Tsr<T>assign​(Nda<T> other) -
        Use this to assign the provided nd-array to this nd-array! - This method is an inline operation which changes the underlying data of the nd-array.
        -
        Tsr<T>assign​(T other) -
        Use this to assign the provided item to all elements of this nd-array! - This method is an inline operation which changes the underlying data of the nd-array.
        -
        Tsr<T>delete() -
        Although tensors will be garbage collected when they are not strongly referenced, - there is also the option to manually free up the tensor and its associated data in a native environment.
        -
        Tsr<T>detach() -
        This method detaches this tensor from its underlying computation-graph - or simply does nothing if no graph is present.
        - Nodes within a computation graph are instances of the "GraphNode" class which are also - simple components of the tensors they represent in the graph.
        -
        Tsr<T>divAssign​(Tsr<T> other) 
        default <A> AgetDataForWriting​(java.lang.Class<A> arrayTypeClass) -
        Use this to access the underlying writable data of this tensor if - you want to modify it.
        -
        Tsr<T>incrementVersion​(ExecutionCall<?> call) -
        This method is responsible for incrementing - the "_version" field variable which represents the version of the data of this tensor.
        -
        Tsr<T>label​(java.lang.String label) -
        Sets the label of this nd-array.
        -
        Tsr<T>labelAxes​(java.lang.String[]... labels) -
        This method receives a label for this tensor and a - nested String array which ought to contain a - label for the index of this tensor.
        -
        Tsr<T>labelAxes​(java.util.List<java.util.List<java.lang.Object>> labels) -
        This method receives a nested String list which - ought to contain a label for the index of this tensor.
        -
        Tsr<T>labelAxes​(java.util.Map<java.lang.Object,​java.util.List<java.lang.Object>> labels) -
        This method provides the ability to - label not only the indices of the shape of this tensor, but also - the dimension of the shape.
        -
        Tsr<T>minusAssign​(Tsr<T> other) 
        Tsr<T>minusAssign​(T other) 
        Tsr<T>modAssign​(Tsr<T> other) 
        Tsr<T>plusAssign​(Tsr<T> other) -
        Performs an addition of the passed tensor to this tensor.
        -
        Tsr<T>putAt​(int[] indices, - T value) -
        Use this to put a single item at a particular - position within this nd-array.
        -
        Tsr<T>putAt​(int index, - T value) -
        Individual entries for value items in this nd-array can be set - via this method.
        -
        Tsr<T>putAt​(java.util.List<?> key, - Nda<T> value) -
        This method enables injecting slices of nd-array to be assigned into this nd-array! - It takes a key of various types which is used to configure a slice - nd-array sharing the same underlying data as the original nd-array.
        -
        Tsr<T>putAt​(java.util.List<?> indices, - T value) -
        Use this to place a single item at a particular position within this nd-array!
        -
        Tsr<T>putAt​(java.util.Map<?,​java.lang.Integer> key, - Nda<T> value) -
        This method enables assigning a provided nd-array to be a subset/slice of this nd-array! - It takes a key which is used to configure a slice - sharing the same underlying data as the original nd-array.
        -
        default Tsr<T>set​(int[] indices, - T value) -
        Use this to place a single item at a particular position within this nd-array!
        -
        default Tsr<T>set​(int i0, - int i1, - int i2, - T value)
        default Tsr<T>set​(int i0, - int i1, - T value)
        default Tsr<T>set​(int index, - T value) -
        Individual entries for value items in this nd-array can be set - via this method.
        -
        Tsr<T>setData​(Data<T> data) -
        At the heart of every tensor is the Data object, which holds the actual data array, - a sequence of values of the same type.
        -
        Tsr<T>setDataAt​(int i, - T o) -
        A tensor ought to have some way to selectively modify its underlying data array.
        -
        Tsr<T>setIsIntermediate​(boolean isIntermediate) -
        Intermediate tensors are internal non-user tensors which may be eligible - for deletion when further consumed by a Function.
        -
        Tsr<T>setIsVirtual​(boolean isVirtual) -
        Virtualizing is the opposite to actualizing a tensor.
        -
        Tsr<T>setItemAt​(int i, - T o) -
        An NDArray implementation ought to have some way to selectively modify its underlying value.
        -
        Tsr<T>setItems​(java.lang.Object value) -
        This method will receive an object an try to interpret - it or its contents to be set as value for this nd-array.
        -
        Tsr<T>setNDConf​(NDConfiguration configuration) -
        This method sets the NDConfiguration of this NDArray.
        -
        Tsr<T>timesAssign​(Tsr<T> other) 
        Tsr<T>timesAssign​(T other) 
        Tsr<T>toLayout​(NDConfiguration.Layout layout) -
        This method allows you to modify the data-layout of this AbstractNda.
        -
        <V> Tsr<V>toType​(java.lang.Class<V> typeClass) -
        This method is an inline operation which changes the underlying data of this tensor.
        -
        <U> Tsr<U>upcast​(java.lang.Class<U> superType) -
        Use this to do a runtime checked upcast of the type parameter of the tensor.
        -
        - -
      • -
      -
      -
    • -
    -
    -
    -
      -
    • - -
      -
        -
      • - - -

        Method Detail

        - - - -
          -
        • -

          putAt

          -
          Tsr<T> putAt​(java.util.Map<?,​java.lang.Integer> key,
          -             Nda<T> value)
          -
          This method enables assigning a provided nd-array to be a subset/slice of this nd-array! - It takes a key which is used to configure a slice - sharing the same underlying data as the original nd-array. - This slice is then used to assign the second argument value to it. - The usage of this method is especially powerful when used in Groovy.
          - The following code illustrates this very well: -
          
          -      a[[[0..0]:1, [0..0]:1, [0..3]:2]] = b
          -  
          - Here a single argument with the format '[i..j]:k' is equivalent - to pythons 'i:j:k' syntax for indexing! (numpy)
          - i... start indexAlias.
          - j... end indexAlias. (inclusive!)
          - k... step size.
          -
          -
          Specified by:
          -
          putAt in interface MutateNda<T>
          -
          Parameters:
          -
          key - This object is a map defining a step and a targeted index or range of indices...
          -
          value - The nd-array which ought to be assigned into a slice of this nd-array.
          -
          Returns:
          -
          A slice nd-array or scalar value.
          -
          -
        • -
        - - - - - -
          -
        • -

          putAt

          -
          Tsr<T> putAt​(int[] indices,
          -             T value)
          -
          Use this to put a single item at a particular - position within this nd-array.
          -
          -
          Specified by:
          -
          putAt in interface MutateNda<T>
          -
          Parameters:
          -
          indices - The indices of the nd-position where the provided item should be placed.
          -
          value - The item which should be placed at the position defined by the provided indices.
          -
          Returns:
          -
          This nd-array itself.
          -
          -
        • -
        - - - - - -
          -
        • -

          set

          -
          default Tsr<T> set​(int[] indices,
          -                   T value)
          -
          Use this to place a single item at a particular position within this nd-array!
          -
          -
          Specified by:
          -
          set in interface MutateNda<T>
          -
          Parameters:
          -
          indices - An array of indices targeting a particular position in this nd-array...
          -
          value - the value which ought to be placed at the targeted position.
          -
          Returns:
          -
          This very nd-array in order to enable method chaining...
          -
          -
        • -
        - - - - - -
          -
        • -

          set

          -
          default Tsr<T> set​(int i0,
          -                   int i1,
          -                   T value)
          -
          -
          Specified by:
          -
          set in interface MutateNda<T>
          -
          -
        • -
        - - - - - -
          -
        • -

          set

          -
          default Tsr<T> set​(int i0,
          -                   int i1,
          -                   int i2,
          -                   T value)
          -
          -
          Specified by:
          -
          set in interface MutateNda<T>
          -
          -
        • -
        - - - - - -
          -
        • -

          putAt

          -
          Tsr<T> putAt​(int index,
          -             T value)
          -
          Individual entries for value items in this nd-array can be set - via this method.
          -
          -
          Specified by:
          -
          putAt in interface MutateNda<T>
          -
          Parameters:
          -
          index - The scalar index targeting a specific value position within this nd-array - which ought to be replaced by the one provided by the second parameter - of this method.
          -
          value - The item which ought to be placed at the targeted position.
          -
          Returns:
          -
          This very nd-array in order to enable method chaining...
          -
          -
        • -
        - - - - - -
          -
        • -

          set

          -
          default Tsr<T> set​(int index,
          -                   T value)
          -
          Individual entries for value items in this nd-array can be set - via this method.
          -
          -
          Specified by:
          -
          set in interface MutateNda<T>
          -
          Parameters:
          -
          index - The scalar index targeting a specific value position within this nd-array - which ought to be replaced by the one provided by the second parameter - of this method.
          -
          value - The item which ought to be placed at the targeted position.
          -
          Returns:
          -
          This very nd-array in order to enable method chaining...
          -
          -
        • -
        - - - -
          -
        • -

          putAt

          -
          Tsr<T> putAt​(java.util.List<?> key,
          -             Nda<T> value)
          -
          This method enables injecting slices of nd-array to be assigned into this nd-array! - It takes a key of various types which is used to configure a slice - nd-array sharing the same underlying data as the original nd-array. - This slice is then used to assign the second argument to it, namely - the "value" argument.
          -
          -
          Specified by:
          -
          putAt in interface MutateNda<T>
          -
          Parameters:
          -
          key - This object is a list defining a targeted index or range of indices...
          -
          value - the nd-array which ought to be assigned to a slice of this nd-array.
          -
          Returns:
          -
          This very nd-array in order to enable method chaining...
          -
          -
        • -
        - - - - - -
          -
        • -

          putAt

          -
          Tsr<T> putAt​(java.util.List<?> indices,
          -             T value)
          -
          Use this to place a single item at a particular position within this nd-array!
          -
          -
          Specified by:
          -
          putAt in interface MutateNda<T>
          -
          Parameters:
          -
          indices - A list of indices targeting a particular position in this nd-array...
          -
          value - the value which ought to be placed at the targeted position.
          -
          Returns:
          -
          This very nd-array in order to enable method chaining...
          -
          -
        • -
        - - - - - -
          -
        • -

          setItemAt

          -
          Tsr<T> setItemAt​(int i,
          -                 T o)
          -
          An NDArray implementation ought to have some way to selectively modify its underlying value. - This method simply overrides an element within this data array sitting at position "i".
          -
          -
          Specified by:
          -
          setItemAt in interface MutateNda<T>
          -
          Parameters:
          -
          i - The index of the value array entry which ought to be addressed.
          -
          o - The object which ought to be placed at the requested position.
          -
          Returns:
          -
          This very nd-array in order to enable method chaining.
          -
          -
        • -
        - - - -
          -
        • -

          setItems

          -
          Tsr<T> setItems​(java.lang.Object value)
          -
          This method will receive an object an try to interpret - it or its contents to be set as value for this nd-array. - It will not necessarily replace the underlying data array object of this - nd-array itself, but also try to convert and copy the provided value - into the data array of this nd-array.
          -
          -
          Specified by:
          -
          setItems in interface MutateNda<T>
          -
          Parameters:
          -
          value - The value which may be a scalar or array and will be used to populate this nd-array.
          -
          Returns:
          -
          This very nd-array to enable method chaining.
          -
          -
        • -
        - - - -
          -
        • -

          addToGradient

          -
          Tsr<T> addToGradient​(Tsr<T> error)
          -
          This method takes the provided Tsr instance and adds its - contents to the contents of the Tsr which is set as gradient of this very Tsr.
          -
          -
          Parameters:
          -
          error - The error gradient which ought to be added to the gradient of this tensor.
          -
          Returns:
          -
          This very tensor instance to enable method chaining.
          -
          -
        • -
        - - - -
          -
        • -

          setNDConf

          -
          Tsr<T> setNDConf​(NDConfiguration configuration)
          -
          This method sets the NDConfiguration of this NDArray. - Therefore, it should not be used lightly as it can cause major internal inconsistencies.
          -
          -
          Parameters:
          -
          configuration - The new NDConfiguration instance which ought to be set.
          -
          Returns:
          -
          The final instance type of this class which enables method chaining.
          -
          -
        • -
        - - - -
          -
        • -

          toType

          -
          <V> Tsr<V> toType​(java.lang.Class<V> typeClass)
          -
          This method is an inline operation which changes the underlying data of this tensor. - It converts the data types of the elements of this tensor to the specified type!
          -
          - WARNING : The usage of this method is discouraged for the following reasons:
          -
          - 1. Inline operations are inherently error-prone for most use cases.
          - 2. This inline operation in particular has no safety net, - meaning that there is no implementation of version mismatch detection - like there is for those operations present in the standard operation backend... - No exceptions will be thrown during backpropagation!
          - 3. This method has not yet been implemented to also handle instances which - are slices of parent tensors! - Therefore, there might be unexpected performance penalties or side effects - associated with this method.
          -
          -
          -
          Specified by:
          -
          toType in interface MutateNda<T>
          -
          Type Parameters:
          -
          V - The type parameter for the returned tensor.
          -
          Parameters:
          -
          typeClass - The target type class for elements of this tensor.
          -
          Returns:
          -
          The same tensor instance whose data has been converted to hold a different type.
          -
          -
        • -
        - - - -
          -
        • -

          upcast

          -
          <U> Tsr<U> upcast​(java.lang.Class<U> superType)
          -
          Use this to do a runtime checked upcast of the type parameter of the tensor. - This is unsafe because it is in conflict with the Nda.itemType() - method.
          -
          -
          Type Parameters:
          -
          U - The super type parameter of the value type of the tensor.
          -
          Parameters:
          -
          superType - The class of the super type of the tensor's value type.
          -
          Returns:
          -
          A tensor whose type parameter is upcast.
          -
          -
        • -
        - - - -
          -
        • -

          toLayout

          -
          Tsr<T> toLayout​(NDConfiguration.Layout layout)
          -
          This method allows you to modify the data-layout of this AbstractNda. - Warning! The method should not be used unless absolutely necessary. - This is because it can cause unpredictable side effects especially for certain - operations expecting a particular data layout (like for example matrix multiplication). -
          -
          -
          Parameters:
          -
          layout - The layout of the data array (row or column major).
          -
          Returns:
          -
          The final instance type of this class which enables method chaining.
          -
          -
        • -
        - - - -
          -
        • -

          incrementVersion

          -
          Tsr<T> incrementVersion​(ExecutionCall<?> call)
          -
          This method is responsible for incrementing - the "_version" field variable which represents the version of the data of this tensor. - Meaning : - Every time the underlying data (_value) changes this version ought to increment alongside. - The method is called during the execution procedure.
          -
          -
          Parameters:
          -
          call - The context object containing all relevant information that defines a call for tensor execution.
          -
          Returns:
          -
          This very tensor instance. (factory pattern)
          -
          -
        • -
        - - - -
          -
        • -

          setIsIntermediate

          -
          Tsr<T> setIsIntermediate​(boolean isIntermediate)
          -
          Intermediate tensors are internal non-user tensors which may be eligible - for deletion when further consumed by a Function. - For the casual user of Neureka, this flag should always be false!
          -
          -
          Parameters:
          -
          isIntermediate - The truth value determining if this tensor is not a user tensor but an internal - tensor which may be eligible for deletion by Functions consuming it.
          -
          Returns:
          -
          The tensor to which this unsafe API belongs.
          -
          -
        • -
        - - - -
          -
        • -

          delete

          -
          Tsr<T> delete()
          -
          Although tensors will be garbage collected when they are not strongly referenced, - there is also the option to manually free up the tensor and its associated data in a native environment. - This is especially useful when tensors are stored on a device like the OpenCLDevice. - In that case calling this method will free the memory reserved for this tensor on the device. - This manual memory freeing through this method can be faster than waiting for - the garbage collector to kick in at a latr point in time...
          -
          -
          -
          Returns:
          -
          The tensor wo which this unsafe API belongs to allow for method chaining.
          -
          -
        • -
        - - - - - -
          -
        • -

          setDataAt

          -
          Tsr<T> setDataAt​(int i,
          -                 T o)
          -
          A tensor ought to have some way to selectively modify its underlying data array. - This method simply overrides an element within this data array sitting at position "i".
          -
          -
          Parameters:
          -
          i - The index of the data array entry which ought to be addressed.
          -
          o - The object which ought to be placed at the requested position.
          -
          Returns:
          -
          This very tensor in order to enable method chaining.
          -
          -
        • -
        - - - -
          -
        • -

          setData

          -
          Tsr<T> setData​(Data<T> data)
          -
          At the heart of every tensor is the Data object, which holds the actual data array, - a sequence of values of the same type. - This method allows you to set the data of this tensor to a new data object. - Changing the data object of a tensor will not change the shape of the tensor and how - nd-indices are mapped to the data array. -

          - Warning! This method should not be used unless absolutely necessary. - This is because it can cause unpredictable side effects especially for certain - operations expecting a particular data layout (like for example matrix multiplication).

          -
          -
          Parameters:
          -
          data - The new data object which ought to be set.
          -
          Returns:
          -
          The tensor in question, to allow for method chaining.
          -
          -
        • -
        - - - -
          -
        • -

          getDataForWriting

          -
          default <A> A getDataForWriting​(java.lang.Class<A> arrayTypeClass)
          -
          Use this to access the underlying writable data of this tensor if - you want to modify it. - This method will ensure that you receive an instance of whatever array type you provide - or throw descriptive exceptions to make sure that any unwanted behaviour does not - spread further in the backend.
          -
          -
          Type Parameters:
          -
          A - The type parameter of the provided type class.
          -
          Parameters:
          -
          arrayTypeClass - The expected array type underlying the tensor.
          -
          Returns:
          -
          The underlying data array of this tensor.
          -
          -
        • -
        - - - -
          -
        • -

          detach

          -
          Tsr<T> detach()
          -
          This method detaches this tensor from its underlying computation-graph - or simply does nothing if no graph is present.
          - Nodes within a computation graph are instances of the "GraphNode" class which are also - simple components of the tensors they represent in the graph.
          - Therefore, "detaching" this tensor from the graph simply means removing its GraphNode component.
          -
          -
          Returns:
          -
          This very instance in order to allows for a more streamline usage of this method.
          -
          -
        • -
        - - - -
          -
        • -

          timesAssign

          -
          Tsr<T> timesAssign​(Tsr<T> other)
          -
          -
          Parameters:
          -
          other - The tensor whose elements ought to be multiplied and assigned to elements in this tensor.
          -
          Returns:
          -
          This instance where each value element was multiplied by the corresponding element in the provided tensor.
          -
          -
        • -
        - - - - - -
          -
        • -

          timesAssign

          -
          Tsr<T> timesAssign​(T other)
          -
          -
          Parameters:
          -
          other - The value which ought to be multiplied and assigned to each element in this tensor.
          -
          Returns:
          -
          This instance where each value element was multiplied by the provided element.
          -
          -
        • -
        - - - -
          -
        • -

          divAssign

          -
          Tsr<T> divAssign​(Tsr<T> other)
          -
        • -
        - - - -
          -
        • -

          modAssign

          -
          Tsr<T> modAssign​(Tsr<T> other)
          -
        • -
        - - - -
          -
        • -

          plusAssign

          -
          Tsr<T> plusAssign​(Tsr<T> other)
          -
          Performs an addition of the passed tensor to this tensor. - The result of the addition will be stored in this tensor (inline operation).
          -
          -
          Parameters:
          -
          other - The tensor which ought to be added to this tensor.
          -
          Returns:
          -
          This tensor.
          -
          -
        • -
        - - - -
          -
        • -

          minusAssign

          -
          Tsr<T> minusAssign​(Tsr<T> other)
          -
        • -
        - - - - - -
          -
        • -

          minusAssign

          -
          Tsr<T> minusAssign​(T other)
          -
          -
          Parameters:
          -
          other - The scalar value which should be subtracted from the values of this tensor.
          -
          Returns:
          -
          This tensor after the minus-assign inline operation was applied.
          -
          -
        • -
        - - - - - -
          -
        • -

          assign

          -
          Tsr<T> assign​(T other)
          -
          Description copied from interface: MutateNda
          -
          Use this to assign the provided item to all elements of this nd-array! - This method is an inline operation which changes the underlying data of the nd-array.
          -
          -
          Specified by:
          -
          assign in interface MutateNda<T>
          -
          Parameters:
          -
          other - The item which ought to be assigned to all elements of this nd-array.
          -
          Returns:
          -
          This very nd-array to enable method chaining.
          -
          -
        • -
        - - - -
          -
        • -

          assign

          -
          Tsr<T> assign​(Nda<T> other)
          -
          Description copied from interface: MutateNda
          -
          Use this to assign the provided nd-array to this nd-array! - This method is an inline operation which changes the underlying data of the nd-array.
          -
          -
          Specified by:
          -
          assign in interface MutateNda<T>
          -
          Parameters:
          -
          other - The nd-array which ought to be assigned to this nd-array.
          -
          Returns:
          -
          This very nd-array to enable method chaining.
          -
          -
        • -
        - - - -
          -
        • -

          label

          -
          Tsr<T> label​(java.lang.String label)
          -
          Sets the label of this nd-array. - The label is a human-readable string which can be used to identify this nd-array - for example in a pretty-printed output.
          -
          -
          Specified by:
          -
          label in interface MutateNda<T>
          -
          Parameters:
          -
          label - The label of this nd-array.
          -
          Returns:
          -
          This very nd-array to enable method chaining.
          -
          -
        • -
        - - - -
          -
        • -

          labelAxes

          -
          Tsr<T> labelAxes​(java.lang.String[]... labels)
          -
          This method receives a label for this tensor and a - nested String array which ought to contain a - label for the index of this tensor. - The index for a single element of this tensor would be an array - of numbers as long as the rank where every number is - in the range of the corresponding shape dimension... - Labeling an index means that for every dimension there - must be a label for elements in this range array!
          - For example the shape (2,3) could be labeled as follows:
          -
          - dim 0 : ["A", "B"]
          - dim 1 : ["1", "2", "3"]
          -
          -
          -
          Specified by:
          -
          labelAxes in interface MutateNda<T>
          -
          Parameters:
          -
          labels - A nested String array containing labels for indexes of the tensor dimensions.
          -
          Returns:
          -
          This tensor (method chaining).
          -
          -
        • -
        - - - -
          -
        • -

          labelAxes

          -
          Tsr<T> labelAxes​(java.util.List<java.util.List<java.lang.Object>> labels)
          -
          This method receives a nested String list which - ought to contain a label for the index of this tensor. - The index for a single element of this tensor would be an array - of numbers as long as the rank where every number is - in the range of the corresponding shape dimension... - Labeling an index means that for every dimension there - must be a label for elements in this range array!
          - For example the shape (2,3) could be labeled as follows:
          -
          - dim 0 : ["A", "B"]
          - dim 1 : ["1", "2", "3"]
          -
          -
          -
          Specified by:
          -
          labelAxes in interface MutateNda<T>
          -
          Parameters:
          -
          labels - A nested String list containing labels for indexes of the tensor dimensions.
          -
          Returns:
          -
          This tensor (method chaining).
          -
          -
        • -
        - - - -
          -
        • -

          labelAxes

          -
          Tsr<T> labelAxes​(java.util.Map<java.lang.Object,​java.util.List<java.lang.Object>> labels)
          -
          This method provides the ability to - label not only the indices of the shape of this tensor, but also - the dimension of the shape. - The first and only argument of the method expects a map instance - where keys are the objects which ought to act as dimension labels - and the values are lists of labels for the indices of said dimensions. - For example the shape (2,3) could be labeled as follows:
          - [
          - "dim 0" : ["A", "B"],
          - "dim 1" : ["1", "2", "3"]
          - ]
          -
          -
          -
          Specified by:
          -
          labelAxes in interface MutateNda<T>
          -
          Parameters:
          -
          labels - A map in which the keys are dimension labels and the values are lists of index labels for the dimension.
          -
          Returns:
          -
          This tensor (method chaining).
          -
          -
        • -
        - - - -
          -
        • -

          setIsVirtual

          -
          Tsr<T> setIsVirtual​(boolean isVirtual)
          -
          Virtualizing is the opposite to actualizing a tensor. - A tensor is virtual if the size of the underlying data is not actually equal to - the number of elements which the tensor claims to store, aka its size. - This is for example the case when initializing a tensor filled with a single - value continuously. In that case the tensor will flag itself as virtual and only allocate the - underlying data array to hold a single item even though the tensor might actually hold - many more items. - The reasons for this feature is that it greatly improves performance in certain cases. - In essence this feature is a form of lazy loading. -

          - WARNING! Virtualizing is the process of compacting the underlying data array - down to an array holding a single value item. - This only makes sense for homogeneously populated tensors. - Passing false to this method will "actualize" a "virtual" tensor. - Meaning the underlying data array will at least become as large as the size of the tensor - as is defined by NDimensional.size().
          -
          -
          Parameters:
          -
          isVirtual - The truth value determining if this tensor should be "virtual" or "actual".
          -
          Returns:
          -
          This concrete instance, to allow for method chaining.
          -
          -
        • -
        -
      • -
      -
      -
    • -
    -
    -
    -
    - -
    - -
    - - diff --git a/docs/jdocs/neureka/Nda.Item.html b/docs/jdocs/neureka/Nda.Item.html index c72f45da2..5e2804314 100644 --- a/docs/jdocs/neureka/Nda.Item.html +++ b/docs/jdocs/neureka/Nda.Item.html @@ -1,246 +1,366 @@ - + + - -Nda.Item (neureka 1.0.0 API) - - - - + +Nda.Item (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    -
    Package neureka
    -

    Interface Nda.Item<V>

    +
    neureka
    +

    Interface Nda.Item<V>

    -
    -
    -
    Type Parameters:
    +
    +
    +
      +
    • +
      +
      Type Parameters:
      V - The type of the items of this nd-array.
      -
      +
      All Known Subinterfaces:
      -
      MutateNda.Item<V>
      +
      MutateNda.Item<V>
      -
      +
      Enclosing interface:
      -
      Nda<V>
      +
      Nda<V>

      -
      public static interface Nda.Item<V>
      -
      Instances of this are being returned by the Nda.at(int...) method, +
      +
      public static interface Nda.Item<V>
      +
      Instances of this are being returned by the Nda.at(int...) method, and they allow you to get individual nd-array items
      -
    -
    -
      + +
    +
    +
    +
      +
    • -
    • -
      -

      Method Summary

      -
      -
      -
      -
      -
      Modifier and Type
      -
      Method
      -
      Description
      -
      default boolean
      - -
       
      -
      default boolean
      - -
       
      -
      default V
      -
      get()
      -
      +
        +
      • + + +

        Method Summary

        + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        All Methods Instance Methods Abstract Methods Default Methods 
        Modifier and TypeMethod and Description
        default booleandoesNotExist() 
        default booleanexists() 
        default Vget()
        Get the value at the targeted position or throw an exception if the item does not exist.
        - -
        default Optional<V>
        -
        map(Function<V,V> mapper)
        -
        +
        default java.util.Optional<V>map(java.util.function.Function<V,V> mapper)
        Maps this item to an optional value based on the provided lambda.
        - -
        default V
        -
        orElse(V defaultValue)
        -
        +
        default VorElse(V defaultValue)
        Get the value at the targeted position or return the provided default value if the item does not exist.
        - - - -
        +
        VorElseNull()
        Get the value at the targeted position or return null if the item does not exist.
        - -
        default Optional<V>
        - -
        +
        default java.util.Optional<V>toOptional()
        Converts this item into an optional value.
        - - - - - +
        +
      • +
    - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Method Details

      -
        -
      • -
        -

        get

        -
        default V get()
        +
          +
        • + + +

          Method Detail

          + + + +
            +
          • +

            get

            +
            default V get()
            Get the value at the targeted position or throw an exception if the item does not exist.
            -
            -
            Returns:
            +
            +
            Returns:
            The value at the targeted position.
            -
      • -
      • -
        -

        orElse

        -
        default V orElse(V defaultValue)
        +
      + + + + + +
        +
      • +

        orElse

        +
        default V orElse(V defaultValue)
        Get the value at the targeted position or return the provided default value if the item does not exist.
        -
        -
        Parameters:
        +
        +
        Parameters:
        defaultValue - The default value to return if the item does not exist.
        -
        Returns:
        +
        Returns:
        The value at the targeted position or the provided default value.
        -
        Throws:
        -
        IllegalArgumentException - If the provided default value is null.
        +
        Throws:
        +
        java.lang.IllegalArgumentException - If the provided default value is null.
        -
    • -
    • -
      -

      orElseNull

      -
      V orElseNull()
      +
    + + + +
      +
    • +

      orElseNull

      +
      V orElseNull()
      Get the value at the targeted position or return null if the item does not exist.
      -
      -
      Returns:
      +
      +
      Returns:
      The value at the targeted position or null.
      -
    • -
    • -
      -

      toOptional

      -
      default Optional<V> toOptional()
      +
    + + + +
      +
    • +

      toOptional

      +
      default java.util.Optional<V> toOptional()
      Converts this item into an optional value. If the item exists, the resulting optional will contain the value. Otherwise, the resulting optional will be empty.
      -
      -
      Returns:
      +
      +
      Returns:
      An optional value.
      -
    • -
    • -
      -

      map

      -
      default Optional<V> map(Function<V,V> mapper)
      +
    + + + +
      +
    • +

      map

      +
      default java.util.Optional<V> map(java.util.function.Function<V,V> mapper)
      Maps this item to an optional value based on the provided lambda. The lambda will be executed if the item exists. If the lambda returns null the resulting optional will be empty. Otherwise, the resulting optional will contain the value returned by the lambda.
      -
      -
      Parameters:
      +
      +
      Parameters:
      mapper - The lambda which maps the item to an optional value.
      -
      Returns:
      +
      Returns:
      An optional value based on the provided lambda.
      -
    • -
    • -
      -

      exists

      -
      default boolean exists()
      -
      -
      Returns:
      +
    + + + +
      +
    • +

      exists

      +
      default boolean exists()
      +
      +
      Returns:
      true if the item exists, false otherwise.
      -
    • -
    • -
      -

      doesNotExist

      -
      default boolean doesNotExist()
      -
      -
      Returns:
      +
    + + + +
      +
    • +

      doesNotExist

      +
      default boolean doesNotExist()
      +
      +
      Returns:
      true if the item does not exist, false otherwise.
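A hedged sketch of the Nda.Item API reached through Nda.at(int...). Only the Item methods documented above are used; the builder used for construction and the java.util.Optional import are assumptions:

    Nda<Double> nda = Tensor.of(Double.class).withShape(2, 3).andFill(1d, 2d, 3d, 4d, 5d, 6d); // assumed builder
    Nda.Item<Double> item = nda.at(1, 2);
    double v = item.orElse(0d);                      // value at (1,2), or 0.0 if the item does not exist
    Optional<Double> doubled = item.map(x -> x * 2); // empty if the item does not exist
    boolean present = item.exists();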
      -
    - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/Nda.html b/docs/jdocs/neureka/Nda.html index 807b9cd79..803c910c9 100644 --- a/docs/jdocs/neureka/Nda.html +++ b/docs/jdocs/neureka/Nda.html @@ -1,683 +1,823 @@ - + + - -Nda (neureka 1.0.0 API) - - - - + +Nda (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    -
    Package neureka
    -

    Interface Nda<V>

    -
    -
    -
    -
    Type Parameters:
    -
    V - The type of the items stored in the Nda.
    -
    -
    +
    neureka
    +

    Interface Nda<V>

    +
    +
    +
    +
      +
    • +
      +
      Type Parameters:
      +
      V - The type of the items stored in the Nda.
      +
      +
      All Superinterfaces:
      -
      Iterable<V>, NDimensional
      +
      java.lang.Iterable<V>, NDimensional
      -
      +
      All Known Subinterfaces:
      -
      Tensor<V>
      +
      Tensor<V>

      -
      public interface Nda<V> -extends NDimensional, Iterable<V>
      -
      Nda, which is an abbreviation of 'N-Dimensional-Array', represents +
      +
      public interface Nda<V>
      +extends NDimensional, java.lang.Iterable<V>
      +
      Nda, which is an abbreviation of 'N-Dimensional-Array', represents a multidimensional, homogeneously filled fixed-size array of items.

      - Ndas should be constructed using the fluent builder API exposed by of(Class).

      - -
      -
        + Ndas should be constructed using the fluent builder API exposed by of(Class).
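A hedged construction sketch: of(Class) is the only builder entry point referenced in this excerpt, while the withShape(...) and andFill(...) steps shown here are assumptions about the fluent builder:

    Nda<String> nda = Nda.of(String.class).withShape(2, 2).andFill("a", "b", "c", "d");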
      +
    • +
    +
    +
    +
      +
    • -
    • -
      -

      Nested Class Summary

      -
      Nested Classes
      -
      -
      Modifier and Type
      -
      Interface
      -
      Description
      -
      static interface 
      - -
      -
      Instances of this are being returned by the at(int...) method, +
        +
      • + + +

        Nested Class Summary

        + + + + + + + + + + +
        Nested Classes 
        Modifier and TypeInterface and Description
        static interface Nda.Item<V> +
        Instances of this are being returned by the at(int...) method, and they allow you to get individual nd-array items
        - - - +
      • +
      -
    • -
      -

      Method Summary

      -
      -
      -
      -
      -
      Modifier and Type
      -
      Method
      -
      Description
      -
      default boolean
      -
      any(Predicate<V> predicate)
      -
      +
        +
      • + + +

        Method Summary

        All Methods Static Methods Instance Methods Abstract Methods Default Methods 
        Modifier and TypeMethod and Description
        default booleanany(java.util.function.Predicate<V> predicate)
        Iterates over every element of this nd-array, and checks whether any element matches the provided lambda.
        - - -
        at(int... indices)
        -
        -
        This method exposes the Nda.Item API which allows you to get or set +
        Nda.Item<V>at(int... indices) +
        This method exposes the Nda.Item API which allows you to get or set individual items within this nd-array targeted by an array of provided indices.
        - - -
        concatAt(int axis, - Nda<V> other)
        -
        +
        Nda<V>concatAt(int axis, + Nda<V> other)
        This method concatenates the provided nd-array together with this nd-array along a specified axis.
        - - -
        concatAt(int axis, - Nda<V> other, - Nda<V>... ndArrays)
        -
        +
        Nda<V>concatAt(int axis, + Nda<V> other, + Nda<V>... ndArrays)
        This method concatenates the provided nd-arrays together with this nd-array along a specified axis.
        - -
        default int
        -
        count(Predicate<V> predicate)
        -
        +
        default intcount(java.util.function.Predicate<V> predicate)
        Iterates over every element of this nd-array, and counts the number of times the provided lambda matches the items of this array.
        - - - -
        +
        Nda<V>deepCopy()
        This method creates and returns a new nd-array instance which is not only a copy of the configuration of this nd-array but also a copy of the underlying data array.
        - -
        default boolean
        -
        every(Predicate<V> predicate)
        -
        +
        default booleanevery(java.util.function.Predicate<V> predicate)
        Iterates over every element of this nd-array, and checks whether all elements are true according to the provided lambda.
        - -
        default Stream<V>
        -
        filter(Predicate<V> predicate)
        -
        +
        default java.util.stream.Stream<V>filter(java.util.function.Predicate<V> predicate)
        A convenience method for stream().filter( predicate ).
        - -
        default <R> Stream<R>
        -
        flatMap(Function<V,Stream<R>> mapper)
        -
        +
        default <R> java.util.stream.Stream<R>flatMap(java.util.function.Function<V,java.util.stream.Stream<R>> mapper)
        A convenience method for nda.stream().flatMap( mapper ), - which turns this Nda into a Stream of its items.
        - - -
        get(int i)
        -
        -
        This getter method creates and returns a slice of the original nd-array.
        -
        - -
        get(int... indices)
        -
        + which turns this Nda into a Stream of its items.
        +
        Nda<V>get(int... indices)
        The following method enables access to specific scalar elements within the nd-array.
        - - - -
        +
        Nda<V>get(int i)
        This getter method creates and returns a slice of the original nd-array.
        - - -
        get(Object key)
        -
        +
        Nda<V>get(java.lang.Number i) +
        This getter method creates and returns a slice of the original nd-array.
        +
        Nda<V>get(java.lang.Object... args) +
        The following method enables the creation of nd-array slices which access + the same underlying data (possibly from a different view).
        +
        Nda<V>get(java.lang.Object key)
        This method enables nd-array slicing! It takes a key of various types and configures a slice nd-array which shares the same underlying data as the original nd-array.
        - - -
        get(Object... args)
        -
        -
        The following method enables the creation of nd-array slices which access - the same underlying data (possibly from a different view).
        -
        - -
        getAt(int i)
        -
        -
        This getter method creates and returns a slice of the original nd-array.
        -
        - -
        getAt(int... indices)
        -
        +
        Nda<V>getAt(int... indices)
        The following method enables access to specific scalar elements within the nd-array.
        - - - -
        +
        Nda<V>getAt(int i)
        This getter method creates and returns a slice of the original nd-array.
        - - -
        getAt(Object... args)
        -
        -
        The following method enables the creation of nd-array slices which access - the same underlying data (possibly from a different view).
        -
        - -
        getAt(List<?> key)
        -
        +
        Nda<V>getAt(java.util.List<?> key)
        This method enables nd-array slicing! It takes a key of various types and configures a slice nd-array which shares the same underlying data as the original nd-array.
        - - -
        getAt(Map<?,Integer> rangToSteps)
        -
        +
        Nda<V>getAt(java.util.Map<?,java.lang.Integer> rangToSteps)
        This method is most useful when used in Groovy where defining maps is done through square brackets, making it possible to slice nd-arrays like so:
        - -
        default <A> A
        -
        getDataAs(Class<A> arrayTypeClass)
        -
        -
        Use this to get the items of the underlying Data buffer +
        Nda<V>getAt(java.lang.Number i) +
        This getter method creates and returns a slice of the original nd-array.
        +
        Nda<V>getAt(java.lang.Object... args) +
        The following method enables the creation of nd-array slices which access + the same underlying data (possibly from a different view).
        +
        default <A> AgetDataAs(java.lang.Class<A> arrayTypeClass) +
        Use this to get the items of the underlying Data buffer of this nd-array as a primitive array of the specified type.
        - - -
        getDataAt(int i)
        -
        +
        VgetDataAt(int i)
        Use this to access elements of the underlying data array without any index transformation applied to it.
        - -
        default V
        - -
        -
        Equivalent to the #item(0) and item().
        -
        -
        default List<V>
        - -
        -
        A more verbose version of the items() method (best used by JVM languages with property support).
        -
        -
        default <A> A
        -
        getItemsAs(Class<A> arrayTypeClass)
        -
        +
        default VgetItem() +
        Equivalent to the #item(0) and item().
        +
        default java.util.List<V>getItems() +
        A more verbose version of the items() method (best used by JVM languages with property support).
        +
        default <A> AgetItemsAs(java.lang.Class<A> arrayTypeClass)
        Use this to get the items of this nd-array as a primitive array of the specified type.
        - - - -
         
        -
        default String
        - -
        +
        java.lang.Class<V>getItemType() 
        default java.lang.StringgetLabel()
        A nd-array can have a label.
        - - - -
        +
        MutateNda<V>getMut()
        This method exposes an API for mutating the state of this tensor.
        - - - -
        +
        java.lang.ObjectgetRawData()
        This returns an unprocessed version of the underlying data of this nd-array.
        - - - -
         
        -
        default boolean
        - -
        +
        java.lang.ObjectgetRawItems() 
        default booleanisFullSlice()
        If this nd-array is a full slice of a parent nd-array then this method will yield true.
        - -
        boolean
        - -
        +
        booleanisPartialSlice()
        If this nd-array is a partial slice of a parent nd-array then this method will yield true.
        - -
        boolean
        - -
        +
        booleanisShallowCopy()
        If this nd-array is a shallow copy of a parent nd-array then this method will yield true.
        - -
        boolean
        - -
        +
        booleanisSlice()
        If this nd-array is a slice of a parent nd-array then this method will yield true.
        - -
        boolean
        - -
        +
        booleanisSliceParent()
        If slices have been derived from this nd-array then it is a "slice parent".
        - -
        default V
        - -
        -
        Equivalent to the #item(0) and getItem().
        -
        -
        default V
        -
        item(int i)
        -
        -
        The following method returns a single item within this nd-array - targeted by the provided integer index.
        -
        -
        default V
        -
        item(int... indices)
        -
        +
        default Vitem() +
        Equivalent to the #item(0) and getItem().
        +
        default Vitem(int... indices)
        This method returns a raw value item within this nd-array targeted by an index array which is expected to hold an index for every dimension of the shape of this nd-array.
        - -
        default List<V>
        - -
        -
        A more concise version of the getItems() method.
        -
        -
        default Class<V>
        - -
         
        -
        default String
        - -
        +
        default Vitem(int i) +
        The following method returns a single item within this nd-array + targeted by the provided integer index.
        +
        default java.util.List<V>items() +
        A more concise version of the getItems() method.
        +
        default java.lang.Class<V>itemType() 
        default java.lang.Stringlabel()
        A nd-array can have a label.
        - - -
        map(Function<V,V> mapper)
        -
        +
        Nda<V>map(java.util.function.Function<V,V> mapper)
        This method is a convenience method for mapping the items of this nd-array to another nd-array of the same type based on the provided lambda function, which will be applied to all items of this nd-array individually (element-wise).
        - -
        <T> Nda<T>
        -
        mapTo(Class<T> typeClass, - Function<V,T> mapper)
        -
        +
        <T> Nda<T>mapTo(java.lang.Class<T> typeClass, + java.util.function.Function<V,T> mapper)
        This is a convenience method for mapping a nd-array to a nd-array of new type based on a provided target item type and mapping lambda.
        - -
        default V
        -
        maxItem(Comparator<V> comparator)
        -
        +
        default VmaxItem(java.util.Comparator<V> comparator)
        Returns the maximum item of this nd-array according to the provided - Comparator.
        - -
        default V
        -
        minItem(Comparator<V> comparator)
        -
        + Comparator.
        +
        default VminItem(java.util.Comparator<V> comparator)
        Returns the minimum item of this nd-array according to the provided - Comparator.
        - -
        default MutateNda<V>
        -
        mut()
        -
        + Comparator.
        +
        default MutateNda<V>mut()
        This method exposes an API for mutating the state of this tensor.
        - -
        default boolean
        -
        none(Predicate<V> predicate)
        -
        +
        default booleannone(java.util.function.Predicate<V> predicate)
        Iterates over every element of this nd-array, and checks whether none of the elements match the provided lambda.
        - -
        static Nda<Boolean>
        -
        of(boolean... value)
        -
        +
        static Nda<java.lang.Boolean>of(boolean... value)
        Constructs a vector of booleans based on the provided array.
        - -
        static Nda<Byte>
        -
        of(byte... value)
        -
        +
        static Nda<java.lang.Byte>of(byte... value)
        Constructs a vector of bytes based on the provided array.
        - -
        static Nda<Double>
        -
        of(double value)
        -
         
        -
        static Nda<Double>
        -
        of(double... value)
        -
        +
        static <V> WithShapeOrScalarOrVector<V>of(java.lang.Class<V> type) +
        This is the entry point to the fluent nd-array builder API for building + Nda instances in a readable and type safe fashion.
        +
        static Nda<java.lang.Double>of(double... value)
        Constructs a vector of doubles based on the provided array.
        - -
        static Nda<Float>
        -
        of(float... value)
        -
        +
        static Nda<java.lang.Double>of(double value) 
        static Nda<java.lang.Float>of(float... value)
        Constructs a vector of floats based on the provided array.
        - -
        static Nda<Integer>
        -
        of(int... value)
        -
        +
        static Nda<java.lang.Integer>of(int... value)
        Constructs a vector of ints based on the provided array.
        - -
        static Nda<Long>
        -
        of(long... value)
        -
        -
        Constructs a vector of longs based on the provided array.
        -
        -
        static Nda<Short>
        -
        of(short... value)
        -
        -
        Constructs a vector of shorts based on the provided array.
        -
        - -
        of(Class<V> type)
        -
        -
        This is the entry point to the fluent nd-array builder API for building - Nda instances in a readable and type safe fashion.
        -
        -
        static <T> Nda<T>
        -
        of(Iterable<T> values)
        -
        +
        static <T> Nda<T>of(java.lang.Iterable<T> values)
        Constructs a vector of objects based on the provided iterable.
        - -
        static <T> Nda<T>
        -
        of(List<T> values)
        -
        +
        static <T> Nda<T>of(java.util.List<T> values)
        Constructs a vector of objects based on the provided list.
        - -
        static Nda<Boolean>
        -
        of(Shape shape, - boolean... values)
        -
        +
        static Nda<java.lang.Long>of(long... value) +
        Constructs a vector of longs based on the provided array.
        +
        static Nda<java.lang.Boolean>of(Shape shape, + boolean... values)
        Use this to construct and return a boolean based nd-array of the specified shape and initial values.
        - -
        static Nda<Byte>
        -
        of(Shape shape, - byte... values)
        -
        +
        static Nda<java.lang.Byte>of(Shape shape, + byte... values)
        Use this to construct and return a byte based nd-array of the specified shape and initial values.
        - -
        static Nda<Double>
        -
        of(Shape shape, - double... values)
        -
        +
        static Nda<java.lang.Double>of(Shape shape, + double... values)
        Use this to construct and return a double based nd-array of the specified shape and initial values.
        - -
        static Nda<Float>
        -
        of(Shape shape, - float... values)
        -
        +
        static Nda<java.lang.Float>of(Shape shape, + float... values)
        Use this to construct and return a float based nd-array of the specified shape and initial values.
        - -
        static Nda<Integer>
        -
        of(Shape shape, - int... values)
        -
        +
        static Nda<java.lang.Integer>of(Shape shape, + int... values)
Use this to construct and return an int based nd-array of the specified shape and initial values.
        - -
        static Nda<Long>
        -
        of(Shape shape, - long... values)
        -
        +
        static Nda<java.lang.Long>of(Shape shape, + long... values)
        Use this to construct and return a long based nd-array of the specified shape and initial values.
        - -
        static Nda<Short>
        -
        of(Shape shape, - short... values)
        -
        +
        static Nda<java.lang.Short>of(Shape shape, + short... values)
        Use this to construct and return a short based nd-array of the specified shape and initial values.
        - -
        static <T> Nda<T>
        -
        of(Shape shape, - T... values)
        -
        +
        static <T> Nda<T>of(Shape shape, + T... values)
        Use this to construct and return an object based nd-array of the specified shape and initial values.
        - -
        static <T> Nda<T>
        -
        of(T... values)
        -
        +
        static Nda<java.lang.Short>of(short... value) +
        Constructs a vector of shorts based on the provided array.
        +
        static <T> Nda<T>of(T... values)
        Constructs a vector of objects based on the provided array.
        - - - -
        +
        static WithShapeOrScalarOrVector<java.math.BigDecimal>ofBigDecimals()
        This is a shortcut method for Nda.of(BigDecimal.class) - used to build Ndas storing BigDecimals.
        - - - -
        + used to build Ndas storing BigDecimals.
        +
        static WithShapeOrScalarOrVector<java.lang.Boolean>ofBooleans()
        This is a shortcut method for Nda.of(Boolean.class) - used to build Ndas storing Booleans.
        - - - -
        + used to build Ndas storing Booleans.
        +
        static WithShapeOrScalarOrVector<java.lang.Byte>ofBytes()
        This is a shortcut method for Nda.of(Byte.class) - used to build Ndas storing Bytes.
        - - - -
        + used to build Ndas storing Bytes.
        +
        static WithShapeOrScalarOrVector<java.lang.Character>ofChars()
        This is a shortcut method for Nda.of(Character.class) - used to build Ndas storing Characters.
        - - - -
        + used to build Ndas storing Characters.
        +
        static WithShapeOrScalarOrVector<java.lang.Double>ofDoubles()
        This is a shortcut method for Nda.of(Double.class) - used to build Ndas storing Doubles.
        - - - -
        + used to build Ndas storing Doubles.
        +
        static WithShapeOrScalarOrVector<java.lang.Float>ofFloats()
        This is a shortcut method for Nda.of(Float.class) - used to build Ndas storing Floats.
        - - - -
        + used to build Ndas storing Floats.
        +
        static WithShapeOrScalarOrVector<java.lang.Integer>ofInts()
        This is a shortcut method for Nda.of(Integer.class) - used to build Ndas storing Integers.
        - - - -
        + used to build Ndas storing Integers.
        +
        static WithShapeOrScalarOrVector<java.lang.Long>ofLongs()
        This is a shortcut method for Nda.of(Long.class) - used to build Ndas storing Longs.
        - - - -
        + used to build Ndas storing Longs.
        +
        static WithShapeOrScalarOrVector<java.lang.Number>ofNumbers()
        This is a shortcut method for Nda.of(Number.class) - used to build Ndas storing Numbers.
        - - - -
        + used to build Ndas storing Numbers.
        +
        static WithShapeOrScalarOrVector<java.lang.Object>ofObjects()
        This is a shortcut method for Nda.of(Object.class) - used to build Ndas storing Objects.
        - - - -
        + used to build Ndas storing Objects.
        +
        static WithShapeOrScalarOrVector<java.lang.Short>ofShorts()
        This is a shortcut method for Nda.of(Short.class) - used to build Ndas storing Shorts.
        - - - -
        + used to build Ndas storing Shorts.
        +
        static WithShapeOrScalarOrVector<java.lang.String>ofStrings()
        This is a shortcut method for Nda.of(String.class) - used to build Ndas storing Strings.
        - - -
        permute(int... dims)
        -
        + used to build Ndas storing Strings.
        +
        Nda<V>permute(int... dims)
        Returns a view of the original tensor input with its dimensions permuted.
        Consider a 3-dimensional tensor x with shape (2×3×5), then calling x.permute(1, 0, 2) will return a 3-dimensional tensor of shape (3×2×5).
        - - -
        reshape(int... shape)
        -
        +
        Nda<V>reshape(int... shape)
        Returns a nd-array with the same data and number of elements as this nd-array, but with the specified shape.
        - - - -
        +
        Nda<V>shallowCopy()
        This creates a copy where the underlying data is still the same.
        - -
        static <T> Collector<T,?,Nda<T>>
        -
        shaped(int... shape)
        -
        +
        static <T> java.util.stream.Collector<T,?,Nda<T>>shaped(int... shape)
        Returns a Collector that accumulates the input elements into a - new Nda with the specified shape.
        - - - -
        -
        This method returns a SliceBuilder instance exposing a simple builder API + new Nda with the specified shape.
        +
        AxisOrGet<V>slice() +
        This method returns a SliceBuilder instance exposing a simple builder API which enables the configuration of a slice of the current nd-array via method chaining.
        - -
        int
        - -
        +
        intsliceCount()
        This method returns the number of slices which have been created from this nd-array.
        - -
        default Stream<V>
        - -
         
        - - -
        -
        This method returns a String representation of this nd-array.
        -
        -
        default String
        - -
        +
        default java.util.stream.Stream<V>stream() 
        java.lang.StringtoString() +
        This method returns a String representation of this nd-array.
        +
        default java.lang.StringtoString(java.util.function.Consumer<NDPrintSettings> config)
        This allows you to provide a lambda which configures how this nd-array should be - converted to String instances.
        - -
        default String
        - -
        -
        Use this to turn this nd-array into a String instance based on the provided - NDPrintSettings instance, which allows you to configure things + converted to String instances.
        +
        default java.lang.StringtoString(NDPrintSettings config) +
        Use this to turn this nd-array into a String instance based on the provided + NDPrintSettings instance, which allows you to configure things like the number of chars per entry, delimiters, the number of items per line, etc.
        - - -
        transpose(int dim1, - int dim2)
        -
        +
        Nda<V>transpose(int dim1, + int dim2)
Returns a view of the original tensor input with the targeted axes swapped / transposed.
        - - - -
         
        - -
        withLabels(String[]... labels)
        -
        -
        This method receives a nested String array which - ought to contain a label for the index of this nd-array.
        -
        - - -
        -
        This method receives a nested String list which +
        Nda<V>withLabel(java.lang.String label) 
        Nda<V>withLabels(java.util.List<java.util.List<java.lang.Object>> labels) +
        This method receives a nested String list which ought to contain a label for the index of this nd-array.
        - - - -
        +
        Nda<V>withLabels(java.util.Map<java.lang.Object,java.util.List<java.lang.Object>> labels)
        This method provides the ability to label not only the indices of the shape of this nd-array, but also the dimension of the shape.
        +
        Nda<V>withLabels(java.lang.String[]... labels) +
        This method receives a nested String array which + ought to contain a label for the index of this nd-array.
        +
        + +
          +
        • + + +

          Methods inherited from interface java.lang.Iterable

          +forEach, iterator, spliterator
        • +
        +
      • +
      +
    • +
    -
    -
    - -
    -

    Methods inherited from interface java.lang.Iterable

    -forEach, iterator, spliterator
    - - - - - -
    -
      +
      +
        +
      • -
      • -
        -

        Method Details

        -
          -
        • -
          -

          of

          -
          static <V> WithShapeOrScalarOrVector<V> of(Class<V> type)
          +
            +
          • + + +

            Method Detail

            + + + +
              +
            • +

              of

              +
              static <V> WithShapeOrScalarOrVector<V> of(java.lang.Class<V> type)
              This is the entry point to the fluent nd-array builder API for building - Nda instances in a readable and type safe fashion. - The returned WithShapeOrScalarOrVector is the next step in the - fluent Nda builder API which will lead to the creation + Nda instances in a readable and type safe fashion. + The returned WithShapeOrScalarOrVector is the next step in the + fluent Nda builder API which will lead to the creation of an nd-array storing values defined by the provided type class. A simple usage example would be:
              
              @@ -707,583 +847,708 @@ 

              of

              
                      Nda.of(Byte.class).withShape(2, 3).andWhere( (i, indices) -> i * 5 - 30 )
                  
              -
              -
              Parameters:
              +
              +
              Parameters:
              type - The type class of the items stored by the nd-array built by the exposed builder API.
              -
              Returns:
              -
              The next step of the Nda builder API which exposes methods for defining shapes.
              +
              Returns:
              +
              The next step of the Nda builder API which exposes methods for defining shapes.
              -
        • -
        • -
          -

          ofStrings

          -
          static WithShapeOrScalarOrVector<String> ofStrings()
          +
        + + + +
          +
        • +

          ofStrings

          +
          static WithShapeOrScalarOrVector<java.lang.String> ofStrings()
          This is a shortcut method for Nda.of(String.class) - used to build Ndas storing Strings.
          -
          -
          Returns:
          -
          The next step of the Nda builder API which exposes methods for defining shapes.
          + used to build Ndas storing Strings.
      +
      +
      Returns:
      +
      The next step of the Nda builder API which exposes methods for defining shapes.
      -
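As a sketch, the ofXxx() shortcut methods plug straight into the same fluent builder shown for of(Class) above; withShape and the (index, indices) -> value filler passed to andWhere are taken from that example, while the shape and produced values here are made up:

     var letters = Nda.ofStrings().withShape( 2, 3 ).andWhere( (i, indices) -> "cell " + i );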
    -
  • -
    -

    ofInts

    - + + + + +
      +
    • +

      ofInts

      +
      static WithShapeOrScalarOrVector<java.lang.Integer> ofInts()
      This is a shortcut method for Nda.of(Integer.class) - used to build Ndas storing Integers.
      -
      -
      Returns:
      -
      The next step of the Nda builder API which exposes methods for defining shapes.
      + used to build Ndas storing Integers. +
      +
      Returns:
      +
      The next step of the Nda builder API which exposes methods for defining shapes.
      -
  • -
  • -
    -

    ofDoubles

    -
    static WithShapeOrScalarOrVector<Double> ofDoubles()
    + + + + +
      +
    • +

      ofDoubles

      +
      static WithShapeOrScalarOrVector<java.lang.Double> ofDoubles()
      This is a shortcut method for Nda.of(Double.class) - used to build Ndas storing Doubles.
      -
      -
      Returns:
      -
      The next step of the Nda builder API which exposes methods for defining shapes.
      + used to build Ndas storing Doubles. +
      +
      Returns:
      +
      The next step of the Nda builder API which exposes methods for defining shapes.
      -
  • -
  • -
    -

    ofFloats

    -
    static WithShapeOrScalarOrVector<Float> ofFloats()
    + + + + +
      +
    • +

      ofFloats

      +
      static WithShapeOrScalarOrVector<java.lang.Float> ofFloats()
      This is a shortcut method for Nda.of(Float.class) - used to build Ndas storing Floats.
      -
      -
      Returns:
      -
      The next step of the Nda builder API which exposes methods for defining shapes.
      + used to build Ndas storing Floats. +
      +
      Returns:
      +
      The next step of the Nda builder API which exposes methods for defining shapes.
      -
  • -
  • -
    -

    ofLongs

    -
    static WithShapeOrScalarOrVector<Long> ofLongs()
    + + + + +
      +
    • +

      ofLongs

      +
      static WithShapeOrScalarOrVector<java.lang.Long> ofLongs()
      This is a shortcut method for Nda.of(Long.class) - used to build Ndas storing Longs.
      -
      -
      Returns:
      -
      The next step of the Nda builder API which exposes methods for defining shapes.
      + used to build Ndas storing Longs. +
      +
      Returns:
      +
      The next step of the Nda builder API which exposes methods for defining shapes.
      -
  • -
  • -
    -

    ofBooleans

    -
    static WithShapeOrScalarOrVector<Boolean> ofBooleans()
    + + + + +
      +
    • +

      ofBooleans

      +
      static WithShapeOrScalarOrVector<java.lang.Boolean> ofBooleans()
      This is a shortcut method for Nda.of(Boolean.class) - used to build Ndas storing Booleans.
      -
      -
      Returns:
      -
      The next step of the Nda builder API which exposes methods for defining shapes.
      + used to build Ndas storing Booleans. +
      +
      Returns:
      +
      The next step of the Nda builder API which exposes methods for defining shapes.
      -
  • -
  • -
    -

    ofChars

    - + + + + +
      +
    • +

      ofChars

      +
      static WithShapeOrScalarOrVector<java.lang.Character> ofChars()
      This is a shortcut method for Nda.of(Character.class) - used to build Ndas storing Characters.
      -
      -
      Returns:
      -
      The next step of the Nda builder API which exposes methods for defining shapes.
      + used to build Ndas storing Characters. +
      +
      Returns:
      +
      The next step of the Nda builder API which exposes methods for defining shapes.
      -
  • -
  • -
    -

    ofBytes

    -
    static WithShapeOrScalarOrVector<Byte> ofBytes()
    + + + + +
      +
    • +

      ofBytes

      +
      static WithShapeOrScalarOrVector<java.lang.Byte> ofBytes()
      This is a shortcut method for Nda.of(Byte.class) - used to build Ndas storing Bytes.
      -
      -
      Returns:
      -
      The next step of the Nda builder API which exposes methods for defining shapes.
      + used to build Ndas storing Bytes. +
      +
      Returns:
      +
      The next step of the Nda builder API which exposes methods for defining shapes.
      -
  • -
  • -
    -

    ofShorts

    -
    static WithShapeOrScalarOrVector<Short> ofShorts()
    + + + + +
      +
    • +

      ofShorts

      +
      static WithShapeOrScalarOrVector<java.lang.Short> ofShorts()
      This is a shortcut method for Nda.of(Short.class) - used to build Ndas storing Shorts.
      -
      -
      Returns:
      -
      The next step of the Nda builder API which exposes methods for defining shapes.
      + used to build Ndas storing Shorts. +
      +
      Returns:
      +
      The next step of the Nda builder API which exposes methods for defining shapes.
      -
  • -
  • -
    -

    ofObjects

    -
    static WithShapeOrScalarOrVector<Object> ofObjects()
    + + + + +
      +
    • +

      ofObjects

      +
      static WithShapeOrScalarOrVector<java.lang.Object> ofObjects()
      This is a shortcut method for Nda.of(Object.class) - used to build Ndas storing Objects.
      -
      -
      Returns:
      -
      The next step of the Nda builder API which exposes methods for defining shapes.
      + used to build Ndas storing Objects. +
      +
      Returns:
      +
      The next step of the Nda builder API which exposes methods for defining shapes.
      -
  • -
  • -
    -

    ofNumbers

    -
    static WithShapeOrScalarOrVector<Number> ofNumbers()
    + + + + +
      +
    • +

      ofNumbers

      +
      static WithShapeOrScalarOrVector<java.lang.Number> ofNumbers()
      This is a shortcut method for Nda.of(Number.class) - used to build Ndas storing Numbers.
      -
      -
      Returns:
      -
      The next step of the Nda builder API which exposes methods for defining shapes.
      + used to build Ndas storing Numbers. +
      +
      Returns:
      +
      The next step of the Nda builder API which exposes methods for defining shapes.
      -
  • -
  • -
    -

    ofBigDecimals

    -
    static WithShapeOrScalarOrVector<BigDecimal> ofBigDecimals()
    + + + + +
      +
    • +

      ofBigDecimals

      +
      static WithShapeOrScalarOrVector<java.math.BigDecimal> ofBigDecimals()
      This is a shortcut method for Nda.of(BigDecimal.class) - used to build Ndas storing BigDecimals.
      -
      -
      Returns:
      -
      The next step of the Nda builder API which exposes methods for defining shapes.
      -
      -
    -
  • -
  • -
    -

    of

    -
    static Nda<Double> of(double value)
    -
    -
    Parameters:
    + used to build Ndas storing BigDecimals. +
    +
    Returns:
    +
    The next step of the Nda builder API which exposes methods for defining shapes.
    +
    +
  • + + + + +
      +
    • +

      of

      +
      static Nda<java.lang.Double> of(double value)
      +
      +
      Parameters:
      value - The scalar value which ought to be represented as nd-array.
      -
      Returns:
      +
      Returns:
      A scalar double nd-array.
      -
    • -
    • -
      -

      of

      -
      static Nda<Float> of(float... value)
      +
    + + + +
      +
    • +

      of

      +
      static Nda<java.lang.Float> of(float... value)
      Constructs a vector of floats based on the provided array.
      -
      -
      Parameters:
      +
      +
      Parameters:
      value - The array of floats from which a 1D nd-array ought to be constructed.
      -
      Returns:
      +
      Returns:
      A vector / 1D nd-array of floats.
      -
    • -
    • -
      -

      of

      -
      static Nda<Double> of(double... value)
      +
    + + + +
      +
    • +

      of

      +
      static Nda<java.lang.Double> of(double... value)
      Constructs a vector of doubles based on the provided array.
      -
      -
      Parameters:
      +
      +
      Parameters:
      value - The array of doubles from which a 1D nd-array ought to be constructed.
      -
      Returns:
      +
      Returns:
      A vector / 1D nd-array of doubles.
      -
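A minimal sketch of vector construction from a primitive array (the values are illustrative):

     var vector = Nda.of( 0.5, 1.5, 2.5 );   // 1D nd-array of shape (3)
     System.out.println( vector.item( 1 ) ); // prints 1.5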
    • -
    • -
      -

      of

      -
      static Nda<Byte> of(byte... value)
      +
    + + + +
      +
    • +

      of

      +
      static Nda<java.lang.Byte> of(byte... value)
      Constructs a vector of bytes based on the provided array.
      -
      -
      Parameters:
      +
      +
      Parameters:
      value - The array of bytes from which a 1D nd-array ought to be constructed.
      -
      Returns:
      +
      Returns:
      A vector / 1D nd-array of bytes.
      -
    • -
    • -
      -

      of

      -
      static Nda<Integer> of(int... value)
      +
    + + + +
      +
    • +

      of

      +
      static Nda<java.lang.Integer> of(int... value)
      Constructs a vector of ints based on the provided array.
      -
      -
      Parameters:
      +
      +
      Parameters:
      value - The array of ints from which a 1D nd-array ought to be constructed.
      -
      Returns:
      +
      Returns:
      A vector / 1D nd-array of ints.
      -
    • -
    • -
      -

      of

      -
      static Nda<Long> of(long... value)
      +
    + + + +
      +
    • +

      of

      +
      static Nda<java.lang.Long> of(long... value)
      Constructs a vector of longs based on the provided array.
      -
      -
      Parameters:
      +
      +
      Parameters:
      value - The array of longs from which a 1D nd-array ought to be constructed.
      -
      Returns:
      +
      Returns:
      A vector / 1D nd-array of longs.
      -
    • -
    • -
      -

      of

      -
      static Nda<Short> of(short... value)
      +
    + + + +
      +
    • +

      of

      +
      static Nda<java.lang.Short> of(short... value)
      Constructs a vector of shorts based on the provided array.
      -
      -
      Parameters:
      +
      +
      Parameters:
      value - The array of shorts from which a 1D nd-array ought to be constructed.
      -
      Returns:
      +
      Returns:
      A vector / 1D nd-array of shorts.
      -
    • -
    • -
      -

      of

      -
      static Nda<Boolean> of(boolean... value)
      +
    + + + +
      +
    • +

      of

      +
      static Nda<java.lang.Boolean> of(boolean... value)
      Constructs a vector of booleans based on the provided array.
      -
      -
      Parameters:
      +
      +
      Parameters:
      value - The array of booleans from which a 1D nd-array ought to be constructed.
      -
      Returns:
      +
      Returns:
A vector / 1D nd-array of booleans.
      -
    • -
    • -
      -

      of

      -
      @SafeVarargs -static <T> Nda<T> of(T... values)
      +
    + + + + + +
      +
    • +

      of

      +
      @SafeVarargs
      +static <T> Nda<T> of(T... values)
      Constructs a vector of objects based on the provided array.
      -
      -
      Parameters:
      +
      +
      Parameters:
      values - The array of objects from which a 1D nd-array ought to be constructed.
      -
      Returns:
      +
      Returns:
      A vector / 1D nd-array of objects.
      -
    • -
    • -
      -

      of

      -
      static Nda<Double> of(Shape shape, - double... values)
      +
    + + + +
      +
    • +

      of

      +
      static Nda<java.lang.Double> of(Shape shape,
      +                                double... values)
      Use this to construct and return a double based nd-array of the specified shape and initial values. The length of the provided array does not have to match the number of elements defined by the provided shape, the nd-array will be populated based on repeated iteration over the provided double array.
      -
      -
      Parameters:
      +
      +
      Parameters:
      shape - The shape of the resulting nd-array consisting of any number of axis-sizes.
      values - The values which ought to be used to populate the tensor.
      -
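A sketch of the repeat-fill behaviour described above; Shape.of(int...) is assumed here purely to build the Shape argument, and the values are illustrative:

     var filled = Nda.of( Shape.of( 2, 3 ), 1.0, 2.0 ); // populated as 1.0, 2.0, 1.0, 2.0, 1.0, 2.0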
    • -
    • -
      -

      of

      -
      static Nda<Float> of(Shape shape, - float... values)
      +
    + + + +
      +
    • +

      of

      +
      static Nda<java.lang.Float> of(Shape shape,
      +                               float... values)
      Use this to construct and return a float based nd-array of the specified shape and initial values. The length of the provided array does not have to match the number of elements defined by the provided shape, the nd-array will be populated based on repeated iteration over the provided float array.
      -
      -
      Parameters:
      +
      +
      Parameters:
      shape - The shape of the resulting nd-array consisting of any number of axis-sizes.
      values - The values which ought to be used to populate the tensor.
      -
    • -
    • -
      -

      of

      -
      static Nda<Byte> of(Shape shape, - byte... values)
      +
    + + + +
      +
    • +

      of

      +
      static Nda<java.lang.Byte> of(Shape shape,
      +                              byte... values)
      Use this to construct and return a byte based nd-array of the specified shape and initial values. The length of the provided array does not have to match the number of elements defined by the provided shape, the nd-array will be populated based on repeated iteration over the provided byte array.
      -
      -
      Parameters:
      +
      +
      Parameters:
      shape - The shape of the resulting nd-array consisting of any number of axis-sizes.
      values - The values which ought to be used to populate the tensor.
      -
    • -
    • -
      -

      of

      -
      static Nda<Integer> of(Shape shape, - int... values)
      +
    + + + +
      +
    • +

      of

      +
      static Nda<java.lang.Integer> of(Shape shape,
      +                                 int... values)
Use this to construct and return an int based nd-array of the specified shape and initial values. The length of the provided array does not have to match the number of elements defined by the provided shape, the nd-array will be populated based on repeated iteration over the provided int array.
      -
      -
      Parameters:
      +
      +
      Parameters:
      shape - The shape of the resulting nd-array consisting of any number of axis-sizes.
      values - The values which ought to be used to populate the tensor.
      -
    • -
    • -
      -

      of

      -
      static Nda<Long> of(Shape shape, - long... values)
      +
    + + + +
      +
    • +

      of

      +
      static Nda<java.lang.Long> of(Shape shape,
      +                              long... values)
      Use this to construct and return a long based nd-array of the specified shape and initial values. The length of the provided array does not have to match the number of elements defined by the provided shape, the nd-array will be populated based on repeated iteration over the provided long array.
      -
      -
      Parameters:
      +
      +
      Parameters:
      shape - The shape of the resulting nd-array consisting of any number of axis-sizes.
      values - The values which ought to be used to populate the tensor.
      -
    • -
    • -
      -

      of

      -
      static Nda<Short> of(Shape shape, - short... values)
      +
    + + + +
      +
    • +

      of

      +
      static Nda<java.lang.Short> of(Shape shape,
      +                               short... values)
      Use this to construct and return a short based nd-array of the specified shape and initial values. The length of the provided array does not have to match the number of elements defined by the provided shape, the nd-array will be populated based on repeated iteration over the provided short array.
      -
      -
      Parameters:
      +
      +
      Parameters:
      shape - The shape of the resulting nd-array consisting of any number of axis-sizes.
      values - The values which ought to be used to populate the tensor.
      -
    • -
    • -
      -

      of

      -
      static Nda<Boolean> of(Shape shape, - boolean... values)
      +
    + + + +
      +
    • +

      of

      +
      static Nda<java.lang.Boolean> of(Shape shape,
      +                                 boolean... values)
      Use this to construct and return a boolean based nd-array of the specified shape and initial values. The length of the provided array does not have to match the number of elements defined by the provided shape, the nd-array will be populated based on repeated iteration over the provided boolean array.
      -
      -
      Parameters:
      +
      +
      Parameters:
      shape - The shape of the resulting nd-array consisting of any number of axis-sizes.
      values - The values which ought to be used to populate the tensor.
      -
    • -
    • -
      -

      of

      -
      @SafeVarargs -static <T> Nda<T> of(Shape shape, - T... values)
      +
    + + + + + +
      +
    • +

      of

      +
      @SafeVarargs
      +static <T> Nda<T> of(Shape shape,
      +                                  T... values)
      Use this to construct and return an object based nd-array of the specified shape and initial values. The length of the provided array does not have to match the number of elements defined by the provided shape, the nd-array will be populated based on repeated iteration over the provided object array.
      -
      -
      Parameters:
      +
      +
      Parameters:
      shape - The shape of the resulting nd-array consisting of any number of axis-sizes.
      values - The values which ought to be used to populate the tensor.
      -
    • -
    • -
      -

      of

      -
      static <T> Nda<T> of(Iterable<T> values)
      +
    + + + +
      +
    • +

      of

      +
      static <T> Nda<T> of(java.lang.Iterable<T> values)
      Constructs a vector of objects based on the provided iterable.
      -
      -
      Parameters:
      +
      +
      Parameters:
      values - The iterable of objects from which a 1D nd-array ought to be constructed.
      -
      Returns:
      +
      Returns:
      A vector / 1D nd-array of objects.
      -
    • -
    • -
      -

      of

      -
      static <T> Nda<T> of(List<T> values)
      +
    + + + +
      +
    • +

      of

      +
      static <T> Nda<T> of(java.util.List<T> values)
      Constructs a vector of objects based on the provided list.
      -
      -
      Parameters:
      +
      +
      Parameters:
      values - The list of objects from which a 1D nd-array ought to be constructed.
      -
      Returns:
      +
      Returns:
      A vector / 1D nd-array of objects.
      -
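A sketch of building a vector from a java.util.List (contents illustrative):

     var names = Nda.of( List.of( "Ada", "Alan", "Grace" ) ); // 1D nd-array of three Strings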
    • -
    • -
      -

      getLabel

      -
      default String getLabel()
      +
    + + + +
      +
    • +

      getLabel

      +
      default java.lang.String getLabel()
      A nd-array can have a label. This label is used for example when printing the nd-array. When loading a CSV file for example the label of the nd-array will be taken from the cell where the header row and the first column intersect.
      -
      -
      Returns:
      +
      +
      Returns:
      The label/name of the nd-array.
      -
    • -
    • -
      -

      label

      -
      default String label()
      +
    + + + +
      +
    • +

      label

      +
      default java.lang.String label()
      A nd-array can have a label. This label is used for example when printing the nd-array. When loading a CSV file for example the label of the nd-array will be taken from the cell where the header row and the first column intersect. - This is a shorter version of getLabel().
      -
      -
      Returns:
      + This is a shorter version of getLabel(). +
      +
      Returns:
      The label/name of the nd-array.
      -
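A sketch combining withLabel (documented further below) with the label()/getLabel() getters; the name and values are illustrative:

     var scores = Nda.of( 0.3, 0.7 ).withLabel( "scores" );
     System.out.println( scores.label() ); // prints "scores"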
    • -
    • -
      -

      isSlice

      -
      boolean isSlice()
      +
    + + + +
      +
    • +

      isSlice

      +
      boolean isSlice()
      If this nd-array is a slice of a parent nd-array then this method will yield true. - Slices can be created by calling the variations of the "getAt(int...)" method.
      -
      -
      Returns:
      + Slices can be created by calling the variations of the "getAt(int...)" method. +
      +
      Returns:
      The truth value determining if this nd-array is a slice of another nd-array.
      -
      See Also:
      -
      - -
      +
      See Also:
      +
      getAt(int...), +slice()
      -
    • -
    • -
      -

      isShallowCopy

      -
      boolean isShallowCopy()
      +
    + + + +
      +
    • +

      isShallowCopy

      +
      boolean isShallowCopy()
      If this nd-array is a shallow copy of a parent nd-array then this method will yield true. - Shallow copies can be created by calling the "shallowCopy()" method.
      -
      -
      Returns:
      + Shallow copies can be created by calling the "shallowCopy()" method. +
      +
      Returns:
      The truth value determining if this nd-array is a shallow copy of another nd-array.
      -
      See Also:
      -
      - -
      +
      See Also:
      +
      shallowCopy()
      -
    • -
    • -
      -

      isPartialSlice

      -
      boolean isPartialSlice()
      +
    + + + +
      +
    • +

      isPartialSlice

      +
      boolean isPartialSlice()
      If this nd-array is a partial slice of a parent nd-array then this method will yield true. A partial slice is a slice which does not view all the parents items. - Partial slices can be created by calling the variations of the "getAt(int...)" method. - This is the inverse of isFullSlice().
      -
      -
      Returns:
      + Partial slices can be created by calling the variations of the "getAt(int...)" method. + This is the inverse of isFullSlice(). +
      +
      Returns:
      The truth value determining if this nd-array is a partial slice of another nd-array.
      -
    • -
    • -
      -

      isFullSlice

      -
      default boolean isFullSlice()
      +
    + + + +
      +
    • +

      isFullSlice

      +
      default boolean isFullSlice()
      If this nd-array is a full slice of a parent nd-array then this method will yield true. A full slice is a slice which views all the parents items. - Full slices can be created by calling the variations of the "getAt(int...)" method. - This is the inverse of isPartialSlice().
      -
      -
      Returns:
      + Full slices can be created by calling the variations of the "getAt(int...)" method. + This is the inverse of isPartialSlice(). +
      +
      Returns:
      The truth value determining if this nd-array is a full slice of another nd-array.
      -
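A sketch of the slice predicates above, using getAt(int) to derive a slice as the docs describe; shape and values are illustrative:

     var parent = Nda.of( 1, 2, 3, 4, 5, 6 ).reshape( 2, 3 );
     var row    = parent.getAt( 0 );       // a slice viewing only part of the parent
     boolean s  = row.isSlice();           // true
     boolean p  = row.isPartialSlice();    // true, since not all parent items are viewed
     boolean sp = parent.isSliceParent();  // true, a slice was derived from it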
    • -
    • -
      -

      sliceCount

      -
      int sliceCount()
      +
    + + + +
      +
    • +

      sliceCount

      +
      int sliceCount()
      This method returns the number of slices which have been created from this nd-array. - It does so by accessing the Relation component if present + It does so by accessing the Relation component if present which internally keeps track of slices via weak references.
      -
      -
      Returns:
      +
      +
      Returns:
      The number of slices derived from this nd-array.
      -
    • -
    • -
      -

      isSliceParent

      -
      boolean isSliceParent()
      +
    + + + +
      +
    • +

      isSliceParent

      +
      boolean isSliceParent()
      If slices have been derived from this nd-array then it is a "slice parent". This is what this method will determine, in which case, it will return true.
      -
      -
      Returns:
      +
      +
      Returns:
      The truth value determining if slices have been derived from this nd-array.
      -
    • -
    • -
      -

      getItemType

      -
      Class<V> getItemType()
      -
      -
      Returns:
      +
    + + + +
      +
    • +

      getItemType

      +
      java.lang.Class<V> getItemType()
      +
      +
      Returns:
      The type class of individual value items within this nd-array.
      -
    • -
    • -
      -

      itemType

      -
      default Class<V> itemType()
      -
      -
      Returns:
      +
    + + + +
      +
    • +

      itemType

      +
      default java.lang.Class<V> itemType()
      +
      +
      Returns:
      The type class of individual value items within this nd-array.
      -
    • -
    • -
      -

      withLabel

      -
      Nda<V> withLabel(String label)
      -
      -
      Returns:
      +
    + + + +
      +
    • +

      withLabel

      +
      Nda<V> withLabel(java.lang.String label)
      +
      +
      Returns:
      A new nd-array which is a shallow copy of this nd-array but with a different label.
      -
    • -
    • -
      -

      withLabels

      -
      Nda<V> withLabels(String[]... labels)
      -
      This method receives a nested String array which +
    + + + +
      +
    • +

      withLabels

      +
      Nda<V> withLabels(java.lang.String[]... labels)
      +
      This method receives a nested String array which ought to contain a label for the index of this nd-array. The index for a single element of this nd-array would be an array of numbers as long as the rank where every number is @@ -1295,19 +1560,22 @@

      withLabels

      dim 0 : ["A", "B"]
      dim 1 : ["1", "2", "3"]

      -
      -
      Parameters:
      +
      +
      Parameters:
      labels - A nested String array containing labels for indexes of the nd-array dimensions.
      -
      Returns:
      +
      Returns:
      This nd-array (method chaining).
      -
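A sketch matching the dim 0 / dim 1 illustration above (the labels and shape are illustrative):

     var table = Nda.of( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0 )
                    .reshape( 2, 3 )
                    .withLabels( new String[][]{ {"A", "B"}, {"1", "2", "3"} } );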
    • -
    • -
      -

      withLabels

      -
      Nda<V> withLabels(List<List<Object>> labels)
      -
      This method receives a nested String list which +
    + + + +
      +
    • +

      withLabels

      +
      Nda<V> withLabels(java.util.List<java.util.List<java.lang.Object>> labels)
      +
      This method receives a nested String list which ought to contain a label for the index of this nd-array. The index for a single element of this nd-array would be an array of numbers as long as the rank where every number is @@ -1319,18 +1587,21 @@

      withLabels

      dim 0 : ["A", "B"]
      dim 1 : ["1", "2", "3"]

      -
      -
      Parameters:
      +
      +
      Parameters:
      labels - A nested String list containing labels for indexes of the nd-array dimensions.
      -
      Returns:
      +
      Returns:
      This nd-array (method chaining).
      -
    • -
    • -
      -

      withLabels

      -
      Nda<V> withLabels(Map<Object,List<Object>> labels)
      +
    + + + +
      +
    • +

      withLabels

      +
      Nda<V> withLabels(java.util.Map<java.lang.Object,java.util.List<java.lang.Object>> labels)
This method provides the ability to label not only the indices of the shape of this nd-array, but also the dimension of the shape.
@@ -1343,125 +1614,149 @@

      withLabels

      "dim 1" : ["1", "2", "3"]
      ]

      -
      -
      Parameters:
      +
      +
      Parameters:
      labels - A map in which the keys are dimension labels and the values are lists of index labels for the dimension.
      -
      Returns:
      +
      Returns:
      This nd-array (method chaining).
      -
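A sketch of the map-based variant in plain Java (the Groovy map literal from the docs becomes an explicit java.util.Map; names and values are illustrative):

     var labels = new java.util.LinkedHashMap<Object, java.util.List<Object>>();
     labels.put( "dim 0", java.util.List.<Object>of( "A", "B" ) );
     labels.put( "dim 1", java.util.List.<Object>of( "1", "2", "3" ) );
     var labeled = Nda.of( 1, 2, 3, 4, 5, 6 ).reshape( 2, 3 ).withLabels( labels );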
    • -
    • -
      -

      stream

      -
      default Stream<V> stream()
      -
      -
      Returns:
      -
      A Stream of the items in this Nda.
      +
    + + + +
      +
    • +

      stream

      +
      default java.util.stream.Stream<V> stream()
      +
      +
      Returns:
      +
      A Stream of the items in this Nda.
      -
    • -
    • -
      -

      filter

      -
      default Stream<V> filter(Predicate<V> predicate)
      +
    + + + +
      +
    • +

      filter

      +
      default java.util.stream.Stream<V> filter(java.util.function.Predicate<V> predicate)
      A convenience method for stream().filter( predicate ).
      -
      -
      Parameters:
      -
      predicate - The predicate to filter the items of this Nda.
      -
      Returns:
      -
      A Stream of the items in this Nda which match the predicate.
      -
      - -
    • -
    • -
      -

      flatMap

      -
      default <R> Stream<R> flatMap(Function<V,Stream<R>> mapper)
      +
      +
      Parameters:
      +
      predicate - The predicate to filter the items of this Nda.
      +
      Returns:
      +
      A Stream of the items in this Nda which match the predicate.
      +
      +
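A sketch of filter as a shorthand for stream().filter(predicate); the values and the final collection step are illustrative:

     var nda = Nda.of( 1, 5, 10, 20 );
     var big = nda.filter( i -> i > 5 ).collect( java.util.stream.Collectors.toList() ); // [10, 20]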
    • +
    + + + +
      +
    • +

      flatMap

      +
      default <R> java.util.stream.Stream<R> flatMap(java.util.function.Function<V,java.util.stream.Stream<R>> mapper)
      A convenience method for nda.stream().flatMap( mapper ), - which turns this Nda into a Stream of its items.
      + which turns this Nda into a Stream of its items.
      Here an example of how to use this method :
      
           var nda = Nda.of( -2, -1, 0, 1, 2 );
           var list = nda.flatMap( i -> Stream.of( i * 2, i * 3 ) ).toList();
           // list = [-4, -6, -2, -3, 0, 0, 2, 3, 4, 6, 6, 9]
         
      -
      -
      Parameters:
      -
      mapper - The mapper to map the items of this Nda.
      -
      Returns:
      -
      A Stream of the items in this Nda which match the predicate.
      -
      - -
    • -
    • -
      -

      shaped

      -
      static <T> Collector<T,?,Nda<T>> shaped(int... shape)
      +
      +
      Parameters:
      +
      mapper - The mapper to map the items of this Nda.
      +
      Returns:
      +
      A Stream of the items in this Nda which match the predicate.
      +
      +
    • +
    + + + +
      +
    • +

      shaped

      +
      static <T> java.util.stream.Collector<T,?,Nda<T>> shaped(int... shape)
      Returns a Collector that accumulates the input elements into a - new Nda with the specified shape.
      + new Nda with the specified shape.
      Usage example :
      
           var nda = Stream.of( 1, 2, 3, 4, 5, 6 )
                             .collect( Nda.shaped( 2, 3 ) );
        
      -
      -
      Type Parameters:
      +
      +
      Type Parameters:
      T - the type of the input elements
      -
      Parameters:
      +
      Parameters:
      shape - The shape of the nd-array to be returned.
      -
      Returns:
      +
      Returns:
      a Collector which collects all the input elements into a - Nda, in encounter order.
      + Nda, in encounter order.
      -
    • -
    • -
      -

      every

      -
      default boolean every(Predicate<V> predicate)
      +
    + + + +
      +
    • +

      every

      +
      default boolean every(java.util.function.Predicate<V> predicate)
      Iterates over every element of this nd-array, and checks whether all elements are true according to the provided lambda.
      -
      -
      Parameters:
      +
      +
      Parameters:
      predicate - The lambda to check each element against.
      -
      Returns:
      +
      Returns:
      true if every item in the nd-array matches the predicate, false otherwise.
      -
    • -
    • -
      -

      any

      -
      default boolean any(Predicate<V> predicate)
      +
    + + + +
      +
    • +

      any

      +
      default boolean any(java.util.function.Predicate<V> predicate)
      Iterates over every element of this nd-array, and checks whether any element matches the provided lambda.
      -
      -
      Parameters:
      +
      +
      Parameters:
      predicate - The lambda to check each element against.
      -
      Returns:
      +
      Returns:
      true if any item in the nd-array matches the predicate, false otherwise.
      -
    • -
    • -
      -

      none

      -
      default boolean none(Predicate<V> predicate)
      +
    + + + +
      +
    • +

      none

      +
      default boolean none(java.util.function.Predicate<V> predicate)
      Iterates over every element of this nd-array, and checks whether none of the elements match the provided lambda.
      -
      -
      Parameters:
      +
      +
      Parameters:
      predicate - The lambda to check each element against.
      -
      Returns:
      +
      Returns:
      true if none of the items in the nd-array match the predicate, false otherwise.
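      A brief sketch of the three predicate checks above (every, any, none), assuming an integer nd-array created via Nda.of:

     var nda = Nda.of( 1, 2, 3, 4 );
     boolean allPositive  = nda.every( i -> i > 0 );       // true
     boolean hasEven      = nda.any(   i -> i % 2 == 0 );  // true
     boolean noneNegative = nda.none(  i -> i < 0 );       // true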
      -
    • -
    • -
      -

      count

      -
      default int count(Predicate<V> predicate)
      +
    + + + +
      +
    • +

      count

      +
      default int count(java.util.function.Predicate<V> predicate)
      Iterates over every element of this nd-array, and counts the number of times the provided lambda matches the items of this array.

      @@ -1472,201 +1767,243 @@

      count

      var count = nda.count( i -> i > 5 ); System.out.println( count ); // prints 5
      -
      -
      Parameters:
      +
      +
      Parameters:
      predicate - The lambda to check each element against.
      -
      Returns:
      +
      Returns:
      The number of items in the nd-array that match the predicate.
      -
    • -
    • -
      -

      minItem

      -
      default V minItem(Comparator<V> comparator)
      +
    + + + +
      +
    • +

      minItem

      +
      default V minItem(java.util.Comparator<V> comparator)
      Returns the minimum item of this nd-array according to the provided - Comparator. This is a special case of a reduction.
      -
      -
      Parameters:
      -
      comparator - The Comparator to use to determine the order of the items in the nd-array.
      -
      Returns:
      + Comparator. This is a special case of a reduction. +
      +
      Parameters:
      +
      comparator - The Comparator to use to determine the order of the items in the nd-array.
      +
      Returns:
      The minimum value in the nd-array.
      -
    • -
    • -
      -

      maxItem

      -
      default V maxItem(Comparator<V> comparator)
      +
    + + + +
      +
    • +

      maxItem

      +
      default V maxItem(java.util.Comparator<V> comparator)
      Returns the maximum item of this nd-array according to the provided - Comparator. This is a special case of a reduction.
      -
      -
      Parameters:
      -
      comparator - The Comparator to use to determine the order of the items in the nd-array.
      -
      Returns:
      + Comparator. This is a special case of a reduction. +
      +
      Parameters:
      +
      comparator - The Comparator to use to determine the order of the items in the nd-array.
      +
      Returns:
      The maximum value in the nd-array.
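      A small sketch for minItem and maxItem using a natural-order Comparator (the values are illustrative):

     var nda = Nda.of( 4, 2, 7, 1 );
     Integer smallest = nda.minItem( java.util.Comparator.naturalOrder() ); // 1
     Integer largest  = nda.maxItem( java.util.Comparator.naturalOrder() ); // 7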
      -
    • -
    • -
      -

      getRawData

      -
      Object getRawData()
      +
    + + + +
      +
    • +

      getRawData

      +
      java.lang.Object getRawData()
      This returns an unprocessed version of the underlying data of this nd-array. If this nd-array is outsourced (stored on a device), then the data will be loaded into an array and returned by this method. Do not expect the returned array to be actually stored within the nd-array itself! Contrary to the getItems() method, this one will return the data in an unbiased form, where for example a virtual (see Tensor.isVirtual()) nd-array will have this method return an array of length 1.
      -
      -
      Returns:
      +
      +
      Returns:
      An unbiased copy of the underlying data of this nd-array.
      -
    • -
    • -
      -

      getDataAt

      -
      V getDataAt(int i)
      +
    + + + +
      +
    • +

      getDataAt

      +
      V getDataAt(int i)
      Use this to access elements of the underlying data array without any index transformation applied to it. This is usually similar to the item(int) method, however for nd-arrays which are sliced or permuted views of the data of another nd-array, this method will always provide unbiased access to the raw data...
      -
      -
      Parameters:
      +
      +
      Parameters:
      i - The position of the targeted item within the raw data array of an NDArray implementation.
      -
      Returns:
      +
      Returns:
      The found object sitting at the specified index position.
      -
    • -
    • -
      -

      getItems

      -
      default List<V> getItems()
      -
      A more verbose version of the items() method (best used by JVM languages with property support).
      -
      -
      Returns:
      +
    + + + +
      +
    • +

      getItems

      +
      default java.util.List<V> getItems()
      +
      A more verbose version of the items() method (best used by JVM languages with property support).
      +
      +
      Returns:
      A list of the items in this nd-array.
      -
    • -
    • -
      -

      items

      -
      default List<V> items()
      -
      A more concise version of the getItems() method.
      -
      -
      Returns:
      +
    + + + +
      +
    • +

      items

      +
      default java.util.List<V> items()
      +
      A more concise version of the getItems() method.
      +
      +
      Returns:
      A list of the items in this nd-array.
      -
    • -
    • -
      -

      getRawItems

      -
      Object getRawItems()
      -
      -
      Returns:
      +
    + + + +
      +
    • +

      getRawItems

      +
      java.lang.Object getRawItems()
      +
      +
      Returns:
      The items of this nd-array as a (if possible) primitive array.
      -
    • -
    • -
      -

      item

      -
      default V item(int i)
      +
    + + + +
      +
    • +

      item

      +
      default V item(int i)
      The following method returns a single item within this nd-array targeted by the provided integer index.
      -
      -
      Parameters:
      +
      +
      Parameters:
      i - The scalar index of the item which should be returned by the method.
      -
      Returns:
      +
      Returns:
      The item found at the targeted index.
      -
    • -
    • -
      -

      item

      -
      default V item(int... indices)
      +
    + + + +
      +
    • +

      item

      +
      default V item(int... indices)
      This method returns a raw value item within this nd-array targeted by an index array which is expected to hold an index for every dimension of the shape of this nd-array. So the provided array must have the same length as the rank of this nd-array!
      -
      -
      Parameters:
      +
      +
      Parameters:
      indices - The index array which targets a single value item within this nd-array.
      -
      Returns:
      +
      Returns:
      The found raw value item targeted by the provided index array.
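      For example, assuming row-major index order as suggested by the shape-based indexing described above:

     var t = Nda.of( 1, 2, 3, 4, 5, 6 ).reshape( 2, 3 );
     Integer value = t.item( 1, 2 ); // last row, last column -> 6 (assuming row-major layout)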
      -
    • -
    • -
      -

      item

      -
      default V item()
      -
      Equivalent to the #item(0) and getItem().
      -
      -
      Returns:
      +
    + + + +
      +
    • +

      item

      +
      default V item()
      +
      Equivalent to the #item(0) and getItem().
      +
      +
      Returns:
      The first item of this nd-array.
      -
    • -
    • -
      -

      getItem

      -
      default V getItem()
      -
      Equivalent to the #item(0) and item().
      -
      -
      Returns:
      +
    + + + +
      +
    • +

      getItem

      +
      default V getItem()
      +
      Equivalent to the #item(0) and item().
      +
      +
      Returns:
      The first item of this nd-array.
      -
    • -
    • -
      -

      getItemsAs

      -
      default <A> A getItemsAs(Class<A> arrayTypeClass)
      +
    + + + +
      +
    • +

      getItemsAs

      +
      default <A> A getItemsAs(java.lang.Class<A> arrayTypeClass)
      Use this to get the items of this nd-array as a primitive array of the specified type.
      -
      -
      Type Parameters:
      +
      +
      Type Parameters:
      A - The type of the array to return.
      -
      Parameters:
      +
      Parameters:
      arrayTypeClass - The class of the array type to return.
      -
      Returns:
      +
      Returns:
      The items of this nd-array as a primitive array of the specified type.
      -
    • -
    • -
      -

      getDataAs

      -
      default <A> A getDataAs(Class<A> arrayTypeClass)
      -
      Use this to get the items of the underlying Data buffer +
    + + + +
      +
    • +

      getDataAs

      +
      default <A> A getDataAs(java.lang.Class<A> arrayTypeClass)
      +
      Use this to get the items of the underlying Data buffer of this nd-array as a primitive array of the specified type. Note that the length of the returned array may be different from the size of this nd-array. This is the case if this nd-array is a slice of another larger nd-array.
      -
      -
      Type Parameters:
      +
      +
      Type Parameters:
      A - The type of the array to return.
      -
      Parameters:
      +
      Parameters:
      arrayTypeClass - The class of the array type to return.
      -
      Returns:
      +
      Returns:
      The items of this nd-array as a primitive array of the specified type.
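      A short sketch contrasting the two accessors, assuming int[] is a supported target array type:

     var nda = Nda.of( 1, 2, 3, 4 );
     int[] items = nda.getItemsAs( int[].class ); // items of this view
     int[] data  = nda.getDataAs( int[].class );  // items of the underlying data buffer (may differ for slices)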
      -
    • -
    • -
      -

      slice

      -
      AxisOrGet<V> slice()
      -
      This method returns a SliceBuilder instance exposing a simple builder API +
    + + + +
      +
    • +

      slice

      +
      AxisOrGet<V> slice()
      +
      This method returns a SliceBuilder instance exposing a simple builder API which enables the configuration of a slice of the current nd-array via method chaining.
      The following code snippet slices a 3-dimensional nd-array into a nd-array of shape (2x1x3)
      
      @@ -1676,187 +2013,223 @@ 

      slice

      .axis().from(0).to(2) .get();
      -
      -
      Returns:
      -
      An instance of the SliceBuilder class exposing a readable builder API for creating slices.
      -
      - -
    • -
    • -
      -

      concatAt

      -
      Nda<V> concatAt(int axis, - Nda<V> other, - Nda<V>... ndArrays)
      +
      +
      Returns:
      +
      An instance of the SliceBuilder class exposing a readable builder API for creating slices.
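      A hypothetical sketch of the builder chain; the exact axis-selection call (axis(int)) is an assumption, since only a fragment of the builder API is shown here:

     var nda = Nda.of( 1, 2, 3, 4, 5, 6 ).reshape( 2, 3 );
     var slice = nda.slice()
                      .axis(0).from(0).to(1)   // assumed: select axis 0 and a range on it
                      .get();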
      +
      +
    • +
    + + + +
      +
    • +

      concatAt

      +
      Nda<V> concatAt(int axis,
      +                Nda<V> other,
      +                Nda<V>... ndArrays)
      This method concatenates the provided nd-arrays together with this nd-array along a specified axis. The provided nd-arrays must have the same shape and data type as the current nd-array, except for the specified axis.
      -
      -
      Parameters:
      +
      +
      Parameters:
      axis - The axis along which the provided nd-arrays should be concatenated. The axis must be within the range of the rank of the current nd-array.
      other - The other nd-arrays which should be concatenated with this nd-array.
      ndArrays - The non-null, non-empty nd-arrays which should be concatenated together with this and the other nd-array. The nd-arrays all must have the same shape as this nd-array, except for the specified axis. Also, it must have the same data type as the current nd-array.
      -
      Returns:
      +
      Returns:
      A new nd-array which is the concatenation of the current nd-array and the provided nd-arrays.
      -
    • -
    • -
      -

      concatAt

      -
      Nda<V> concatAt(int axis, - Nda<V> other)
      +
    + + + +
      +
    • +

      concatAt

      +
      Nda<V> concatAt(int axis,
      +                Nda<V> other)
      This method concatenates the provided nd-array together with this nd-array along a specified axis. The provided nd-array must have the same shape and data type as this nd-array, except for the specified axis.
      -
      -
      Parameters:
      +
      +
      Parameters:
      axis - The axis along which the provided nd-arrays should be concatenated. The axis must be within the range of the rank of the current nd-array.
      other - The other nd-arrays which should be concatenated with this nd-array.
      -
      Returns:
      +
      Returns:
      A new nd-array which is the concatenation of the current nd-array and the provided nd-arrays.
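      For example, assuming Nda.of produces 1-dimensional nd-arrays as in the examples above:

     var a = Nda.of( 1, 2, 3 );
     var b = Nda.of( 4, 5, 6 );
     var joined = a.concatAt( 0, b ); // items 1, 2, 3, 4, 5, 6 along axis 0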
      -
    • -
    • -
      -

      getAt

      -
      Nda<V> getAt(int... indices)
      +
    + + + +
      +
    • +

      getAt

      +
      Nda<V> getAt(int... indices)
      The following method enables access to specific scalar elements within the nd-array. The method name also translates to the subscription operator in Groovy.
      -
      -
      Parameters:
      +
      +
      Parameters:
      indices - The index array of the element which should be returned.
      -
      Returns:
      +
      Returns:
      An element located at the provided index.
      -
    • -
    • -
      -

      getAt

      -
      Nda<V> getAt(Number i)
      +
    + + + +
      +
    • +

      getAt

      +
      Nda<V> getAt(java.lang.Number i)
      This getter method creates and returns a slice of the original nd-array. The returned slice is a scalar nd-array wrapping a single value element which is being targeted by the provided integer index.
      -
      -
      Parameters:
      +
      +
      Parameters:
      i - The index of the value item which should be returned as a nd-array instance.
      -
      Returns:
      +
      Returns:
      A nd-array holding a single value element which is internally still residing in the original nd-array.
      -
    • -
    • -
      -

      get

      -
      Nda<V> get(int... indices)
      +
    + + + +
      +
    • +

      get

      +
      Nda<V> get(int... indices)
      The following method enables access to specific scalar elements within the nd-array. The method name also translates to the subscription operator in Groovy.
      -
      -
      Parameters:
      +
      +
      Parameters:
      indices - The index array of the element which should be returned.
      -
      Returns:
      +
      Returns:
      An element located at the provided index.
      -
    • -
    • -
      -

      getAt

      -
      Nda<V> getAt(Object... args)
      +
    + + + +
      +
    • +

      getAt

      +
      Nda<V> getAt(java.lang.Object... args)
      The following method enables the creation of nd-array slices which access the same underlying data (possibly from a different view). The method name also translates to the subscription operator in Groovy.
      -
      -
      Parameters:
      +
      +
      Parameters:
      args - An arbitrary number of arguments which can be used for slicing.
      -
      Returns:
      +
      Returns:
      A slice nd-array created based on the passed keys.
      -
    • -
    • -
      -

      get

      -
      Nda<V> get(Object... args)
      +
    + + + +
      +
    • +

      get

      +
      Nda<V> get(java.lang.Object... args)
      The following method enables the creation of nd-array slices which access the same underlying data (possibly from a different view). The method name also translates to the subscription operator in Groovy.
      -
      -
      Parameters:
      +
      +
      Parameters:
      args - An arbitrary number of arguments which can be used for slicing.
      -
      Returns:
      +
      Returns:
      A slice nd-array created based on the passed keys.
      -
    • -
    • -
      -

      getAt

      -
      Nda<V> getAt(int i)
      +
    + + + +
      +
    • +

      getAt

      +
      Nda<V> getAt(int i)
      This getter method creates and returns a slice of the original nd-array. The returned slice is a scalar nd-array wrapping a single value element which is being targeted by the provided integer index.
      -
      -
      Parameters:
      +
      +
      Parameters:
      i - The index of the value item which should be returned as a nd-array instance.
      -
      Returns:
      +
      Returns:
      A nd-array holding a single value element which is internally still residing in the original nd-array.
      -
    • -
    • -
      -

      get

      -
      Nda<V> get(int i)
      +
    + + + +
      +
    • +

      get

      +
      Nda<V> get(int i)
      This getter method creates and returns a slice of the original nd-array. The returned slice is a scalar nd-array wrapping a single value element which is being targeted by the provided integer index.
      -
      -
      Parameters:
      +
      +
      Parameters:
      i - The index of the value item which should be returned as a nd-array instance.
      -
      Returns:
      +
      Returns:
      A nd-array holding a single value element which is internally still residing in the original nd-array.
      -
    • -
    • -
      -

      get

      -
      Nda<V> get(Number i)
      +
    + + + +
      +
    • +

      get

      +
      Nda<V> get(java.lang.Number i)
      This getter method creates and returns a slice of the original nd-array. The returned slice is a scalar nd-array wrapping a single value element which is being targeted by the provided integer index.
      -
      -
      Parameters:
      +
      +
      Parameters:
      i - The index of the value item which should be returned as a nd-array instance.
      -
      Returns:
      +
      Returns:
      A nd-array holding a single value element which is internally still residing in the original nd-array.
      -
    • -
    • -
      -

      get

      -
      Nda<V> get(Object key)
      +
    + + + +
      +
    • +

      get

      +
      Nda<V> get(java.lang.Object key)
      This method enables nd-array slicing! It takes a key of various types and configures a slice nd-array which shares the same underlying data as the original nd-array.
      -
      -
      Parameters:
      +
      +
      Parameters:
      key - This object might be a wide range of objects including maps, lists or arrays...
      -
      Returns:
      +
      Returns:
      A slice nd-array or scalar value.
      -
    • -
    • -
      -

      getAt

      -
      Nda<V> getAt(Map<?,Integer> rangToSteps)
      +
    + + + +
      +
    • +

      getAt

      +
      Nda<V> getAt(java.util.Map<?,java.lang.Integer> rangToSteps)
      This method is most useful when used in Groovy where defining maps is done through square brackets, making it possible to slice nd-arrays like so:
      @@ -1868,35 +2241,41 @@

      getAt

      i... start indexAlias.
      j... end indexAlias. (inclusive!)
      k... step size.
      -
      -
      Parameters:
      +
      +
      Parameters:
      rangToSteps - A map where the keys define where axes should be sliced and values which define the steps for the specific axis.
      -
      Returns:
      +
      Returns:
      A nd-array slice with an offset based on the provided map keys and steps based on the provided map values.
      -
    • -
    • -
      -

      getAt

      -
      Nda<V> getAt(List<?> key)
      +
    + + + +
      +
    • +

      getAt

      +
      Nda<V> getAt(java.util.List<?> key)
      This method enables nd-array slicing! It takes a key of various types and configures a slice nd-array which shares the same underlying data as the original nd-array.
      -
      -
      Parameters:
      +
      +
      Parameters:
      key - This object might be a wide range of objects including maps, lists or arrays...
      -
      Returns:
      +
      Returns:
      A slice nd-array or scalar value.
      -
    • -
    • -
      -

      mapTo

      -
      <T> Nda<T> mapTo(Class<T> typeClass, - Function<V,T> mapper)
      +
    + + + +
      +
    • +

      mapTo

      +
      <T> Nda<T> mapTo(java.lang.Class<T> typeClass,
      +                 java.util.function.Function<V,T> mapper)

      This is a convenience method for mapping a nd-array to a nd-array of new type based on a provided target item type and mapping lambda.

      @@ -1914,21 +2293,24 @@

      mapTo

      where ever it may reside back to the JVM, execute the mapping lambda, and then transfer the result back to the original location.

      -
      -
      Type Parameters:
      +
      +
      Type Parameters:
      T - The type parameter of the items of the returned nd-array.
      -
      Parameters:
      +
      Parameters:
      typeClass - The class of the item type to which the items of this nd-array should be mapped.
      mapper - The lambda which maps the items of this nd-array to a new one.
      -
      Returns:
      +
      Returns:
      A new nd-array of type T.
      -
    • -
    • -
      -

      map

      -
      Nda<V> map(Function<V,V> mapper)
      +
    + + + +
      +
    • +

      map

      +
      Nda<V> map(java.util.function.Function<V,V> mapper)

      This method is a convenience method for mapping the items of this nd-array to another nd-array of the same type based on the provided lambda function, which will be applied

      @@ -1944,44 +2326,53 @@

      map

      This is a problem if this nd-array lives somewhere other than the JVM. So, therefore, this method will temporally transfer this nd-array from where ever it may reside back to the JVM, execute the mapping lambda, and then transfer the result back to the original location.
      -
      -
      Parameters:
      +
      +
      Parameters:
      mapper - The lambda which maps the items of this nd-array to a new one.
      -
      Returns:
      +
      Returns:
      A new nd-array of type V.
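      A small sketch of map and the mapTo variant documented above:

     var nda = Nda.of( 1, 2, 3 );
     var doubled = nda.map( i -> i * 2 );                   // items 2, 4, 6 (same item type)
     var asText  = nda.mapTo( String.class, i -> "n" + i ); // items "n1", "n2", "n3"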
      -
    • -
    • -
      -

      deepCopy

      -
      Nda<V> deepCopy()
      +
    + + + +
      +
    • +

      deepCopy

      +
      Nda<V> deepCopy()
      This method creates and returns a new nd-array instance which is not only a copy of the configuration of this nd-array but also a copy of the underlying data array.
      (Note: the underlying nd-array will not be attached to any kind of computation graph)
      -
      -
      Returns:
      +
      +
      Returns:
      A new nd-array instance which is a deep copy of this nd-array.
      -
    • -
    • -
      -

      shallowCopy

      -
      Nda<V> shallowCopy()
      +
    + + + +
      +
    • +

      shallowCopy

      +
      Nda<V> shallowCopy()
      This creates a copy where the underlying data is still the same.
      (Note: the underlying nd-array will not be attached to any kind of computation graph)
      -
      -
      Returns:
      +
      +
      Returns:
      A shallow copy where the underlying data is shared with this nd-array.
      -
    • -
    • -
      -

      getMut

      -
      MutateNda<V> getMut()
      +
    + + + +
      +
    • +

      getMut

      +
      MutateNda<V> getMut()
      This method exposes an API for mutating the state of this tensor. The usage of methods exposed by this API is generally discouraged because the exposed state can easily lead to broken tensors and exceptional situations!
      @@ -1991,16 +2382,19 @@

      getMut

      performance is critical!
      (Like in custom backend extensions for example)
      -
      -
      Returns:
      +
      +
      Returns:
      The unsafe API exposes methods for mutating the state of the tensor.
      -
    • -
    • -
      -

      mut

      -
      default MutateNda<V> mut()
      +
    + + + +
      +
    • +

      mut

      +
      default MutateNda<V> mut()
      This method exposes an API for mutating the state of this tensor. The usage of methods exposed by this API is generally discouraged because the exposed state can easily lead to broken tensors and exceptional situations!
      @@ -2010,16 +2404,19 @@

      mut

      performance is critical!
      (Like custom backend extensions for example)
      -
      -
      Returns:
      +
      +
      Returns:
      The unsafe API exposes methods for mutating the state of the tensor.
      -
    • -
    • -
      -

      reshape

      -
      Nda<V> reshape(int... shape)
      +
    + + + +
      +
    • +

      reshape

      +
      Nda<V> reshape(int... shape)
      Returns a nd-array with the same data and number of elements as this nd-array, but with the specified shape. When possible, the returned nd-array will be a view of this nd-array.

      @@ -2029,75 +2426,90 @@

      reshape

      Keep in mind that the new shape must have the same number of elements as the original shape.

      This operation supports autograd.
      -
      -
      Parameters:
      +
      +
      Parameters:
      shape - The new shape of the returned nd-array.
      -
      Returns:
      +
      Returns:
      A new nd-array instance with the same underlying data (~shallow copy) but with a different shape.
      -
    • -
    • -
      -

      permute

      -
      Nda<V> permute(int... dims)
      +
    + + + +
      +
    • +

      permute

      +
      Nda<V> permute(int... dims)
      Returns a view of the original tensor input with its dimensions permuted.
      Consider a 3-dimensional tensor x with shape (2×3×5), then calling x.permute(1, 0, 2) will return a 3-dimensional tensor of shape (3×2×5).
      -
      -
      Parameters:
      +
      +
      Parameters:
      dims - The desired ordering of dimensions
      -
      Returns:
      +
      Returns:
      A new nd-array instance with the same underlying data (~shallow copy) but with a different shape.
      -
    • -
    • -
      -

      transpose

      -
      Nda<V> transpose(int dim1, - int dim2)
      +
    + + + +
      +
    • +

      transpose

      +
      Nda<V> transpose(int dim1,
      +                 int dim2)
      Returns a view of the original tensor input in which the targeted axes are swapped / transposed.
      -
      -
      Parameters:
      +
      +
      Parameters:
      dim1 - The first dimension to be swapped.
      dim2 - The second dimension to be swapped.
      -
      Returns:
      +
      Returns:
      A new nd-array instance with the same underlying data (~shallow copy) but with a different shape.
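      A combined sketch of the three shape operations above:

     var t = Nda.of( 1, 2, 3, 4, 5, 6 ).reshape( 2, 3 ); // shape (2x3)
     var p = t.permute( 1, 0 );                           // view with shape (3x2)
     var s = t.transpose( 0, 1 );                         // same axis swap expressed as a transpose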
      -
    • -
    • -
      -

      at

      -
      Nda.Item<V> at(int... indices)
      -
      This method exposes the Nda.Item API which allows you to get or set +
    + + + +
      +
    • +

      at

      +
      Nda.Item<V> at(int... indices)
      +
      This method exposes the Nda.Item API which allows you to get or set individual items within this nd-array targeted by an array of provided indices.
      -
      -
      Parameters:
      +
      +
      Parameters:
      indices - An array of indices targeting a particular position in this nd-array...
      -
      Returns:
      +
      Returns:
      An object which allows you to get or set individual items within this nd-array.
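      A hypothetical sketch; the get() and set(...) accessors on Nda.Item are assumptions based on the description above:

     var t = Nda.of( 1, 2, 3, 4, 5, 6 ).reshape( 2, 3 );
     var item  = t.at( 1, 2 );  // Nda.Item targeting row 1, column 2
     var value = item.get();    // assumed read accessor
     item.set( 42 );            // assumed write accessor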
      -
    • -
    • -
      -

      toString

      -
      default String toString(NDPrintSettings config)
      -
      Use this to turn this nd-array into a String instance based on the provided - NDPrintSettings instance, which allows you to configure things +
    + + + +
      +
    • +

      toString

      +
      default java.lang.String toString(NDPrintSettings config)
      +
      Use this to turn this nd-array into a String instance based on the provided NDPrintSettings instance, which allows you to configure things like the number of chars per entry, delimiters, the number of items per line, etc.
      -
    • -
    • -
      -

      toString

      -
      default String toString(Consumer<NDPrintSettings> config)
      +
    + + + +
      +
    • +

      toString

      +
      default java.lang.String toString(java.util.function.Consumer<NDPrintSettings> config)
      This allows you to provide a lambda which configures how this nd-array should be converted to String instances. The provided Consumer will receive a NDPrintSettings instance which allows you to change various settings with the help of method chaining.
      Here is an example:
      
      @@ -2109,36 +2521,98 @@ 

      toString

      .setCellSize(15) );
      -
      -
      Parameters:
      -
      config - A consumer of the NDPrintSettings ready to be configured.
      -
      Returns:
      -
      The String representation of this nd-array.
      -
      - -
    • -
    • -
      -

      toString

      -
      String toString()
      -
      This method returns a String representation of this nd-array. +
      +
      Parameters:
      +
      config - A consumer of the NDPrintSettings ready to be configured.
      +
      Returns:
      +
      The String representation of this nd-array.
      +
      +
    • +
    + + + +
      +
    • +

      toString

      +
      java.lang.String toString()
      +
      This method returns a String representation of this nd-array. The default settings are used for the conversion.
      -
      -
      Overrides:
      -
      toString in class Object
      -
      Returns:
      -
      The String representation of this nd-array.
      +
      +
      Overrides:
      +
      toString in class java.lang.Object
      +
      Returns:
      +
      The String representation of this nd-array.
      -
diff --git a/docs/jdocs/neureka/Neureka.Settings.AutoGrad.html b/docs/jdocs/neureka/Neureka.Settings.AutoGrad.html
index beb662a57..c59937571 100644
--- a/docs/jdocs/neureka/Neureka.Settings.AutoGrad.html
+++ b/docs/jdocs/neureka/Neureka.Settings.AutoGrad.html
@@ -1,225 +1,290 @@
-Neureka.Settings.AutoGrad (neureka 1.0.0 API)
+Neureka.Settings.AutoGrad (neureka 1.0.1 API)
    - -
    -
    -
    Package neureka
    -

    Class Neureka.Settings.AutoGrad

    -
    -
    java.lang.Object -
    neureka.Neureka.Settings.AutoGrad
    +
    neureka
    +

    Class Neureka.Settings.AutoGrad

    -
    -
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.Neureka.Settings.AutoGrad
      • +
      +
    • +
    +
    +
      +
    • +
      Enclosing class:
      -
      Neureka.Settings
      +
      Neureka.Settings

      -
      public class Neureka.Settings.AutoGrad -extends Object
      +
      +
      public class Neureka.Settings.AutoGrad
      +extends java.lang.Object
      This class contains settings which are related to the automatic differentiation of tensors.
      -
    -
    -
      - -
    • -
      -

      Constructor Summary

      -
      Constructors
      -
      -
      Constructor
      -
      Description
      - -
       
      +
    • +
    - +
    + - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        AutoGrad

        -
        public AutoGrad()
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            AutoGrad

            +
            public AutoGrad()
          -
    • +
    -
  • -
    -

    Method Details

    -
      -
    • -
      -

      isPreventingInlineOperations

      -
      public boolean isPreventingInlineOperations()
      +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          isPreventingInlineOperations

          +
          public boolean isPreventingInlineOperations()
          Inline operations are operations where the data of a tensor passed into an operation is being modified. Usually the result of an operation is stored inside a new tensor. Use this flag to detect if an operation is an inline operation.
          -
    • -
    • -
      -

      setIsPreventingInlineOperations

      -
      public void setIsPreventingInlineOperations(boolean prevent)
      +
    + + + +
      +
    • +

      setIsPreventingInlineOperations

      +
      public void setIsPreventingInlineOperations(boolean prevent)
      Inline operations are operations where the data of a tensor passed into an operation is being modified. Usually the result of an operation is stored inside a new tensor. Use this flag to detect if an operation is an inline operation.
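      A usage sketch; the accessor chain Neureka.get().settings().autograd() is an assumption about how this settings object is reached:

     Neureka.get().settings().autograd().setIsPreventingInlineOperations( true );  // assumed accessor chain
     boolean preventing = Neureka.get().settings().autograd().isPreventingInlineOperations();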
      -
  • -
  • -
    -

    isRetainingPendingErrorForJITProp

    -
    public boolean isRetainingPendingErrorForJITProp()
    + + + + +
      +
    • +

      isRetainingPendingErrorForJITProp

      +
      public boolean isRetainingPendingErrorForJITProp()
      This flag enables an optimization technique which only propagates error values to gradients if needed by a tensor (the tensor is used again) and otherwise accumulate them at divergent differentiation paths within the computation graph.
      @@ -228,12 +293,15 @@

      isRetainingPendingErrorForJITProp

      This technique however uses more memory but will improve performance for some networks substantially. The technique is termed JIT-Propagation.
      -
  • -
  • -
    -

    setIsRetainingPendingErrorForJITProp

    -
    public void setIsRetainingPendingErrorForJITProp(boolean retain)
    + + + + +
      +
    • +

      setIsRetainingPendingErrorForJITProp

      +
      public void setIsRetainingPendingErrorForJITProp(boolean retain)
      This flag enables an optimization technique which only propagates error values to gradients if needed by a tensor (the tensor is used again) and otherwise accumulate them at divergent differentiation paths within the computation graph.
      @@ -242,40 +310,49 @@

      setIsRetainingPendingErrorForJITProp

      This technique however uses more memory but will improve performance for some networks substantially. The technique is termed JIT-Propagation.
      -
  • -
  • -
    -

    isApplyingGradientWhenTensorIsUsed

    -
    public boolean isApplyingGradientWhenTensorIsUsed()
    + + + + +
      +
    • +

      isApplyingGradientWhenTensorIsUsed

      +
      public boolean isApplyingGradientWhenTensorIsUsed()
      Gradients will automatically be applied (or JITed) to tensors as soon as - they are being used for calculation (GraphNode instantiation). + they are being used for calculation (GraphNode instantiation). This feature works well with JIT-Propagation.
      -
  • -
  • -
    -

    setIsApplyingGradientWhenTensorIsUsed

    -
    public void setIsApplyingGradientWhenTensorIsUsed(boolean apply)
    + + + + +
      +
    • +

      setIsApplyingGradientWhenTensorIsUsed

      +
      public void setIsApplyingGradientWhenTensorIsUsed(boolean apply)
      Gradients will automatically be applied (or JITed) to tensors as soon as - they are being used for calculation (GraphNode instantiation). + they are being used for calculation (GraphNode instantiation). This feature works well with JIT-Propagation.
      -
      -
      Parameters:
      +
      +
      Parameters:
      apply - The flag determining if gradients should be applied when their tensors are used.
      -
  • -
  • -
    -

    isApplyingGradientWhenRequested

    -
    public boolean isApplyingGradientWhenRequested()
    + + + + +
      +
    • +

      isApplyingGradientWhenRequested

      +
      public boolean isApplyingGradientWhenRequested()
      Gradients will only be applied if requested. Usually this happens immediately, however if the flag 'applyGradientWhenTensorIsUsed' is set to true, then the tensor will only be updated by its gradient if requested AND the tensor is used for calculation! - (GraphNode instantiation).

      + (GraphNode instantiation).

      This flag works alongside two other autograd features which can be enabled by flipping the feature flags
      'isApplyingGradientWhenRequested' and 'isApplyingGradientWhenTensorIsUsed'
      @@ -286,44 +363,109 @@

      isApplyingGradientWhenRequested

      Setting both flags to true will inhibit the effect of the second setting 'isApplyingGradientWhenTensorIsUsed' unless a form of "permission" is being signaled to the autograd system. This signal comes in the form of a "request" flag which marks a tensor as allowed to - be updated by its gradient. This request can be dispatched to a Tensor - by setting Tensor.setGradientApplyRequested(boolean) to true.
      -
      -
      Returns:
      + be updated by its gradient. This request can be dispatched to a Tensor + by setting Tensor.setGradientApplyRequested(boolean) to true.
  • +
    +
    Returns:
    The truth value determining if gradients should be applied upon request.
    - -
  • -
    -

    setIsApplyingGradientWhenRequested

    -
    public void setIsApplyingGradientWhenRequested(boolean apply)
    + + + + +
      +
    • +

      setIsApplyingGradientWhenRequested

      +
      public void setIsApplyingGradientWhenRequested(boolean apply)
      Gradients will only be applied if requested. Usually this happens immediately, however if the flag 'applyGradientWhenTensorIsUsed' is set to true, then the tensor will only be updated by its gradient if requested AND the tensor is used for calculation! - (GraphNode instantiation).
      -
    + (GraphNode instantiation).
  • -
  • -
    -

    toString

    -
    public String toString()
    -
    -
    Overrides:
    -
    toString in class Object
    + + + + +
      +
    • +

      toString

      +
      public java.lang.String toString()
      +
      +
      Overrides:
      +
      toString in class java.lang.Object
      -
diff --git a/docs/jdocs/neureka/Neureka.Settings.DType.html b/docs/jdocs/neureka/Neureka.Settings.DType.html
index ec84a05f3..92041b51a 100644
--- a/docs/jdocs/neureka/Neureka.Settings.DType.html
+++ b/docs/jdocs/neureka/Neureka.Settings.DType.html
@@ -1,235 +1,365 @@
-Neureka.Settings.DType (neureka 1.0.0 API)
+Neureka.Settings.DType (neureka 1.0.1 API)
    - -
    -
    -
    Package neureka
    -

    Class Neureka.Settings.DType

    +
    neureka
    +

    Class Neureka.Settings.DType

    -
    java.lang.Object -
    neureka.Neureka.Settings.DType
    -
    -
    -
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.Neureka.Settings.DType
      • +
      +
    • +
    +
    +
    -
    -
      - -
    • -
      -

      Constructor Summary

      -
      Constructors
      -
      -
      Constructor
      -
      Description
      - -
       
      +
      +
      public class Neureka.Settings.DType
      +extends java.lang.Object
      +
    • +
    - +
    + - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        DType

        -
        public DType()
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            DType

            +
            public DType()
          -
    • +
    -
  • -
    -

    Method Details

    -
      -
    • -
      -

      getDefaultDataTypeClass

      -
      public Class<?> getDefaultDataTypeClass()
      +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          getDefaultDataTypeClass

          +
          public java.lang.Class<?> getDefaultDataTypeClass()
          The default data type is not relevant most of the time. However, if a tensor is being constructed without providing a type class, then this property will be used.
          -
    • -
    • -
      -

      getDefaultDataType

      -
      public DataType<?> getDefaultDataType()
      -
      +
    + + + +
      +
    • +

      getDefaultDataType

      +
      public DataType<?> getDefaultDataType()
    • -
    • -
      -

      setDefaultDataTypeClass

      -
      public void setDefaultDataTypeClass(Class<?> dtype)
      +
    + + + +
      +
    • +

      setDefaultDataTypeClass

      +
      public void setDefaultDataTypeClass(java.lang.Class<?> dtype)
      The default data type is not relevant most of the time. However, if a tensor is being constructed without providing a type class, then this property will be used.
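      A usage sketch; the dtype() accessor on the settings object is an assumption:

     Neureka.get().settings().dtype().setDefaultDataTypeClass( Float.class ); // assumed accessor chain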
      -
  • -
  • -
    -

    getIsAutoConvertingExternalDataToJVMTypes

    -
    public boolean getIsAutoConvertingExternalDataToJVMTypes()
    + + + + +
      +
    • +

      getIsAutoConvertingExternalDataToJVMTypes

      +
      public boolean getIsAutoConvertingExternalDataToJVMTypes()
      This flag will determine if foreign data types will be converted into the next best fit (in terms of bits) or if it should be converted into something that does not mess with the representation of the data. For example an unsigned int can be converted bit-wise into a JVM int, or it could be converted to a JVM long type in order to be compatible with JVM operations...
      -
  • -
  • -
    -

    setIsAutoConvertingExternalDataToJVMTypes

    -
    public void setIsAutoConvertingExternalDataToJVMTypes(boolean autoConvert)
    + + + + +
      +
    • +

      setIsAutoConvertingExternalDataToJVMTypes

      +
      public void setIsAutoConvertingExternalDataToJVMTypes(boolean autoConvert)
      This flag will determine if foreign data types will be converted into the next best fit (in terms of bits) or if it should be converted into something that does not mess with the representation of the data. For example an unsigned int can be converted bit-wise into a JVM int, or it could be converted to a JVM long type in order to be compatible with JVM operations...
      -
  • -
  • -
    -

    toString

    -
    public String toString()
    -
    -
    Overrides:
    -
    toString in class Object
    + + + + +
      +
    • +

      toString

      +
      public java.lang.String toString()
      +
      +
      Overrides:
      +
      toString in class java.lang.Object
      -
  • - - + + +
    +
diff --git a/docs/jdocs/neureka/Neureka.Settings.Debug.html b/docs/jdocs/neureka/Neureka.Settings.Debug.html
index 49057a68e..dba10fe3e 100644
--- a/docs/jdocs/neureka/Neureka.Settings.Debug.html
+++ b/docs/jdocs/neureka/Neureka.Settings.Debug.html
@@ -1,174 +1,229 @@
-Neureka.Settings.Debug (neureka 1.0.0 API)
+Neureka.Settings.Debug (neureka 1.0.1 API)
    - -
    -
    -
    Package neureka
    -

    Class Neureka.Settings.Debug

    +
    neureka
    +

    Class Neureka.Settings.Debug

    -
    java.lang.Object -
    neureka.Neureka.Settings.Debug
    -
    -
    -
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.Neureka.Settings.Debug
      • +
      +
    • +
    +
    +
    -
    -
      - -
    • -
      -

      Constructor Summary

      -
      Constructors
      -
      -
      Constructor
      -
      Description
      - -
       
      +
      +
      public class Neureka.Settings.Debug
      +extends java.lang.Object
      +
    • +
    - +
    + - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        Debug

        -
        public Debug()
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            Debug

            +
            public Debug()
          -
    • +
    -
  • -
    -

    Method Details

    -
      -
    • -
      -

      isKeepingDerivativeTargetPayloads

      -
      public boolean isKeepingDerivativeTargetPayloads()
      +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          isKeepingDerivativeTargetPayloads

          +
          public boolean isKeepingDerivativeTargetPayloads()
          Every derivative is calculated with respect to some graph node. Graph nodes contain payload tensors. A tensor might not always be used for backpropagation, @@ -182,12 +237,15 @@

          isKeepingDerivativeTargetPayloads

          The flag determines this behavior with respect to target nodes. It is used in the test suite to validate that the right tensors were calculated. This flag should not be modified in production! (memory leak)
          -
    • -
    • -
      -

      setIsKeepingDerivativeTargetPayloads

      -
      public void setIsKeepingDerivativeTargetPayloads(boolean keep)
      +
    + + + +
      +
    • +

      setIsKeepingDerivativeTargetPayloads

      +
      public void setIsKeepingDerivativeTargetPayloads(boolean keep)
      Every derivative is calculated with respect to some graph node. Graph nodes contain payload tensors. A tensor might not always be used for backpropagation, @@ -201,52 +259,120 @@

      setIsKeepingDerivativeTargetPayloads

      The flag determines this behavior with respect to target nodes. It is used in the test suite to validate that the right tensors were calculated. This flag should not be modified in production! (memory leak)
      -
  • -
  • -
    -

    isDeletingIntermediateTensors

    -
    public boolean isDeletingIntermediateTensors()
    -
    Function instances will produce hidden intermediate results + + + + +
      +
    • +

      isDeletingIntermediateTensors

      +
      public boolean isDeletingIntermediateTensors()
      +
      Function instances will produce hidden intermediate results when executing an array of inputs. These tensors might not always be used for backpropagation, which means they will be deleted if possible. Tensors are not deleted if they are leaf tensors (they are created by the user or require gradients) or if they are angle points between forward- and reverse-mode-AutoDiff! This flag should not be modified in production! (memory leak)
      -
  • -
  • -
    -

    setIsDeletingIntermediateTensors

    -
    public void setIsDeletingIntermediateTensors(boolean delete)
    -
    Function instances will produce hidden intermediate results + + + + +
      +
    • +

      setIsDeletingIntermediateTensors

      +
      public void setIsDeletingIntermediateTensors(boolean delete)
      +
      Function instances will produce hidden intermediate results when executing an array of inputs. These tensors might not always be used for backpropagation, which means they will be deleted if possible. Tensors are not deleted if they are leaf tensors (they are created by the user or require gradients) or if they are angle points between forward- and reverse-mode-AutoDiff! This flag should not be modified in production! (memory leak)
      -
  • -
  • -
    -

    toString

    -
    public String toString()
    -
    -
    Overrides:
    -
    toString in class Object
    + + + + +
      +
    • +

      toString

      +
      public java.lang.String toString()
      +
      +
      Overrides:
      +
      toString in class java.lang.Object
      -
  • - - + + +
    +
diff --git a/docs/jdocs/neureka/Neureka.Settings.NDim.html b/docs/jdocs/neureka/Neureka.Settings.NDim.html
index ea9a125f1..d35a5fbb7 100644
--- a/docs/jdocs/neureka/Neureka.Settings.NDim.html
+++ b/docs/jdocs/neureka/Neureka.Settings.NDim.html
@@ -1,207 +1,325 @@
-Neureka.Settings.NDim (neureka 1.0.0 API)
+Neureka.Settings.NDim (neureka 1.0.1 API)
    - -
    -
    -
    Package neureka
    -

    Class Neureka.Settings.NDim

    -
    -
    java.lang.Object -
    neureka.Neureka.Settings.NDim
    +
    neureka
    +

    Class Neureka.Settings.NDim

    -
    -
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.Neureka.Settings.NDim
      • +
      +
    • +
    +
    +
      +
    • +
      Enclosing class:
      -
      Neureka.Settings
      +
      Neureka.Settings

      -
      public class Neureka.Settings.NDim -extends Object
      +
      +
      public class Neureka.Settings.NDim
      +extends java.lang.Object
      Settings for configuring the access pattern of nd-arrays/tensors.
      -
    -
    -
      - -
    • -
      -

      Constructor Summary

      -
      Constructors
      -
      -
      Constructor
      -
      Description
      - -
       
      +
    • +
    - +
    + - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        NDim

        -
        public NDim()
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            NDim

            +
            public NDim()
          -
    • +
    -
  • -
    -

    Method Details

    -
      -
    • -
      -

      isOnlyUsingDefaultNDConfiguration

      -
      public boolean isOnlyUsingDefaultNDConfiguration()
      -
      This flag determines which NDConfiguration implementations +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          isOnlyUsingDefaultNDConfiguration

          +
          public boolean isOnlyUsingDefaultNDConfiguration()
          +
          This flag determines which NDConfiguration implementations should be used for nd-arrays/tensors. - If this flag is set to true, then the less performant general purpose NDConfiguration + If this flag is set to true, then the less performant general purpose NDConfiguration will be used for all nd-arrays/tensors.
          -
          -
          Returns:
          -
          The truth value determining if only the default SlicedNDConfiguration should be used.
          +
          +
          Returns:
          +
          The truth value determining if only the default SlicedNDConfiguration should be used.
          -
    • -
    • -
      -

      setIsOnlyUsingDefaultNDConfiguration

      -
      public void setIsOnlyUsingDefaultNDConfiguration(boolean enabled)
      -
      Setting this flag determines which NDConfiguration implementations +
    + + + +
      +
    • +

      setIsOnlyUsingDefaultNDConfiguration

      +
      public void setIsOnlyUsingDefaultNDConfiguration(boolean enabled)
      +
      Setting this flag determines which NDConfiguration implementations should be used for nd-arrays/tensors. - If this flag is set to true, then the less performant general purpose NDConfiguration + If this flag is set to true, then the less performant general purpose NDConfiguration will be used for all nd-arrays/tensors.
      -
      -
      Parameters:
      -
      enabled - The truth value determining if only the default SlicedNDConfiguration should be used.
      +
      +
      Parameters:
      +
      enabled - The truth value determining if only the default SlicedNDConfiguration should be used.
      -
  • -
  • -
    -

    toString

    -
    public String toString()
    -
    -
    Overrides:
    -
    toString in class Object
    + + + + +
      +
    • +

      toString

      +
      public java.lang.String toString()
      +
      +
      Overrides:
      +
      toString in class java.lang.Object
      -
  • - - + + +
    +
diff --git a/docs/jdocs/neureka/Neureka.Settings.View.html b/docs/jdocs/neureka/Neureka.Settings.View.html
index 8506ab503..d722e2e0a 100644
--- a/docs/jdocs/neureka/Neureka.Settings.View.html
+++ b/docs/jdocs/neureka/Neureka.Settings.View.html
@@ -1,172 +1,283 @@
-Neureka.Settings.View (neureka 1.0.0 API)
+Neureka.Settings.View (neureka 1.0.1 API)
    - -
    -
    -
    Package neureka
    -

    Class Neureka.Settings.View

    +
    neureka
    +

    Class Neureka.Settings.View

    -
    java.lang.Object -
    neureka.Neureka.Settings.View
    -
    -
    -
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.Neureka.Settings.View
      • +
      +
    • +
    +
    +
    -
    -
      - -
    • -
      -

      Method Summary

      -
      -
      -
      -
      -
      Modifier and Type
      -
      Method
      -
      Description
      - - -
      -
      Settings for configuring how tensors should be converted to String representations.
      +
      +
      public class Neureka.Settings.View
      +extends java.lang.Object
      +
      Settings for configuring how objects should be converted to String representations.
      +
    • +
    -
    void
    - -
    +
    + - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Method Details

      -
        -
      • -
        -

        getNDPrintSettings

        -
        public NDPrintSettings getNDPrintSettings()
        -
        Settings for configuring how tensors should be converted to String representations.
        -
        +
          +
        • + + +

          Method Detail

          + + + +
            +
          • +

            getNDPrintSettings

            +
            public NDPrintSettings getNDPrintSettings()
            +
            Settings for configuring how tensors should be converted to String representations.
          • -
          • -
            -

            ndArrays

            -
            public void ndArrays(Consumer<NDPrintSettings> should)
            +
          + + + +
            +
          • +

            ndArrays

            +
            public void ndArrays(java.util.function.Consumer<NDPrintSettings> should)
            This allows you to provide a lambda to configure how tensors should be - converted to String instances. - The provided Consumer will receive a NDPrintSettings instance + converted to String instances. + The provided Consumer will receive a NDPrintSettings instance which allows you to change various settings with the help of method chaining.
            -
            -
            Parameters:
            -
            should - A consumer of the NDPrintSettings ready to be configured.
            +
            +
            Parameters:
            +
            should - A consumer of the NDPrintSettings ready to be configured.
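            A usage sketch; the view() accessor is an assumption, while setCellSize comes from the toString example shown earlier:

     Neureka.get().settings().view().ndArrays( it -> it.setCellSize( 6 ) ); // assumed accessor chain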
            -
    • -
    • -
      -

      toString

      -
      public String toString()
      -
      -
      Overrides:
      -
      toString in class Object
      +
    + + + +
      +
    • +

      toString

      +
      public java.lang.String toString()
      +
      +
      Overrides:
      +
      toString in class java.lang.Object
      -
    - - + + +
    +
diff --git a/docs/jdocs/neureka/Neureka.Settings.html b/docs/jdocs/neureka/Neureka.Settings.html
index 450924993..7daccec18 100644
--- a/docs/jdocs/neureka/Neureka.Settings.html
+++ b/docs/jdocs/neureka/Neureka.Settings.html
@@ -1,304 +1,463 @@
-Neureka.Settings (neureka 1.0.0 API)
+Neureka.Settings (neureka 1.0.1 API)
    - -
    -
    -
    Package neureka
    -

    Class Neureka.Settings

    -
    -
    java.lang.Object -
    neureka.Neureka.Settings
    +
    neureka
    +

    Class Neureka.Settings

    -
    -
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.Neureka.Settings
      • +
      +
    • +
    +
    +
      +
    • +
      Enclosing class:
      -
      Neureka
      +
      Neureka

      -
      public class Neureka.Settings -extends Object
      -
      This class hosts the settings of the Neureka instance which will be used throughout the library.
      -
    -
    -
      +
      +
      public class Neureka.Settings
      +extends java.lang.Object
      +
      This class hosts the settings of the Neureka instance which will be used throughout the library.
      + +
    +
    +
    + + + + + + + + + + + + + + + + + + + + + + + + +
      +
    • +

      dtype

      +
      public Neureka.Settings.DType dtype(java.lang.Object closure)
      This allows you to configure Neureka using a Groovy DSL.
      -
    • -
    • -
      -

      isLocked

      -
      public boolean isLocked()
      +
    + + + +
      +
    • +

      isLocked

      +
      public boolean isLocked()
      Locked settings can only be read but not written to. - Trying to write to a locked Neureka.Settings instance will not have an effect. + Trying to write to a locked Neureka.Settings instance will not have an effect. The attempt, however, will be logged.
      -
    • -
    • -
      -

      setIsLocked

      -
      public void setIsLocked(boolean locked)
      -
      Can be used to lock or unlock the settings of the current thread-local Neureka instance. +
    + + + +
      +
    • +

      setIsLocked

      +
      public void setIsLocked(boolean locked)
      +
      Can be used to lock or unlock the settings of the current thread-local Neureka instance. Locked settings can only be read but not written to. - Trying to write to a locked Neureka.Settings instance will not have an effect. + Trying to write to a locked Neureka.Settings instance will not have an effect. The attempt, however, will be logged.
      -
    • -
    • -
      -

      toString

      -
      public String toString()
      -
      -
      Overrides:
      -
      toString in class Object
      +
    + + + +
      +
    • +

      toString

      +
      public java.lang.String toString()
      +
      +
      Overrides:
      +
      toString in class java.lang.Object
      -
    - - + + +
    +
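Note: a brief sketch of the locking behaviour documented above; Neureka.get(), settings(), isLocked() and
setIsLocked(boolean) are all taken from this diff, the rest is purely illustrative.

    import neureka.Neureka;

    public class SettingsLockSketch {
        public static void main(String[] args) {
            Neureka.Settings settings = Neureka.get().settings();   // thread-local settings (documented)

            settings.setIsLocked(true);                  // lock: subsequent writes are ignored and logged
            System.out.println(settings.isLocked());     // prints: true

            settings.setIsLocked(false);                 // unlock again before further configuration
        }
    }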
diff --git a/docs/jdocs/neureka/Neureka.Utility.html b/docs/jdocs/neureka/Neureka.Utility.html
index 27689a897..6746a8c1b 100644
--- a/docs/jdocs/neureka/Neureka.Utility.html
+++ b/docs/jdocs/neureka/Neureka.Utility.html
@@ -1,174 +1,284 @@
[Generated Javadoc diff: page title "Neureka.Utility (neureka 1.0.0 API)" becomes "Neureka.Utility (neureka 1.0.1
API)"; the "Constructor Details"/"Method Details" headings become "Constructor Detail"/"Method Detail" and type
references are expanded to fully qualified java.lang names. HTML markup and navigation residue omitted.
Recoverable content:

  Package neureka, Class Neureka.Utility

  public static class Neureka.Utility extends java.lang.Object

  Constructors:
    Utility()

  Methods:
    public java.lang.String readResource(java.lang.String path)
        Helper method which reads the file with the given name and returns the contents of this file
        as a String. Will exit the application if the file can not be read.
        Parameters: path - The path to the jar resource.
        Returns: The contents of the file]
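Note: a small illustrative sketch of readResource as documented above; the utility() accessor is documented further
down in this diff, and the resource path used here is only a plausible example, not a path confirmed by this excerpt.

    import neureka.Neureka;

    public class ReadResourceSketch {
        public static void main(String[] args) {
            // Loads a jar resource as a String; the docs state the application exits if it cannot be read.
            // "library_settings.groovy" is the DSL file name mentioned in the Neureka class docs,
            // used here purely as an example path.
            String contents = Neureka.get().utility().readResource("library_settings.groovy");
            System.out.println(contents);
        }
    }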
diff --git a/docs/jdocs/neureka/Neureka.html b/docs/jdocs/neureka/Neureka.html
index 1afce9168..cbfcd8d9e 100644
--- a/docs/jdocs/neureka/Neureka.html
+++ b/docs/jdocs/neureka/Neureka.html
@@ -1,354 +1,514 @@
[Generated Javadoc diff: page title "Neureka (neureka 1.0.0 API)" becomes "Neureka (neureka 1.0.1 API)"; type
references are expanded to fully qualified java.lang names and the "Method Details" heading becomes "Method Detail".
HTML markup and navigation residue omitted. Recoverable content:

  Package neureka, Class Neureka

  public final class Neureka extends java.lang.Object
      Neureka is the key access point for thread-local / global library settings (see Neureka.Settings)
      as well as execution contexts (see BackendContext) and pre-instantiated Functions.
      Neureka exposes the execution context via the backend() method; the library settings which govern
      the behaviour of various library components can be accessed via the settings() method.
      Common functions can be accessed within a given BackendContext instance based on which they were built.
      If one wishes to modify the default library settings it is possible to do so by editing
      the "library_settings.groovy" DSL file.

  Nested classes:
    class Neureka.Settings          This class hosts the settings of the Neureka instance which will be used throughout the library.
    static class Neureka.Utility

  Methods:
    public BackendContext backend()
    public static Neureka get()
        Instances of this configuration are stored local to every thread in order to make both the library
        settings and the execution context threadsafe! This method will return the Neureka instance which
        corresponds to the thread calling it.
        Returns: The thread-local library configuration state called Neureka.
    public static void set(Neureka instance)
        Neureka is a thread-local singleton. Therefore, this method will only set the provided Neureka
        instance for the thread which is calling this method. Other threads calling the get() method to
        retrieve the instance will get their own instance.
    public static Neureka configure(java.lang.Object closure)
        This allows you to configure Neureka using a Groovy DSL.
        Returns: The thread-local Neureka singleton instance.
    public boolean canAccessOpenCL()
        Returns: The truth value determining if OpenCL is accessible.
    public boolean canAccessOpenCLDevice()
        Returns: The truth value determining if at least 1 OpenCLDevice is accessible.
    public Neureka.Settings settings()
        Returns: An instance of the library-wide Neureka.Settings determining the behaviour of many classes.
    public Neureka.Settings settings(java.lang.Object closure)
        This allows you to configure Neureka using a Groovy DSL.
    public Neureka.Utility utility()
        Returns: An instance of a utility class useful for loading resources or checking if they are even available.
    public static java.lang.String version()
        Returns: The semantic version of the Neureka library.
    public void reset()
        This method will try to reload the "library_settings.groovy" script which will re-configure the
        library-wide Neureka.Settings instance nested inside Neureka. If the execution of this file fails,
        the settings will be reverted to a hardcoded default state.
    public java.lang.String toString()
        Overrides: toString in class java.lang.Object
    public BackendContext getBackend()
        Returns: A context object which is expected to host all the tensor operations.
    public void setBackend(BackendContext backendContext)
        Use this method to attach a backend context (for operations) to this thread-local library context.]
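Note: a brief sketch pulling together the Neureka entry points documented above. The BackendContext import path is
an assumption (it is not shown in this excerpt); every call mirrors a signature that appears in the diff.

    import neureka.Neureka;
    import neureka.backend.api.BackendContext;       // assumed package for BackendContext

    public class NeurekaContextSketch {
        public static void main(String[] args) {
            Neureka context = Neureka.get();                      // thread-local singleton (documented)

            System.out.println(Neureka.version());               // semantic version of the library
            System.out.println(context.canAccessOpenCL());        // is OpenCL accessible at all?
            System.out.println(context.canAccessOpenCLDevice());  // is at least one OpenCLDevice usable?

            Neureka.Settings settings = context.settings();       // library-wide settings
            BackendContext backend = context.backend();           // execution context hosting operations
            System.out.println(settings + " / " + backend);

            context.reset();  // re-run "library_settings.groovy", falling back to hardcoded defaults on failure
        }
    }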
diff --git a/docs/jdocs/neureka/Shape.html b/docs/jdocs/neureka/Shape.html
index b018bd051..67140b29d 100644
--- a/docs/jdocs/neureka/Shape.html
+++ b/docs/jdocs/neureka/Shape.html
@@ -1,406 +1,568 @@
[Generated Javadoc diff: page title "Shape (neureka 1.0.0 API)" becomes "Shape (neureka 1.0.1 API)"; type references
are expanded to fully qualified java.lang / java.util names, the method list becomes a summary table and the "Method
Details" heading becomes "Method Detail". HTML markup and navigation residue omitted. Recoverable content:

  Package neureka, Interface Shape
  All Superinterfaces: java.lang.Iterable<java.lang.Integer>

  public interface Shape extends java.lang.Iterable<java.lang.Integer>
      Basically a tuple of integers which is used to describe the shape of an array.
      The shape of an array is the number of elements in each dimension.
      A Shape is an immutable monadic type, which means that you can transform a Shape into another Shape
      by applying a function to it, for example through the map(java.util.function.Function) method.

  Methods:
    static Shape of(int... shape)
    static Shape of(java.util.List<? extends java.lang.Number> shape)
    static Shape of(java.lang.Iterable<? extends java.lang.Number> shape)
    static Shape of(java.util.stream.Stream<? extends java.lang.Number> shape)
        Each factory creates a Shape instance whose integer values describe the shape of a nd-array;
        the shape of an array is the number of elements in each dimension.
        Returns: A Shape instance which is created from the given integers.
    int size()
        Returns: The number of dimensions of the shape.
    default int elements()
        Returns: The number of elements in the shape.
    int get(int i)
        Returns: The number of elements in the dimension/axis at the given index.
    default int[] toIntArray()
        Returns: This shape as an array of integers.
    default Shape map(java.util.function.Function<java.lang.Integer,java.lang.Integer> mapper)
        Transforms this Shape into another Shape by applying a function to it.
        Returns: A new Shape instance which is the result of the transformation.
    default java.util.stream.Stream<java.lang.Integer> stream()
        Returns: This shape as a stream of integers.
    default Shape slice(int start, int end)
        Returns: A slice of this shape starting at the given start index (inclusive) and ending at the
        given end index (exclusive).
    default Shape slice(int start)
        Returns: A slice of this shape starting at the given start index and ending at the end of the shape.
    default Shape filter(java.util.function.Predicate<java.lang.Integer> predicate)
        Returns: A new shape which is the result of filtering this shape with the given predicate.
    default int count(java.util.function.Predicate<java.lang.Integer> predicate)
        Returns: The number of elements in the shape which satisfy the given predicate.
    default boolean every(java.util.function.Predicate<java.lang.Integer> predicate)
        Returns: True if the given predicate is true for all elements of the shape.
    default boolean any(java.util.function.Predicate<java.lang.Integer> predicate)
        Returns: True if the given predicate is true for at least one element of the shape.
    default java.util.Iterator<java.lang.Integer> iterator()
        Specified by: iterator in interface java.lang.Iterable<java.lang.Integer>
        Returns: An iterator over the shape.

  Methods inherited from interface java.lang.Iterable: forEach, spliterator]
diff --git a/docs/jdocs/neureka/Tensor.ImageType.html b/docs/jdocs/neureka/Tensor.ImageType.html
index 6c8695869..ad5fc0b45 100644
--- a/docs/jdocs/neureka/Tensor.ImageType.html
+++ b/docs/jdocs/neureka/Tensor.ImageType.html
@@ -1,342 +1,514 @@
[Generated Javadoc diff: page title "Tensor.ImageType (neureka 1.0.0 API)" becomes "Tensor.ImageType (neureka 1.0.1
API)", the heading "Enum Class Tensor.ImageType" becomes "Enum Tensor.ImageType", "this enum class" becomes "this
enum type", and type references are expanded to fully qualified java.lang names. HTML markup and navigation residue
omitted. Recoverable content:

  Package neureka, Enum Tensor.ImageType
  Hierarchy: java.lang.Object -> java.lang.Enum<Tensor.ImageType> -> neureka.Tensor.ImageType

  Fields:
    public final int bufferType
    public final DataType<?> dataType
    public final int numberOfChannels

  Methods:
    public static Tensor.ImageType[] values()
        Returns an array containing the constants of this enum type, in the order they are declared.
        This method may be used to iterate over the constants as follows:

            for (Tensor.ImageType c : Tensor.ImageType.values())
                System.out.println(c);

    public static Tensor.ImageType valueOf(java.lang.String name)
        Returns the enum constant of this type with the specified name. The string must match exactly an
        identifier used to declare an enum constant in this type. (Extraneous whitespace characters are
        not permitted.)
        Parameters: name - the name of the enum constant to be returned.
        Returns: the enum constant with the specified name
        Throws: java.lang.IllegalArgumentException - if this enum type has no constant with the specified name
                java.lang.NullPointerException - if the argument is null]
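Note: a small sketch iterating the enum documented above and reading its public fields. The actual constant names
are not listed in this excerpt, so none are hard-coded here.

    import neureka.Tensor;

    public class ImageTypeSketch {
        public static void main(String[] args) {
            for (Tensor.ImageType type : Tensor.ImageType.values()) {
                // bufferType, dataType and numberOfChannels are documented public final fields.
                System.out.println(type
                        + ": channels=" + type.numberOfChannels
                        + ", bufferType=" + type.bufferType
                        + ", dataType=" + type.dataType);
            }
            // Tensor.ImageType.valueOf(name) would throw IllegalArgumentException
            // for a name that does not match a declared constant.
        }
    }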
    - + + + + diff --git a/docs/jdocs/neureka/Tensor.html b/docs/jdocs/neureka/Tensor.html index 7cbe39373..9f27a48c7 100644 --- a/docs/jdocs/neureka/Tensor.html +++ b/docs/jdocs/neureka/Tensor.html @@ -1,89 +1,115 @@ - + + - -Tensor (neureka 1.0.0 API) - - - - + +Tensor (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    -
    Package neureka
    -

    Interface Tensor<V>

    -
    -
    -
    -
    Type Parameters:
    +
    neureka
    +

    Interface Tensor<V>

    +
    +
    +
    +
      +
    • +
      +
      Type Parameters:
      V - The type parameter for the individual value items within this tensor.
      -
      +
      All Superinterfaces:
      -
      Component<Tensor<V>>, ComponentOwner<Tensor<V>>, Iterable<V>, Nda<V>, NDimensional
      +
      Component<Tensor<V>>, ComponentOwner<Tensor<V>>, java.lang.Iterable<V>, Nda<V>, NDimensional

      -
      public interface Tensor<V> -extends Nda<V>, Component<Tensor<V>>, ComponentOwner<Tensor<V>>
      -
      A Tensor is a mathematical concept and type of multidimensional +
      +
      public interface Tensor<V>
      +extends Nda<V>, Component<Tensor<V>>, ComponentOwner<Tensor<V>>
      +
      A Tensor is a mathematical concept and type of multidimensional data-structure with certain transformation properties. Technically however, it is mostly a simple container / data-structure which can house data indexed by N dimensions. Therefore, it is often also described as a nd-array. @@ -98,1329 +124,1563 @@

      Interface Tensor<V>

      Such operations might be simple element-wise operations or more complex linear operations like the dot-product, matrix- or even tensor multiplications.

      - -
      -
        + +
      +
      +
      +
        +
      • -
      • -
        -

        Nested Class Summary

        -
        Nested Classes
        -
        -
        Modifier and Type
        -
        Interface
        -
        Description
        -
        static enum 
        - -
        -
        Use this enum as argument for the asImage(Tensor.ImageType) method to + -
      • -
        -

        Method Summary

        -
        -
        -
        -
        -
        Modifier and Type
        -
        Method
        -
        Description
        -
        default Tensor<V>
        -
        abs()
        -
        +
          +
        • + + +

          Method Summary

          + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
          All Methods Static Methods Instance Methods Abstract Methods Default Methods 
          Modifier and TypeMethod and Description
          default Tensor<V>abs()
          This method is a functionally identical to the following alternatives:
          - -
          default void
          - -
          +
          default voidapplyGradient()
          If this tensor owns a gradient tensor as component, then it can be applied by this method.
          - - - -
          -
          Turns this tensor into a BufferedImage based on the provided - Tensor.ImageType formatting choice.
          -
          -
          <T> T
          -
          asType(Class<T> typeClass)
          -
           
          -
          default Tensor<V>
          - -
          +
          java.awt.image.BufferedImageasImage(Tensor.ImageType type) +
          Turns this tensor into a BufferedImage based on the provided + Tensor.ImageType formatting choice.
          +
          <T> TasType(java.lang.Class<T> typeClass) 
          default Tensor<V>backward()
          Use this to back-propagate an error signal of 1.0 through the recorded computation graph.
          - -
          default Tensor<V>
          -
          backward(double value)
          -
          -
          Tensors which are used or produced by the autograd system will have a GraphNode component attached to them.
          -
          -
          default Tensor<V>
          -
          backward(Tensor<V> error)
          -
          -
          Tensors which are used or produced by the autograd system will have a GraphNode component attached to them.
          -
          -
          default boolean
          - -
          -
          Tensors which are used or produced by the autograd system will have a GraphNode component attached to them.
          -
          -
          default Tensor<V>
          - -
          +
          default Tensor<V>backward(double value) +
          Tensors which are used or produced by the autograd system will have a GraphNode component attached to them.
          +
          default Tensor<V>backward(Tensor<V> error) +
          Tensors which are used or produced by the autograd system will have a GraphNode component attached to them.
          +
          default booleanbelongsToGraph() +
          Tensors which are used or produced by the autograd system will have a GraphNode component attached to them.
          +
          default Tensor<V>cbrt()
          This method is a functionally identical to the following alternatives:
          - -
          default Tensor<V>
          -
          concatAt(int axis, - Nda<V> other)
          -
          +
          default Tensor<V>concatAt(int axis, + Nda<V> other)
          This method concatenates the provided nd-array together with this nd-array along a specified axis.
          - -
          default Tensor<V>
          -
          concatAt(int axis, - Nda<V> other, - Nda<V>... ndArrays)
          -
          +
          default Tensor<V>concatAt(int axis, + Nda<V> other, + Nda<V>... ndArrays)
          This method concatenates the provided nd-arrays together with this nd-array along a specified axis.
          - -
          default boolean
          -
          contains(Tensor<V> other)
          -
          +
          default booleancontains(Tensor<V> other)
          This method name translates to the "in" keyword in Kotlin! The same is true for the "isCase" method in Groovy.
          - -
          default Tensor<V>
          -
          conv(Tensor<V> other)
          -
          +
          default Tensor<V>conv(Tensor<V> other)
          This method performs convolution between this tensor and the one passed as argument.
          - -
          default Tensor<V>
          -
          convDot(Tensor<V> other)
          -
          +
          default Tensor<V>convDot(Tensor<V> other)
          This method performs a convolutional based dot product between the last dimension of this tensor and the first dimension of the passed tensor.
          - -
          default Tensor<V>
          -
          cos()
          -
          +
          default Tensor<V>cos()
          This method is a functionally identical to the following alternatives:
          - - - -
          -
          This is almost identical to the deepCopy() method except that +
          Tensor<V>deepClone() +
          This is almost identical to the deepCopy() method except that the returned tensor will have autograd support, meaning that the cloning will be part of the autograd computation graph, and backpropagation will traverse the cloned tensor as well.
          - - - -
          +
          Tensor<V>deepCopy()
          This method creates and returns a new nd-array instance which is not only a copy of the configuration of this nd-array but also a copy of the underlying data array.
          - -
          default Tensor<V>
          - -
          +
          default Tensor<V>detached()
          This method returns a new tensor detached from any underlying computation-graph or simply does nothing if no graph is present.
          - Nodes within a computation graph are instances of the "GraphNode" class which are also + Nodes within a computation graph are instances of the "GraphNode" class which are also simple components of the tensors they represent in the graph.
          - -
          default Tensor<V>
          - -
          -
          This creates a new tensor with the same underlying Data and whose shape is trimmed.
          -
          -
          default Tensor<V>
          -
          div(Tensor<V> other)
          -
          +
          default Tensor<V>dimtrim() +
          This creates a new tensor with the same underlying Data and whose shape is trimmed.
          +
          default Tensor<V>div(Tensor<V> other)
          This method will produce the quotient of two tensors with the same rank (or two ranks which can be made compatible with padding ones), - where the left operand is this Tensor + where the left operand is this Tensor instance and the right operand is the tensor passed to the method.
          - -
          default Tensor<V>
          -
          div(V value)
          -
           
          -
          default Tensor<V>
          -
          dot(Tensor<V> other)
          -
          +
          default Tensor<V>div(V value) 
          default Tensor<V>dot(Tensor<V> other)
          Performs a dot product between the last dimension of this tensor and the first dimension of the provided tensor.
          - -
          default Tensor<V>
          -
          exp()
          -
          +
          default Tensor<V>exp()
          This method is a functionally identical to the following alternatives:
          - -
          default Optional<NDFrame<V>>
          - -
          -
          This is a functionally identical alternative to getFrame().
          -
          -
          default Tensor<V>
          -
          get(int i)
          -
          -
          This getter method creates and returns a slice of the original nd-array.
          -
          -
          default Tensor<V>
          -
          get(int... indices)
          -
          +
          default java.util.Optional<NDFrame<V>>frame() +
          This is a functionally identical alternative to getFrame().
          +
          default Tensor<V>get(int... indices)
          The following method enables access to specific scalar elements within the nd-array.
          - -
          default Tensor<V>
          - -
          +
          default Tensor<V>get(int i)
          This getter method creates and returns a slice of the original nd-array.
          - -
          default Tensor<V>
          -
          get(Object key)
          -
          +
          default Tensor<V>get(java.lang.Number i) +
          This getter method creates and returns a slice of the original nd-array.
          +
          default Tensor<V>get(java.lang.Object... args) +
          The following method enables the creation of nd-array slices which access + the same underlying data (possibly from a different view).
          +
          default Tensor<V>get(java.lang.Object key)
          This method enables nd-array slicing! It takes a key of various types and configures a slice nd-array which shares the same underlying data as the original nd-array.
          - -
          default Tensor<V>
          -
          get(Object... args)
          -
          -
          The following method enables the creation of nd-array slices which access - the same underlying data (possibly from a different view).
          -
          -
          default Tensor<V>
          -
          getAt(int i)
          -
          -
          This getter method creates and returns a slice of the original nd-array.
          -
          - -
          getAt(int... indices)
          -
          +
          Tensor<V>getAt(int... indices)
          The following method enables access to specific scalar elements within the nd-array.
          - -
          default Tensor<V>
          - -
          +
          default Tensor<V>getAt(int i)
          This getter method creates and returns a slice of the original nd-array.
          - -
          default Tensor<V>
          -
          getAt(Object... args)
          -
          -
          The following method enables the creation of nd-array slices which access - the same underlying data (possibly from a different view).
          -
          - -
          getAt(List<?> key)
          -
          +
          Tensor<V>getAt(java.util.List<?> key)
          This method enables nd-array slicing! It takes a key of various types and configures a slice nd-array which shares the same underlying data as the original nd-array.
          - - -
          getAt(Map<?,Integer> rangToSteps)
          -
          +
          Tensor<V>getAt(java.util.Map<?,java.lang.Integer> rangToSteps)
          This method is most useful when used in Groovy where defining maps is done through square brackets, making it possible to slice nd-arrays like so:
          - - - -
          -
          This method returns the DataType instance of this Tensor, which is +
          default Tensor<V>getAt(java.lang.Number i) +
          This getter method creates and returns a slice of the original nd-array.
          +
          default Tensor<V>getAt(java.lang.Object... args) +
          The following method enables the creation of nd-array slices which access + the same underlying data (possibly from a different view).
          +
          DataType<V>getDataType() +
          This method returns the DataType instance of this Tensor, which is a wrapper object for the actual type class representing the value items stored inside the underlying data array of this tensor.
          - -
          default Device<V>
          - -
           
          -
          default Optional<NDFrame<V>>
          - -
           
          -
          default Optional<Tensor<V>>
          - -
           
          -
          default Optional<GraphNode<V>>
          - -
           
          - - -
          +
          default Device<V>getDevice() 
          default java.util.Optional<NDFrame<V>>getFrame() 
          default java.util.Optional<Tensor<V>>getGradient() 
          default java.util.Optional<GraphNode<V>>getGraphNode() 
          MutateTensor<V>getMut()
          This method exposes an API for mutating the state of this tensor.
          - - - -
          -
          The Class returned by this method is the representative Class of the - value items of a concrete AbstractNda but not necessarily the actual Class of +
          java.lang.Class<?>getRepresentativeItemClass() +
          The Class returned by this method is the representative Class of the + value items of a concrete AbstractNda but not necessarily the actual Class of a given value item, this is especially true for numeric types, which are represented by - implementations of the NumericType interface.
          - -
          default Tensor<V>
          - -
          -
          A method which returns a new Tensor instance which is a transposed twin of this instance.
          - This is an alternative to the functionally identical T() method.
          -
          -
          int
          - -
          + implementations of the NumericType interface.
          +
          default Tensor<V>getT() +
          A method which returns a new Tensor instance which is a transposed twin of this instance.
          + This is an alternative to the functionally identical T() method.
          +
          intgetVersion()
          The version number is tracking how often this tensor has been mutated.
          - -
          default Optional<Tensor<V>>
          - -
          -
          This is a functionally identical alternative to the getGradient() method.
          -
          -
          boolean
          - -
          +
          default java.util.Optional<Tensor<V>>gradient() +
          This is a functionally identical alternative to the getGradient() method.
          +
          booleangradientApplyRequested()
          This flag works alongside two autograd features which can be enabled inside the library settings.
          - -
          default Optional<GraphNode<V>>
          - -
          -
          This is a functionally identical alternative to getGraphNode().
          -
          -
          default boolean
          - -
          +
          default java.util.Optional<GraphNode<V>>graphNode() +
          This is a functionally identical alternative to getGraphNode().
          +
          default booleanhasGradient()
          Tensors can be components of other tensors which makes the implicitly their gradients.
          - -
          boolean
          -
          is(Class<?> typeClass)
          -
          +
          booleanis(java.lang.Class<?> typeClass)
          This method compares the passed class with the underlying data-type of this NDArray.
          - -
          default boolean
          - -
          -
          Tensors which are used or produced by the autograd system will have a GraphNode component attached to them.
          -
          -
          boolean
          -
          isCase(Tensor<V> other)
          -
          +
          default booleanisBranch() +
          Tensors which are used or produced by the autograd system will have a GraphNode component attached to them.
          +
          booleanisCase(Tensor<V> other)
          This method name translates to the "in" keyword in Groovy! The same is true for the "contains" method in Kotlin.
          - -
          boolean
          - -
          -
          This will check if the MutateTensor.delete() method was previously called on this tensor.
          -
          -
          default boolean
          - -
          -
          A tensor is empty if it's Data storage is null.
          -
          -
          boolean
          - -
          +
          booleanisDeleted() +
          This will check if the MutateTensor.delete() method was previously called on this tensor.
          +
          default booleanisEmpty() +
          A tensor is empty if it's Data storage is null.
          +
          booleanisIntermediate()
          Intermediate tensors are internal non-user tensors which may be eligible - for deletion when further consumed by a Function.
          - -
          default boolean
          - -
          -
          Tensors which are used or produced by the autograd system will have a GraphNode component attached to them.
          -
          -
          default boolean
          - -
          -
          Outsourced means that the tensor is stored on a Device implementation instance which is not the CPU.
          -
          -
          default boolean
          - -
          + for deletion when further consumed by a Function.
          +
          default booleanisLeave() +
          Tensors which are used or produced by the autograd system will have a GraphNode component attached to them.
          +
          default booleanisOutsourced() +
          Outsourced means that the tensor is stored on a Device implementation instance which is not the CPU.
          +
          default booleanisPartialSlice()
          If this nd-array is a partial slice of a parent nd-array then this method will yield true.
          - -
          default boolean
          - -
          +
          default booleanisShallowCopy()
          If this nd-array is a shallow copy of a parent nd-array then this method will yield true.
          - -
          default boolean
          - -
          +
          default booleanisSlice()
          If this nd-array is a slice of a parent nd-array then this method will yield true.
          - -
          default boolean
          - -
          +
          default booleanisSliceParent()
          If slices have been derived from this nd-array then it is a "slice parent".
          - -
          default boolean
          - -
          -
          A tensor is "undefined" if it has either no NDConfiguration implementation instance - or this instance does not have a shape set for this Tensor which is needed for +
          default booleanisUndefined() +
          A tensor is "undefined" if it has either no NDConfiguration implementation instance + or this instance does not have a shape set for this Tensor which is needed for a tensor to also have a rank and dimensionality...
          - -
          boolean
          - -
          +
          booleanisVirtual()
          A Virtual tensor is a tensor whose underlying data array is of size 1, holding only a single value.
          - - -
          like(Tensor<V> template)
          -
          +
          static <V> IterByOrIterFromOrAllTensor<V>like(Tensor<V> template)
          Use this factory method to instantiate a new tensor with the same data type, shape - and memory location (Device instance) as the provided template tensor.
          - -
          default Tensor<V>
          -
          ln()
          -
          + and memory location (Device instance) as the provided template tensor.
          +
          default Tensor<V>ln()
          This method is a functionally identical to the following alternatives:
          - -
          default Tensor<V>
          - -
          +
          default Tensor<V>log10()
          This method is a functionally identical to the following alternatives:
          - -
          default Tensor<V>
          -
          map(Function<V,V> mapper)
          -
          +
          default Tensor<V>map(java.util.function.Function<V,V> mapper)
          This method is a convenience method for mapping the items of this nd-array to another nd-array of the same type based on the provided lambda function, which will be applied to all items of this nd-array individually (element-wise).
          - -
          default <T> Tensor<T>
          -
          mapTo(Class<T> typeClass, - Function<V,T> mapper)
          -
          +
          default <T> Tensor<T>mapTo(java.lang.Class<T> typeClass, + java.util.function.Function<V,T> mapper)
          This is a convenience method for mapping a nd-array to a nd-array of new type based on a provided target item type and mapping lambda.
          - -
          default Tensor<V>
          -
          matMul(Tensor<V> other)
          -
          +
          default Tensor<V>matMul(Tensor<V> other)
          This will produce the matrix product of - two tensors with rank 2 (matrices), where the left operand is this Tensor + two tensors with rank 2 (matrices), where the left operand is this Tensor instance and the right operand is the argument passed to the method.
          - -
          default Tensor<V>
          -
          max()
          -
          +
          default Tensor<V>max()
          Calculate the max value of all values within this tensor and returns it in the form of a scalar tensor.
          - -
          default Tensor<V>
          - -
          +
          default Tensor<V>mean()
          Calculate the mean value of all values within this tensor and returns it in the form of a scalar tensor.
          - -
          default Tensor<V>
          -
          min()
          -
          +
          default Tensor<V>min()
          Calculate the min value of all values within this tensor and returns it in the form of a scalar tensor.
          - -
          default Tensor<V>
          -
          minus(Tensor<V> other)
          -
          +
          default Tensor<V>minus(Tensor<V> other)
          Performs subtraction on two tensors with the same rank (or two ranks which can be made compatible with padding ones), - where the left operand is this Tensor + where the left operand is this Tensor instance and the right operand is the tensor passed to the method.
          - -
          default Tensor<V>
          -
          minus(V other)
          -
          -
          This method will create a new Tensor - with the provided item subtracted from all elements of this Tensor.
          -
          -
          default Tensor<V>
          -
          mod(int other)
          -
           
          -
          default Tensor<V>
          -
          mod(Tensor<V> other)
          -
          +
          default Tensor<V>minus(V other) +
          This method will create a new Tensor + with the provided item subtracted from all elements of this Tensor.
          +
          default Tensor<V>mod(int other) 
          default Tensor<V>mod(Tensor<V> other)
          Produces the modulus of two tensors with the same rank (or two ranks which can be made compatible with padding ones), - where the left operand is this Tensor + where the left operand is this Tensor instance and the right operand is the tensor passed to the method.
          - -
          default Tensor<V>
          -
          multiply(double value)
          -
           
          -
          default Tensor<V>
          -
          multiply(Tensor<V> other)
          -
          -
          This method is synonymous to the times(Tensor) method.
          -
          -
          default Tensor<V>
          -
          multiply(V other)
          -
           
          -
          default MutateTensor<V>
          -
          mut()
          -
          +
          default Tensor<V>multiply(double value) 
          default Tensor<V>multiply(Tensor<V> other) +
          This method is synonymous to the times(Tensor) method.
          +
          default Tensor<V>multiply(V other) 
          default MutateTensor<V>mut()
          This method exposes an API for mutating the state of this tensor.
          - -
          default Tensor<V>
          -
          neg()
          -
          +
          default Tensor<V>neg()
This method is functionally identical to the following alternatives:
          - -
          default Tensor<V>
          - -
           
          -
          static Tensor<Object>
          - -
          +
          default Tensor<V>negative() 
          static Tensor<java.lang.Object>newInstance()
This static factory method creates and returns a completely empty and undefined tensor which is void of any contents and meaning.
          - -
          static Tensor<Boolean>
          -
          of(boolean... value)
          -
          +
          static Tensor<java.lang.Boolean>of(boolean... value)
          Constructs a vector of booleans based on the provided array.
          - -
          static Tensor<Byte>
          -
          of(byte value)
          -
          +
          static Tensor<java.lang.Byte>of(byte... value)
          Constructs a vector of bytes based on the provided array.
          - -
          static Tensor<Byte>
          -
          of(byte... value)
          -
          -
          Constructs a vector of bytes based on the provided array.
          -
          -
          static Tensor<Double>
          -
          of(double value)
          -
           
          -
          static Tensor<Double>
          -
          of(double... value)
          -
          -
          Constructs a vector of doubles based on the provided array.
          -
          -
          static Tensor<Float>
          -
          of(float value)
          -
          -
          Constructs a vector of floats based on the provided array.
          -
          -
          static Tensor<Float>
          -
          of(float... value)
          -
          -
          Constructs a vector of floats based on the provided array.
          -
          -
          static Tensor<Integer>
          -
          of(int value)
          -
          -
          Constructs a vector of ints based on the provided array.
          -
          -
          static Tensor<Integer>
          -
          of(int... value)
          -
          -
          Constructs a vector of ints based on the provided array.
          -
          -
          static Tensor<Long>
          -
          of(long value)
          -
          -
          Constructs a vector of longs based on the provided array.
          -
          -
          static Tensor<Long>
          -
          of(long... value)
          -
          -
          Constructs a vector of longs based on the provided array.
          -
          -
          static Tensor<Short>
          -
          of(short value)
          -
          -
          Constructs a vector of shorts based on the provided array.
          -
          -
          static Tensor<Short>
          -
          of(short... value)
          -
          -
          Constructs a vector of shorts based on the provided array.
          -
          -
          static <T> Tensor<T>
          -
          of(Class<T> type, - List<Object> conf)
          -
          -
          This factory method will turn a list of values or nested lists of values into a Tensor +
          static Tensor<java.lang.Byte>of(byte value) 
          static <T> Tensor<T>of(java.lang.Class<T> type, + java.util.List<java.lang.Object> conf) +
          This factory method will turn a list of values or nested lists of values into a Tensor instance with the corresponding rank and shape and whose values are of the provided type.
          - -
          static <T> Tensor<T>
          -
          of(Class<T> type, - Shape shape, - Filler<T> filler)
          -
          +
          static <T> Tensor<T>of(java.lang.Class<T> type, + Shape shape, + Filler<T> filler)
          This factory method allows the creation of tensors with an additional initialization lambda for filling the underlying data array with desired values.
          - - -
          of(Class<V> type)
          -
          +
          static <V> WithShapeOrScalarOrVectorOnDevice<V>of(java.lang.Class<V> type)
          This is the entry point to the fluent tensor builder API for building - Tensor instances in a readable and type safe fashion.
          - -
          static <V> Tensor<V>
          -
          of(Class<V> type, - List<Integer> shape, - Object data)
          -
          + Tensor instances in a readable and type safe fashion.
          +
          static <V> Tensor<V>of(java.lang.Class<V> type, + java.util.List<java.lang.Integer> shape, + java.util.List<V> data)
          Use this to construct and return a tensor of the specified type, shape and data object.
          - -
          static <V> Tensor<V>
          -
          of(Class<V> type, - List<Integer> shape, - List<V> data)
          -
          +
          static <V> Tensor<V>of(java.lang.Class<V> type, + java.util.List<java.lang.Integer> shape, + java.lang.Object data)
          Use this to construct and return a tensor of the specified type, shape and data object.
          - -
          static <V extends Number>
          Tensor<V>
          -
          of(Class<V> type, - Shape shape, - Number data)
          -
          +
          static <V> Tensor<V>of(java.lang.Class<V> valueType, + Shape shape, + Arg.Seed seed) +
          Use this to construct and return a seeded tensor of the specified type.
          +
          static <V> Tensor<V>of(java.lang.Class<V> type, + Shape shape, + java.util.List<V> data) +
          Use this to construct and return a tensor of the specified type, shape and list of items.
          +
          static <V extends java.lang.Number>
          Tensor<V>
          of(java.lang.Class<V> type, + Shape shape, + java.lang.Number data)
          Use this to construct and return a tensor of the specified type, shape and number.
          - -
          static <V> Tensor<V>
          -
          of(Class<V> type, - Shape shape, - Object data)
          -
          +
          static <V> Tensor<V>of(java.lang.Class<V> type, + Shape shape, + java.lang.Object data)
          Use this to construct and return a tensor of the specified type, shape and data object.
          - -
          static <V> Tensor<V>
          -
          of(Class<V> type, - Shape shape, - List<V> data)
          -
          -
          Use this to construct and return a tensor of the specified type, shape and list of items.
          -
          -
          static <V> Tensor<V>
          -
          of(Class<V> valueType, - Shape shape, - Arg.Seed seed)
          -
          -
          Use this to construct and return a seeded tensor of the specified type.
          -
          -
          static <T> Tensor<T>
          -
          of(Iterable<T> iterable)
          -
          -
          Constructs a vector of objects based on the provided iterable.
          -
          -
          static <T> Tensor<T>
          -
          of(Object... args)
          -
          -
This static Tensor factory method tries to interpret the provided - arguments to create the instance the user might want.
          -
          -
          static <V> Tensor<V>
          -
          of(String expression, - boolean doAD, - List<Tensor<V>> tensors)
          -
          -
          This method takes a list of tensors and a String expression describing - operations which ought to be applied to the tensors in said list.
          -
          -
          static <V> Tensor<V>
          -
          of(String expression, - boolean doAD, - Tensor<V>... tensors)
          -
          -
          This method takes an array of tensors and a String expression describing - operations which ought to be applied to the tensors in said array.
          -
          -
          static <V> Tensor<V>
          -
          of(String expression, - List<Tensor<V>> inputs)
          -
          -
          This factory method allows for the creation and execution of Function instances - without actually instantiating them manually, - where the result will then be returned by this factory method.
          -
          -
          static <T> Tensor<T>
          -
          of(String e1, - Tensor<T> a, - char o, - Tensor<T> b, - String e2)
          -
          -
          Use this to conveniently operate on 2 tensors.
          -
          -
          static <T> Tensor<T>
          -
          of(String e1, - Tensor<T> a, - String e2)
          -
          -
          Use this to conveniently operate on a tensor.
          -
          -
          static <T> Tensor<T>
          -
          of(String e1, - Tensor<T> a, - String e2, - Tensor<T> b, - String e3, - Tensor<T> c, - String e4)
          -
          -
          Use this to conveniently operate on 3 tensors.
          -
          -
          static <V> Tensor<V>
          -
          of(String expression, - Tensor<V> tensor)
          -
          -
          This method takes a tensor and a String expression describing - operations which ought to be applied to said tensor.
          -
          -
          static <V> Tensor<V>
          -
          of(String expression, - Tensor<V>... tensors)
          -
          -
          This method takes an array of tensors and a String expression describing - operations which ought to be applied to the tensors in said array.
          -
          -
          static <V extends Number>
          Tensor<V>
          -
          of(String expression, - V... inputs)
          -
          -
          This factory method allows for the creation and execution of Function instances - without actually instantiating them manually, - where the result will then be returned by this factory method.
          -
          -
          static Tensor<Double>
          -
          of(List<? extends Number> shape, - String seed)
          -
          -
          This factory method will create and return a Tensor instance - based on a list of Number instances whose rounded values will be interpreted as - the shape of this new Tensor instance and a seed which will serve - as a source of pseudo randomness to generate the values for the new instance.
          -
          -
          static <V> Tensor<V>
          -
          of(List<? extends Number> shape, - List<V> items)
          -
          -
          Creates a new Tensor instance based on a list of numbers representing the shape, - and a list of values representing the value of the resulting tensor.
          -
          -
          static <T> Tensor<T>
          -
          of(List<Integer> shape, - T item)
          -
          -
          This is a convenient factory method for creating Tensor instances for - values of type Tensor based on a list of integers - defining a shape made up of axes sizes as well as a scalar value of type Tensor - which will fill out the data array spanned by the provided shape information.
          -
          -
          static Tensor<Object>
          -
          of(List<Object> conf)
          -
          -
          This factory method will turn a list of values or nested lists of values into a Tensor - instance with the corresponding rank and shape.
          -
          -
          static <T> Tensor<T>
          -
          of(DataType<T> type, - List<Integer> shape, - Filler<T> filler)
          -
          +
          static <T> Tensor<T>of(DataType<T> type, + java.util.List<java.lang.Integer> shape, + Filler<T> filler)
          This factory method allows the creation of tensors with an additional initialization lambda for filling the underlying data array with desired values.
          - -
          static <T> Tensor<T>
          -
          of(DataType<T> type, - Shape shape, - Filler<T> filler)
          -
          +
          static <T> Tensor<T>of(DataType<T> type, + Shape shape, + Filler<T> filler)
          This factory method allows the creation of tensors with an additional initialization lambda for filling the underlying data array with desired values.
          - -
          static <V> Tensor<V>
          -
          of(DataType<V> dataType, - List<Integer> shape, - List<V> data)
          -
          +
          static <V extends N,N>
          Tensor<V>
          of(DataType<V> dataType, + Device<N> device, + Shape shape, + java.lang.Object data) +
          This factory method is among the most flexible and forgiving ways to create a Tensor instance.
          +
          static <V> Tensor<V>of(DataType<V> dataType, + java.util.List<java.lang.Integer> shape, + java.util.List<V> data)
          Use this to construct and return a tensor of the specified type, shape and data object.
          - -
          static <V extends N, -N>
          Tensor<V>
          -
          of(DataType<V> dataType, - Device<N> device, - Shape shape, - Object data)
          -
          -
          This factory method is among the most flexible and forgiving ways to create a Tensor instance.
          -
          -
          static <V> Tensor<V>
          -
          of(DataType<V> dataType, - NDConstructor ndConstructor, - Data<V> data)
          -
          +
          static <V> Tensor<V>of(DataType<V> dataType, + NDConstructor ndConstructor, + Data<V> data)
This factory method is a raw tensor constructor which will not perform any type checking or data conversion on the data provided to it.
          - -
          static <V> Tensor<V>
          -
          of(DataType<V> type, - Shape shape)
          -
          +
          static <V> Tensor<V>of(DataType<V> type, + Shape shape)
          Use this to construct and return a tensor of the specified type and shape.
          - -
          static <V> Tensor<V>
          -
          of(DataType<V> dataType, - Shape shape, - Object data)
          -
          -
          This factory method is among the most flexible and forgiving ways to create a Tensor instance.
          -
          -
          static <V> Tensor<V>
          -
          of(DataType<V> dataType, - Shape shape, - List<V> data)
          -
          +
          static <V> Tensor<V>of(DataType<V> dataType, + Shape shape, + java.util.List<V> data)
          Use this to construct and return a tensor of the specified type, shape and a list of items.
          - -
          static Tensor<Boolean>
          -
          of(Shape shape, - boolean[] values)
          -
          +
          static <V> Tensor<V>of(DataType<V> dataType, + Shape shape, + java.lang.Object data) +
          This factory method is among the most flexible and forgiving ways to create a Tensor instance.
          +
          static Tensor<java.lang.Double>of(double... value) +
          Constructs a vector of doubles based on the provided array.
          +
          static Tensor<java.lang.Double>of(double value) 
          static Tensor<java.lang.Float>of(float... value) +
          Constructs a vector of floats based on the provided array.
          +
          static Tensor<java.lang.Float>of(float value) 
          static Tensor<java.lang.Integer>of(int... value) +
          Constructs a vector of ints based on the provided array.
          +
          static Tensor<java.lang.Integer>of(int value) 
          static <T> Tensor<T>of(java.lang.Iterable<T> iterable) +
          Constructs a vector of objects based on the provided iterable.
          +
          static <V> Tensor<V>of(java.util.List<? extends java.lang.Number> shape, + java.util.List<V> items) +
          Creates a new Tensor instance based on a list of numbers representing the shape, + and a list of values representing the value of the resulting tensor.
          +
          static Tensor<java.lang.Double>of(java.util.List<? extends java.lang.Number> shape, + java.lang.String seed) +
          This factory method will create and return a Tensor instance + based on a list of Number instances whose rounded values will be interpreted as + the shape of this new Tensor instance and a seed which will serve + as a source of pseudo randomness to generate the values for the new instance.
          +
          static <T> Tensor<T>of(java.util.List<java.lang.Integer> shape, + T item) +
          This is a convenient factory method for creating Tensor instances for + values of type T based on a list of integers + defining a shape made up of axes sizes as well as a scalar value of type T + which will fill out the data array spanned by the provided shape information.
          +
          static Tensor<java.lang.Object>of(java.util.List<java.lang.Object> conf) +
          This factory method will turn a list of values or nested lists of values into a Tensor + instance with the corresponding rank and shape.
          +
          static Tensor<java.lang.Long>of(long... value) +
          Constructs a vector of longs based on the provided array.
          +
          static Tensor<java.lang.Long>of(long value) 
          static <T> Tensor<T>of(java.lang.Object... args) +
This static Tensor factory method tries to interpret the provided + arguments to create the instance the user might want.
          +
          static Tensor<java.lang.Boolean>of(Shape shape, + boolean[] values)
          Use this to construct and return a boolean tensor of the specified shape and initial values.
          - -
          static Tensor<Byte>
          -
          of(Shape shape, - byte[] values)
          -
          +
          static Tensor<java.lang.Byte>of(Shape shape, + byte[] values)
          Use this to construct and return a byte tensor of the specified shape and initial values.
          - -
          static Tensor<Double>
          -
          of(Shape shape, - double value)
          -
          +
          static <V> Tensor<V>of(Shape shape, + Data<V> data) +
          Use this to construct and return a tensor of the specified shape and data object.
          + This method is typically used like this:
          +
          static Tensor<java.lang.Double>of(Shape shape, + double value)
          Use this to construct and return a homogeneously populated double tensor of the specified shape.
          - -
          static Tensor<Double>
          -
          of(Shape shape, - double[] values)
          -
          +
          static Tensor<java.lang.Double>of(Shape shape, + double[] values)
          Use this to construct and return a double tensor of the specified shape and initial values.
          - -
          static Tensor<Float>
          -
          of(Shape shape, - float value)
          -
          +
          static Tensor<java.lang.Float>of(Shape shape, + float value)
          Use this to construct and return a homogeneously populated float tensor of the specified shape.
          - -
          static Tensor<Float>
          -
          of(Shape shape, - float[] values)
          -
          +
          static Tensor<java.lang.Float>of(Shape shape, + float[] values)
          Use this to construct and return a float tensor of the specified shape and initial values.
          - -
          static Tensor<Integer>
          -
          of(Shape shape, - int[] values)
          -
          +
          static Tensor<java.lang.Integer>of(Shape shape, + int[] values)
          Use this to construct and return an int tensor of the specified shape and initial values.
          - -
          static Tensor<Long>
          -
          of(Shape shape, - long[] values)
          -
          +
          static <V> Tensor<V>of(Shape shape, + java.util.List<V> items) +
          Creates a new Tensor instance based on a shape tuple of numbers representing the nd-array shape, + and a list of items representing the value of the resulting tensor.
          +
          static Tensor<java.lang.Long>of(Shape shape, + long[] values)
          Use this to construct and return a long tensor of the specified shape and initial values.
          - -
          static Tensor<Short>
          -
          of(Shape shape, - short[] values)
          -
          +
          static Tensor<java.lang.Short>of(Shape shape, + short[] values)
          Use this to construct and return a short tensor of the specified shape and initial values.
          - -
          static <V> Tensor<V>
          -
          of(Shape shape, - List<V> items)
          -
          -
          Creates a new Tensor instance based on a shape tuple of numbers representing the nd-array shape, - and a list of items representing the value of the resulting tensor.
          -
          -
          static <V> Tensor<V>
          -
          of(Shape shape, - Data<V> data)
          -
          -
          Use this to construct and return a tensor of the specified shape and data object.
          - This method is typically used like this:
          -
          -
          static <T> Tensor<T>
          -
          of(Shape shape, - T value)
          -
          -
          This is a convenient factory method for creating Tensor instances for - representing items of type Tensor.
          -
          -
          static <T> Tensor<T>
          -
          of(Tensor<T> a, - char o, - Tensor<T> b)
          -
          +
          static <T> Tensor<T>of(Shape shape, + T value) +
          This is a convenient factory method for creating Tensor instances for + representing items of type T.
          +
          static Tensor<java.lang.Short>of(short... value) +
          Constructs a vector of shorts based on the provided array.
          +
          static Tensor<java.lang.Short>of(short value) 
          static <V> Tensor<V>of(java.lang.String expression, + boolean doAD, + java.util.List<Tensor<V>> tensors) +
          This method takes a list of tensors and a String expression describing + operations which ought to be applied to the tensors in said list.
          +
          static <V> Tensor<V>of(java.lang.String expression, + boolean doAD, + Tensor<V>... tensors) +
          This method takes an array of tensors and a String expression describing + operations which ought to be applied to the tensors in said array.
          +
          static <V> Tensor<V>of(java.lang.String expression, + java.util.List<Tensor<V>> inputs) +
          This factory method allows for the creation and execution of Function instances + without actually instantiating them manually, + where the result will then be returned by this factory method.
          +
          static <T> Tensor<T>of(java.lang.String e1, + Tensor<T> a, + char o, + Tensor<T> b, + java.lang.String e2)
          Use this to conveniently operate on 2 tensors.
          - -
          static <T> Tensor<T>
          -
          of(Tensor<T> a, - char o1, - Tensor<T> b, - char o2, - Tensor<T> c)
          -
          +
          static <T> Tensor<T>of(java.lang.String e1, + Tensor<T> a, + java.lang.String e2) +
          Use this to conveniently operate on a tensor.
          +
          static <T> Tensor<T>of(java.lang.String e1, + Tensor<T> a, + java.lang.String e2, + Tensor<T> b, + java.lang.String e3, + Tensor<T> c, + java.lang.String e4)
          Use this to conveniently operate on 3 tensors.
          - -
          static <V> Tensor<V>
          -
          ofAny(Class<V> type, - Shape shape, - Object data)
          -
          +
          static <V> Tensor<V>of(java.lang.String expression, + Tensor<V>... tensors) +
          This method takes an array of tensors and a String expression describing + operations which ought to be applied to the tensors in said array.
          +
          static <V> Tensor<V>of(java.lang.String expression, + Tensor<V> tensor) +
          This method takes a tensor and a String expression describing + operations which ought to be applied to said tensor.
          +
          static <V extends java.lang.Number>
          Tensor<V>
          of(java.lang.String expression, + V... inputs) +
          This factory method allows for the creation and execution of Function instances + without actually instantiating them manually, + where the result will then be returned by this factory method.
          +
          static <T> Tensor<T>of(Tensor<T> a, + char o, + Tensor<T> b) +
          Use this to conveniently operate on 2 tensors.
          +
          static <T> Tensor<T>of(Tensor<T> a, + char o1, + Tensor<T> b, + char o2, + Tensor<T> c) +
          Use this to conveniently operate on 3 tensors.
          +
          static <V> Tensor<V>ofAny(java.lang.Class<V> type, + Shape shape, + java.lang.Object data)
          Use this to construct and return a tensor of the specified type, shape and data object.
          - - - -
          -
          This is a simple convenience method which is simply calling the of(Class) +
          static WithShapeOrScalarOrVectorOnDevice<java.lang.Byte>ofBytes() +
          This is a simple convenience method which is simply calling the of(Class) method like so: of(Byte.class).
          - - - -
          -
          This is a simple convenience method which is simply calling the of(Class) +
          static WithShapeOrScalarOrVectorOnDevice<java.lang.Double>ofDoubles() +
          This is a simple convenience method which is simply calling the of(Class) method like so: of(Double.class).
          - - - -
          -
          This is a simple convenience method which is simply calling the of(Class) +
          static WithShapeOrScalarOrVectorOnDevice<java.lang.Float>ofFloats() +
          This is a simple convenience method which is simply calling the of(Class) method like so: of(Float.class).
          - - - -
          -
          This is a simple convenience method which is simply calling the of(Class) +
          static WithShapeOrScalarOrVectorOnDevice<java.lang.Integer>ofInts() +
          This is a simple convenience method which is simply calling the of(Class) method like so: of(Integer.class).
          - -
          static <V> Tensor<V>
          -
          ofRandom(Class<V> valueTypeClass, - int... shape)
          -
          +
          static <V> Tensor<V>ofRandom(java.lang.Class<V> valueTypeClass, + int... shape)
          This factory method produces a randomly populated tensor of the provided type and shape using a hard coded default seed.
          - - - -
          -
          This is a simple convenience method which is simply calling the of(Class) +
          static WithShapeOrScalarOrVectorOnDevice<java.lang.Short>ofShorts() +
          This is a simple convenience method which is simply calling the of(Class) method like so: of(Short.class).
          - -
          default Tensor<V>
          -
          permute(int... dims)
          -
          +
          default Tensor<V>permute(int... dims)
          Returns a view of the original tensor input with its dimensions permuted.
          Consider a 3-dimensional tensor x with shape (2×3×5), then calling x.permute(1, 0, 2) will return a 3-dimensional tensor of shape (3×2×5).
          - -
          default Tensor<V>
          -
          plus(Tensor<V> other)
          -
          +
          default Tensor<V>plus(Tensor<V> other)
          This method will produce the addition of two tensors with the same rank (or two ranks which can be made compatible with padding ones), - where the left operand is this Tensor + where the left operand is this Tensor instance and the right operand is the tensor passed to the method.
          - -
          default Tensor<V>
          -
          plus(V value)
          -
          -
          This method will create a new Tensor - with the provided double scalar added to all elements of this Tensor.
          -
          -
          default Tensor<V>
          -
          power(Tensor<V> other)
          -
          +
          default Tensor<V>plus(V value) +
          This method will create a new Tensor + with the provided double scalar added to all elements of this Tensor.
          +
          default Tensor<V>power(Tensor<V> other)
          This will produce the power of two tensors with the same rank (or two ranks which can be made compatible with padding ones), - where the left operand is this Tensor + where the left operand is this Tensor instance and the right operand is the tensor passed to the method.
          - -
          default Tensor<V>
          -
          power(V value)
          -
          +
          default Tensor<V>power(V value)
          Raises all items of this tensor to the power of the provided value.
          - -
          default Tensor<V>
          - -
          +
          default Tensor<V>relu()
This method is functionally identical to the following alternatives:
          - -
          default Tensor<V>
          -
          rem(int other)
          -
          -
          This method is synonymous to the mod(int) method.
          -
          -
          default Tensor<V>
          -
          reshape(int... shape)
          -
          +
          default Tensor<V>rem(int other) +
          This method is synonymous to the mod(int) method.
          +
          default Tensor<V>reshape(int... shape)
          Returns a nd-array with the same data and number of elements as this nd-array, but with the specified shape.
          - -
          boolean
          - -
          +
          booleanrqsGradient()
          This flag will indirectly trigger the activation of the autograd / auto-differentiation system of this library! If the flag is set to 'true' and the tensor is used for computation then - it will also receive gradients when the backward() method is being called + it will also receive gradients when the backward() method is being called on any descendant tensor within the computation graph.
          - -
          default Tensor<V>
          -
          set(OptimizerFactory optimizerFactory)
          -
          -
          Configures an Optimizer for this tensor based on the given OptimizerFactory - which will be used to create a new Optimizer instance specific to this tensor.
          -
          - -
          setGradientApplyRequested(boolean applyRequested)
          -
          +
          default Tensor<V>set(OptimizerFactory optimizerFactory) +
          Configures an Optimizer for this tensor based on the given OptimizerFactory + which will be used to create a new Optimizer instance specific to this tensor.
          +
          Tensor<V>setGradientApplyRequested(boolean applyRequested)
          This flag works alongside two autograd features which can be enabled inside the library settings.
          - - -
          setRqsGradient(boolean rqsGradient)
          -
          +
          Tensor<V>setRqsGradient(boolean rqsGradient)
          Setting this flag to true will tell the autograd system to accumulate gradients at this tensor.
          - -
          default Tensor<V>
          - -
           
          -
          default Tensor<V>
          - -
          +
          default Tensor<V>shallowClone() 
          default Tensor<V>shallowCopy()
          This creates a copy where the underlying data is still the same.
          - -
          static <T> Collector<T,?,Tensor<T>>
          -
          shaped(int... shape)
          -
          +
          static <T> java.util.stream.Collector<T,?,Tensor<T>>shaped(int... shape)
          Returns a Collector that accumulates the input elements into a - new Tensor with the specified shape.
          - -
          static <T> Collector<T,?,Tensor<T>>
          -
          shaped(Shape shape)
          -
          + new Tensor with the specified shape.
          +
          static <T> java.util.stream.Collector<T,?,Tensor<T>>shaped(Shape shape)
          Returns a Collector that accumulates the input elements into a - new Tensor with the specified shape.
          - -
          default Tensor<V>
          -
          sig()
          -
          + new Tensor with the specified shape.
          +
          default Tensor<V>sig()
This method is functionally identical to the following alternatives:
          - -
          default Tensor<V>
          - -
           
          -
          default Tensor<V>
          -
          sin()
          -
          +
          default Tensor<V>sigmoid() 
          default Tensor<V>sin()
This method is functionally identical to the following alternatives:
          - - - -
          -
          This method returns a SliceBuilder instance exposing a simple builder API +
          AxisOrGetTensor<V>slice() +
          This method returns a SliceBuilder instance exposing a simple builder API which enables the configuration of a slice of the current nd-array via method chaining.
          - -
          default int
          - -
          +
          default intsliceCount()
          This method returns the number of slices which have been created from this nd-array.
          - -
          default Tensor<V>
          - -
           
          -
          default Tensor<V>
          -
          softmax(int axis)
          -
           
          -
          default Tensor<V>
          -
          softmax(int... axes)
          -
          +
          default Tensor<V>softmax() 
          default Tensor<V>softmax(int... axes)
          Calculates the softmax function along the specified axes.
          - -
          default Tensor<V>
          - -
          +
          default Tensor<V>softmax(int axis) 
          default Tensor<V>softplus()
This method is functionally identical to the following alternatives:
          - -
          default Tensor<V>
          - -
          +
          default Tensor<V>sqrt()
This method is functionally identical to the following alternatives:
          - -
          default Tensor<V>
          -
          sum()
          -
          +
          default Tensor<V>sum()
Calculates the sum value of all values within this tensor and returns it in the form of a scalar tensor.
          - -
          default Tensor<V>
          -
          sum(int axis)
          -
          +
          default Tensor<V>sum(int... axes)
Calculates the sum value of all values - within this tensor along the specified axis and returns it + within this tensor along the specified axes and returns it in the form of a tensor.
          - -
          default Tensor<V>
          -
          sum(int... axes)
          -
          +
          default Tensor<V>sum(int axis)
Calculates the sum value of all values - within this tensor along the specified axes and returns it + within this tensor along the specified axis and returns it in the form of a tensor.
          - -
          default Tensor<V>
          -
          T()
          -
          -
          Creates and returns a new Tensor instance which is a transposed twin of this instance.
          - This is a shorter alternative to the functionally identical getT() method.
          -
          -
          default Tensor<V>
          - -
          +
          default Tensor<V>T() +
          Creates and returns a new Tensor instance which is a transposed twin of this instance.
          + This is a shorter alternative to the functionally identical getT() method.
          +
          default Tensor<V>tanh()
This method is functionally identical to the following alternatives:
          - -
          default Tensor<V>
          -
          times(Tensor<V> other)
          -
          -
          This is a functionally identical synonym to the multiply(Tensor) method.
          -
          -
          default Tensor<V>
          -
          times(V other)
          -
           
          -
          default Tensor<V>
          -
          to(String deviceType)
          -
           
          - -
          to(Device<?> device)
          -
          -
          This method takes a Device and tries to migrate the contents of this Tensor - instance to that Device!
          -
          -
          default String
          - -
           
          -
          default String
          - -
          +
          default Tensor<V>times(Tensor<V> other) +
          This is a functionally identical synonym to the multiply(Tensor) method.
          +
          default Tensor<V>times(V other) 
          Tensor<V>to(Device<?> device) +
          This method takes a Device and tries to migrate the contents of this Tensor + instance to that Device!
          +
          default Tensor<V>to(java.lang.String deviceType) 
          default java.lang.StringtoString(java.util.function.Consumer<NDPrintSettings> configurator)
          This allows you to provide a lambda which configures how this nd-array should be - converted to String instances.
          - -
          default String
          - -
          -
          Use this to turn this nd-array into a String instance based on the provided - NDPrintSettings instance, which allows you to configure things + converted to String instances.
          +
          default java.lang.StringtoString(NDPrintSettings config) +
          Use this to turn this nd-array into a String instance based on the provided + NDPrintSettings instance, which allows you to configure things like the number of chars per entry, delimiters, the number of items per line, etc.
          - -
          default Tensor<V>
          -
          transpose(int dim1, - int dim2)
          -
          +
          default java.lang.StringtoString(java.lang.String conf) 
          default Tensor<V>transpose(int dim1, + int dim2)
Returns a view of the original tensor input with the targeted axes swapped / transposed.
          - -
          default boolean
          - -
          -
          Important : Components of type Tensor are simply gradients! +
          default booleanupdate(Component.OwnerChangeRequest<Tensor<V>> changeRequest) +
          Important : Components of type Tensor are simply gradients! Currently, this method is used only to catch illegal arguments which is for example the case when trying to attach a gradient with a different shape...
          - - - -
          - -
          withLabels(String[]... labels)
          -
          -
          This method receives a nested String array which - ought to contain a label for the index of this nd-array.
          -
          - - -
          -
          This method receives a nested String list which +
          Tensor<V>withLabel(java.lang.String label)
          Tensor<V>withLabels(java.util.List<java.util.List<java.lang.Object>> labels) +
          This method receives a nested String list which ought to contain a label for the index of this nd-array.
          - - - -
          +
          Tensor<V>withLabels(java.util.Map<java.lang.Object,java.util.List<java.lang.Object>> labels)
          This method provides the ability to label not only the indices of the shape of this nd-array, but also the dimension of the shape.
          +
          Tensor<V>withLabels(java.lang.String[]... labels) +
          This method receives a nested String array which + ought to contain a label for the index of this nd-array.
          +
          default Tensor<V>xor(double value) +
          This method is a functionally identical synonym to the power(Tensor) method.
          +
          default Tensor<V>xor(Tensor<V> other) +
          This method is a functionally identical synonym to the power(Tensor) method.
          +
          + + +
            +
          • + + +

            Methods inherited from interface java.lang.Iterable

            +forEach, iterator, spliterator
          • +
          + +
        • +
        +
      • +
      -
      default Tensor<V>
      -
      xor(double value)
      -
      -
      This method is a functionally identical synonym to the power(Tensor) method.
      -
      -
      default Tensor<V>
      -
      xor(Tensor<V> other)
      -
      -
      This method is a functionally identical synonym to the power(Tensor) method.
      -
      -
    -
    -
    -
    -

    Methods inherited from interface neureka.common.composition.ComponentOwner

    -find, get, getAll, has, remove, set
    -
    -

    Methods inherited from interface java.lang.Iterable

    -forEach, iterator, spliterator
    - - - - - - -
    -
      +
      +
        +
      • -
      • -
        -

        Method Details

        -
          -
        • -
          -

          newInstance

          -
          static Tensor<Object> newInstance()
          +
            +
          • + + +

            Method Detail

            + + + +
              +
            • +

              newInstance

              +
              static Tensor<java.lang.Object> newInstance()
This static factory method creates and returns a completely empty and undefined tensor which is void of any contents and meaning. - The use case for this would be to use the produced Tensor + instance as a target for an inline operation which fills the instance with an actual value.
              - An example of this approach would be to call the MutateTensor.putAt(List, Nda) method with an empty list as key. + An example of this approach would be to call the MutateTensor.putAt(List, Nda) method with an empty list as key. This will be interpreted as an inline copy of the contents of the - second parameter into this Tensor instance.
              -
              -
              Returns:
              -
              A new and completely empty / uninitialized Tensor instance.
              -
              -
          -
        • -
        • -
          -

          of

          -
          static <T> Tensor<T> of(Tensor<T> a, - char o, - Tensor<T> b)
          + second parameter into this Tensor instance.
      +
      +
      Returns:
      +
      A new and completely empty / uninitialized Tensor instance.
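A minimal sketch of the inline-fill use case described above (the putAt call is only indicated in a comment, since its exact generic signature is not spelled out here):

      Tensor<Object> target = Tensor.newInstance();   // completely empty / undefined tensor
      // 'target' is meant to be filled by an inline operation, e.g. something along the lines of
      // target.mut().putAt( Collections.emptyList(), someTensor )   // see MutateTensor.putAt(List, Nda)
      // which copies the contents of 'someTensor' into 'target'.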
      +
      + +
    + + + +
      +
    • +

      of

      +
      static <T> Tensor<T> of(Tensor<T> a,
      +                        char o,
      +                        Tensor<T> b)
      Use this to conveniently operate on 2 tensors. A simple example would be: of(a,'*',b).
      -
      -
      Type Parameters:
      +
      +
      Type Parameters:
      T - The value item type parameter for the involved tensors.
      -
      Parameters:
      +
      Parameters:
      a - The left operand.
      o - The operator, which may be '+', '-', '*'...
      b - The right operand.
      -
      Returns:
      +
      Returns:
      The result of the operation defined by the provided character.
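A minimal usage sketch, assuming the of(double...) vector factory from the method summary above (variable names are illustrative):

      Tensor<Double> a = Tensor.of(1d, 2d, 3d);
      Tensor<Double> b = Tensor.of(4d, 5d, 6d);
      Tensor<Double> c = Tensor.of(a, '*', b);   // element-wise product, i.e. of(a,'*',b)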
      -
    -
  • -
    -

    of

    -
    static <T> Tensor<T> of(Tensor<T> a, - char o1, - Tensor<T> b, - char o2, - Tensor<T> c)
    + + + + +
      +
    • +

      of

      +
      static <T> Tensor<T> of(Tensor<T> a,
      +                        char o1,
      +                        Tensor<T> b,
      +                        char o2,
      +                        Tensor<T> c)
      Use this to conveniently operate on 3 tensors. A simple example would be: of(a,'*',b,'+',c).
      -
      -
      Type Parameters:
      +
      +
      Type Parameters:
      T - The value item type parameter for the involved tensors.
      -
      Parameters:
      +
      Parameters:
      a - The first and left most operand.
      o1 - The first operator, which may be '+', '-', '*'...
      b - The second operand.
      o2 - The second operator, which may also be '+', '-', '*'...
      c - The third and last operand.
      -
      Returns:
      +
      Returns:
      The result of the operations defined by the 2 provided characters.
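A minimal usage sketch along the same lines (illustrative values):

      Tensor<Double> a = Tensor.of(1d, 2d);
      Tensor<Double> b = Tensor.of(3d, 4d);
      Tensor<Double> c = Tensor.of(5d, 6d);
      Tensor<Double> d = Tensor.of(a, '*', b, '+', c);   // i.e. of(a,'*',b,'+',c)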
      -
  • -
  • -
    -

    of

    -
    static <T> Tensor<T> of(String e1, - Tensor<T> a, - String e2)
    + + + + +
      +
    • +

      of

      +
      static <T> Tensor<T> of(java.lang.String e1,
      +                        Tensor<T> a,
      +                        java.lang.String e2)
      Use this to conveniently operate on a tensor. A simple example would be: of("sig(tanh(",a,"))").
      -
      -
      Type Parameters:
      +
      +
      Type Parameters:
      T - The value item type parameter for the involved tensor.
      -
      Parameters:
      +
      Parameters:
      e1 - The first part of the string expression defining how the provided tensor should be processed.
      a - The tensor which ought to be sent to whatever is defined by the provided expressions.
      e2 - The latter part of the expression defining how the provided tensor should be executed.
      -
      Returns:
      +
      Returns:
      The result of the operation(s) defined by the provided strings.
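A minimal usage sketch based on the expression from the example above (illustrative input values):

      Tensor<Double> a = Tensor.of(0.5d, -1d, 2d);
      Tensor<Double> y = Tensor.of("sig(tanh(", a, "))");   // applies tanh and then sig to 'a'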
      -
  • -
  • -
    -

    of

    -
    static <T> Tensor<T> of(String e1, - Tensor<T> a, - char o, - Tensor<T> b, - String e2)
    + + + + +
      +
    • +

      of

      +
      static <T> Tensor<T> of(java.lang.String e1,
      +                        Tensor<T> a,
      +                        char o,
      +                        Tensor<T> b,
      +                        java.lang.String e2)
      Use this to conveniently operate on 2 tensors. A simple example would be: of("relu(",a,'-',b,")*2").
      -
      -
      Type Parameters:
      +
      +
      Type Parameters:
      T - The value item type parameter for the involved tensor.
      -
      Parameters:
      +
      Parameters:
      e1 - The first part of the string expression defining how the provided tensor should be processed.
      a - The first tensor which ought to be sent to whatever function is defined by the provided expressions.
      o - An operator combining both a and b to form a result.
      b - The second tensor and right operand which ought to be sent to whatever function is defined by the provided expressions.
      e2 - The latter part of the expression defining how the provided tensor should be executed.
      -
      Returns:
      +
      Returns:
      The result of the operation(s) defined by the provided strings.
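A minimal usage sketch based on the expression from the example above (illustrative input values):

      Tensor<Double> a = Tensor.of(1d, -2d, 3d);
      Tensor<Double> b = Tensor.of(0.5d, 0.5d, 0.5d);
      Tensor<Double> y = Tensor.of("relu(", a, '-', b, ")*2");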
      -
    -
  • -
  • -
    -

    of

    -
    static <T> Tensor<T> of(String e1, - Tensor<T> a, - String e2, - Tensor<T> b, - String e3, - Tensor<T> c, - String e4)
    +
  • + + + + +
      +
    • +

      of

      +
      static <T> Tensor<T> of(java.lang.String e1,
      +                        Tensor<T> a,
      +                        java.lang.String e2,
      +                        Tensor<T> b,
      +                        java.lang.String e3,
      +                        Tensor<T> c,
      +                        java.lang.String e4)
      Use this to conveniently operate on 3 tensors. A simple example would be: of("abs((",a,"-",b,") * ",c,")").
      -
      -
      Type Parameters:
      +
      +
      Type Parameters:
      T - The type parameter for the involved tensors.
      -
      Parameters:
      +
      Parameters:
      e1 - The first part of the expression which would typically be used to define a function name.
      a - The first argument.
      e2 - The second part of the expression, which might be an operation.
      @@ -1428,178 +1688,216 @@

      of

      e3 - The third part of the expression...
      c - The third argument.
      e4 - The last part of the expression which should syntactically match the other expression...
      -
      Returns:
      +
      Returns:
      The result of the calculation defined by the provided expressions and arguments.
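A minimal usage sketch based on the expression from the example above (illustrative input values):

      Tensor<Double> a = Tensor.of(1d, 2d);
      Tensor<Double> b = Tensor.of(3d, 4d);
      Tensor<Double> c = Tensor.of(5d, 6d);
      Tensor<Double> y = Tensor.of("abs((", a, "-", b, ") * ", c, ")");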
      -
    • -
    • -
      -

      of

      -
      static <T> Tensor<T> of(Object... args)
      -
      This static Tensor factory method tries to interpret the provided +
    + + + +
      +
    • +

      of

      +
      static <T> Tensor<T> of(java.lang.Object... args)
      +
This static Tensor factory method tries to interpret the provided arguments to create the instance the user might want.
      -
      -
      Parameters:
      +
      +
      Parameters:
      args - The arguments which ought to be interpreted.
      -
      Returns:
      -
The result of the interpretation in the form of a Tensor instance of type Object.
      +
      Returns:
      +
The result of the interpretation in the form of a Tensor instance of type Object.
      -
    • -
    • -
      -

      of

      -
      static <T> Tensor<T> of(Iterable<T> iterable)
      +
    + + + +
      +
    • +

      of

      +
      static <T> Tensor<T> of(java.lang.Iterable<T> iterable)
      Constructs a vector of objects based on the provided iterable.
      -
      -
      Parameters:
      +
      +
      Specified by:
      +
      of in interface Nda<V>
      +
      Parameters:
      iterable - The iterable of objects from which a 1D nd-array ought to be constructed.
      -
      Returns:
      +
      Returns:
      A vector / 1D tensor of objects.
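A minimal usage sketch (the element type String is only an example):

      java.util.List<String> words = java.util.Arrays.asList("a", "b", "c");
      Tensor<String> vector = Tensor.of(words);   // a 1D tensor holding the three items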
      -
    • -
    • -
      -

      of

      -
      static <T> Tensor<T> of(List<Integer> shape, - T item)
      -
      This is a convenient factory method for creating Tensor instances for - values of type Tensor based on a list of integers - defining a shape made up of axes sizes as well as a scalar value of type Tensor - which will fill out the data array spanned by the provided shape information.
      -
      -
      Parameters:
      -
      shape - A list of integers whose values ought to define the size of the axes of the shape of the new Tensor.
      -
      item - An object of type Tensor which will populate the data array of the new instance.
      -
      Returns:
      -
      A new Tensor instance for the generic type Tensor.
      -
      -
      -
    • -
    • -
      -

      of

      -
      static <T> Tensor<T> of(Shape shape, - T value)
      -
      This is a convenient factory method for creating Tensor instances for - representing items of type Tensor. The factory method - instantiates tensors based on a Shape tuple of integers - defining axes sizes, and a scalar item of type Tensor +
    + + + + + +
      +
    • +

      of

      +
      static <T> Tensor<T> of(java.util.List<java.lang.Integer> shape,
      +                        T item)
      +
      This is a convenient factory method for creating Tensor instances for + values of type T based on a list of integers + defining a shape made up of axes sizes as well as a scalar value of type T + which will fill out the data array spanned by the provided shape information.
      +
      +
      Parameters:
      +
      shape - A list of integers whose values ought to define the size of the axes of the shape of the new Tensor.
      +
      item - An object of type T which will populate the data array of the new instance.
      +
      Returns:
      +
      A new Tensor instance for the generic type T.
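A minimal usage sketch (shape and item value are illustrative):

      java.util.List<Integer> shape = java.util.Arrays.asList(2, 3);
      Tensor<Double> filled = Tensor.of(shape, 1.0);   // a 2x3 tensor in which every item is 1.0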
      +
      +
    • +
    + + + + + +
      +
    • +

      of

      +
      static <T> Tensor<T> of(Shape shape,
      +                        T value)
      +
      This is a convenient factory method for creating Tensor instances for + representing items of type T. The factory method + instantiates tensors based on a Shape tuple of integers + defining axes sizes, and a scalar item of type T which will fill out the data array spanned by the provided shape information. A simple usage example would be:
      
            Tensor.of(Shape.of( 4, 3, 6 ), 42);
         
      -
      -
      Parameters:
      -
      shape - An immutable tuple of integers whose values ought to define the size of the axes of the shape of the new Tensor.
      -
      value - An object of type Tensor which will populate the data array of the new instance.
      -
      Returns:
      -
      A new Tensor instance for the generic type Tensor.
      -
      - -
    • -
    • -
      -

      of

      -
      static Tensor<Double> of(List<? extends Number> shape, - String seed)
      -
      This factory method will create and return a Tensor instance - based on a list of Number instances whose rounded values will be interpreted as - the shape of this new Tensor instance and a seed which will serve +
      +
      Specified by:
      +
      of in interface Nda<V>
      +
      Parameters:
      +
      shape - An immutable tuple of integers whose values ought to define the size of the axes of the shape of the new Tensor.
      +
      value - An object of type T which will populate the data array of the new instance.
      +
      Returns:
      +
      A new Tensor instance for the generic type T.
      +
      +
    • +
    + + + +
      +
    • +

      of

      +
      static Tensor<java.lang.Double> of(java.util.List<? extends java.lang.Number> shape,
      +                                   java.lang.String seed)
      +
      This factory method will create and return a Tensor instance + based on a list of Number instances whose rounded values will be interpreted as + the shape of this new Tensor instance and a seed which will serve as a source of pseudo randomness to generate the values for the new instance.
      -
      -
      Parameters:
      -
      shape - A list of Number instances which will be interpreted as a shape array.
      -
      seed - A source of pseudo randomness for the Tensor instance created by this method.
      -
      Returns:
      -
      A new Tensor instance created based on a shape and a seed.
      -
      - -
    • -
    • -
      -

      of

      -
      static <V> Tensor<V> of(List<? extends Number> shape, - List<V> items)
      -
      Creates a new Tensor instance based on a list of numbers representing the shape, +
      +
      Parameters:
      +
      shape - A list of Number instances which will be interpreted as a shape array.
      +
      seed - A source of pseudo randomness for the Tensor instance created by this method.
      +
      Returns:
      +
      A new Tensor instance created based on a shape and a seed.
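A minimal usage sketch; the shape list is typed as List<Number> here purely so that this overload (and not of(List<Integer>, T)) is selected:

      java.util.List<Number> shape = java.util.Arrays.<Number>asList(2, 3, 4);
      Tensor<Double> random = Tensor.of(shape, "my-seed");   // 2x3x4 tensor seeded by the string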
      +
      +
    • +
    + + + +
      +
    • +

      of

      +
      static <V> Tensor<V> of(java.util.List<? extends java.lang.Number> shape,
      +                        java.util.List<V> items)
      +
      Creates a new Tensor instance based on a list of numbers representing the shape, and a list of values representing the value of the resulting tensor.
      -
      -
      Type Parameters:
      +
      +
      Type Parameters:
      V - The type parameter of the value list and returned tensor.
      -
      Parameters:
      -
      shape - A list of numbers whose integer values will be used to form the shape of the resulting Tensor.
      -
      items - A list of values which will be used to populate the data array of the resulting Tensor.
      -
      Returns:
      -
      A new Tensor instance constructed based on the provided shape and value list.
      -
      - -
    • -
    • -
      -

      of

      -
      static <V> Tensor<V> of(Shape shape, - List<V> items)
      -
      Creates a new Tensor instance based on a shape tuple of numbers representing the nd-array shape, +
      Parameters:
      +
      shape - A list of numbers whose integer values will be used to form the shape of the resulting Tensor.
      +
      items - A list of values which will be used to populate the data array of the resulting Tensor.
      +
      Returns:
      +
      A new Tensor instance constructed based on the provided shape and value list.
      + +
    • +
    + + + +
      +
    • +

      of

      +
      static <V> Tensor<V> of(Shape shape,
      +                        java.util.List<V> items)
      +
      Creates a new Tensor instance based on a shape tuple of numbers representing the nd-array shape, and a list of items representing the value of the resulting tensor.
      A simple usage example would be:
      
            Tensor.of(Shape.of( 2, 3, 4 ), myListOfItems);
         
      -
      -
      Type Parameters:
      +
      +
      Type Parameters:
      V - The type parameter of the value list and returned tensor.
      -
      Parameters:
      -
      shape - A shape tuple of numbers whose integer values will be used to form the shape of the resulting Tensor.
      -
      items - A list of values which will be used to populate the data array of the resulting Tensor.
      -
      Returns:
      -
      A new Tensor instance constructed based on the provided shape and value list.
      -
      - -
    • -
    • -
      -

      of

      -
      static Tensor<Object> of(List<Object> conf)
      -
      This factory method will turn a list of values or nested lists of values into a Tensor +
      Parameters:
      +
      shape - A shape tuple of numbers whose integer values will be used to form the shape of the resulting Tensor.
      +
      items - A list of values which will be used to populate the data array of the resulting Tensor.
      +
      Returns:
      +
      A new Tensor instance constructed based on the provided shape and value list.
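A concrete sketch of the example above (item values are illustrative):

      // shape [2, 2] populated in order from the provided list of items
      Tensor<Integer> t = Tensor.of( Shape.of( 2, 2 ), List.of( 1, 2, 3, 4 ) );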
      + +
    • +
    + + + +
      +
    • +

      of

      +
      static Tensor<java.lang.Object> of(java.util.List<java.lang.Object> conf)
      +
      This factory method will turn a list of values or nested lists of values into a Tensor instance with the corresponding rank and shape.
      -
      -
      Parameters:
      +
      +
      Parameters:
conf - A list of either values or nested lists, where the nested lists themselves contain either values or further nested lists.
      -
      Returns:
      -
      A new Tensor instance whose shape and data is based on the provided list structure.
      +
      Returns:
      +
      A new Tensor instance whose shape and data is based on the provided list structure.
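A sketch of how nesting maps to rank and shape (the cast is only there to satisfy the List&lt;Object&gt; parameter; values are illustrative):

      List<Object> rows = List.of( (Object) List.of( 1, 2, 3 ), List.of( 4, 5, 6 ) );
      Tensor<Object> t = Tensor.of( rows );   // rank 2, shape [2, 3] inferred from the nesting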
      -
    • -
    • -
      -

      of

      -
      static <T> Tensor<T> of(Class<T> type, - List<Object> conf)
      -
      This factory method will turn a list of values or nested lists of values into a Tensor +
    + + + +
      +
    • +

      of

      +
      static <T> Tensor<T> of(java.lang.Class<T> type,
      +                        java.util.List<java.lang.Object> conf)
      +
      This factory method will turn a list of values or nested lists of values into a Tensor instance with the corresponding rank and shape and whose values are of the provided type.
      -
      -
      Type Parameters:
      +
      +
      Type Parameters:
      T - The type parameter of the tensor returned by this factory method.
      -
      Parameters:
      +
      Parameters:
      type - The type of the tensor produced by this factory method.
conf - A list of either values or nested lists, where the nested lists themselves contain either values or further nested lists.
      -
      Returns:
      -
      A new Tensor instance whose shape and data is based on the provided list structure.
      +
      Returns:
      +
      A new Tensor instance whose shape and data is based on the provided list structure.
      -
    • -
    • -
      -

      of

      -
      static <V> WithShapeOrScalarOrVectorOnDevice<V> of(Class<V> type)
      +
    + + + + + + + +
      +
    • +

      ofDoubles

      +
      static WithShapeOrScalarOrVectorOnDevice<java.lang.Double> ofDoubles()
      +
      This is a simple convenience method which is simply calling the of(Class) method like so: of(Double.class). - The returned WithShapeOrScalarOrVector is the next step in the - fluent Tensor builder API which in this case will lead to the creation + The returned WithShapeOrScalarOrVector is the next step in the + fluent Tensor builder API which in this case will lead to the creation of a tensor storing doubles.
      A simple usage example would be:
      
      @@ -1655,20 +1958,25 @@ 

      ofDoubles

      .withShape( 2, 3, 4 ) .andFill( 5d, 3d, 5d )
      -
      -
      Returns:
      -
      The next step of the Tensor builder API which exposes methods for defining shapes.
      +
      +
      Specified by:
      +
      ofDoubles in interface Nda<V>
      +
      Returns:
      +
      The next step of the Tensor builder API which exposes methods for defining shapes.
      -
    • -
    • -
      -

      ofFloats

      - -
      This is a simple convenience method which is simply calling the of(Class) +
    + + + +
      +
    • +

      ofFloats

      +
      static WithShapeOrScalarOrVectorOnDevice<java.lang.Float> ofFloats()
      +
This is a simple convenience method which is simply calling the of(Class) method like so: of(Float.class). The returned WithShapeOrScalarOrVector is the next step in the fluent Tensor builder API which in this case will lead to the creation of a tensor storing floats.
A simple usage example would be:

      Tensor.ofFloats()
            .withShape( 2, 3, 4 )
            .andFill( 5f, 7f, 11f )
      -
      -
      Returns:
      -
      The next step of the Tensor builder API which exposes methods for defining shapes.
      +
      +
      Specified by:
      +
      ofFloats in interface Nda<V>
      +
      Returns:
      +
      The next step of the Tensor builder API which exposes methods for defining shapes.
      -
    • -
    • -
      -

      ofInts

      - -
      This is a simple convenience method which is simply calling the of(Class) +
    + + + +
      +
    • +

      ofInts

      +
      static WithShapeOrScalarOrVectorOnDevice<java.lang.Integer> ofInts()
      +
This is a simple convenience method which is simply calling the of(Class) method like so: of(Integer.class). The returned WithShapeOrScalarOrVector is the next step in the fluent Tensor builder API which in this case will lead to the creation of a tensor storing integers.
      -
      -
      Returns:
      -
      The next step of the Tensor builder API which exposes methods for defining shapes.
      +
      +
      Specified by:
      +
      ofInts in interface Nda<V>
      +
      Returns:
      +
      The next step of the Tensor builder API which exposes methods for defining shapes.
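Analogous to the ofDoubles() and ofFloats() examples above, a usage sketch might look like this (shape and fill values are illustrative; the fill values are assumed to repeat over the tensor as in the array-based factories described below):

      var t = Tensor.ofInts()
                    .withShape( 2, 3 )
                    .andFill( 1, 2, 3 );   // 6 items populated by cycling over the given values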
      -
    • -
    • -
      -

      ofShorts

      - -
      This is a simple convenience method which is simply calling the of(Class) +
    + + + +
      +
    • +

      ofShorts

      +
      static WithShapeOrScalarOrVectorOnDevice<java.lang.Short> ofShorts()
      +
This is a simple convenience method which is simply calling the of(Class) method like so: of(Short.class). The returned WithShapeOrScalarOrVector is the next step in the fluent Tensor builder API which in this case will lead to the creation of a tensor storing shorts.
      -
      -
      Returns:
      -
      The next step of the Tensor builder API which exposes methods for defining shapes.
      +
      +
      Specified by:
      +
      ofShorts in interface Nda<V>
      +
      Returns:
      +
      The next step of the Tensor builder API which exposes methods for defining shapes.
      -
    • -
    • -
      -

      ofBytes

      - -
      This is a simple convenience method which is simply calling the of(Class) +
    + + + +
      +
    • +

      ofBytes

      +
      static WithShapeOrScalarOrVectorOnDevice<java.lang.Byte> ofBytes()
      +
This is a simple convenience method which is simply calling the of(Class) method like so: of(Byte.class). The returned WithShapeOrScalarOrVector is the next step in the fluent Tensor builder API which in this case will lead to the creation of a tensor storing bytes.
      -
      -
      Returns:
      -
      The next step of the Tensor builder API which exposes methods for defining shapes.
      +
      +
      Specified by:
      +
      ofBytes in interface Nda<V>
      +
      Returns:
      +
      The next step of the Tensor builder API which exposes methods for defining shapes.
      -
    • -
    • -
      -

      of

      -
      static Tensor<Double> of(double... value)
      +
    + + + +
      +
    • +

      of

      +
      static Tensor<java.lang.Double> of(double... value)
      Constructs a vector of doubles based on the provided array.
      -
      -
      Parameters:
      +
      +
      Specified by:
      +
      of in interface Nda<V>
      +
      Parameters:
      value - The array of doubles from which a 1D tensor ought to be constructed.
      -
      Returns:
      +
      Returns:
      A vector / 1D tensor of doubles.
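For instance:

      Tensor<Double> v = Tensor.of( 3d, 1d, 4d );   // a 1D tensor (vector) with shape [3]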
      -
    • -
    • -
      -

      of

      -
      static Tensor<Double> of(double value)
      -
      -
      Parameters:
      +
    + + + +
      +
    • +

      of

      +
      static Tensor<java.lang.Double> of(double value)
      +
      +
      Specified by:
      +
      of in interface Nda<V>
      +
      Parameters:
      value - The scalar value which ought to be represented as tensor.
      -
      Returns:
      +
      Returns:
      A scalar double tensor.
      -
    • -
    • -
      -

      of

      -
      static Tensor<Float> of(float... value)
      +
    + + + +
      +
    • +

      of

      +
      static Tensor<java.lang.Float> of(float... value)
      Constructs a vector of floats based on the provided array.
      -
      -
      Parameters:
      +
      +
      Specified by:
      +
      of in interface Nda<V>
      +
      Parameters:
      value - The array of floats from which a 1D tensor ought to be constructed.
      -
      Returns:
      +
      Returns:
      A vector / 1D tensor of floats.
      -
    • -
    • -
      -

      of

      -
      static Tensor<Float> of(float value)
      -
      Description copied from interface: Nda
      -
      Constructs a vector of floats based on the provided array.
      -
      -
      Parameters:
      +
    + + + +
      +
    • +

      of

      +
      static Tensor<java.lang.Float> of(float value)
      +
      +
      Parameters:
      value - The scalar value which ought to be represented as tensor.
      -
      Returns:
      +
      Returns:
      A scalar float tensor.
      -
    • -
    • -
      -

      of

      -
      static Tensor<Byte> of(byte... value)
      +
    + + + +
      +
    • +

      of

      +
      static Tensor<java.lang.Byte> of(byte... value)
      Constructs a vector of bytes based on the provided array.
      -
      -
      Parameters:
      +
      +
      Specified by:
      +
      of in interface Nda<V>
      +
      Parameters:
      value - The array of bytes from which a 1D tensor ought to be constructed.
      -
      Returns:
      +
      Returns:
      A vector / 1D tensor of bytes.
      -
    • -
    • -
      -

      of

      -
      static Tensor<Byte> of(byte value)
      -
      Description copied from interface: Nda
      -
      Constructs a vector of bytes based on the provided array.
      -
      -
      Parameters:
      +
    + + + +
      +
    • +

      of

      +
      static Tensor<java.lang.Byte> of(byte value)
      +
      +
      Parameters:
      value - The scalar value which ought to be represented as tensor.
      -
      Returns:
      +
      Returns:
      A scalar byte tensor.
      -
    • -
    • -
      -

      of

      -
      static Tensor<Integer> of(int... value)
      +
    + + + +
      +
    • +

      of

      +
      static Tensor<java.lang.Integer> of(int... value)
      Constructs a vector of ints based on the provided array.
      -
      -
      Parameters:
      +
      +
      Specified by:
      +
      of in interface Nda<V>
      +
      Parameters:
      value - The array of ints from which a 1D tensor ought to be constructed.
      -
      Returns:
      +
      Returns:
      A vector / 1D tensor of ints.
      -
    • -
    • -
      -

      of

      -
      static Tensor<Integer> of(int value)
      -
      Description copied from interface: Nda
      -
      Constructs a vector of ints based on the provided array.
      -
      -
      Parameters:
      +
    + + + +
      +
    • +

      of

      +
      static Tensor<java.lang.Integer> of(int value)
      +
      +
      Parameters:
      value - The scalar value which ought to be represented as tensor.
      -
      Returns:
      +
      Returns:
      A scalar int tensor.
      -
    • -
    • -
      -

      of

      -
      static Tensor<Long> of(long... value)
      +
    + + + +
      +
    • +

      of

      +
      static Tensor<java.lang.Long> of(long... value)
      Constructs a vector of longs based on the provided array.
      -
      -
      Parameters:
      +
      +
      Specified by:
      +
      of in interface Nda<V>
      +
      Parameters:
      value - The array of longs from which a 1D tensor ought to be constructed.
      -
      Returns:
      +
      Returns:
      A vector / 1D tensor of longs.
      -
    • -
    • -
      -

      of

      -
      static Tensor<Long> of(long value)
      -
      Description copied from interface: Nda
      -
      Constructs a vector of longs based on the provided array.
      -
      -
      Parameters:
      +
    + + + +
      +
    • +

      of

      +
      static Tensor<java.lang.Long> of(long value)
      +
      +
      Parameters:
      value - The scalar value which ought to be represented as tensor.
      -
      Returns:
      +
      Returns:
      A scalar long tensor.
      -
    • -
    • -
      -

      of

      -
      static Tensor<Short> of(short... value)
      +
    + + + +
      +
    • +

      of

      +
      static Tensor<java.lang.Short> of(short... value)
      Constructs a vector of shorts based on the provided array.
      -
      -
      Parameters:
      +
      +
      Specified by:
      +
      of in interface Nda<V>
      +
      Parameters:
      value - The array of shorts from which a 1D tensor ought to be constructed.
      -
      Returns:
      +
      Returns:
      A vector / 1D tensor of shorts.
      -
    • -
    • -
      -

      of

      -
      static Tensor<Short> of(short value)
      -
      Description copied from interface: Nda
      -
      Constructs a vector of shorts based on the provided array.
      -
      -
      Parameters:
      +
    + + + +
      +
    • +

      of

      +
      static Tensor<java.lang.Short> of(short value)
      +
      +
      Parameters:
      value - The scalar value which ought to be represented as tensor.
      -
      Returns:
      +
      Returns:
      A scalar short tensor.
      -
    • -
    • -
      -

      of

      -
      static Tensor<Boolean> of(boolean... value)
      +
    + + + +
      +
    • +

      of

      +
      static Tensor<java.lang.Boolean> of(boolean... value)
      Constructs a vector of booleans based on the provided array.
      -
      -
      Parameters:
      +
      +
      Specified by:
      +
      of in interface Nda<V>
      +
      Parameters:
      value - The array of booleans from which a 1D tensor ought to be constructed.
      -
      Returns:
      +
      Returns:
A vector / 1D tensor of booleans.
      -
    • -
    • -
      -

      of

      -
      static <V> Tensor<V> of(Class<V> valueType, - Shape shape, - Arg.Seed seed)
      +
    + + + +
      +
    • +

      of

      +
      static <V> Tensor<V> of(java.lang.Class<V> valueType,
      +                        Shape shape,
      +                        Arg.Seed seed)
      Use this to construct and return a seeded tensor of the specified type.
      -
      -
      Type Parameters:
      +
      +
      Type Parameters:
      V - The type parameter of individual tensor items.
      -
      Parameters:
      +
      Parameters:
      valueType - The type class of the items stored by the resulting tensor.
      shape - The shape of the resulting tensor consisting of any number of axis-sizes.
      -
      seed - An arbitrary String whose hash will be used to as a seed.
      -
      Returns:
      +
seed - An arbitrary String whose hash will be used as a seed.
      +
      Returns:
      A newly created and seeded tensor of the provided type and shape.
      -
    • -
    • -
      -

      of

      -
      static Tensor<Double> of(Shape shape, - double value)
      +
    + + + +
      +
    • +

      of

      +
      static Tensor<java.lang.Double> of(Shape shape,
      +                                   double value)
      Use this to construct and return a homogeneously populated double tensor of the specified shape.
      -
      -
      Parameters:
      +
      +
      Parameters:
      shape - The shape of the resulting tensor consisting of any number of axis-sizes.
      value - The value which ought to be used to populate the tensor homogeneously.
      -
      Returns:
      +
      Returns:
      A new tensor instance with the provided shape and initial value.
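For example:

      Tensor<Double> zeros = Tensor.of( Shape.of( 3, 3 ), 0d );   // 3x3 tensor, every item is 0.0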
      -
    • -
    • -
      -

      of

      -
      static Tensor<Double> of(Shape shape, - double[] values)
      +
    + + + +
      +
    • +

      of

      +
      static Tensor<java.lang.Double> of(Shape shape,
      +                                   double[] values)
Use this to construct and return a double tensor of the specified shape and initial values. The length of the provided array does not have to match the number of elements defined by the provided shape; the tensor will be populated based on repeated iteration over the provided double array.
      -
      -
      Parameters:
      +
      +
      Specified by:
      +
      of in interface Nda<V>
      +
      Parameters:
      shape - The shape of the resulting tensor consisting of any number of axis-sizes.
      values - The values which ought to be used to populate the tensor.
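The repeated-iteration behaviour described above can be sketched like this (values are illustrative):

      // 6 items are needed but only 2 values are given, so they repeat: 1, 2, 1, 2, 1, 2
      Tensor<Double> t = Tensor.of( Shape.of( 2, 3 ), new double[]{ 1d, 2d } );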
      -
    • -
    • -
      -

      of

      -
      static Tensor<Integer> of(Shape shape, - int[] values)
      +
    + + + +
      +
    • +

      of

      +
      static Tensor<java.lang.Integer> of(Shape shape,
      +                                    int[] values)
Use this to construct and return an int tensor of the specified shape and initial values. The length of the provided array does not have to match the number of elements defined by the provided shape; the tensor will be populated based on repeated iteration over the provided int array.
      -
      -
      Parameters:
      +
      +
      Specified by:
      +
      of in interface Nda<V>
      +
      Parameters:
      shape - The shape of the resulting tensor consisting of any number of axis-sizes.
      values - The values which ought to be used to populate the tensor.
      -
    • -
    • -
      -

      of

      -
      static Tensor<Byte> of(Shape shape, - byte[] values)
      +
    + + + +
      +
    • +

      of

      +
      static Tensor<java.lang.Byte> of(Shape shape,
      +                                 byte[] values)
Use this to construct and return a byte tensor of the specified shape and initial values. The length of the provided array does not have to match the number of elements defined by the provided shape; the tensor will be populated based on repeated iteration over the provided byte array.
      -
      -
      Parameters:
      +
      +
      Specified by:
      +
      of in interface Nda<V>
      +
      Parameters:
      shape - The shape of the resulting tensor consisting of any number of axis-sizes.
      values - The values which ought to be used to populate the tensor.
      -
    • -
    • -
      -

      of

      -
      static Tensor<Long> of(Shape shape, - long[] values)
      +
    + + + +
      +
    • +

      of

      +
      static Tensor<java.lang.Long> of(Shape shape,
      +                                 long[] values)
Use this to construct and return a long tensor of the specified shape and initial values. The length of the provided array does not have to match the number of elements defined by the provided shape; the tensor will be populated based on repeated iteration over the provided long array.
      -
      -
      Parameters:
      +
      +
      Specified by:
      +
      of in interface Nda<V>
      +
      Parameters:
      shape - The shape of the resulting tensor consisting of any number of axis-sizes.
      values - The values which ought to be used to populate the tensor.
      -
    • -
    • -
      -

      of

      -
      static Tensor<Short> of(Shape shape, - short[] values)
      +
    + + + +
      +
    • +

      of

      +
      static Tensor<java.lang.Short> of(Shape shape,
      +                                  short[] values)
Use this to construct and return a short tensor of the specified shape and initial values. The length of the provided array does not have to match the number of elements defined by the provided shape; the tensor will be populated based on repeated iteration over the provided short array.
      -
      -
      Parameters:
      +
      +
      Specified by:
      +
      of in interface Nda<V>
      +
      Parameters:
      shape - The shape of the resulting tensor consisting of any number of axis-sizes.
      values - The values which ought to be used to populate the tensor.
      -
    • -
    • -
      -

      of

      -
      static Tensor<Float> of(Shape shape, - float[] values)
      +
    + + + +
      +
    • +

      of

      +
      static Tensor<java.lang.Float> of(Shape shape,
      +                                  float[] values)
Use this to construct and return a float tensor of the specified shape and initial values. The length of the provided array does not have to match the number of elements defined by the provided shape; the tensor will be populated based on repeated iteration over the provided float array.
      -
      -
      Parameters:
      +
      +
      Specified by:
      +
      of in interface Nda<V>
      +
      Parameters:
      shape - The shape of the resulting tensor consisting of any number of axis-sizes.
      values - The values which ought to be used to populate the tensor.
      -
    • -
    • -
      -

      of

      -
      static Tensor<Float> of(Shape shape, - float value)
      +
    + + + +
      +
    • +

      of

      +
      static Tensor<java.lang.Float> of(Shape shape,
      +                                  float value)
      Use this to construct and return a homogeneously populated float tensor of the specified shape.
      -
      -
      Parameters:
      +
      +
      Parameters:
      shape - The shape of the resulting tensor consisting of any number of axis-sizes.
      value - The value which ought to be used to populate the tensor homogeneously.
      -
      Returns:
      +
      Returns:
      A new tensor instance with the provided shape and initial value.
      -
    • -
    • -
      -

      of

      -
      static Tensor<Boolean> of(Shape shape, - boolean[] values)
      +
    + + + +
      +
    • +

      of

      +
      static Tensor<java.lang.Boolean> of(Shape shape,
      +                                    boolean[] values)
Use this to construct and return a boolean tensor of the specified shape and initial values. The length of the provided array does not have to match the number of elements defined by the provided shape; the tensor will be populated based on repeated iteration over the provided boolean array.
      -
      -
      Parameters:
      +
      +
      Specified by:
      +
      of in interface Nda<V>
      +
      Parameters:
      shape - The shape of the resulting tensor consisting of any number of axis-sizes.
      values - The values which ought to be used to populate the tensor.
      -
    • -
    • -
      -

      of

      -
      static <V> Tensor<V> of(Shape shape, - Data<V> data)
      +
    + + + +
      +
    • +

      of

      +
      static <V> Tensor<V> of(Shape shape,
      +                        Data<V> data)
      Use this to construct and return a tensor of the specified shape and data object.
      This method is typically used like this:
      
             Tsr<Integer> tensor = Tsr.of( Shape.of(2,3), Data.of(1,2,3,4,5,6) );
         
      The resulting tensor will have the shape [2,3] and the values [1,2,3,4,5,6].
      -
      -
      Type Parameters:
      +
      +
      Type Parameters:
      V - The type parameter of individual tensor items.
      -
      Parameters:
      +
      Parameters:
      shape - The shape of the resulting tensor consisting of any number of axis-sizes.
      data - The data object which contains the values to be used to populate the tensor.
      -
      Returns:
      +
      Returns:
      A newly created tensor of the provided shape and data.
      -
    • -
    • -
      -

      of

      -
      static <V> Tensor<V> of(DataType<V> type, - Shape shape)
      +
    + + + +
      +
    • +

      of

      +
      static <V> Tensor<V> of(DataType<V> type,
      +                        Shape shape)
      Use this to construct and return a tensor of the specified type and shape.
      -
      -
      Type Parameters:
      +
      +
      Type Parameters:
      V - The type parameter of individual tensor items.
      -
      Parameters:
      +
      Parameters:
      type - The type of the items stored by the resulting tensor.
      shape - The shape of the resulting tensor consisting of any number of axis-sizes.
      -
      Returns:
      +
      Returns:
      A newly created tensor of the provided type and shape.
      -
    • -
    • -
      -

      of

      -
      static <V> Tensor<V> of(Class<V> type, - Shape shape, - Object data)
      +
    + + + +
      +
    • +

      of

      +
      static <V> Tensor<V> of(java.lang.Class<V> type,
      +                        Shape shape,
      +                        java.lang.Object data)
      Use this to construct and return a tensor of the specified type, shape and data object.
      -
      -
      Type Parameters:
      +
      +
      Type Parameters:
      V - The type parameter of individual tensor items.
      -
      Parameters:
      +
      Parameters:
      type - The type of the items stored by the resulting tensor.
      shape - The shape of the resulting tensor consisting of an array of axis-sizes.
      data - The data object which will be used to populate the tensor.
      -
      Returns:
      +
      Returns:
      A newly created tensor of the provided type, shape and data.
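A usage sketch, assuming (as with the related, more forgiving factory methods described further below) that a primitive array is one accepted form of the data object:

      Tensor<Float> t = Tensor.of( Float.class, Shape.of( 2, 2 ), new float[]{ 1f, 2f, 3f, 4f } );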
      -
    • -
    • -
      -

      of

      -
      static <V> Tensor<V> of(Class<V> type, - List<Integer> shape, - Object data)
      +
    + + + +
      +
    • +

      of

      +
      static <V> Tensor<V> of(java.lang.Class<V> type,
      +                        java.util.List<java.lang.Integer> shape,
      +                        java.lang.Object data)
      Use this to construct and return a tensor of the specified type, shape and data object.
      -
      -
      Type Parameters:
      +
      +
      Type Parameters:
      V - The type parameter of individual tensor items.
      -
      Parameters:
      +
      Parameters:
      type - The type of the items stored by the resulting tensor.
      shape - The shape of the resulting tensor consisting of list of axis-sizes.
      data - The data object which will be used to populate the tensor.
      -
      Returns:
      +
      Returns:
      A newly created tensor of the provided type, shape and data.
      -
    • -
    • -
      -

      of

      -
      static <V extends Number> Tensor<V> of(Class<V> type, - Shape shape, - Number data)
      +
    + + + +
      +
    • +

      of

      +
      static <V extends java.lang.Number> Tensor<V> of(java.lang.Class<V> type,
      +                                                 Shape shape,
      +                                                 java.lang.Number data)
      Use this to construct and return a tensor of the specified type, shape and number.
      -
      -
      Type Parameters:
      +
      +
      Type Parameters:
      V - The type parameter of individual tensor items.
      -
      Parameters:
      +
      Parameters:
      type - The type of the items stored by the resulting tensor.
shape - The shape of the resulting tensor consisting of an immutable tuple of axis-sizes.
      data - The data object which will be used to populate the tensor.
      -
      Returns:
      +
      Returns:
      A newly created tensor of the provided type, shape and data.
      -
    • -
    • -
      -

      ofAny

      -
      static <V> Tensor<V> ofAny(Class<V> type, - Shape shape, - Object data)
      +
    + + + +
      +
    • +

      ofAny

      +
      static <V> Tensor<V> ofAny(java.lang.Class<V> type,
      +                           Shape shape,
      +                           java.lang.Object data)
      Use this to construct and return a tensor of the specified type, shape and data object.
      -
      -
      Type Parameters:
      +
      +
      Type Parameters:
      V - The type parameter of individual tensor items.
      -
      Parameters:
      +
      Parameters:
      type - The type of the items stored by the resulting tensor.
shape - The shape of the resulting tensor consisting of an immutable tuple of axis-sizes.
      data - The data object which will be used to populate the tensor.
      -
      Returns:
      +
      Returns:
      A newly created tensor of the provided type, shape and data.
      -
    • -
    • -
      -

      of

      -
      static <V> Tensor<V> of(Class<V> type, - List<Integer> shape, - List<V> data)
      +
    + + + +
      +
    • +

      of

      +
      static <V> Tensor<V> of(java.lang.Class<V> type,
      +                        java.util.List<java.lang.Integer> shape,
      +                        java.util.List<V> data)
      Use this to construct and return a tensor of the specified type, shape and data object.
      -
      -
      Type Parameters:
      +
      +
      Type Parameters:
      V - The type parameter of individual tensor items.
      -
      Parameters:
      +
      Parameters:
      type - The type of the items stored by the resulting tensor.
      shape - The shape of the resulting tensor consisting of a list of axis-sizes.
      data - The list of items which will be used to populate the tensor.
      -
      Returns:
      +
      Returns:
      A newly created tensor of the provided type, shape and data.
      -
    • -
    • -
      -

      of

      -
      static <V> Tensor<V> of(Class<V> type, - Shape shape, - List<V> data)
      +
    + + + +
      +
    • +

      of

      +
      static <V> Tensor<V> of(java.lang.Class<V> type,
      +                        Shape shape,
      +                        java.util.List<V> data)
      Use this to construct and return a tensor of the specified type, shape and list of items. Here a simple usage example:
      
             Tsr<Float> tensor = Tsr.of( Float.class, Shape.of(2,3), List.of(1f,2f,3f,4f,5f,6f) );
         
      -
      -
      Type Parameters:
      +
      +
      Type Parameters:
      V - The type parameter of individual tensor items.
      -
      Parameters:
      +
      Parameters:
      type - The type of the items stored by the resulting tensor.
      shape - The shape of the resulting tensor consisting of an immutable tuple of axis-sizes.
      data - The list of items which will be used to populate the tensor.
      -
      Returns:
      +
      Returns:
      A newly created tensor of the provided type, shape and data.
      -
    • -
    • -
      -

      of

      -
      static <V> Tensor<V> of(DataType<V> dataType, - List<Integer> shape, - List<V> data)
      +
    + + + +
      +
    • +

      of

      +
      static <V> Tensor<V> of(DataType<V> dataType,
      +                        java.util.List<java.lang.Integer> shape,
      +                        java.util.List<V> data)
      Use this to construct and return a tensor of the specified type, shape and data object.
      -
      -
      Type Parameters:
      +
      +
      Type Parameters:
      V - The type parameter of individual tensor items.
      -
      Parameters:
      +
      Parameters:
      dataType - The type of the items stored by the resulting tensor.
      shape - The shape of the resulting tensor consisting of a list of axis-sizes.
      data - The data object which will be used to populate the tensor.
      -
      Returns:
      +
      Returns:
      A newly created tensor of the provided type, shape and data.
      -
    • -
    • -
      -

      of

      -
      static <V> Tensor<V> of(DataType<V> dataType, - Shape shape, - List<V> data)
      +
    + + + +
      +
    • +

      of

      +
      static <V> Tensor<V> of(DataType<V> dataType,
      +                        Shape shape,
      +                        java.util.List<V> data)
      Use this to construct and return a tensor of the specified type, shape and a list of items. Here a simple usage example:
      
       Tsr<Float> tensor = Tsr.of( DataType.F32, Shape.of(2,3), List.of(1f,2f,3f,4f,5f,6f) );
         
      -
      -
      Type Parameters:
      +
      +
      Type Parameters:
      V - The type parameter of individual tensor items.
      -
      Parameters:
      +
      Parameters:
      dataType - The type of the items stored by the resulting tensor.
      shape - The shape of the resulting tensor consisting of an immutable tuple of axis-sizes.
      data - The list of items which will be used to populate the tensor.
      -
      Returns:
      +
      Returns:
      A newly created tensor of the provided type, shape and data.
      - -
    • -
    • -
      -

      of

      -
      static <V> Tensor<V> of(DataType<V> dataType, - Shape shape, - Object data)
      -
      This factory method is among the most flexible and forgiving ways to create a Tensor instance. - It receives a DataType for type safety and to ensure that the produced Tensor instance - will contain elements of the correct type, and a Shape tuple which stores the sizes of the axes that the - instance ought to possess, and finally it receives a data Object which can be anything ranging from - a List to an array or simply a single value which ought to fill out the entire Tensor.
      -
      -
      Parameters:
      -
      dataType - The data type of the data represented by Tensor instance created by this method.
      -
      shape - An immutable tuple of axis sizes describing the dimensionality of the Tensor created by this method.
      -
      data - The data for the Tensor that is about to be created, which can be a list, an array or scalar.
      -
      Returns:
      -
      A new Tensor instance of the specified type, shape and containing the provided data.
      -
      -
      -
    • -
    • -
      -

      of

      -
      static <V extends N, -N> Tensor<V> of(DataType<V> dataType, - Device<N> device, - Shape shape, - Object data)
      -
      This factory method is among the most flexible and forgiving ways to create a Tensor instance. - It receives a DataType for type safety and to ensure that the produced Tensor instance - will contain elements of the correct type, and a Shape tuple which stores the sizes of the axes that the - instance ought to possess, and finally it receives a data Object which can be anything ranging from - a List to an array or simply a single value which ought to fill out the entire Tensor.
      -
      -
      Parameters:
      -
      dataType - The data type of the data represented by Tensor instance created by this method.
      +
    • +
    + + + +
      +
    • +

      of

      +
      static <V> Tensor<V> of(DataType<V> dataType,
      +                        Shape shape,
      +                        java.lang.Object data)
      +
This factory method is among the most flexible and forgiving ways to create a Tensor instance. It receives a DataType for type safety and to ensure that the produced Tensor instance will contain elements of the correct type, and a Shape tuple which stores the sizes of the axes that the instance ought to possess, and finally it receives a data Object which can be anything ranging from a List to an array or simply a single value which ought to fill out the entire Tensor.
      +
      +
      Parameters:
      +
      dataType - The data type of the data represented by Tensor instance created by this method.
      +
      shape - An immutable tuple of axis sizes describing the dimensionality of the Tensor created by this method.
      +
      data - The data for the Tensor that is about to be created, which can be a list, an array or scalar.
      +
      Returns:
      +
      A new Tensor instance of the specified type, shape and containing the provided data.
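A usage sketch mirroring the DataType.F32 example shown earlier (values are illustrative, and DataType.F32 is assumed to denote the 32-bit float type as in that example):

      var t = Tensor.of( DataType.F32, Shape.of( 2, 2 ), List.of( 1f, 2f, 3f, 4f ) );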
      +
      +
    • +
    + + + +
      +
    • +

      of

      +
      static <V extends N,N> Tensor<V> of(DataType<V> dataType,
      +                                    Device<N> device,
      +                                    Shape shape,
      +                                    java.lang.Object data)
      +
This factory method is among the most flexible and forgiving ways to create a Tensor instance. It receives a DataType for type safety and to ensure that the produced Tensor instance will contain elements of the correct type, and a Shape tuple which stores the sizes of the axes that the instance ought to possess, and finally it receives a data Object which can be anything ranging from a List to an array or simply a single value which ought to fill out the entire Tensor.
      +
      +
      Parameters:
      +
      dataType - The data type of the data represented by Tensor instance created by this method.
      device - The device on which the tensor will be stored.
      -
      shape - An immutable tuple of axis sizes describing the dimensionality of the Tensor created by this method.
      -
      data - The data for the Tensor that is about to be created, which can be a list, an array or scalar.
      -
      Returns:
      -
      A new Tensor instance of the specified type, shape and containing the provided data.
      -
      - -
    • -
    • -
      -

      of

      -
      static <V> Tensor<V> of(DataType<V> dataType, - NDConstructor ndConstructor, - Data<V> data)
      +
      shape - An immutable tuple of axis sizes describing the dimensionality of the Tensor created by this method.
      +
      data - The data for the Tensor that is about to be created, which can be a list, an array or scalar.
      +
      Returns:
      +
      A new Tensor instance of the specified type, shape and containing the provided data.
      + +
    • +
    + + + +
      +
    • +

      of

      +
      static <V> Tensor<V> of(DataType<V> dataType,
      +                        NDConstructor ndConstructor,
      +                        Data<V> data)
This factory method is a raw tensor constructor which will not perform any type checking or data conversion on the data provided to it. It constructs the tensor expecting that the data provided to it is of the correct type and that the shape information is given as an array of axis sizes.
      -
      -
      Type Parameters:
      +
      +
      Type Parameters:
      V - The type parameter of individual tensor items.
      -
      Parameters:
      -
      dataType - The data type of the data represented by Tensor instance created by this method.
      -
      ndConstructor - The NDConstructor that will be used to construct the Tensor instance.
      -
      data - The data for the Tensor that is about to be created, which is expected to be an array.
      -
      Returns:
      -
      A new Tensor instance of the specified type, shape and containing the provided data.
      -
      - -
    • -
    • -
      -

      of

      -
      static <T> Tensor<T> of(DataType<T> type, - List<Integer> shape, - Filler<T> filler)
      +
      Parameters:
      +
      dataType - The data type of the data represented by Tensor instance created by this method.
      +
      ndConstructor - The NDConstructor that will be used to construct the Tensor instance.
      +
      data - The data for the Tensor that is about to be created, which is expected to be an array.
      +
      Returns:
      +
      A new Tensor instance of the specified type, shape and containing the provided data.
      + +
    • +
    + + + +
      +
    • +

      of

      +
      static <T> Tensor<T> of(DataType<T> type,
      +                        java.util.List<java.lang.Integer> shape,
      +                        Filler<T> filler)
This factory method allows the creation of tensors with an additional initialization lambda for filling the underlying data array with desired values. Other than regular numeric types it is also possible to initialize the [...] Therefore the constructor requires not only a shape as argument but also the data type which ought to be allocated as well as the initialization lambda which will be called iteratively.
      -
      -
      Type Parameters:
      +
      +
      Type Parameters:
      T - The type parameter for the actual data array items.
      -
      Parameters:
      +
      Parameters:
      type - The data type this tensor ought to have.
shape - The shape this new tensor ought to have.
      filler - The lambda Object which ought to fill this tensor with the appropriate data.
      -
    • -
    • -
      -

      of

      -
      static <T> Tensor<T> of(DataType<T> type, - Shape shape, - Filler<T> filler)
      +
    + + + +
      +
    • +

      of

      +
      static <T> Tensor<T> of(DataType<T> type,
      +                        Shape shape,
      +                        Filler<T> filler)
This factory method allows the creation of tensors with an additional initialization lambda for filling the underlying data array with desired values. Other than regular numeric types it is also possible to initialize the [...]
      
             Tsr<Double> tensor = Tsr.of( DataType.F64, Shape.of(2, 3), (i, j) -> i + j );
         
      -
      -
      Type Parameters:
      +
      +
      Type Parameters:
      T - The type parameter for the actual data array items.
      -
      Parameters:
      +
      Parameters:
      type - The data type this tensor ought to have.
shape - The shape this new tensor ought to have.
      filler - The lambda Object which ought to fill this tensor with the appropriate data.
      -
    • -
    • -
      -

      of

      -
      static <T> Tensor<T> of(Class<T> type, - Shape shape, - Filler<T> filler)
      +
    + + + +
      +
    • +

      of

      +
      static <T> Tensor<T> of(java.lang.Class<T> type,
      +                        Shape shape,
      +                        Filler<T> filler)
This factory method allows the creation of tensors with an additional initialization lambda for filling the underlying data array with desired values. Other than regular numeric types it is also possible to initialize the [...] Therefore the constructor requires not only a shape as argument but also the data type which ought to be allocated as well as the initialization lambda which will be called iteratively.
      -
      -
      Type Parameters:
      +
      +
      Type Parameters:
      T - The type parameter for the actual data array items.
      -
      Parameters:
      +
      Parameters:
      type - The data type class the items of this tensor ought to have.
shape - The shape this new tensor ought to have.
      filler - The lambda Object which ought to fill this tensor with the appropriate data.
      -
    • -
    • -
      -

      of

      -
      @SafeVarargs -static <V extends Number> Tensor<V> of(String expression, - V... inputs)
      -
      This factory method allows for the creation and execution of Function instances +
    + + + + + +
      +
    • +

      of

      +
      @SafeVarargs
      +static <V extends java.lang.Number> Tensor<V> of(java.lang.String expression,
      +                                                              V... inputs)
      +
      This factory method allows for the creation and execution of Function instances without actually instantiating them manually, where the result will then be returned by this factory method.

The passed String will be parsed into a Function AST which will be cached using the expression as key in case it will be used in future constructor calls like this one, or elsewhere... The created / retrieved Function will then be called with the supplied input list in order to trigger an execution, the result of which will be used for the population of the fields of this very instance.
      • 'var a = Tsr.of( "sin( I[0] ) / I[1]", 12f, -6.34f )'
      -
      -
      Parameters:
      +
      +
      Parameters:
      expression - A String which will be used for parsing a Function AST.
      inputs - An array of inputs which can be tensors or numeric types.
      -
    • -
    • -
      -

      of

      -
      static <V> Tensor<V> of(String expression, - List<Tensor<V>> inputs)
      -
      This factory method allows for the creation and execution of Function instances +
    + + + +
      +
    • +

      of

      +
      static <V> Tensor<V> of(java.lang.String expression,
      +                        java.util.List<Tensor<V>> inputs)
      +
      This factory method allows for the creation and execution of Function instances without actually instantiating them manually, where the result will then be returned by this factory method.

The passed String will be parsed into a Function AST which will be cached using the expression as key in case it will be used in future constructor calls like this one, or elsewhere... The created / retrieved Function will then be called with the supplied input list in order to trigger an execution, the result of which will be used for the population of the fields of this very instance.
      • 'var a = Tsr.of( "sin( I[0] ) / I[1]", List.of(b, c) )'
      -
      -
      Parameters:
      +
      +
      Parameters:
      expression - A String which will be used for parsing a Function AST.
      inputs - A list of inputs which can be tensors or numeric types.
      -
    • -
    • -
      -

      of

      -
      static <V> Tensor<V> of(String expression, - boolean doAD, - List<Tensor<V>> tensors)
      +
    + + + +
      +
    • +

      of

      +
      static <V> Tensor<V> of(java.lang.String expression,
      +                        boolean doAD,
      +                        java.util.List<Tensor<V>> tensors)
This method takes a list of tensors and a String expression describing operations which ought to be applied to the tensors in said list. It also receives a boolean flag which determines if the defined function should be executed with autograd enabled. The provided expression will be parsed to a Function instance expecting as many inputs as there are array entries, namely : "I[0]", "I[1]", "I[2]", ...
An example would be the following :

      [...]

Which takes the tensors 'b' and 'c' and applies the function "f(x,y) = sin(x) / y" element-wise to produce a new tensor 'a'! Additionally, there is a helpful flag which allows one to specify if the parsed Function instance emerging from the provided expression should also allow the tracking of computations via a computation graph (GraphNode instances). This history tracking then enables auto-differentiation.
      -
      -
      Parameters:
      +
      +
      Parameters:
      expression - The expression describing operations applied to the provided tensors.
      doAD - A flag which when set to true commands the creation of a computation graph during operation execution.
      tensors - A list of tensors used as inputs to the Function instance parsed from the provided expression.
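A sketch of the kind of example referenced above ('b' and 'c' are hypothetical input tensors with illustrative values):

      var b = Tensor.of( 2f, 3f );
      var c = Tensor.of( 4f, 5f );
      // parses "sin( I[0] ) / I[1]" and applies it element-wise with graph recording (autograd) enabled
      var a = Tensor.of( "sin( I[0] ) / I[1]", true, List.of( b, c ) );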
      -
    • -
    • -
      -

      of

      -
      static <V> Tensor<V> of(String expression, - Tensor<V> tensor)
      +
    + + + +
      +
    • +

      of

      +
      static <V> Tensor<V> of(java.lang.String expression,
      +                        Tensor<V> tensor)
This method takes a tensor and a String expression describing operations which ought to be applied to said tensor. This expression will be parsed to a Function instance expecting one input, namely : "I[0]"
An example would be the following :

      [...]

Which takes the tensor 'b' and applies the function "f(x) = sin(x) * 2" element-wise to produce a new tensor 'a'!

      -
      -
      Parameters:
      +
      +
      Parameters:
      tensor - A tensor which serves as input to the Function instance parsed from the given expression.
      expression - The expression describing operations applied to the provided tensor.
      -
    • -
    • -
      -

      of

      -
      @SafeVarargs -static <V> Tensor<V> of(String expression, - Tensor<V>... tensors)
      +
    + + + +
      +
    • +

      of

      +
      @SafeVarargs
      +static <V> Tensor<V> of(java.lang.String expression,
      +                                     Tensor<V>... tensors)
This method takes an array of tensors and a String expression describing operations which ought to be applied to the tensors in said array. This expression will be parsed to a Function instance expecting as many inputs as there are array entries, namely : "I[0]", "I[1]", "I[2]", ...
An example would be the following :

      [...]

Which takes the tensors 'b' and 'c' and applies the function "f(x,y) = sin(x) / y" element-wise to produce a new tensor 'a'!
      -
      -
      Parameters:
      +
      +
      Parameters:
      expression - The expression describing operations applied to the provided tensors.
      tensors - An array of tensors used as inputs to the Function instance parsed from the provided expression.
      -
    • -
    • -
      -

      of

      -
      @SafeVarargs -static <V> Tensor<V> of(String expression, - boolean doAD, - Tensor<V>... tensors)
      +
    + + + +
      +
    • +

      of

      +
      @SafeVarargs
      +static <V> Tensor<V> of(java.lang.String expression,
      +                                     boolean doAD,
      +                                     Tensor<V>... tensors)
This method takes an array of tensors and a String expression describing operations which ought to be applied to the tensors in said array. It also receives a boolean flag which determines if the defined function should be executed with autograd enabled. The provided expression will be parsed to a Function instance expecting as many inputs as there are array entries, namely : "I[0]", "I[1]", "I[2]", ...
An example would be the following :

      [...]

Which takes the tensors 'b' and 'c' and applies the function "f(x,y) = sin(x) / y" element-wise to produce a new tensor 'a'! Additionally, there is a helpful flag which allows one to specify if the parsed Function instance emerging from the provided expression should also allow the tracking of computations via a computation graph (GraphNode instances). This history tracking then enables auto-differentiation.
      -
      -
      Parameters:
      +
      +
      Parameters:
      expression - The expression describing operations applied to the provided tensors.
      doAD - A flag which when set to true commands the creation of a computation graph during operation execution.
      tensors - An array of tensors used as inputs to the Function instance parsed from the provided expression.
      -
    • -
    • -
      -

      ofRandom

      -
      static <V> Tensor<V> ofRandom(Class<V> valueTypeClass, - int... shape)
      +
    + + + +
      +
    • +

      ofRandom

      +
      static <V> Tensor<V> ofRandom(java.lang.Class<V> valueTypeClass,
      +                              int... shape)
This factory method produces a randomly populated tensor of the provided type and shape using a hard coded default seed. If the provided type class represents a floating point number type (like Double or Float) then the random numbers will be Gaussian ("normally") distributed values with mean 0.0 and standard deviation 1.0.
      -
      -
      Type Parameters:
      +
      +
      Type Parameters:
      V - The type parameter of the values stored by the returned tensor.
      -
      Parameters:
      +
      Parameters:
      valueTypeClass - The type class of the values stored by the returned tensor.
      shape - The shape of the tensor produced by this factory method.
      -
      Returns:
      +
      Returns:
      A randomly filled tensor of the provided type.
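For example:

      // 2x3 tensor of Gaussian distributed floats based on the hard coded default seed
      Tensor<Float> r = Tensor.ofRandom( Float.class, 2, 3 );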
      -
    • -
    • -
      -

      like

      -
      static <V> IterByOrIterFromOrAllTensor<V> like(Tensor<V> template)
      +
    + + + +
      +
    • +

      like

      +
      static <V> IterByOrIterFromOrAllTensor<V> like(Tensor<V> template)
Use this factory method to instantiate a new tensor with the same data type, shape and memory location (Device instance) as the provided template tensor.
      -
      -
      Type Parameters:
      + and memory location (Device instance) as the provided template tensor. +
      +
      Type Parameters:
      V - The type parameter defining the value type of the provided as well as returned tensor.
      -
      Parameters:
      +
      Parameters:
      template - The template tensor whose type, shape and location should be taken to construct a new tensor.
      -
      Returns:
      -
      A new Tensor instance with the same data type, shape and memory location as the provided template.
      +
      Returns:
      +
      A new Tensor instance with the same data type, shape and memory location as the provided template.
      -
    • -
    • -
      -

      shaped

      -
      static <T> Collector<T,?,Tensor<T>> shaped(int... shape)
      +
    + + + +
      +
    • +

      shaped

      +
      static <T> java.util.stream.Collector<T,?,Tensor<T>> shaped(int... shape)
      Returns a Collector that accumulates the input elements into a - new Tensor with the specified shape.
      + new Tensor with the specified shape.
      Usage example :
      
           var tensor = Stream.of( 1, 2, 3, 4, 5, 6 )
                             .collect( Tsr.shaped( 2, 3 ) );
        
      -
      -
      Type Parameters:
      +
      +
      Specified by:
      +
      shaped in interface Nda<V>
      +
      Type Parameters:
      T - the type of the input elements
      -
      Parameters:
      +
      Parameters:
      shape - The shape of the tensor to be returned.
      -
      Returns:
      +
      Returns:
      a Collector which collects all the input elements into a - Tensor, in encounter order.
      + Tensor, in encounter order.
      -
    • -
    • -
      -

      shaped

      -
      static <T> Collector<T,?,Tensor<T>> shaped(Shape shape)
      +
    + + + +
      +
    • +

      shaped

      +
      static <T> java.util.stream.Collector<T,?,Tensor<T>> shaped(Shape shape)
      Returns a Collector that accumulates the input elements into a - new Tensor with the specified shape.
      + new Tensor with the specified shape.
      Usage example :
      
           var tensor = Stream.of( 1, 2, 3, 4, 5, 6 )
                             .collect( Tsr.shaped( otherTensor.shape() ) );
        
      -
      -
      Type Parameters:
      +
      +
      Type Parameters:
      T - the type of the input elements
      -
      Parameters:
      +
      Parameters:
      shape - The shape of the tensor to be returned.
      -
      Returns:
      +
      Returns:
      a Collector which collects all the input elements into a - Tensor, in encounter order.
      + Tensor, in encounter order.
      -
    • -
    • -
      -

      setRqsGradient

      -
      Tensor<V> setRqsGradient(boolean rqsGradient)
      +
    + + + +
      +
    • +

      setRqsGradient

      +
      Tensor<V> setRqsGradient(boolean rqsGradient)
      Setting this flag to true will tell the autograd system to accumulate gradients at this tensor. This is achieved by allowing for the recording of a computation graph for when this tensor is used in any autograd supporting operations. This allows the autograd / auto-differentiation system to traverse said graph - for when the backward() method is called + for when the backward() method is called on any descendant tensor at the most recent end of the computation graph.
      -
      -
      Parameters:
      +
      +
      Parameters:
      rqsGradient - The truth value determining if this tensor ought to receive gradients via the built-in automatic backpropagation system.
      -
      Returns:
      -
      This very Tensor instance in order to enable method chaining.
      +
      Returns:
      +
      This very Tensor instance in order to enable method chaining.
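A small sketch of how this flag is typically enabled right after construction (using the ofRandom factory documented above):

     // 'w' will now accumulate gradients when used in autograd supporting operations:
     Tensor<Double> w = Tensor.ofRandom( Double.class, 3 ).setRqsGradient( true );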
      -
    • -
    • -
      -

      rqsGradient

      -
      boolean rqsGradient()
      +
    + + + +
      +
    • +

      rqsGradient

      +
      boolean rqsGradient()
      This flag will indirectly trigger the activation of the autograd / auto-differentiation system of this library! If the flag is set to 'true' and the tensor is used for computation then - it will also receive gradients when the backward() method is being called + it will also receive gradients when the backward() method is being called on any descendant tensor within the computation graph.
      -
      -
      Returns:
      +
      +
      Returns:
      The truth value determining if this tensor ought to receive gradients via the built-in automatic backpropagation system.
      -
    • -
    • -
      -

      isIntermediate

      -
      boolean isIntermediate()
      +
    + + + +
      +
    • +

      isIntermediate

      +
      boolean isIntermediate()
Intermediate tensors are internal non-user tensors which may be eligible for deletion when further consumed by a Function. For the casual user of Neureka, this flag should always be false!
      -
      -
      Returns:
      +
      +
      Returns:
      The truth value determining if this tensor is not a user tensor but an internal - tensor which may be eligible for deletion by Functions consuming it.
      + tensor which may be eligible for deletion by Functions consuming it.
      -
    • -
    • -
      -

      isOutsourced

      -
      default boolean isOutsourced()
      -
      Outsourced means that the tensor is stored on a Device implementation instance which is not the CPU.
      -
      -
      Returns:
      +
    + + + +
      +
    • +

      isOutsourced

      +
      default boolean isOutsourced()
      +
      Outsourced means that the tensor is stored on a Device implementation instance which is not the CPU.
      +
      +
      Returns:
      The truth value determining if the data of this tensor is not actually stored inside it in the form of a traditional primitive JVM array!
      -
    • -
    • -
      -

      isVirtual

      -
      boolean isVirtual()
      +
    + + + +
      +
    • +

      isVirtual

      +
      boolean isVirtual()
      A Virtual tensor is a tensor whose underlying data array is of size 1, holding only a single value.
      This only makes sense for homogeneously populated tensors. An example of such a tensor would be:
      @@ -2718,202 +3228,233 @@

      isVirtual

The reason for this feature is that it greatly improves performance in certain cases. In essence this feature is a form of lazy loading.

      - Use MutateTensor.setIsVirtual(boolean) to "actualize" a "virtual" tensor, and vise versa.

      -
      -
      Returns:
+ Use MutateTensor.setIsVirtual(boolean) to "actualize" a "virtual" tensor, and vice versa. +
      +
      Returns:
      The truth value determining if this tensor is "virtual" or "actual".
      -
    • -
    • -
      -

      isDeleted

      -
      boolean isDeleted()
      -
      This will check if the MutateTensor.delete() method was previously called on this tensor. +
    + + + +
      +
    • +

      isDeleted

      +
      boolean isDeleted()
      +
This will check if the MutateTensor.delete() method was previously called on this tensor. This means that the tensor data was freed on every device and any references inside the tensor are null (to be eligible for garbage collection).
      -
      -
      Returns:
      -
      The truth value determining if the MutateTensor.delete() method has been called oin this instance.
      +
      +
      Returns:
      +
The truth value determining if the MutateTensor.delete() method has been called on this instance.
      -
    • -
    • -
      -

      isEmpty

      -
      default boolean isEmpty()
      -
      A tensor is empty if it's Data storage is null. +
    + + + +
      +
    • +

      isEmpty

      +
      default boolean isEmpty()
      +
A tensor is empty if its Data storage is null. This is true for deleted tensors or tensors which have not been initialized yet.
      -
      -
      Returns:
      -
      The truth value determining if this tensor has no Data.
      -
      - -
    • -
    • -
      -

      isUndefined

      -
      default boolean isUndefined()
      -
      A tensor is "undefined" if it has either no NDConfiguration implementation instance - or this instance does not have a shape set for this Tensor which is needed for +
      +
      Returns:
      +
      The truth value determining if this tensor has no Data.
      +
      +
    • +
    + + + +
      +
    • +

      isUndefined

      +
      default boolean isUndefined()
      +
      A tensor is "undefined" if it has either no NDConfiguration implementation instance + or this instance does not have a shape set for this Tensor which is needed for a tensor to also have a rank and dimensionality...
      -
      -
      Returns:
      -
      The truth value determining if this tensor has an NDConfiguration stored internally.
      +
      +
      Returns:
      +
      The truth value determining if this tensor has an NDConfiguration stored internally.
      -
    • -
    • -
      -

      isSlice

      -
      default boolean isSlice()
      +
    + + + +
      +
    • +

      isSlice

      +
      default boolean isSlice()
      If this nd-array is a slice of a parent nd-array then this method will yield true. - Slices can be created by calling the variations of the "Nda.getAt(int...)" method.
      -
      -
      Specified by:
      -
      isSlice in interface Nda<V>
      -
      Returns:
      + Slices can be created by calling the variations of the "Nda.getAt(int...)" method. +
      +
      Specified by:
      +
      isSlice in interface Nda<V>
      +
      Returns:
      The truth value determining if this nd-array is a slice of another nd-array.
      -
      See Also:
      -
      - -
      +
      See Also:
      +
      Nda.getAt(int...), +Nda.slice()
      -
    • -
    • -
      -

      isShallowCopy

      -
      default boolean isShallowCopy()
      +
    + + + +
      +
    • +

      isShallowCopy

      +
      default boolean isShallowCopy()
      If this nd-array is a shallow copy of a parent nd-array then this method will yield true. - Shallow copies can be created by calling the "Nda.shallowCopy()" method.
      -
      -
      Specified by:
      -
      isShallowCopy in interface Nda<V>
      -
      Returns:
      + Shallow copies can be created by calling the "Nda.shallowCopy()" method. +
      +
      Specified by:
      +
      isShallowCopy in interface Nda<V>
      +
      Returns:
      The truth value determining if this nd-array is a shallow copy of another nd-array.
      -
      See Also:
      -
      - -
      +
      See Also:
      +
      Nda.shallowCopy()
      -
    • -
    • -
      -

      isPartialSlice

      -
      default boolean isPartialSlice()
      +
    + + + +
      +
    • +

      isPartialSlice

      +
      default boolean isPartialSlice()
If this nd-array is a partial slice of a parent nd-array then this method will yield true. A partial slice is a slice which does not view all of the parent's items. Partial slices can be created by calling the variations of the "Nda.getAt(int...)" method. This is the inverse of Nda.isFullSlice().
      -
      -
      Specified by:
      -
      isPartialSlice in interface Nda<V>
      -
      Returns:
      + Partial slices can be created by calling the variations of the "Nda.getAt(int...)" method. + This is the inverse of Nda.isFullSlice(). +
      +
      Specified by:
      +
      isPartialSlice in interface Nda<V>
      +
      Returns:
      The truth value determining if this nd-array is a partial slice of another nd-array.
      -
    • -
    • -
      -

      sliceCount

      -
      default int sliceCount()
      +
    + + + +
      +
    • +

      sliceCount

      +
      default int sliceCount()
      This method returns the number of slices which have been created from this nd-array. - It does so by accessing the Relation component if present + It does so by accessing the Relation component if present which internally keeps track of slices via weak references.
      -
      -
      Specified by:
      -
      sliceCount in interface Nda<V>
      -
      Returns:
      +
      +
      Specified by:
      +
      sliceCount in interface Nda<V>
      +
      Returns:
      The number of slices derived from this nd-array.
      -
    • -
    • -
      -

      isSliceParent

      -
      default boolean isSliceParent()
      +
    + + + +
      +
    • +

      isSliceParent

      +
      default boolean isSliceParent()
      If slices have been derived from this nd-array then it is a "slice parent". This is what this method will determine, in which case, it will return true.
      -
      -
      Specified by:
      -
      isSliceParent in interface Nda<V>
      -
      Returns:
      +
      +
      Specified by:
      +
      isSliceParent in interface Nda<V>
      +
      Returns:
      The truth value determining if slices have been derived from this nd-array.
      -
    • -
    • -
      -

      belongsToGraph

      -
      default boolean belongsToGraph()
      -
      Tensors which are used or produced by the autograd system will have a GraphNode component attached to them. +
    + + + +
      +
    • +

      belongsToGraph

      +
      default boolean belongsToGraph()
      +
      Tensors which are used or produced by the autograd system will have a GraphNode component attached to them. This is because autograd requires recording a computation graph for back-prop traversal. - This autograd system however, will only be triggered by Function implementations which - are not "detached", meaning they have their "Function.isDoingAD()" flags set to true!
      - Detached functions (like those pre-instantiated in Function.Detached.*) will not attach GraphNode + This autograd system however, will only be triggered by Function implementations which + are not "detached", meaning they have their "Function.isDoingAD()" flags set to true!
      + Detached functions (like those pre-instantiated in Function.Detached.*) will not attach GraphNode instances to involved tensors which will prevent the formation of a computation graph.
      -
      -
      Returns:
      +
      +
      Returns:
      The truth value determining if this tensor belongs to a recorded computation graph.
      -
    • -
    • -
      -

      isLeave

      -
      default boolean isLeave()
      -
      Tensors which are used or produced by the autograd system will have a GraphNode component attached to them. +
    + + + +
      +
    • +

      isLeave

      +
      default boolean isLeave()
      +
      Tensors which are used or produced by the autograd system will have a GraphNode component attached to them. This is because autograd requires recording a computation graph for back-prop traversal. - This autograd system however, will only be triggered by Function implementations which - are not "detached", meaning they have their "Function.isDoingAD()" flags set to true!
      + This autograd system however, will only be triggered by Function implementations which + are not "detached", meaning they have their "Function.isDoingAD()" flags set to true!
A tensor is a leave if it is attached to a computation graph in which it is not an intermediate / branch node but an input / leave node.
      -
      -
      Returns:
      +
      +
      Returns:
      The truth value determining if this tensor is attached to a computation graph as leave node.
      -
    • -
    • -
      -

      isBranch

      -
      default boolean isBranch()
      -
      Tensors which are used or produced by the autograd system will have a GraphNode component attached to them. +
    + + + +
      +
    • +

      isBranch

      +
      default boolean isBranch()
      +
      Tensors which are used or produced by the autograd system will have a GraphNode component attached to them. This is because autograd requires recording a computation graph for back-prop traversal. - This autograd system however, will only be triggered by Function implementations which - are not "detached", meaning they have their "Function.isDoingAD()" flags set to true!
      + This autograd system however, will only be triggered by Function implementations which + are not "detached", meaning they have their "Function.isDoingAD()" flags set to true!
A tensor is a branch if it is attached to a computation graph in which it is not an input / leave node but an intermediate / branch node.
      -
      -
      Returns:
      +
      +
      Returns:
      The truth value determining if this tensor is attached to a computation graph as branch node.
      -
    • -
    • -
      -

      hasGradient

      -
      default boolean hasGradient()
      +
    + + + +
      +
    • +

      hasGradient

      +
      default boolean hasGradient()
Tensors can be components of other tensors, which implicitly makes them the gradients of the tensors they are attached to.
      -
      -
      Returns:
      +
      +
      Returns:
      The truth value determining if this tensor has another tensor attached to it (which is its gradient).
      -
    • -
    • -
      -

      gradientApplyRequested

      -
      boolean gradientApplyRequested()
      +
    + + + +
      +
    • +

      gradientApplyRequested

      +
      boolean gradientApplyRequested()
      This flag works alongside two autograd features which can be enabled inside the library settings. They will come into effect when flipping their feature flags,
      namely: 'isApplyingGradientWhenRequested' and 'isApplyingGradientWhenTensorIsUsed'
      @@ -2926,16 +3467,19 @@

      gradientApplyRequested

      This signal comes in the form of a "request" flag which marks a tensor as allowed to be updated by its gradient.

      -
      -
      Returns:
      +
      +
      Returns:
      The truth value determining if the application of the gradient of this tensor is requested.
      -
    • -
    • -
      -

      setGradientApplyRequested

      -
      Tensor<V> setGradientApplyRequested(boolean applyRequested)
      +
    + + + +
      +
    • +

      setGradientApplyRequested

      +
      Tensor<V> setGradientApplyRequested(boolean applyRequested)
      This flag works alongside two autograd features which can be enabled inside the library settings. They will come into effect when flipping their feature flags,
      namely: 'isApplyingGradientWhenRequested' and 'isApplyingGradientWhenTensorIsUsed'
      @@ -2948,77 +3492,92 @@

      setGradientApplyRequested

      This signal comes in the form of a "request" flag which marks a tensor as allowed to be updated by its gradient.

      -
      -
      Parameters:
      +
      +
      Parameters:
      applyRequested - The truth value determining if the application of the gradient of this tensor is requested.
      -
      Returns:
      +
      Returns:
      This very tensor instance in order to enable method chaining.
      -
    • -
    • -
      -

      update

      -
      default boolean update(Component.OwnerChangeRequest<Tensor<V>> changeRequest)
      -
      Important : Components of type Tensor are simply gradients! +
    + + + +
      +
    • +

      update

      +
      default boolean update(Component.OwnerChangeRequest<Tensor<V>> changeRequest)
      +
      Important : Components of type Tensor are simply gradients! Currently, this method is used only to catch illegal arguments which is for example the case when trying to attach a gradient with a different shape... (Otherwise the gradient tensor "does not mind" an owner change...)
      -
      -
      Specified by:
      -
      update in interface Component<V>
      -
      Parameters:
      -
      changeRequest - An Component.OwnerChangeRequest implementation instance used to communicate the type of change, context information and the ability to execute the change directly.
      -
      Returns:
      +
      +
      Specified by:
      +
      update in interface Component<Tensor<V>>
      +
      Parameters:
      +
      changeRequest - An Component.OwnerChangeRequest implementation instance used to communicate the type of change, context information and the ability to execute the change directly.
      +
      Returns:
      The truth value determining if the state change should be aborted or not.
      -
    • -
    • -
      -

      getVersion

      -
      int getVersion()
      +
    + + + +
      +
    • +

      getVersion

      +
      int getVersion()
The version number tracks how often this tensor has been mutated. This is especially useful for checking the correctness of autograd!
      -
    • -
    • -
      -

      getDataType

      -
      DataType<V> getDataType()
      -
      This method returns the DataType instance of this Tensor, which is +
    + + + +
      +
    • +

      getDataType

      +
      DataType<V> getDataType()
      +
      This method returns the DataType instance of this Tensor, which is a wrapper object for the actual type class representing the value items stored inside the underlying data array of this tensor.
      -
      -
      Returns:
      -
      The DataType instance of this Tensor storing important type information.
      -
      - -
    • -
    • -
      -

      getRepresentativeItemClass

      -
      Class<?> getRepresentativeItemClass()
      -
      The Class returned by this method is the representative Class of the - value items of a concrete AbstractNda but not necessarily the actual Class of +
      +
      Returns:
      +
      The DataType instance of this Tensor storing important type information.
      +
      +
    • +
    + + + +
      +
    • +

      getRepresentativeItemClass

      +
      java.lang.Class<?> getRepresentativeItemClass()
      +
      The Class returned by this method is the representative Class of the + value items of a concrete AbstractNda but not necessarily the actual Class of a given value item, this is especially true for numeric types, which are represented by - implementations of the NumericType interface.
      - For example in the case of a tensor of type Double, this method would - return F64 which is the representative class of Double.
      - Calling the Nda.getItemType() method instead of this method would return the actual value - type class, namely: Double.
      -
      -
      Returns:
      + implementations of the NumericType interface.
      + For example in the case of a tensor of type Double, this method would + return F64 which is the representative class of Double.
      + Calling the Nda.getItemType() method instead of this method would return the actual value + type class, namely: Double. +
      +
      Returns:
      The representative type class of individual value items within this concrete AbstractNda - extension instance which might also be subclasses of the NumericType interface + extension instance which might also be subclasses of the NumericType interface to model unsigned types or other JVM foreign numeric concepts.
      -
    • -
    • -
      -

      getMut

      -
      MutateTensor<V> getMut()
      +
    + + + +
      +
    • +

      getMut

      +
      MutateTensor<V> getMut()
      This method exposes an API for mutating the state of this tensor. The usage of methods exposed by this API is generally discouraged because the exposed state can easily lead to broken tensors and exceptional situations!
      @@ -3028,18 +3587,21 @@

      getMut

      performance is critical!
      (Like in custom backend extensions for example)
      -
      -
      Specified by:
      -
      getMut in interface Nda<V>
      -
      Returns:
      +
      +
      Specified by:
      +
      getMut in interface Nda<V>
      +
      Returns:
      The unsafe API exposes methods for mutating the state of the tensor.
      -
    • -
    • -
      -

      mut

      -
      default MutateTensor<V> mut()
      +
    + + + +
      +
    • +

      mut

      +
      default MutateTensor<V> mut()
      This method exposes an API for mutating the state of this tensor. The usage of methods exposed by this API is generally discouraged because the exposed state can easily lead to broken tensors and exceptional situations!
      @@ -3049,18 +3611,21 @@

      mut

      performance is critical!
      (Like custom backend extensions for example)
      -
      -
      Specified by:
      -
      mut in interface Nda<V>
      -
      Returns:
      +
      +
      Specified by:
      +
      mut in interface Nda<V>
      +
      Returns:
      The unsafe API exposes methods for mutating the state of the tensor.
      -
    • -
    • -
      -

      reshape

      -
      default Tensor<V> reshape(int... shape)
      +
    + + + +
      +
    • +

      reshape

      +
      default Tensor<V> reshape(int... shape)
      Returns a nd-array with the same data and number of elements as this nd-array, but with the specified shape. When possible, the returned nd-array will be a view of this nd-array. @@ -3070,124 +3635,145 @@

      reshape

      Keep in mind that the new shape must have the same number of elements as the original shape.

      This operation supports autograd.
      -
      -
      Specified by:
      -
      reshape in interface Nda<V>
      -
      Parameters:
      +
      +
      Specified by:
      +
      reshape in interface Nda<V>
      +
      Parameters:
      shape - The new shape of the returned nd-array.
      -
      Returns:
      +
      Returns:
      A new nd-array instance with the same underlying data (~shallow copy) but with a different shape.
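A brief sketch based on the shaped collector documented above (referred to here as Tensor.shaped, matching this interface):

     var t = Stream.of( 1, 2, 3, 4, 5, 6 ).collect( Tensor.shaped( 2, 3 ) );
     var r = t.reshape( 3, 2 ); // same 6 items, now viewed as a 3x2 nd-array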
      -
    • -
    • -
      -

      permute

      -
      default Tensor<V> permute(int... dims)
      +
    + + + +
      +
    • +

      permute

      +
      default Tensor<V> permute(int... dims)
      Returns a view of the original tensor input with its dimensions permuted.
      Consider a 3-dimensional tensor x with shape (2×3×5), then calling x.permute(1, 0, 2) will return a 3-dimensional tensor of shape (3×2×5).
      -
      -
      Specified by:
      -
      permute in interface Nda<V>
      -
      Parameters:
      +
      +
      Specified by:
      +
      permute in interface Nda<V>
      +
      Parameters:
      dims - The desired ordering of dimensions
      -
      Returns:
      +
      Returns:
      A new nd-array instance with the same underlying data (~shallow copy) but with a different shape.
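Sketching the (2×3×5) example from the description (shape() is the Nda shape accessor used in earlier examples):

     var x = Tensor.ofRandom( Double.class, 2, 3, 5 );
     var y = x.permute( 1, 0, 2 ); // y.shape() is (3x2x5), while x keeps its (2x3x5) shape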
      -
    • -
    • -
      -

      transpose

      -
      default Tensor<V> transpose(int dim1, - int dim2)
      +
    + + + +
      +
    • +

      transpose

      +
      default Tensor<V> transpose(int dim1,
      +                            int dim2)
Returns a view of the original tensor input with the targeted axes swapped / transposed.
      -
      -
      Specified by:
      -
      transpose in interface Nda<V>
      -
      Parameters:
      +
      +
      Specified by:
      +
      transpose in interface Nda<V>
      +
      Parameters:
      dim1 - The first dimension to be swapped.
      dim2 - The second dimension to be swapped.
      -
      Returns:
      +
      Returns:
      A new nd-array instance with the same underlying data (~shallow copy) but with a different shape.
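And a corresponding sketch for the two-axis swap:

     var x = Tensor.ofRandom( Double.class, 2, 3, 5 );
     var z = x.transpose( 0, 1 ); // swaps the first two axes: shape (2x3x5) becomes (3x2x5)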
      - -
    • -
    • -
      -

      to

      -
      Tensor<V> to(Device<?> device)
      -
      This method takes a Device and tries to migrate the contents of this Tensor - instance to that Device!
      -
      -
      Parameters:
      -
      device - The Device which should host this Tensor as well as be added to its components list.
      -
      Returns:
      +
    • +
    + + + +
      +
    • +

      to

      +
      Tensor<V> to(Device<?> device)
      +
      This method takes a Device and tries to migrate the contents of this Tensor + instance to that Device!
      +
      +
      Parameters:
      +
      device - The Device which should host this Tensor as well as be added to its components list.
      +
      Returns:
This very tensor instance to enable method chaining.
      -
    • -
    • -
      -

      to

      -
      default Tensor<V> to(String deviceType)
      -
      -
      Parameters:
      +
    + + + +
      +
    • +

      to

      +
      default Tensor<V> to(java.lang.String deviceType)
      +
      +
      Parameters:
      deviceType - A search key identifying the device onto which this tensor should be stored.
      -
      Returns:
      +
      Returns:
      This very tensor instance in order to enable method chaining.
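A sketch of migrating a tensor by device search key; "GPU" is an illustrative key which assumes an OpenCL device was discovered by the backend:

     var t = Tensor.ofRandom( Double.class, 64, 64 );
     var onGpu = t.to( "GPU" ); // returns this very tensor for chaining, now hosted by the matching device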
      -
    • -
    • -
      -

      set

      -
      default Tensor<V> set(OptimizerFactory optimizerFactory)
      -
      Configures an Optimizer for this tensor based on the given OptimizerFactory - which will be used to create a new Optimizer instance specific to this tensor. - The Optimizer instance will be attached to this tensor as a component - and then called to perform the actual optimization when the applyGradient() method is called. +
    + + + +
      +
    • +

      set

      +
      default Tensor<V> set(OptimizerFactory optimizerFactory)
      +
      Configures an Optimizer for this tensor based on the given OptimizerFactory + which will be used to create a new Optimizer instance specific to this tensor. + The Optimizer instance will be attached to this tensor as a component + and then called to perform the actual optimization when the applyGradient() method is called.

      Here a simple example of how to use this method:

      
         var t = Tsr.of( 1.0, 2.0, 3.0 ).set( Optimizer.ADAM );
         

      - As you can see, the Optimizer interface exposes various types of popular + As you can see, the Optimizer interface exposes various types of popular optimization algorithm factories which can be used to quickly and conveniently create - an Optimizer instance for a particular tensor.

      -
      -
      Parameters:
      -
      optimizerFactory - The OptimizerFactory which will be used to create a new Optimizer instance.
      -
      Returns:
      + an Optimizer instance for a particular tensor. +
      +
      Parameters:
      +
      optimizerFactory - The OptimizerFactory which will be used to create a new Optimizer instance.
      +
      Returns:
      This tensor instance to allow for method chaining.
      -
    • -
    • -
      -

      backward

      -
      default Tensor<V> backward(Tensor<V> error)
      -
      Tensors which are used or produced by the autograd system will have a GraphNode component attached to them. +
    + + + +
      +
    • +

      backward

      +
      default Tensor<V> backward(Tensor<V> error)
      +
      Tensors which are used or produced by the autograd system will have a GraphNode component attached to them. This is because autograd requires recording a computation graph for back-prop traversal. If this tensor is part of a computation graph then this method will traverse an error backward in the recorded history towards tensors which require the accumulation of gradients.
      -
      -
      Parameters:
      +
      +
      Parameters:
error - A tensor which is back-propagated to gradients. Must match the size of this tensor.
      -
      Returns:
      +
      Returns:
      This tensor, to allow for method chaining.
      -
    • -
    • -
      -

      backward

      -
      default Tensor<V> backward(double value)
      -
      Tensors which are used or produced by the autograd system will have a GraphNode component attached to them. +
    + + + +
      +
    • +

      backward

      +
      default Tensor<V> backward(double value)
      +
      Tensors which are used or produced by the autograd system will have a GraphNode component attached to them. This is because autograd requires recording a computation graph for back-prop traversal. If this tensor is part of a computation graph then this method will traverse an error backward in the recorded history towards tensors which require @@ -3197,21 +3783,24 @@

      backward

      turns it into a matching tensor ( with the same shape) which will then be back-propagated through the recorded computation graph.
      -
      -
      Parameters:
      +
      +
      Parameters:
value - A scalar which is back-propagated to gradients. Must match the size of this tensor.
      -
      Returns:
      +
      Returns:
      The tensor, to allow for method chaining.
      -
    • -
    • -
      -

      backward

      -
      default Tensor<V> backward()
      +
    + + + +
      +
    • +

      backward

      +
      default Tensor<V> backward()
      Use this to back-propagate an error signal of 1.0 through the recorded computation graph. Tensors which are used or produced by operations supporting the autograd system - will have this graph defined by GraphNode components attached to them. + will have this graph defined by GraphNode components attached to them. This is because autograd requires recording a computation graph for back-prop traversal. If this tensor is part of a computation graph then this method will traverse an error backward in the recorded history towards tensors which require @@ -3220,129 +3809,162 @@

      backward

      This method assumes that the user wants to back-propagate an error of "1" having the same shape as this tensor.
      -
      -
      Returns:
      +
      +
      Returns:
      This tensor to allow for method chaining.
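Putting the autograd related methods of this interface together, a minimal end-to-end sketch might look as follows (shapes and values are arbitrary):

     var w = Tensor.ofRandom( Double.class, 3 ).setRqsGradient( true );
     var loss = w.times( w ).sum(); // recorded in a computation graph
     loss.backward();               // back-propagates an error of 1.0 through the graph
     w.getGradient().ifPresent( g -> System.out.println( g ) );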
      -
    • -
    • -
      -

      getGradient

      -
      default Optional<Tensor<V>> getGradient()
      -
      -
      Returns:
      +
    + + + +
      +
    • +

      getGradient

      +
      default java.util.Optional<Tensor<V>> getGradient()
      +
      +
      Returns:
      The gradient of this tensor which is internally stored as component.
      -
    • -
    • -
      -

      gradient

      -
      default Optional<Tensor<V>> gradient()
      -
      This is a functionally identical alternative to the getGradient() method.
      -
      -
      Returns:
      +
    + + + +
      +
    • +

      gradient

      +
      default java.util.Optional<Tensor<V>> gradient()
      +
      This is a functionally identical alternative to the getGradient() method.
      +
      +
      Returns:
      The gradient of this tensor which is internally stored as component.
      -
    • -
    • -
      -

      applyGradient

      -
      default void applyGradient()
      +
    + + + +
      +
    • +

      applyGradient

      +
      default void applyGradient()
      If this tensor owns a gradient tensor as component, then it can be applied by this method.
      "Applying" a gradient to a tensor simply means adding the values inside the gradient element-wise to the owning host tensor via an inline operation.
      - -
    • -
    • -
      -

      getDevice

      -
      default Device<V> getDevice()
      -
      -
      Returns:
      -
      The device on which this tensor is stored or CPU if it is not outsourced.
      -
      -
      -
    • -
    • -
      -

      getGraphNode

      -
      default Optional<GraphNode<V>> getGraphNode()
      -
      -
      Returns:
      +
    • +
    + + + +
      +
    • +

      getDevice

      +
      default Device<V> getDevice()
      +
      +
      Returns:
      +
      The device on which this tensor is stored or CPU if it is not outsourced.
      +
      +
    • +
    + + + +
      +
    • +

      getGraphNode

      +
      default java.util.Optional<GraphNode<V>> getGraphNode()
      +
      +
      Returns:
      The graph node optional of the computation graph to which this tensor belongs or an empty optional if not part of a graph.
      -
    • -
    • -
      -

      graphNode

      -
      default Optional<GraphNode<V>> graphNode()
      -
      This is a functionally identical alternative to getGraphNode().
      -
      -
      Returns:
      +
    + + + +
      +
    • +

      graphNode

      +
      default java.util.Optional<GraphNode<V>> graphNode()
      +
      This is a functionally identical alternative to getGraphNode().
      +
      +
      Returns:
      The graph node optional of the computation graph to which this tensor belongs or an empty optional if not part of a graph.
      - -
    • -
    • -
      -

      getFrame

      -
      default Optional<NDFrame<V>> getFrame()
      -
      -
      Returns:
      -
      An instance of the NDFrame component if present.
      -
      -
      -
    • -
    • -
      -

      frame

      -
      default Optional<NDFrame<V>> frame()
      -
      This is a functionally identical alternative to getFrame().
      -
      -
      Returns:
      -
      An instance of the NDFrame component if present.
      -
      -
      -
    • -
    • -
      -

      detached

      -
      default Tensor<V> detached()
      +
    • +
    + + + +
      +
    • +

      getFrame

      +
      default java.util.Optional<NDFrame<V>> getFrame()
      +
      +
      Returns:
      +
      An instance of the NDFrame component if present.
      +
      +
    • +
    + + + +
      +
    • +

      frame

      +
      default java.util.Optional<NDFrame<V>> frame()
      +
      This is a functionally identical alternative to getFrame().
      +
      +
      Returns:
      +
      An instance of the NDFrame component if present.
      +
      +
    • +
    + + + +
      +
    • +

      detached

      +
      default Tensor<V> detached()
      This method returns a new tensor detached from any underlying computation-graph or simply does nothing if no graph is present.
      - Nodes within a computation graph are instances of the "GraphNode" class which are also + Nodes within a computation graph are instances of the "GraphNode" class which are also simple components of the tensors they represent in the graph.
      Therefore, a "detached" clone of this tensor is - simply a tensor without a GraphNode component.
      -
      -
      Returns:
      + simply a tensor without a GraphNode component. +
      +
      Returns:
This very instance in order to allow for a more streamlined usage of this method.
      -
    • -
    • -
      -

      withLabel

      -
      Tensor<V> withLabel(String label)
      -
      -
      Specified by:
      -
      withLabel in interface Nda<V>
      -
      Returns:
      +
    + + + +
      +
    • +

      withLabel

      +
      Tensor<V> withLabel(java.lang.String label)
      +
      +
      Specified by:
      +
      withLabel in interface Nda<V>
      +
      Returns:
      A new nd-array which is a shallow copy of this nd-array but with a different label.
      -
    • -
    • -
      -

      withLabels

      -
      Tensor<V> withLabels(String[]... labels)
      -
      This method receives a nested String array which +
    + + + +
      +
    • +

      withLabels

      +
      Tensor<V> withLabels(java.lang.String[]... labels)
      +
      This method receives a nested String array which ought to contain a label for the index of this nd-array. The index for a single element of this nd-array would be an array of numbers as long as the rank where every number is @@ -3354,21 +3976,24 @@

      withLabels

      dim 0 : ["A", "B"]
      dim 1 : ["1", "2", "3"]

      -
      -
      Specified by:
      -
      withLabels in interface Nda<V>
      -
      Parameters:
      +
      +
      Specified by:
      +
      withLabels in interface Nda<V>
      +
      Parameters:
      labels - A nested String array containing labels for indexes of the nd-array dimensions.
      -
      Returns:
      +
      Returns:
      This nd-array (method chaining).
      -
    • -
    • -
      -

      withLabels

      -
      Tensor<V> withLabels(List<List<Object>> labels)
      -
      This method receives a nested String list which +
    + + + +
      +
    • +

      withLabels

      +
      Tensor<V> withLabels(java.util.List<java.util.List<java.lang.Object>> labels)
      +
      This method receives a nested String list which ought to contain a label for the index of this nd-array. The index for a single element of this nd-array would be an array of numbers as long as the rank where every number is @@ -3380,20 +4005,23 @@

      withLabels

      dim 0 : ["A", "B"]
      dim 1 : ["1", "2", "3"]

      -
      -
      Specified by:
      -
      withLabels in interface Nda<V>
      -
      Parameters:
      +
      +
      Specified by:
      +
      withLabels in interface Nda<V>
      +
      Parameters:
      labels - A nested String list containing labels for indexes of the nd-array dimensions.
      -
      Returns:
      +
      Returns:
      This nd-array (method chaining).
      -
    • -
    • -
      -

      withLabels

      -
      Tensor<V> withLabels(Map<Object,List<Object>> labels)
      +
    + + + +
      +
    • +

      withLabels

      +
      Tensor<V> withLabels(java.util.Map<java.lang.Object,java.util.List<java.lang.Object>> labels)
      This method provides the ability to label not only the indices of the shape of this nd-array, but also the dimension of the shape. @@ -3406,38 +4034,44 @@

      withLabels

      "dim 1" : ["1", "2", "3"]
      ]

      -
      -
      Specified by:
      -
      withLabels in interface Nda<V>
      -
      Parameters:
      +
      +
      Specified by:
      +
      withLabels in interface Nda<V>
      +
      Parameters:
      labels - A map in which the keys are dimension labels and the values are lists of index labels for the dimension.
      -
      Returns:
      +
      Returns:
      This nd-array (method chaining).
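A sketch of the map based labelling for a 2x3 nd-array; the dimension and index labels are purely illustrative (java.util.LinkedHashMap and java.util.List assumed imported):

     Map<Object, List<Object>> labels = new LinkedHashMap<>();
     labels.put( "rows", List.of( (Object) "A", "B" ) );
     labels.put( "cols", List.of( (Object) "1", "2", "3" ) );
     var t = Stream.of( 1, 2, 3, 4, 5, 6 )
                   .collect( Tensor.shaped( 2, 3 ) )
                   .withLabels( labels );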
      -
    • -
    • -
      -

      is

      -
      boolean is(Class<?> typeClass)
      +
    + + + +
      +
    • +

      is

      +
      boolean is(java.lang.Class<?> typeClass)
      This method compares the passed class with the underlying data-type of this NDArray. If the data-type of this NDArray is equivalent to the passed class then the returned boolean will be true, otherwise the method returns false.
      -
      -
      Parameters:
      +
      +
      Parameters:
      typeClass - The class which ought to be compared to the underlying data-type of this NDArray.
      -
      Returns:
      +
      Returns:
      The truth value of the question: Does this NDArray implementation hold the data of the passed type?
      -
    • -
    • -
      -

      plus

      -
      default Tensor<V> plus(Tensor<V> other)
      +
    + + + +
      +
    • +

      plus

      +
      default Tensor<V> plus(Tensor<V> other)
This method will produce the addition of two tensors with the same rank (or two ranks which can be made compatible with padding ones), where the left operand is this Tensor instance and the right operand is the tensor passed to the method. If the shapes of both of the involved tensors are identical then the result will be a regular element-wise addition.

      plus

      Either the dimensions have the same size or one of them has size 1.
      Here is an example of 2 matching shapes: (1, 4, 1) and (3, 4, 1)
      And here is an example of a mismatch: (2, 4, 1) and (3, 4, 1)
      -
      -
      Parameters:
      +
      +
      Parameters:
      other - The right operand of the addition.
      -
      Returns:
      -
      The sum of this instance as the left and the passed Tensor instance as right operand.
      +
      Returns:
      +
      The sum of this instance as the left and the passed Tensor instance as right operand.
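A small sketch of the element-wise case (two vectors of matching shape, values chosen arbitrarily):

     var a = Stream.of( 1.0, 2.0, 3.0 ).collect( Tensor.shaped( 3 ) );
     var b = Stream.of( 10.0, 20.0, 30.0 ).collect( Tensor.shaped( 3 ) );
     var c = a.plus( b ); // element-wise sum: 11.0, 22.0, 33.0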
      -
    • -
    • -
      -

      plus

      -
      default Tensor<V> plus(V value)
      -
      This method will create a new Tensor - with the provided double scalar added to all elements of this Tensor. +
    + + + + + +
      +
    • +

      plus

      +
      default Tensor<V> plus(V value)
      +
      This method will create a new Tensor + with the provided double scalar added to all elements of this Tensor.

The shape of this tensor is irrelevant as the provided value will simply be broadcast to any possible shape.

      -
      -
      Parameters:
      +
      +
      Parameters:
      value - The right operand of the addition.
      -
      Returns:
      +
      Returns:
      The sum between this instance as the left and the passed double as right operand.
      -
    • -
    • -
      -

      minus

      -
      default Tensor<V> minus(Tensor<V> other)
      +
    + + + +
      +
    • +

      minus

      +
      default Tensor<V> minus(Tensor<V> other)
      Performs subtraction on two tensors with the same rank (or two ranks which can be made compatible with padding ones), - where the left operand is this Tensor + where the left operand is this Tensor instance and the right operand is the tensor passed to the method. If the shapes of both of the involved tensors are identical then the result will be a regular element-wise subtraction. @@ -3486,97 +4128,120 @@

      minus

      Either the dimensions have the same size or one of them has size 1.
      Here is an example of 2 matching shapes: (1, 4, 1) and (3, 4, 1)
      And here is an example of a mismatch: (2, 4, 1) and (3, 4, 1)
      -
      -
      Parameters:
      +
      +
      Parameters:
      other - The right operand of the subtraction.
      -
      Returns:
      -
      The difference between this instance as the left and the passed Tensor instance as right operand.
      +
      Returns:
      +
      The difference between this instance as the left and the passed Tensor instance as right operand.
      -
    • -
    • -
      -

      minus

      -
      default Tensor<V> minus(V other)
      -
      This method will create a new Tensor - with the provided item subtracted from all elements of this Tensor. +
    + + + + + +
      +
    • +

      minus

      +
      default Tensor<V> minus(V other)
      +
      This method will create a new Tensor + with the provided item subtracted from all elements of this Tensor.

The shape of this tensor is irrelevant as the provided item will simply be broadcast to all items of this tensor, irrespective of its shape.

      -
      -
      Parameters:
      +
      +
      Parameters:
      other - The right operand of the subtraction, which is an item of the same type as this tensor.
      -
      Returns:
      +
      Returns:
      The difference between this instance as the left and the passed item as right operand.
      -
    • -
    • -
      -

      negative

      -
      default Tensor<V> negative()
      -
      -
      Returns:
      +
    + + + +
      +
    • +

      negative

      +
      default Tensor<V> negative()
      +
      +
      Returns:
      A clone of this tensor where the signs of all elements are flipped.
      - -
    • -
    • -
      -

      T

      -
      default Tensor<V> T()
      -
      Creates and returns a new Tensor instance which is a transposed twin of this instance.
      - This is a shorter alternative to the functionally identical getT() method.
      -
      -
      Returns:
      -
      A new transposed tensor with the same underlying Data as this tensor.
      -
      -
      -
    • -
    • -
      -

      getT

      -
      default Tensor<V> getT()
      -
      A method which returns a new Tensor instance which is a transposed twin of this instance.
      - This is an alternative to the functionally identical T() method.
      -
      -
      Returns:
      -
      A new transposed tensor with the same underlying Data as this tensor.
      -
      -
      -
    • -
    • -
      -

      mean

      -
      default Tensor<V> mean()
      +
    • +
    + + + +
      +
    • +

      T

      +
      default Tensor<V> T()
      +
      Creates and returns a new Tensor instance which is a transposed twin of this instance.
      + This is a shorter alternative to the functionally identical getT() method.
      +
      +
      Returns:
      +
      A new transposed tensor with the same underlying Data as this tensor.
      +
      +
    • +
    + + + +
      +
    • +

      getT

      +
      default Tensor<V> getT()
      +
      A method which returns a new Tensor instance which is a transposed twin of this instance.
      + This is an alternative to the functionally identical T() method.
      +
      +
      Returns:
      +
      A new transposed tensor with the same underlying Data as this tensor.
      +
      +
    • +
    + + + +
      +
    • +

      mean

      +
      default Tensor<V> mean()
Calculates the mean of all values within this tensor and returns it in the form of a scalar tensor.
      This operation supports autograd.
      -
      -
      Returns:
      +
      +
      Returns:
      A scalar tensor which wraps the mean value of all values of this tensor.
      -
    • -
    • -
      -

      sum

      -
      default Tensor<V> sum()
      +
    + + + +
      +
    • +

      sum

      +
      default Tensor<V> sum()
Calculates the sum of all values within this tensor and returns it in the form of a scalar tensor.
      This operation supports autograd.
      -
      -
      Returns:
      +
      +
      Returns:
      A scalar tensor which wraps the sum of all values of this tensor.
      -
    • -
    • -
      -

      sum

      -
      default Tensor<V> sum(int axis)
      +
    + + + +
      +
    • +

      sum

      +
      default Tensor<V> sum(int axis)
Calculates the sum of all values within this tensor along the specified axis and returns it in the form of a tensor.
      @@ -3585,18 +4250,21 @@

      sum

      sum of all values along the axis 1 is a single value for each of the two first dimensions.
      This operation supports autograd.
      -
      -
      Parameters:
      +
      +
      Parameters:
      axis - The axis along which the sum should be calculated.
      -
      Returns:
      +
      Returns:
      A tensor which wraps the sum of all values of this tensor along the specified axis.
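Sketching the axis example from the description (a 2x3 nd-array summed along axis 1; whether the reduced axis is kept as size 1 is left to the implementation):

     var t = Stream.of( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0 ).collect( Tensor.shaped( 2, 3 ) );
     var rowSums = t.sum( 1 ); // one sum per row: 6.0 and 15.0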
      -
    • -
    • -
      -

      sum

      -
      default Tensor<V> sum(int... axes)
      +
    + + + +
      +
    • +

      sum

      +
      default Tensor<V> sum(int... axes)
Calculates the sum of all values within this tensor along the specified axes and returns it in the form of a tensor.
      @@ -3605,157 +4273,187 @@

      sum

      sum of all values along the axis 1 and 2 is a single value for each of the two first dimensions.
      This operation supports autograd.
      -
      -
      Parameters:
      +
      +
      Parameters:
      axes - The axes along which the sum should be calculated.
      -
      Returns:
      +
      Returns:
      A tensor which wraps the sum of all values of this tensor along the specified axes.
      -
    • -
    • -
      -

      min

      -
      default Tensor<V> min()
      +
    + + + +
      +
    • +

      min

      +
      default Tensor<V> min()
Calculates the minimum of all values within this tensor and returns it in the form of a scalar tensor.
      This operation supports autograd.
      -
      -
      Returns:
      +
      +
      Returns:
      A scalar tensor which wraps the smallest of all values of this tensor.
      -
    • -
    • -
      -

      max

      -
      default Tensor<V> max()
      +
    + + + +
      +
    • +

      max

      +
      default Tensor<V> max()
Calculates the maximum of all values within this tensor and returns it in the form of a scalar tensor.
      This operation supports autograd.
      -
      -
      Returns:
      +
      +
      Returns:
      A scalar tensor which wraps the largest of all values of this tensor.
      -
    • -
    • -
      -

      convDot

      -
      default Tensor<V> convDot(Tensor<V> other)
      +
    + + + +
      +
    • +

      convDot

      +
      default Tensor<V> convDot(Tensor<V> other)
      This method performs a convolutional based dot product between the last dimension of this tensor and the first dimension of the passed tensor.
      -
      -
      Parameters:
      +
      +
      Parameters:
      other - The tensor which is the right part of the dot product operation.
      -
      Returns:
      +
      Returns:
      A new tensor which is the dot product of this tensor and the passed one.
      -
    • -
    • -
      -

      dot

      -
      default Tensor<V> dot(Tensor<V> other)
      +
    + + + +
      +
    • +

      dot

      +
      default Tensor<V> dot(Tensor<V> other)
      Performs a dot product between the last dimension of this tensor and the first dimension of the provided tensor. However, currently this method can only handle matrices which means - that it is functionally completely identical to the matMul(Tensor) method.
      -
      -
      Parameters:
      + that it is functionally completely identical to the matMul(Tensor) method. +
      +
      Parameters:
      other - The tensor which is the right part of the dot product operation.
      -
      Returns:
      +
      Returns:
      A new tensor which is the dot product of this tensor and the passed one.
      -
    • -
    • -
      -

      matMul

      -
      default Tensor<V> matMul(Tensor<V> other)
      +
    + + + +
      +
    • +

      matMul

      +
      default Tensor<V> matMul(Tensor<V> other)
      This will produce the matrix product of - two tensors with rank 2 (matrices), where the left operand is this Tensor + two tensors with rank 2 (matrices), where the left operand is this Tensor instance and the right operand is the argument passed to the method.
      -
      -
      Parameters:
      +
      +
      Parameters:
      other - The right operand of the matrix multiplication.
      -
      Returns:
      -
      The matrix product of this instance as the left and the passed Tensor instance as right operand.
      +
      Returns:
      +
      The matrix product of this instance as the left and the passed Tensor instance as right operand.
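A minimal matrix product sketch using the shaped collector from above:

     var a = Stream.of( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0 ).collect( Tensor.shaped( 2, 3 ) );
     var b = Stream.of( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0 ).collect( Tensor.shaped( 3, 2 ) );
     var c = a.matMul( b ); // a 2x3 matrix times a 3x2 matrix yields a 2x2 matrix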
      -
    • -
    • -
      -

      conv

      -
      default Tensor<V> conv(Tensor<V> other)
      +
    + + + +
      +
    • +

      conv

      +
      default Tensor<V> conv(Tensor<V> other)
      This method performs convolution between this tensor and the one passed as argument. - The convolution is performed by the Function which is registered under the name "conv".
      -
      -
      Parameters:
      + The convolution is performed by the Function which is registered under the name "conv". +
      +
      Parameters:
      other - The tensor which is the right operand of the convolutional operation.
      -
      Returns:
      +
      Returns:
      A new tensor which is the result of the convolutional operation.
      -
    • -
    • -
      -

      dimtrim

      -
      default Tensor<V> dimtrim()
      -
      This creates a new tensor with the same underlying Data and whose shape is trimmed. +
    + + + +
      +
    • +

      dimtrim

      +
      default Tensor<V> dimtrim()
      +
      This creates a new tensor with the same underlying Data and whose shape is trimmed. A trimmed shape is simply a shape without preceding and trailing ones.
For example the shape (1x4x1x2x1) would be trimmed to (4x1x2). The underlying operation does not perform a removal of redundant ones altogether. Only ones at the start and the end will be removed. A scalar tensor will not be affected by this operation.
      -
      -
      Returns:
      +
      +
      Returns:
      A tensor with the same underlying data but possibly trimmed shape without preceding or trailing ones.
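A quick sketch of the trimming behaviour described above:

     var t = Tensor.ofRandom( Double.class, 1, 4, 1, 2, 1 );
     var trimmed = t.dimtrim(); // shape (1x4x1x2x1) becomes (4x1x2)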
      -
    • -
    • -
      -

      isCase

      -
      boolean isCase(Tensor<V> other)
      +
    + + + +
      +
    • +

      isCase

      +
      boolean isCase(Tensor<V> other)
      This method name translates to the "in" keyword in Groovy! The same is true for the "contains" method in Kotlin. Both methods do the exact same thing, however they exist for better language support.
      -
      -
      Parameters:
      +
      +
      Parameters:
      other - The tensor which will be checked.
      -
      Returns:
      +
      Returns:
      The answer to the following question: Is the data of the provided tensor a subset of the data of this tensor?
      -
    • -
    • -
      -

      contains

      -
      default boolean contains(Tensor<V> other)
      +
    + + + +
      +
    • +

      contains

      +
      default boolean contains(Tensor<V> other)
      This method name translates to the "in" keyword in Kotlin! The same is true for the "isCase" method in Groovy. Both methods do the exact same thing, however they exist for better language support.
      -
      -
      Parameters:
      +
      +
      Parameters:
      other - The tensor which will be checked.
      -
      Returns:
      +
      Returns:
      The answer to the following question: Is the data of the provided tensor a subset of the data of this tensor?
      -
    • -
    • -
      -

      multiply

      -
      default Tensor<V> multiply(Tensor<V> other)
      -
      This method is synonymous to the times(Tensor) method. +
    + + + +
      +
    • +

      multiply

      +
      default Tensor<V> multiply(Tensor<V> other)
      +
This method is synonymous to the times(Tensor) method. Both of which will produce the product of two tensors with the same rank (or two ranks which can be made compatible with padding ones), where the left operand is this Tensor instance and the right operand is the tensor passed to the method. If the shapes of both of the involved tensors are identical then the result will be a regular element-wise product.

      multiply

      Either the dimensions have the same size or one of them has size 1.
      Here is an example of 2 matching shapes: (1, 4, 1) and (3, 4, 1)
      And here is an example of a mismatch: (2, 4, 1) and (3, 4, 1)
      -
      -
      Parameters:
      +
      +
      Parameters:
      other - The right operand of the multiplication.
      -
      Returns:
      -
      The product of this instance as the left and the passed Tensor instance as right operand.
      +
      Returns:
      +
      The product of this instance as the left and the passed Tensor instance as right operand.
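A sketch of the broadcasting rule described above (the operands x and y are hypothetical, pre-existing tensors):

        // 'x' is assumed to have shape (1, 4, 1) and 'y' shape (3, 4, 1);
        // the shapes match because every axis is either equal or has size 1.
        Tensor<Double> product = x.multiply(y); // broadcast product of shape (3, 4, 1)
        Tensor<Double> same    = x.times(y);    // 'times' is the synonymous method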
      -
    • -
    • -
      -

      multiply

      -
      default Tensor<V> multiply(V other)
      -
      -
      Parameters:
      +
    + + + + + +
      +
    • +

      multiply

      +
      default Tensor<V> multiply(V other)
      +
      +
      Parameters:
      other - The value which should be broadcast to all elements of a clone of this tensor.
      -
      Returns:
      +
      Returns:
      A new tensor where all elements are multiplied by the provided value.
      -
    • -
    • -
      -

      times

      -
      default Tensor<V> times(Tensor<V> other)
      -
      This is a functionally identical synonym to the multiply(Tensor) method. +
    + + + +
      +
    • +

      times

      +
      default Tensor<V> times(Tensor<V> other)
      +
This is a functionally identical synonym to the multiply(Tensor) method. Both will produce the product of two tensors with the same rank (or two ranks which can be made compatible by padding with ones), where the left operand is this Tensor instance and the right operand is the tensor passed to the method. If the shapes of both of the involved tensors are identical then the result will be a regular element-wise product.
@@ -3800,45 +4506,56 @@

      times

      Either the dimensions have the same size or one of them has size 1.
      Here is an example of 2 matching shapes: (1, 4, 1) and (3, 4, 1)
      And here is an example of a mismatch: (2, 4, 1) and (3, 4, 1)
      -
      -
      Parameters:
      +
      +
      Parameters:
      other - The right operand of the multiplication.
      -
      Returns:
      -
      The product of this instance as the left and the passed Tensor instance as right operand.
      +
      Returns:
      +
      The product of this instance as the left and the passed Tensor instance as right operand.
      -
    • -
    • -
      -

      times

      -
      default Tensor<V> times(V other)
      -
      -
      Parameters:
      +
    + + + + + +
      +
    • +

      times

      +
      default Tensor<V> times(V other)
      +
      +
      Parameters:
      other - The value which should be broadcast to all elements of a clone of this tensor.
      -
      Returns:
      +
      Returns:
      A new tensor where all elements are multiplied by the provided value.
      -
    • -
    • -
      -

      multiply

      -
      default Tensor<V> multiply(double value)
      -
      -
      Parameters:
      +
    + + + +
      +
    • +

      multiply

      +
      default Tensor<V> multiply(double value)
      +
      +
      Parameters:
      value - The value which should be broadcast to all elements of a clone of this tensor.
      -
      Returns:
      +
      Returns:
      A new tensor where all elements are multiplied by the provided value.
      -
    • -
    • -
      -

      div

      -
      default Tensor<V> div(Tensor<V> other)
      +
    + + + +
      +
    • +

      div

      +
      default Tensor<V> div(Tensor<V> other)
This method will produce the quotient of two tensors with the same rank (or two ranks which can be made compatible by padding with ones), where the left operand is this Tensor instance and the right operand is the tensor passed to the method. If the shapes of both of the involved tensors are identical then the result will be a regular element-wise division.
@@ -3847,27 +4564,35 @@

      div

      Either the dimensions have the same size or one of them has size 1.
      Here is an example of 2 matching shapes: (1, 4, 1) and (3, 4, 1)
      And here is an example of a mismatch: (2, 4, 1) and (3, 4, 1)
      -
      -
      Parameters:
      +
      +
      Parameters:
      other - The right operand of the division.
      -
      Returns:
      -
      The quotient of this instance as the left and the passed Tensor instance as right operand.
      +
      Returns:
      +
      The quotient of this instance as the left and the passed Tensor instance as right operand.
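As an element-wise sketch (the Tensor.of(double...) vector factory is assumed):

        Tensor<Double> a = Tensor.of(8d, 9d, 10d);
        Tensor<Double> b = Tensor.of(2d, 5d, 10d);
        Tensor<Double> quotient = a.div(b); // element-wise: 4.0, 1.8, 1.0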
      -
    • -
    • -
      -

      div

      -
      default Tensor<V> div(V value)
      -
      +
    + + + + + + + + + +
      +
    • +

      mod

      +
      default Tensor<V> mod(Tensor<V> other)
Produces the modulus of two tensors with the same rank (or two ranks which can be made compatible by padding with ones), where the left operand is this Tensor instance and the right operand is the tensor passed to the method. If the shapes of these 2 tensors are identical then the result will be a regular element-wise modulo operation.
@@ -3876,41 +4601,50 @@

      mod

      Either the dimensions have the same size or one of them has size 1.
      Here is an example of 2 matching shapes: (1, 4, 1) and (3, 4, 1)
      And here is an example of a mismatch: (2, 4, 1) and (3, 4, 1)
      -
      -
      Parameters:
      +
      +
      Parameters:
      other - The right operand of the modulo operation.
      -
      Returns:
      -
      The modulus of this instance as the left and the passed Tensor instance as right operand.
      +
      Returns:
      +
      The modulus of this instance as the left and the passed Tensor instance as right operand.
      -
    • -
    • -
      -

      mod

      -
      default Tensor<V> mod(int other)
      -
      -
      Parameters:
      +
    + + + +
      +
    • +

      mod

      +
      default Tensor<V> mod(int other)
      +
      +
      Parameters:
      other - The value which should be broadcast to all elements of a clone of this tensor.
      -
      Returns:
      +
      Returns:
      A new tensor where the modulo operation is applied to all elements using the provided int as right operand.
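A minimal sketch of the broadcast modulo (the Tensor.of(double...) vector factory is assumed; rem(int), documented right below, behaves identically):

        Tensor<Double> t = Tensor.of(5d, 6d, 7d, 8d);
        Tensor<Double> m = t.mod(3); // element-wise: 2.0, 0.0, 1.0, 2.0
        Tensor<Double> r = t.rem(3); // synonym of mod(int), same result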
      -
    • -
    • -
      -

      rem

      -
      default Tensor<V> rem(int other)
      -
      This method is synonymous to the mod(int) method.
      -
      +
    + + + +
      +
    • +

      rem

      +
      default Tensor<V> rem(int other)
      +
      This method is synonymous to the mod(int) method.
    • -
    • -
      -

      power

      -
      default Tensor<V> power(Tensor<V> other)
      +
    + + + +
      +
    • +

      power

      +
      default Tensor<V> power(Tensor<V> other)
This will produce the power of two tensors with the same rank (or two ranks which can be made compatible by padding with ones), where the left operand is this Tensor instance and the right operand is the tensor passed to the method. If the shapes of the involved tensors are identical then the result will be a regular element-wise exponentiation.
@@ -3919,46 +4653,60 @@

      power

      Either the dimensions have the same size or one of them has size 1.
      Here is an example of 2 matching shapes: (1, 4, 1) and (3, 4, 1)
      And here is an example of a mismatch: (2, 4, 1) and (3, 4, 1)
      -
      -
      Parameters:
      +
      +
      Parameters:
      other - The right operand, also known as exponent, of the exponentiation.
      -
      Returns:
      -
      The power of this instance as the left and the passed Tensor instance as right operand.
      +
      Returns:
      +
      The power of this instance as the left and the passed Tensor instance as right operand.
      -
    • -
    • -
      -

      power

      -
      default Tensor<V> power(V value)
      +
    + + + + + +
      +
    • +

      power

      +
      default Tensor<V> power(V value)
      Raises all items of this tensor to the power of the provided value. The returned tensor is a new instance which will have the same shape as this tensor.
      -
      -
      Parameters:
      +
      +
      Parameters:
      value - The value which should be used to raise all items of this tensor to the power of.
      -
      Returns:
      +
      Returns:
      A new tensor where all items are raised to the power of the provided value.
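A sketch combining both overloads (the Tensor.of(double...) vector factory is assumed):

        Tensor<Double> base = Tensor.of(1d, 2d, 3d);
        Tensor<Double> exp  = Tensor.of(3d, 2d, 2d);
        Tensor<Double> a = base.power(exp); // element-wise: 1.0, 4.0, 9.0
        Tensor<Double> b = base.power(2d);  // every item squared: 1.0, 4.0, 9.0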
      - -
    • -
    • -
      -

      xor

      -
      default Tensor<V> xor(Tensor<V> other)
      -
      This method is a functionally identical synonym to the power(Tensor) method.
      -
      -
    • -
    • -
      -

      xor

      -
      default Tensor<V> xor(double value)
      -
      This method is a functionally identical synonym to the power(Tensor) method.
      -
      -
    • -
    • -
      -

      sig

      -
      default Tensor<V> sig()
      +
    • +
    + + + + + + + +
      +
    • +

      xor

      +
      default Tensor<V> xor(double value)
      +
      This method is a functionally identical synonym to the power(Tensor) method.
      +
    • +
    + + + +
      +
    • +

      sig

      +
      default Tensor<V> sig()
This method is functionally identical to the following alternatives:
      
             // Pre-instantiated:
      @@ -3966,16 +4714,19 @@ 

      sig

      // Dynamically parsed and instantiated: var out2 = Function.of("sig(I[0])").call(myTensor);
      -
      -
      Returns:
      +
      +
      Returns:
      A new tensor whose items are the result of the sigmoid function applied to the items of this tensor.
      -
    • -
    • -
      -

      tanh

      -
      default Tensor<V> tanh()
      +
    + + + +
      +
    • +

      tanh

      +
      default Tensor<V> tanh()
This method is functionally identical to the following alternatives:
      
             // Pre-instantiated:
      @@ -3983,16 +4734,19 @@ 

      tanh

      // Dynamically parsed and instantiated: var out2 = Function.of("tanh(I[0])").call(myTensor);
      -
      -
      Returns:
      +
      +
      Returns:
      A new tensor whose items are the result of the tanh function applied to the items of this tensor.
      -
    • -
    • -
      -

      relu

      -
      default Tensor<V> relu()
      +
    + + + +
      +
    • +

      relu

      +
      default Tensor<V> relu()
This method is functionally identical to the following alternatives:
      
             // Pre-instantiated:
      @@ -4000,16 +4754,19 @@ 

      relu

      // Dynamically parsed and instantiated: var out2 = Function.of("relu(I[0])").call(myTensor);
      -
      -
      Returns:
      +
      +
      Returns:
      A new tensor whose items are the result of the relu function applied to the items of this tensor.
      -
    • -
    • -
      -

      sin

      -
      default Tensor<V> sin()
      +
    + + + +
      +
    • +

      sin

      +
      default Tensor<V> sin()
This method is functionally identical to the following alternatives:
      
             // Pre-instantiated:
      @@ -4017,16 +4774,19 @@ 

      sin

      // Dynamically parsed and instantiated: var out2 = Function.of("sin(I[0])").call(myTensor);
      -
      -
      Returns:
      +
      +
      Returns:
      A new tensor whose items are the result of the sin function applied to the items of this tensor.
      -
    • -
    • -
      -

      cos

      -
      default Tensor<V> cos()
      +
    + + + +
      +
    • +

      cos

      +
      default Tensor<V> cos()
This method is functionally identical to the following alternatives:
      
             // Pre-instantiated:
      @@ -4034,16 +4794,19 @@ 

      cos

      // Dynamically parsed and instantiated: var out2 = Function.of("cos(I[0])").call(myTensor);
      -
      -
      Returns:
      +
      +
      Returns:
      A new tensor whose items are the result of the cos function applied to the items of this tensor.
      -
    • -
    • -
      -

      ln

      -
      default Tensor<V> ln()
      +
    + + + +
      +
    • +

      ln

      +
      default Tensor<V> ln()
This method is functionally identical to the following alternatives:
      
             // Pre-instantiated:
      @@ -4051,16 +4814,19 @@ 

      ln

      // Dynamically parsed and instantiated: var out2 = Function.of("ln(I[0])").call(myTensor);
      -
      -
      Returns:
      +
      +
      Returns:
      A new tensor whose items are the result of the ln function applied to the items of this tensor.
      -
    • -
    • -
      -

      softplus

      -
      default Tensor<V> softplus()
      +
    + + + +
      +
    • +

      softplus

      +
      default Tensor<V> softplus()
This method is functionally identical to the following alternatives:
      
             // Pre-instantiated:
      @@ -4068,16 +4834,19 @@ 

      softplus

      // Dynamically parsed and instantiated: var out2 = Function.of("softplus(I[0])").call(myTensor);
      -
      -
      Returns:
      +
      +
      Returns:
      A new tensor whose items are the result of the softplus function applied to the items of this tensor.
      -
    • -
    • -
      -

      exp

      -
      default Tensor<V> exp()
      +
    + + + +
      +
    • +

      exp

      +
      default Tensor<V> exp()
This method is functionally identical to the following alternatives:
      
             // Pre-instantiated:
      @@ -4085,16 +4854,19 @@ 

      exp

      // Dynamically parsed and instantiated: var out2 = Function.of("exp(I[0])").call(myTensor);
      -
      -
      Returns:
      +
      +
      Returns:
      A new tensor whose items are the result of the exp function applied to the items of this tensor.
      -
    • -
    • -
      -

      sqrt

      -
      default Tensor<V> sqrt()
      +
    + + + +
      +
    • +

      sqrt

      +
      default Tensor<V> sqrt()
This method is functionally identical to the following alternatives:
      
             // Pre-instantiated:
      @@ -4102,16 +4874,19 @@ 

      sqrt

      // Dynamically parsed and instantiated: var out2 = Function.of("sqrt(I[0])").call(myTensor);
      -
      -
      Returns:
      +
      +
      Returns:
      A new tensor whose items are the result of the sqrt function applied to the items of this tensor.
      -
    • -
    • -
      -

      log10

      -
      default Tensor<V> log10()
      +
    + + + +
      +
    • +

      log10

      +
      default Tensor<V> log10()
This method is functionally identical to the following alternatives:
      
             // Pre-instantiated:
      @@ -4119,16 +4894,19 @@ 

      log10

      // Dynamically parsed and instantiated: var out2 = Function.of("log10(I[0])").call(myTensor);
      -
      -
      Returns:
      +
      +
      Returns:
      A new tensor whose items are the result of the log10 function applied to the items of this tensor.
      -
    • -
    • -
      -

      cbrt

      -
      default Tensor<V> cbrt()
      +
    + + + +
      +
    • +

      cbrt

      +
      default Tensor<V> cbrt()
This method is functionally identical to the following alternatives:
      
             // Pre-instantiated:
      @@ -4136,16 +4914,19 @@ 

      cbrt

      // Dynamically parsed and instantiated: var out2 = Function.of("cbrt(I[0])").call(myTensor);
      -
      -
      Returns:
      +
      +
      Returns:
      A new tensor whose items are the result of the cbrt function applied to the items of this tensor.
      -
    • -
    • -
      -

      abs

      -
      default Tensor<V> abs()
      +
    + + + +
      +
    • +

      abs

      +
      default Tensor<V> abs()
This method is functionally identical to the following alternatives:
      
             // Pre-instantiated:
      @@ -4153,16 +4934,19 @@ 

      abs

      // Dynamically parsed and instantiated: var out2 = Function.of("abs(I[0])").call(myTensor);
      -
      -
      Returns:
      +
      +
      Returns:
      A new tensor whose items are the result of the abs function applied to the items of this tensor.
      -
    • -
    • -
      -

      neg

      -
      default Tensor<V> neg()
      +
    + + + +
      +
    • +

      neg

      +
      default Tensor<V> neg()
This method is functionally identical to the following alternatives:
      
             // Pre-instantiated:
      @@ -4170,65 +4954,80 @@ 

      neg

      // Dynamically parsed and instantiated: var out2 = Function.of("neg(I[0])").call(myTensor);
      -
      -
      Returns:
      +
      +
      Returns:
      A new tensor whose items are the result of the neg function applied to the items of this tensor.
      -
    • -
    • -
      -

      softmax

      -
      default Tensor<V> softmax()
      -
      -
      Returns:
      +
    + + + +
      +
    • +

      softmax

      +
      default Tensor<V> softmax()
      +
      +
      Returns:
      A new tensor whose items are the result of the softmax function applied to the items of this tensor.
      -
    • -
    • -
      -

      softmax

      -
      default Tensor<V> softmax(int axis)
      -
      -
      Returns:
      +
    + + + +
      +
    • +

      softmax

      +
      default Tensor<V> softmax(int axis)
      +
      +
      Returns:
      A new tensor whose items are the result of the softmax function applied to the items of this tensor.
      -
    • -
    • -
      -

      softmax

      -
      default Tensor<V> softmax(int... axes)
      +
    + + + +
      +
    • +

      softmax

      +
      default Tensor<V> softmax(int... axes)
      Calculates the softmax function along the specified axes.
For example, if this tensor has a shape of (2, 3, 4) and the axes 0 and 2 are chosen, then the result will be a tensor of the same shape where all elements summed up along axes 0 and 2 add up to 1. So calling sum(0, 2) on the result would in this example yield a tensor of shape (1, 3, 1) where every item is 1.
      This operation supports autograd.
      -
      -
      Parameters:
      +
      +
      Parameters:
      axes - The axes along which the softmax function should be applied.
      -
      Returns:
      +
      Returns:
      A new tensor whose items are the result of the softmax function applied to the items of this tensor.
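A sketch of the axis semantics described above (the tensor t is assumed to have been created elsewhere with shape (2, 3, 4)):

        // 't' is assumed to have shape (2, 3, 4):
        Tensor<Double> s = t.softmax(0, 2);
        // Summing 's' over axes 0 and 2 yields a (1, 3, 1) tensor filled with 1.0,
        // i.e. the entries along those axes now form probability distributions.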
      -
    • -
    • -
      -

      sigmoid

      -
      default Tensor<V> sigmoid()
      -
      -
      Returns:
      +
    + + + +
      +
    • +

      sigmoid

      +
      default Tensor<V> sigmoid()
      +
      +
      Returns:
      A new tensor whose items are the result of the sigmoid function applied to the items of this tensor.
      -
    • -
    • -
      -

      slice

      -
      AxisOrGetTensor<V> slice()
      -
      This method returns a SliceBuilder instance exposing a simple builder API +
    + + + +
      +
    • +

      slice

      +
      AxisOrGetTensor<V> slice()
      +
      This method returns a SliceBuilder instance exposing a simple builder API which enables the configuration of a slice of the current nd-array via method chaining.
      The following code snippet slices a 3-dimensional nd-array into a nd-array of shape (2x1x3)
      
      @@ -4238,211 +5037,247 @@ 

      slice

      .axis().from(0).to(2) .get();
      -
      -
      Specified by:
      -
      slice in interface Nda<V>
      -
      Returns:
      -
      An instance of the SliceBuilder class exposing a readable builder API for creating slices.
      -
      - -
    • -
    • -
      -

      concatAt

      -
      default Tensor<V> concatAt(int axis, - Nda<V> other, - Nda<V>... ndArrays)
      +
      +
      Specified by:
      +
      slice in interface Nda<V>
      +
      Returns:
      +
      An instance of the SliceBuilder class exposing a readable builder API for creating slices.
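A hedged sketch of the builder chain (only from(int), to(int) and get() appear in the snippet above; the axis(int) selection step and the shapes used here are assumptions and may differ from the actual AxisOrGetTensor API):

        // 't' is assumed to have shape (3, 2, 4); axis(int) is an assumed
        // selection step of the slice builder:
        Tensor<Double> s = t.slice()
                            .axis(0).from(0).to(1)  // keep 2 entries of axis 0
                            .axis(1).from(0).to(0)  // keep 1 entry  of axis 1
                            .axis(2).from(0).to(2)  // keep 3 entries of axis 2
                            .get();                 // resulting shape: (2, 1, 3)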
      +
      +
    • +
    + + + +
      +
    • +

      concatAt

      +
      default Tensor<V> concatAt(int axis,
      +                           Nda<V> other,
      +                           Nda<V>... ndArrays)
      This method concatenates the provided nd-arrays together with this nd-array along a specified axis. The provided nd-arrays must have the same shape and data type as the current nd-array, except for the specified axis.
      -
      -
      Specified by:
      -
      concatAt in interface Nda<V>
      -
      Parameters:
      +
      +
      Specified by:
      +
      concatAt in interface Nda<V>
      +
      Parameters:
      axis - The axis along which the provided nd-arrays should be concatenated. The axis must be within the range of the rank of the current nd-array.
      other - The other nd-arrays which should be concatenated with this nd-array.
      ndArrays - The non-null, non-empty nd-arrays which should be concatenated together with this and the other nd-array. The nd-arrays all must have the same shape as this nd-array, except for the specified axis. Also, it must have the same data type as the current nd-array.
      -
      Returns:
      +
      Returns:
      A new nd-array which is the concatenation of the current nd-array and the provided nd-arrays.
      -
    • -
    • -
      -

      concatAt

      -
      default Tensor<V> concatAt(int axis, - Nda<V> other)
      +
    + + + +
      +
    • +

      concatAt

      +
      default Tensor<V> concatAt(int axis,
      +                           Nda<V> other)
      This method concatenates the provided nd-array together with this nd-array along a specified axis. The provided nd-array must have the same shape and data type as this nd-array, except for the specified axis.
      -
      -
      Specified by:
      -
      concatAt in interface Nda<V>
      -
      Parameters:
      +
      +
      Specified by:
      +
      concatAt in interface Nda<V>
      +
      Parameters:
      axis - The axis along which the provided nd-arrays should be concatenated. The axis must be within the range of the rank of the current nd-array.
      other - The other nd-arrays which should be concatenated with this nd-array.
      -
      Returns:
      +
      Returns:
      A new nd-array which is the concatenation of the current nd-array and the provided nd-arrays.
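A minimal sketch (the operands are hypothetical, pre-existing tensors which only differ along the concatenation axis):

        // 'a' is assumed to have shape (2, 3) and 'b' shape (4, 3), with equal data types:
        Tensor<Double> joined = a.concatAt(0, b); // resulting shape: (6, 3)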
      -
    • -
    • -
      -

      getAt

      -
      Tensor<V> getAt(int... indices)
      +
    + + + +
      +
    • +

      getAt

      +
      Tensor<V> getAt(int... indices)
      The following method enables access to specific scalar elements within the nd-array. The method name also translates to the subscription operator in Groovy.
      -
      -
      Specified by:
      -
      getAt in interface Nda<V>
      -
      Parameters:
      +
      +
      Specified by:
      +
      getAt in interface Nda<V>
      +
      Parameters:
      indices - The index array of the element which should be returned.
      -
      Returns:
      +
      Returns:
      An element located at the provided index.
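A sketch of index based access (the matrix m is assumed to have been created elsewhere with shape (3, 4)):

        // 'm' is assumed to be a rank 2 tensor of shape (3, 4):
        Tensor<Double> item = m.getAt(1, 2); // scalar slice at row 1, column 2
        // In Groovy the same access reads:  def item = m[1, 2]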
      -
    • -
    • -
      -

      getAt

      -
      default Tensor<V> getAt(Number i)
      +
    + + + +
      +
    • +

      getAt

      +
      default Tensor<V> getAt(java.lang.Number i)
      This getter method creates and returns a slice of the original nd-array. The returned slice is a scalar nd-array wrapping a single value element which is being targeted by the provided integer index.
      -
      -
      Specified by:
      -
      getAt in interface Nda<V>
      -
      Parameters:
      +
      +
      Specified by:
      +
      getAt in interface Nda<V>
      +
      Parameters:
      i - The index of the value item which should be returned as a nd-array instance.
      -
      Returns:
      +
      Returns:
      A nd-array holding a single value element which is internally still residing in the original nd-array.
      -
    • -
    • -
      -

      get

      -
      default Tensor<V> get(int... indices)
      +
    + + + +
      +
    • +

      get

      +
      default Tensor<V> get(int... indices)
      The following method enables access to specific scalar elements within the nd-array. The method name also translates to the subscription operator in Groovy.
      -
      -
      Specified by:
      -
      get in interface Nda<V>
      -
      Parameters:
      +
      +
      Specified by:
      +
      get in interface Nda<V>
      +
      Parameters:
      indices - The index array of the element which should be returned.
      -
      Returns:
      +
      Returns:
      An element located at the provided index.
      -
    • -
    • -
      -

      getAt

      -
      default Tensor<V> getAt(Object... args)
      +
    + + + +
      +
    • +

      getAt

      +
      default Tensor<V> getAt(java.lang.Object... args)
      The following method enables the creation of nd-array slices which access the same underlying data (possibly from a different view). The method name also translates to the subscription operator in Groovy.
      -
      -
      Specified by:
      -
      getAt in interface Nda<V>
      -
      Parameters:
      +
      +
      Specified by:
      +
      getAt in interface Nda<V>
      +
      Parameters:
      args - An arbitrary number of arguments which can be used for slicing.
      -
      Returns:
      +
      Returns:
      A slice nd-array created based on the passed keys.
      -
    • -
    • -
      -

      get

      -
      default Tensor<V> get(Object... args)
      +
    + + + +
      +
    • +

      get

      +
      default Tensor<V> get(java.lang.Object... args)
      The following method enables the creation of nd-array slices which access the same underlying data (possibly from a different view). The method name also translates to the subscription operator in Groovy.
      -
      -
      Specified by:
      -
      get in interface Nda<V>
      -
      Parameters:
      +
      +
      Specified by:
      +
      get in interface Nda<V>
      +
      Parameters:
      args - An arbitrary number of arguments which can be used for slicing.
      -
      Returns:
      +
      Returns:
      A slice nd-array created based on the passed keys.
      -
    • -
    • -
      -

      getAt

      -
      default Tensor<V> getAt(int i)
      +
    + + + +
      +
    • +

      getAt

      +
      default Tensor<V> getAt(int i)
      This getter method creates and returns a slice of the original nd-array. The returned slice is a scalar nd-array wrapping a single value element which is being targeted by the provided integer index.
      -
      -
      Specified by:
      -
      getAt in interface Nda<V>
      -
      Parameters:
      +
      +
      Specified by:
      +
      getAt in interface Nda<V>
      +
      Parameters:
      i - The index of the value item which should be returned as a nd-array instance.
      -
      Returns:
      +
      Returns:
      A nd-array holding a single value element which is internally still residing in the original nd-array.
      -
    • -
    • -
      -

      get

      -
      default Tensor<V> get(int i)
      +
    + + + +
      +
    • +

      get

      +
      default Tensor<V> get(int i)
      This getter method creates and returns a slice of the original nd-array. The returned slice is a scalar nd-array wrapping a single value element which is being targeted by the provided integer index.
      -
      -
      Specified by:
      -
      get in interface Nda<V>
      -
      Parameters:
      +
      +
      Specified by:
      +
      get in interface Nda<V>
      +
      Parameters:
      i - The index of the value item which should be returned as a nd-array instance.
      -
      Returns:
      +
      Returns:
      A nd-array holding a single value element which is internally still residing in the original nd-array.
      -
    • -
    • -
      -

      get

      -
      default Tensor<V> get(Number i)
      +
    + + + +
      +
    • +

      get

      +
      default Tensor<V> get(java.lang.Number i)
      This getter method creates and returns a slice of the original nd-array. The returned slice is a scalar nd-array wrapping a single value element which is being targeted by the provided integer index.
      -
      -
      Specified by:
      -
      get in interface Nda<V>
      -
      Parameters:
      +
      +
      Specified by:
      +
      get in interface Nda<V>
      +
      Parameters:
      i - The index of the value item which should be returned as a nd-array instance.
      -
      Returns:
      +
      Returns:
      A nd-array holding a single value element which is internally still residing in the original nd-array.
      -
    • -
    • -
      -

      get

      -
      default Tensor<V> get(Object key)
      +
    + + + +
      +
    • +

      get

      +
      default Tensor<V> get(java.lang.Object key)
      This method enables nd-array slicing! It takes a key of various types and configures a slice nd-array which shares the same underlying data as the original nd-array.
      -
      -
      Specified by:
      -
      get in interface Nda<V>
      -
      Parameters:
      +
      +
      Specified by:
      +
      get in interface Nda<V>
      +
      Parameters:
      key - This object might be a wide range of objects including maps, lists or arrays...
      -
      Returns:
      +
      Returns:
      A slice nd-array or scalar value.
      -
    • -
    • -
      -

      getAt

      -
      Tensor<V> getAt(Map<?,Integer> rangToSteps)
      +
    + + + +
      +
    • +

      getAt

      +
      Tensor<V> getAt(java.util.Map<?,java.lang.Integer> rangToSteps)
      This method is most useful when used in Groovy where defining maps is done through square brackets, making it possible to slice nd-arrays like so:
      @@ -4454,39 +5289,45 @@

      getAt

      i... start indexAlias.
      j... end indexAlias. (inclusive!)
      k... step size.
      -
      -
      Specified by:
      -
      getAt in interface Nda<V>
      -
      Parameters:
      +
      +
      Specified by:
      +
      getAt in interface Nda<V>
      +
      Parameters:
      rangToSteps - A map where the keys define where axes should be sliced and values which define the steps for the specific axis.
      -
      Returns:
      +
      Returns:
      A nd-array slice with an offset based on the provided map keys and steps based on the provided map values.
      -
    • -
    • -
      -

      getAt

      -
      Tensor<V> getAt(List<?> key)
      +
    + + + +
      +
    • +

      getAt

      +
      Tensor<V> getAt(java.util.List<?> key)
      This method enables nd-array slicing! It takes a key of various types and configures a slice nd-array which shares the same underlying data as the original nd-array.
      -
      -
      Specified by:
      -
      getAt in interface Nda<V>
      -
      Parameters:
      +
      +
      Specified by:
      +
      getAt in interface Nda<V>
      +
      Parameters:
      key - This object might be a wide range of objects including maps, lists or arrays...
      -
      Returns:
      +
      Returns:
      A slice nd-array or scalar value.
      -
    • -
    • -
      -

      mapTo

      -
      default <T> Tensor<T> mapTo(Class<T> typeClass, - Function<V,T> mapper)
      +
    + + + +
      +
    • +

      mapTo

      +
      default <T> Tensor<T> mapTo(java.lang.Class<T> typeClass,
      +                            java.util.function.Function<V,T> mapper)

This is a convenience method for mapping a nd-array to a nd-array of new type based on a provided target item type and mapping lambda.
@@ -4504,23 +5345,26 @@

      mapTo

      where ever it may reside back to the JVM, execute the mapping lambda, and then transfer the result back to the original location.

      -
      -
      Specified by:
      -
      mapTo in interface Nda<V>
      -
      Type Parameters:
      +
      +
      Specified by:
      +
      mapTo in interface Nda<V>
      +
      Type Parameters:
      T - The type parameter of the items of the returned nd-array.
      -
      Parameters:
      +
      Parameters:
      typeClass - The class of the item type to which the items of this nd-array should be mapped.
      mapper - The lambda which maps the items of this nd-array to a new one.
      -
      Returns:
      +
      Returns:
      A new nd-array of type T.
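A sketch of both mapping flavours, mapTo(Class, Function) and the same-type map(Function) documented right below (the Tensor.of(double...) vector factory is assumed):

        Tensor<Double> t = Tensor.of(1d, 2d, 3d);
        Tensor<String> labels  = t.mapTo(String.class, v -> "item " + v); // new item type
        Tensor<Double> doubled = t.map(v -> v * 2);                       // same item type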
      -
    • -
    • -
      -

      map

      -
      default Tensor<V> map(Function<V,V> mapper)
      +
    + + + +
      +
    • +

      map

      +
      default Tensor<V> map(java.util.function.Function<V,V> mapper)

This method is a convenience method for mapping the items of this nd-array to another nd-array of the same type based on the provided lambda function, which will be applied
@@ -4536,70 +5380,85 @@

      map

      This is a problem if this nd-array lives somewhere other than the JVM. So, therefore, this method will temporally transfer this nd-array from where ever it may reside back to the JVM, execute the mapping lambda, and then transfer the result back to the original location.
      -
      -
      Specified by:
      -
      map in interface Nda<V>
      -
      Parameters:
      +
      +
      Specified by:
      +
      map in interface Nda<V>
      +
      Parameters:
      mapper - The lambda which maps the items of this nd-array to a new one.
      -
      Returns:
      +
      Returns:
      A new nd-array of type V.
      -
    • -
    • -
      -

      asImage

      - -
      Turns this tensor into a BufferedImage based on the provided - Tensor.ImageType formatting choice.
      -
      -
      Parameters:
      +
    + + + +
      +
    • +

      asImage

      +
      java.awt.image.BufferedImage asImage(Tensor.ImageType type)
      +
Turns this tensor into a BufferedImage based on the provided Tensor.ImageType formatting choice.
      +
      +
      Parameters:
      type - The type of format used to create the buffered image.
      -
      Returns:
      -
      A BufferedImage populated with the contents of this tensor.
      +
      Returns:
      +
      A BufferedImage populated with the contents of this tensor.
      -
    • -
    • -
      -

      asType

      -
      <T> T asType(Class<T> typeClass)
      -
      -
      Type Parameters:
      +
    + + + +
      +
    • +

      asType

      +
      <T> T asType(java.lang.Class<T> typeClass)
      +
      +
      Type Parameters:
      T - The type parameter of the type that will be returned.
      -
      Parameters:
      +
      Parameters:
      typeClass - The class which is the target of the type conversion.
      -
      Returns:
      +
      Returns:
      An instance of the supplied type class.
      -
    • -
    • -
      -

      toString

      -
      default String toString(String conf)
      -
      +
    + + + +
      +
    • +

      toString

      +
      default java.lang.String toString(java.lang.String conf)
    • -
    • -
      -

      toString

      -
      default String toString(NDPrintSettings config)
      -
      Use this to turn this nd-array into a String instance based on the provided - NDPrintSettings instance, which allows you to configure things +
    + + + +
      +
    • +

      toString

      +
      default java.lang.String toString(NDPrintSettings config)
      +
Use this to turn this nd-array into a String instance based on the provided NDPrintSettings instance, which allows you to configure things like the number of chars per entry, delimiters, the number of items per line, etc.
      -
      -
      Specified by:
      -
      toString in interface Nda<V>
      +
      +
      Specified by:
      +
      toString in interface Nda<V>
      -
    • -
    • -
      -

      toString

      -
      default String toString(Consumer<NDPrintSettings> configurator)
      +
    + + + +
      +
    • +

      toString

      +
      default java.lang.String toString(java.util.function.Consumer<NDPrintSettings> configurator)
This allows you to provide a lambda which configures how this nd-array should be converted to String instances. The provided Consumer will receive a NDPrintSettings instance which allows you to change various settings with the help of method chaining.
      Here is an example:
      
      @@ -4611,78 +5470,149 @@ 

      toString

      .setCellSize(15) );
      -
      -
      Specified by:
      -
      toString in interface Nda<V>
      -
      Parameters:
      -
      configurator - A consumer of the NDPrintSettings ready to be configured.
      -
      Returns:
      -
      The String representation of this nd-array.
      -
      - -
    • -
    • -
      -

      deepCopy

      -
      Tensor<V> deepCopy()
      +
      +
      Specified by:
      +
      toString in interface Nda<V>
      +
      Parameters:
      +
      configurator - A consumer of the NDPrintSettings ready to be configured.
      +
      Returns:
      +
      The String representation of this nd-array.
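A minimal sketch (the tensor t is assumed to exist already; setCellSize is the only setter taken from the snippet above, other NDPrintSettings setters are not shown):

        // 't' is assumed to be a previously created tensor:
        String view = t.toString( settings -> settings.setCellSize(15) );
        System.out.println(view);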
      +
      +
    • +
    + + + +
      +
    • +

      deepCopy

      +
      Tensor<V> deepCopy()
      This method creates and returns a new nd-array instance which is not only a copy of the configuration of this nd-array but also a copy of the underlying data array.
      (Note: the underlying nd-array will not be attached to any kind of computation graph)
      -
      -
      Specified by:
      -
      deepCopy in interface Nda<V>
      -
      Returns:
      +
      +
      Specified by:
      +
      deepCopy in interface Nda<V>
      +
      Returns:
      A new nd-array instance which is a deep copy of this nd-array.
      -
    • -
    • -
      -

      shallowCopy

      -
      default Tensor<V> shallowCopy()
      +
    + + + +
      +
    • +

      shallowCopy

      +
      default Tensor<V> shallowCopy()
      This creates a copy where the underlying data is still the same.
      (Note: the underlying nd-array will not be attached to any kind of computation graph)
      -
      -
      Specified by:
      -
      shallowCopy in interface Nda<V>
      -
      Returns:
      +
      +
      Specified by:
      +
      shallowCopy in interface Nda<V>
      +
      Returns:
      A shallow copy where the underlying data is shared with this nd-array.
      -
    • -
    • -
      -

      deepClone

      -
      Tensor<V> deepClone()
      -
      This is almost identical to the deepCopy() method except that +
    + + + +
      +
    • +

      deepClone

      +
      Tensor<V> deepClone()
      +
      This is almost identical to the deepCopy() method except that the returned tensor will have autograd support, meaning that the cloning will be part of the autograd computation graph, and backpropagation will traverse the cloned tensor as well.
      -
      -
      Returns:
      +
      +
      Returns:
      A deep clone of this tensor with autograd support.
      -
    • -
    • -
      -

      shallowClone

      -
      default Tensor<V> shallowClone()
      -
      -
      Returns:
      +
    + + + +
      +
    • +

      shallowClone

      +
      default Tensor<V> shallowClone()
      +
      +
      Returns:
      A shallow copy of this tensor with autograd support.
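A sketch contrasting the three copy flavours described above (the tensor t is assumed to exist already):

        // 't' is assumed to be a previously created tensor:
        Tensor<Double> deep    = t.deepCopy();    // new data array, detached from any graph
        Tensor<Double> shallow = t.shallowCopy(); // shares the underlying data of 't'
        Tensor<Double> cloned  = t.deepClone();   // deep copy which stays part of the
                                                  // autograd computation graph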
      -
diff --git a/docs/jdocs/neureka/Tsr.ImageType.html b/docs/jdocs/neureka/Tsr.ImageType.html
deleted file mode 100644
index 5e224806c..000000000
--- a/docs/jdocs/neureka/Tsr.ImageType.html
+++ /dev/null
@@ -1,568 +0,0 @@
-Tsr.ImageType (neureka 0.21.0 API)
    Package neureka
    -

    Enum Tsr.ImageType

    -
    -
    -
      -
    • java.lang.Object
    • -
    • - -
    • -
    -
    -
      -
    • -
      -
      All Implemented Interfaces:
      -
      java.io.Serializable, java.lang.Comparable<Tsr.ImageType>
      -
      -
      -
      Enclosing interface:
      -
      Tsr<V>
      -
      -
      -
      public static enum Tsr.ImageType
      -extends java.lang.Enum<Tsr.ImageType>
      -
Use this enum as argument for the Tsr.asImage(Tsr.ImageType) method to specify the type of image that should be returned.
      -
    • -
    -
    -
    - -
    -
    -
      -
    • - -
      - -
      - -
      -
        -
      • - - -

        Field Detail

        - - - -
          -
        • -

          bufferType

          -
          public final int bufferType
          -
        • -
        - - - -
          -
        • -

          dataType

          -
          public final DataType<?> dataType
          -
        • -
        - - - -
          -
        • -

          numberOfChannels

          -
          public final int numberOfChannels
          -
        • -
        -
      • -
      -
      - -
      -
        -
      • - - -

        Method Detail

        - - - -
          -
        • -

          values

          -
          public static Tsr.ImageType[] values()
          -
Returns an array containing the constants of this enum type, in the order they are declared. This method may be used to iterate over the constants as follows:

for (Tsr.ImageType c : Tsr.ImageType.values())
    System.out.println(c);
          -
          -
          -
          Returns:
          -
          an array containing the constants of this enum type, in the order they are declared
          -
          -
        • -
        - - - -
          -
        • -

          valueOf

          -
          public static Tsr.ImageType valueOf​(java.lang.String name)
          -
Returns the enum constant of this type with the specified name. The string must match exactly an identifier used to declare an enum constant in this type. (Extraneous whitespace characters are not permitted.)
          -
          -
          Parameters:
          -
          name - the name of the enum constant to be returned.
          -
          Returns:
          -
          the enum constant with the specified name
          -
          Throws:
          -
          java.lang.IllegalArgumentException - if this enum type has no constant with the specified name
          -
          java.lang.NullPointerException - if the argument is null
          -
          -
        • -
        -
      • -
      -
      -
    • -
    -
    -
    -
diff --git a/docs/jdocs/neureka/Tsr.html b/docs/jdocs/neureka/Tsr.html
deleted file mode 100644
index f4ee6eb82..000000000
--- a/docs/jdocs/neureka/Tsr.html
+++ /dev/null
@@ -1,5845 +0,0 @@
-Tsr (neureka 0.21.0 API)
    Package neureka
    -

    Interface Tsr<V>

    -
    -
    -
    -
      -
    • -
      -
      Type Parameters:
      -
      V - The type parameter for the individual value items within this tensor.
      -
      -
      -
      All Superinterfaces:
      -
      Component<Tsr<V>>, ComponentOwner<Tsr<V>>, java.lang.Iterable<V>, Nda<V>, NDimensional
      -
      -
      -
      public interface Tsr<V>
      -extends Nda<V>, Component<Tsr<V>>, ComponentOwner<Tsr<V>>
      -
Tsr is a 3 letter abbreviation of the word "tensor", a mathematical concept. A tensor is a type of multidimensional data-structure with certain transformation properties. Technically however, it is mostly a simple container / data-structure which can house data indexed by N dimensions. Therefore, it is often also described as a nd-array. Elements of a tensor are also mostly numeric.
This means that:
...a tensor of rank 0 is a scalar, a tensor of rank 1 is a vector and a tensor of rank 2 is a matrix, etc...

Consequently, tensors are a perfect fit for applying various operations on them. Such operations might be simple element-wise operations or more complex linear operations like the dot-product, matrix- or even tensor multiplications.
      -
      -
    • -
    -
    -
    -
      -
    • - -
      - -
      - -
      -
        -
      • - - -

        Method Summary

        All Methods Static Methods Instance Methods Abstract Methods Default Methods 
        Modifier and TypeMethodDescription
        default Tsr<V>abs() -
        This method is a functionally identical to the following alternatives:
        -
        default voidapplyGradient() -
        If this tensor owns a gradient tensor as component, then it can be applied by this method.
        -
        java.awt.image.BufferedImageasImage​(Tsr.ImageType type) -
        Turns this tensor into a BufferedImage based on the provided - Tsr.ImageType formatting choice.
        -
        <T> TasType​(java.lang.Class<T> typeClass) 
        default Tsr<V>backward() -
        Use this to back-propagate an error signal of 1.0 through the recorded computation graph.
        -
        default Tsr<V>backward​(double value) -
        Tensors which are used or produced by the autograd system will have a GraphNode component attached to them.
        -
        default Tsr<V>backward​(Tsr<V> error) -
        Tensors which are used or produced by the autograd system will have a GraphNode component attached to them.
        -
        default booleanbelongsToGraph() -
        Tensors which are used or produced by the autograd system will have a GraphNode component attached to them.
        -
        default Tsr<V>cbrt() -
        This method is a functionally identical to the following alternatives:
        -
        default Tsr<V>concatAt​(int axis, - Nda<V> other) -
        This method concatenates the provided nd-array together with this nd-array along a specified axis.
        -
        default Tsr<V>concatAt​(int axis, - Nda<V> other, - Nda<V>... ndArrays) -
        This method concatenates the provided nd-arrays together with this nd-array along a specified axis.
        -
        default booleancontains​(Tsr<V> other) -
        This method name translates to the "in" keyword in Kotlin! - The same is true for the "isCase" method in Groovy.
        -
        default Tsr<V>conv​(Tsr<V> other) -
        This method performs convolution between this tensor and the one passed as argument.
        -
        default Tsr<V>convDot​(Tsr<V> other) -
        This method performs a convolutional based dot product between the last dimension of this tensor - and the first dimension of the passed tensor.
        -
        default Tsr<V>cos() -
        This method is a functionally identical to the following alternatives:
        -
        Tsr<V>deepClone() -
        This is almost identical to the deepCopy() method except that - the returned tensor will have autograd support, meaning that the cloning - will be part of the autograd computation graph, and backpropagation - will traverse the cloned tensor as well.
        -
        Tsr<V>deepCopy() -
        This method creates and returns a new nd-array instance - which is not only a copy of the configuration of this nd-array but also a copy of - the underlying data array.
        -
        default Tsr<V>detached() -
        This method returns a new tensor detached from any underlying computation-graph - or simply does nothing if no graph is present.
        - Nodes within a computation graph are instances of the "GraphNode" class which are also - simple components of the tensors they represent in the graph.
        -
        default Tsr<V>dimtrim() -
        This creates a new tensor with the same underlying Data and whose shape is trimmed.
        -
        default Tsr<V>div​(Tsr<V> other) -
        This method will produce the quotient of - two tensors with the same rank (or two ranks which can be made compatible with padding ones), - where the left operand is this Tsr - instance and the right operand is the tensor passed to the method.
        -
        default Tsr<V>div​(V value) 
        default Tsr<V>dot​(Tsr<V> other) -
        Performs a dot product between the last dimension of this tensor - and the first dimension of the provided tensor.
        -
        default Tsr<V>exp() -
        This method is a functionally identical to the following alternatives:
        -
        default java.util.Optional<NDFrame<V>>frame() -
        This is a functionally identical alternative to getFrame().
        -
        default Tsr<V>get​(int i) -
        This getter method creates and returns a slice of the original nd-array.
        -
        default Tsr<V>get​(int... indices) -
        The following method enables access to specific scalar elements within the nd-array.
        -
        default Tsr<V>get​(java.lang.Number i) -
        This getter method creates and returns a slice of the original nd-array.
        -
        default Tsr<V>get​(java.lang.Object key) -
        This method enables nd-array slicing! - It takes a key of various types and configures a slice - nd-array which shares the same underlying data as the original nd-array.
        -
        default Tsr<V>get​(java.lang.Object... args) -
        The following method enables the creation of nd-array slices which access - the same underlying data (possibly from a different view).
        -
        default Tsr<V>getAt​(int i) -
        This getter method creates and returns a slice of the original nd-array.
        -
        Tsr<V>getAt​(int... indices) -
        The following method enables access to specific scalar elements within the nd-array.
        -
        default Tsr<V>getAt​(java.lang.Number i) -
        This getter method creates and returns a slice of the original nd-array.
        -
        default Tsr<V>getAt​(java.lang.Object... args) -
        The following method enables the creation of nd-array slices which access - the same underlying data (possibly from a different view).
        -
        Tsr<V>getAt​(java.util.List<?> key) -
        This method enables nd-array slicing! - It takes a key of various types and configures a slice - nd-array which shares the same underlying data as the original nd-array.
        -
        Tsr<V>getAt​(java.util.Map<?,​java.lang.Integer> rangToSteps) -
        This method is most useful when used in Groovy - where defining maps is done through square brackets, - making it possible to slice nd-arrays like so:
        -
        DataType<V>getDataType() -
        This method returns the DataType instance of this Tsr, which is - a wrapper object for the actual type class representing the value items stored inside - the underlying data array of this tensor.
        -
        default Device<V>getDevice() 
        default java.util.Optional<NDFrame<V>>getFrame() 
        default java.util.Optional<Tsr<V>>getGradient() 
        default java.util.Optional<GraphNode<V>>getGraphNode() 
        MutateTsr<V>getMut() -
        This method exposes an API for mutating the state of this tensor.
        -
        java.lang.Class<?>getRepresentativeItemClass() -
        The Class returned by this method is the representative Class of the - value items of a concrete AbstractNda but not necessarily the actual Class of - a given value item, this is especially true for numeric types, which are represented by - implementations of the NumericType interface.
        -
        default Tsr<V>getT() -
        A method which returns a new Tsr instance which is a transposed twin of this instance.
        - This is an alternative to the functionally identical T() method.
        -
        intgetVersion() -
        The version number is tracking how often this tensor has been mutated.
        -
        default java.util.Optional<Tsr<V>>gradient() -
        This is a functionally identical alternative to the getGradient() method.
        -
        booleangradientApplyRequested() -
        This flag works alongside two autograd features which can be enabled inside the library settings.
        -
        default java.util.Optional<GraphNode<V>>graphNode() -
        This is a functionally identical alternative to getGraphNode().
        -
        default booleanhasGradient() -
        Tensors can be components of other tensors which makes the - implicitly their gradients.
        -
        booleanis​(java.lang.Class<?> typeClass) -
        This method compares the passed class with the underlying data-type of this NDArray.
        -
        default booleanisBranch() -
        Tensors which are used or produced by the autograd system will have a GraphNode component attached to them.
        -
        booleanisCase​(Tsr<V> other) -
        This method name translates to the "in" keyword in Groovy! - The same is true for the "contains" method in Kotlin.
        -
        booleanisDeleted() -
        This will check if the MutateTsr.delete() method was previously called on this tensor.
        -
        default booleanisEmpty() -
        A tensor is empty if it's Data storage is null.
        -
        booleanisIntermediate() -
        Intermediate tensors are internal non-user tensors which may be eligible - for deletion when further consumed by a Function.
        -
        default booleanisLeave() -
        Tensors which are used or produced by the autograd system will have a GraphNode component attached to them.
        -
        default booleanisOutsourced() -
        Outsourced means that the tensor is stored on a Device implementation instance which is not the CPU.
        -
        default booleanisPartialSlice() -
        If this nd-array is a partial slice of a parent nd-array then this method will yield true.
        -
        default booleanisShallowCopy() -
        If this nd-array is a shallow copy of a parent nd-array then this method will yield true.
        -
        default booleanisSlice() -
        If this nd-array is a slice of a parent nd-array then this method will yield true.
        -
        default booleanisSliceParent() -
        If slices have been derived from this nd-array then it is a "slice parent".
        -
        default booleanisUndefined() -
        A tensor is "undefined" if it has either no NDConfiguration implementation instance - or this instance does not have a shape set for this Tsr which is needed for - a tensor to also have a rank and dimensionality...
        -
        booleanisVirtual() -
        A Virtual tensor is a tensor whose underlying data array is of size 1, holding only a single value.
        -
        static <V> IterByOrIterFromOrAllTsr<V>like​(Tsr<V> template) -
        Use this factory method to instantiate a new tensor with the same data type, shape - and memory location (Device instance) as the provided template tensor.
        -
        default Tsr<V>ln() -
        This method is functionally identical to the following alternatives:
        -
        default Tsr<V>log10() -
        This method is functionally identical to the following alternatives:
        -
        default Tsr<V>map​(java.util.function.Function<V,​V> mapper) -
        - This method is a convenience method for mapping the items of this nd-array to another - nd-array of the same type based on the provided lambda function, which will be applied - to all items of this nd-array individually (element-wise).
        -
        default <T> Tsr<T>mapTo​(java.lang.Class<T> typeClass, - java.util.function.Function<V,​T> mapper) -
        - This is a convenience method for mapping a nd-array to a nd-array of new type - based on a provided target item type and mapping lambda.
        -
        default Tsr<V>matMul​(Tsr<V> other) -
        This will produce the matrix product of - two tensors with rank 2 (matrices), where the left operand is this Tsr - instance and the right operand is the argument passed to the method.
        -
        default Tsr<V>max() -
        Calculate the max value of all values - within this tensor and returns it - in the form of a scalar tensor.
        -
        default Tsr<V>mean() -
        Calculate the mean value of all values - within this tensor and returns it - in the form of a scalar tensor.
        -
        default Tsr<V>min() -
        Calculate the min value of all values - within this tensor and returns it - in the form of a scalar tensor.
        -
        default Tsr<V>minus​(Tsr<V> other) -
        Performs subtraction on - two tensors with the same rank (or two ranks which can be made compatible with padding ones), - where the left operand is this Tsr - instance and the right operand is the tensor passed to the method.
        -
        default Tsr<V>minus​(V other) -
        This method will create a new Tsr - with the provided item subtracted from all elements of this Tsr.
        -
        default Tsr<V>mod​(int other) 
        default Tsr<V>mod​(Tsr<V> other) -
        Produces the modulus of - two tensors with the same rank (or two ranks which can be made compatible with padding ones), - where the left operand is this Tsr - instance and the right operand is the tensor passed to the method.
        -
        default Tsr<V>multiply​(double value) 
        default Tsr<V>multiply​(Tsr<V> other) -
        This method is synonymous to the times(Tsr) method.
        -
        default Tsr<V>multiply​(V other) 
        default MutateTsr<V>mut() -
        This method exposes an API for mutating the state of this tensor.
        -
        default Tsr<V>neg() -
        This method is functionally identical to the following alternatives:
        -
        default Tsr<V>negative() 
        static Tsr<java.lang.Object>newInstance() -
        This static factory method creates and returns a completely empty and undefined tensor which is void of any contents and meaning.
        -
        static Tsr<java.lang.Boolean>of​(boolean... value) -
        Constructs a vector of booleans based on the provided array.
        -
        static Tsr<java.lang.Byte>of​(byte value) -
        Constructs a vector of bytes based on the provided array.
        -
        static Tsr<java.lang.Byte>of​(byte... value) -
        Constructs a vector of bytes based on the provided array.
        -
        static Tsr<java.lang.Double>of​(double value) 
        static Tsr<java.lang.Double>of​(double... value) -
        Constructs a vector of doubles based on the provided array.
        -
        static Tsr<java.lang.Float>of​(float value) -
        Constructs a vector of floats based on the provided array.
        -
        static Tsr<java.lang.Float>of​(float... value) -
        Constructs a vector of floats based on the provided array.
        -
        static Tsr<java.lang.Integer>of​(int value) -
        Constructs a vector of ints based on the provided array.
        -
        static Tsr<java.lang.Integer>of​(int... value) -
        Constructs a vector of ints based on the provided array.
        -
        static Tsr<java.lang.Long>of​(long value) -
        Constructs a vector of longs based on the provided array.
        -
        static Tsr<java.lang.Long>of​(long... value) -
        Constructs a vector of longs based on the provided array.
        -
        static Tsr<java.lang.Short>of​(short value) -
        Constructs a vector of shorts based on the provided array.
        -
        static Tsr<java.lang.Short>of​(short... value) -
        Constructs a vector of shorts based on the provided array.
        -
        static <T> Tsr<T>of​(java.lang.Class<T> type, - java.util.List<java.lang.Object> conf) -
        This factory method will turn a list of values or nested lists of values into a Tsr - instance with the corresponding rank and shape and whose values - are of the provided type.
        -
        static <V> WithShapeOrScalarOrVectorOnDevice<V>of​(java.lang.Class<V> type) -
        This is the entry point to the fluent tensor builder API for building - Tsr instances in a readable and type safe fashion.
        -
        static <V> Tsr<V>of​(java.lang.Class<V> type, - java.util.List<java.lang.Integer> shape, - java.lang.Object data) -
        Use this to construct and return a tensor of the specified type, shape and data object.
        -
        static <V> Tsr<V>of​(java.lang.Class<V> type, - java.util.List<java.lang.Integer> shape, - java.util.List<V> data) -
        Use this to construct and return a tensor of the specified type, shape and data object.
        -
        static <V extends java.lang.Number>
        Tsr<V>
        of​(java.lang.Class<V> type, - Shape shape, - java.lang.Number data) -
        Use this to construct and return a tensor of the specified type, shape and number.
        -
        static <V> Tsr<V>of​(java.lang.Class<V> type, - Shape shape, - java.lang.Object data) -
        Use this to construct and return a tensor of the specified type, shape and data object.
        -
        static <V> Tsr<V>of​(java.lang.Class<V> type, - Shape shape, - java.util.List<V> data) -
        Use this to construct and return a tensor of the specified type, shape and list of items.
        -
        static <V> Tsr<V>of​(java.lang.Class<V> valueType, - Shape shape, - Arg.Seed seed) -
        Use this to construct and return a seeded tensor of the specified type.
        -
        static <T> Tsr<T>of​(java.lang.Iterable<T> iterable) -
        Constructs a vector of objects based on the provided iterable.
        -
        static <T> Tsr<T>of​(java.lang.Object... args) -
        This static Tsr factory method tries to interpret the provided arguments to create the instance the user might want.
        -
        static <V> Tsr<V>of​(java.lang.String expression, - boolean doAD, - java.util.List<Tsr<V>> tensors) -
        This method takes a list of tensors and a String expression describing - operations which ought to be applied to the tensors in said list.
        -
        static <V> Tsr<V>of​(java.lang.String expression, - boolean doAD, - Tsr<V>... tensors) -
        This method takes an array of tensors and a String expression describing - operations which ought to be applied to the tensors in said array.
        -
        static <V> Tsr<V>of​(java.lang.String expression, - java.util.List<Tsr<V>> inputs) -
        This factory method allows for the creation and execution of Function instances - without actually instantiating them manually, - where the result will then be returned by this factory method.
        -
        static <T> Tsr<T>of​(java.lang.String e1, - Tsr<T> a, - char o, - Tsr<T> b, - java.lang.String e2) -
        Use this to conveniently operate on 2 tensors.
        -
        static <T> Tsr<T>of​(java.lang.String e1, - Tsr<T> a, - java.lang.String e2) -
        Use this to conveniently operate on a tensor.
        -
        static <T> Tsr<T>of​(java.lang.String e1, - Tsr<T> a, - java.lang.String e2, - Tsr<T> b, - java.lang.String e3, - Tsr<T> c, - java.lang.String e4) -
        Use this to conveniently operate on 3 tensors.
        -
        static <V> Tsr<V>of​(java.lang.String expression, - Tsr<V> tensor) -
        This method takes a tensor and a String expression describing - operations which ought to be applied to said tensor.
        -
        static <V> Tsr<V>of​(java.lang.String expression, - Tsr<V>... tensors) -
        This method takes an array of tensors and a String expression describing - operations which ought to be applied to the tensors in said array.
        -
        static <V extends java.lang.Number>
        Tsr<V>
        of​(java.lang.String expression, - V... inputs) -
        This factory method allows for the creation and execution of Function instances - without actually instantiating them manually, - where the result will then be returned by this factory method.
        -
        static Tsr<java.lang.Double>of​(java.util.List<? extends java.lang.Number> shape, - java.lang.String seed) -
        This factory method will create and return a Tsr instance - based on a list of Number instances whose rounded values will be interpreted as - the shape of this new Tsr instance and a seed which will serve - as a source of pseudo randomness to generate the values for the new instance.
        -
        static <V> Tsr<V>of​(java.util.List<? extends java.lang.Number> shape, - java.util.List<V> items) -
        Creates a new Tsr instance based on a list of numbers representing the shape, - and a list of values representing the value of the resulting tensor.
        -
        static <T> Tsr<T>of​(java.util.List<java.lang.Integer> shape, - T item) -
        This is a convenient factory method for creating Tsr instances for values of type T based on a list of integers defining a shape made up of axes sizes as well as a scalar value of type T which will fill out the data array spanned by the provided shape information.
        -
        static Tsr<java.lang.Object>of​(java.util.List<java.lang.Object> conf) -
        This factory method will turn a list of values or nested lists of values into a Tsr - instance with the corresponding rank and shape.
        -
        static <T> Tsr<T>of​(DataType<T> type, - int[] shape, - Filler<T> filler) -
        This factory method allows the creation of tensors with an additional initialization - lambda for filling the underlying data array with desired values.
        -
        static <T> Tsr<T>of​(DataType<T> type, - java.util.List<java.lang.Integer> shape, - Filler<T> filler) -
        This factory method allows the creation of tensors with an additional initialization - lambda for filling the underlying data array with desired values.
        -
        static <T> Tsr<T>of​(DataType<T> type, - Shape shape, - Filler<T> filler) -
        This factory method allows the creation of tensors with an additional initialization - lambda for filling the underlying data array with desired values.
        -
        static <V> Tsr<V>of​(DataType<V> dataType, - int[] shape, - java.lang.Object data) -
        This factory method is among the most flexible and forgiving ways to create a Tsr instance.
        -
        static <V> Tsr<V>of​(DataType<V> dataType, - java.util.List<java.lang.Integer> shape, - java.util.List<V> data) -
        Use this to construct and return a tensor of the specified type, shape and data object.
        -
        static <V extends N,​N>
        Tsr<V>
        of​(DataType<V> dataType, - Device<N> device, - Shape shape, - java.lang.Object data) -
        This factory method is among the most flexible and forgiving ways to create a Tsr instance.
        -
        static <V> Tsr<V>of​(DataType<V> dataType, - NDConstructor ndConstructor, - Data<V> data) -
        This factory method is a raw tensor constructor which will not perform any type checking or data conversion on the data provided to it.
        -
        static <V> Tsr<V>of​(DataType<V> type, - Shape shape) -
        Use this to construct and return a tensor of the specified type and shape.
        -
        static <V> Tsr<V>of​(DataType<V> dataType, - Shape shape, - java.lang.Object data) -
        This factory method is among the most flexible and forgiving ways to create a Tsr instance.
        -
        static <V> Tsr<V>of​(DataType<V> dataType, - Shape shape, - java.util.List<V> data) -
        Use this to construct and return a tensor of the specified type, shape and a list of items.
        -
        static Tsr<java.lang.Boolean>of​(Shape shape, - boolean[] values) -
        Use this to construct and return a boolean tensor of the specified shape and initial values.
        -
        static Tsr<java.lang.Byte>of​(Shape shape, - byte[] values) -
        Use this to construct and return a byte tensor of the specified shape and initial values.
        -
        static Tsr<java.lang.Double>of​(Shape shape, - double value) -
        Use this to construct and return a homogeneously populated double tensor of the specified shape.
        -
        static Tsr<java.lang.Double>of​(Shape shape, - double[] values) -
        Use this to construct and return a double tensor of the specified shape and initial values.
        -
        static Tsr<java.lang.Float>of​(Shape shape, - float value) -
        Use this to construct and return a homogeneously populated float tensor of the specified shape.
        -
        static Tsr<java.lang.Float>of​(Shape shape, - float[] values) -
        Use this to construct and return a float tensor of the specified shape and initial values.
        -
        static Tsr<java.lang.Integer>of​(Shape shape, - int[] values) -
        Use this to construct and return an int tensor of the specified shape and initial values.
        -
        static Tsr<java.lang.Long>of​(Shape shape, - long[] values) -
        Use this to construct and return a long tensor of the specified shape and initial values.
        -
        static Tsr<java.lang.Short>of​(Shape shape, - short[] values) -
        Use this to construct and return a short tensor of the specified shape and initial values.
        -
        static <V> Tsr<V>of​(Shape shape, - java.util.List<V> items) -
        Creates a new Tsr instance based on a shape tuple of numbers representing the nd-array shape, - and a list of items representing the value of the resulting tensor.
        -
        static <V> Tsr<V>of​(Shape shape, - Data<V> data) -
        Use this to construct and return a tensor of the specified shape and data object.
        - This method is typically used like this:
        -
        static <T> Tsr<T>of​(Shape shape, - T value) -
        This is a convenient factory method for creating Tsr instances for representing items of type T.
        -
        static <T> Tsr<T>of​(Tsr<T> a, - char o, - Tsr<T> b) -
        Use this to conveniently operate on 2 tensors.
        -
        static <T> Tsr<T>of​(Tsr<T> a, - char o1, - Tsr<T> b, - char o2, - Tsr<T> c) -
        Use this to conveniently operate on 3 tensors.
        -
        static <V> Tsr<V>ofAny​(java.lang.Class<V> type, - Shape shape, - java.lang.Object data) -
        Use this to construct and return a tensor of the specified type, shape and data object.
        -
        static WithShapeOrScalarOrVectorOnDevice<java.lang.Byte>ofBytes() -
        This is a simple convenience method which is simply calling the of(Class) - method like so: of(Byte.class).
        -
        static WithShapeOrScalarOrVectorOnDevice<java.lang.Double>ofDoubles() -
        This is a simple convenience method which is simply calling the of(Class) - method like so: of(Double.class).
        -
        static WithShapeOrScalarOrVectorOnDevice<java.lang.Float>ofFloats() -
        This is a simple convenience method which is simply calling the of(Class) - method like so: of(Float.class).
        -
        static WithShapeOrScalarOrVectorOnDevice<java.lang.Integer>ofInts() -
        This is a simple convenience method which is simply calling the of(Class) - method like so: of(Integer.class).
        -
        static <V> Tsr<V>ofRandom​(java.lang.Class<V> valueTypeClass, - int... shape) -
        This factory method produces a randomly populated tensor of the provided - type and shape using a hard coded default seed.
        -
        static WithShapeOrScalarOrVectorOnDevice<java.lang.Short>ofShorts() -
        This is a simple convenience method which is simply calling the of(Class) - method like so: of(Short.class).
        -
        default Tsr<V>permute​(int... dims) -
        Returns a view of the original tensor input with its dimensions permuted.
        - Consider a 3-dimensional tensor x with shape (2×3×5), - then calling x.permute(1, 0, 2) will return a 3-dimensional tensor of shape (3×2×5).
        -
        default Tsr<V>plus​(Tsr<V> other) -
        This method will produce the addition of - two tensors with the same rank (or two ranks which can be made compatible with padding ones), - where the left operand is this Tsr - instance and the right operand is the tensor passed to the method.
        -
        default Tsr<V>plus​(V value) -
        This method will create a new Tsr - with the provided double scalar added to all elements of this Tsr.
        -
        default Tsr<V>power​(Tsr<V> other) -
        This will produce the power of - two tensors with the same rank (or two ranks which can be made compatible with padding ones), - where the left operand is this Tsr - instance and the right operand is the tensor passed to the method.
        -
        default Tsr<V>power​(V value) -
        Raises all items of this tensor to the power of the provided value.
        -
        default Tsr<V>relu() -
        This method is functionally identical to the following alternatives:
        -
        default Tsr<V>rem​(int other) -
        This method is synonymous to the mod(int) method.
        -
        default Tsr<V>reshape​(int... shape) -
        Returns a nd-array with the same data and number of elements as this nd-array, but with the specified shape.
        -
        booleanrqsGradient() -
        This flag will indirectly trigger the activation of the autograd / auto-differentiation system of this library! - If the flag is set to 'true' and the tensor is used for computation then - it will also receive gradients when the backward() method is being called - on any descendant tensor within the computation graph.
        -
        default Tsr<V>set​(OptimizerFactory optimizerFactory) -
        Configures an Optimizer for this tensor based on the given OptimizerFactory - which will be used to create a new Optimizer instance specific to this tensor.
        -
        Tsr<V>setGradientApplyRequested​(boolean applyRequested) -
        This flag works alongside two autograd features which can be enabled inside the library settings.
        -
        Tsr<V>setRqsGradient​(boolean rqsGradient) -
        Setting this flag to true will tell the autograd system to accumulate gradients at this tensor.
        -
        default Tsr<V>shallowClone() 
        default Tsr<V>shallowCopy() -
        This creates a copy where the underlying data is still the same.
        -
        static <T> java.util.stream.Collector<T,​?,​Tsr<T>>shaped​(int... shape) -
        Returns a Collector that accumulates the input elements into a - new Tsr with the specified shape.
        -
        static <T> java.util.stream.Collector<T,​?,​Tsr<T>>shaped​(Shape shape) -
        Returns a Collector that accumulates the input elements into a - new Tsr with the specified shape.
        -
        default Tsr<V>sig() -
        This method is functionally identical to the following alternatives:
        -
        default Tsr<V>sigmoid() 
        default Tsr<V>sin() -
        This method is functionally identical to the following alternatives:
        -
        AxisOrGetTsr<V>slice() -
        This method returns a SliceBuilder instance exposing a simple builder API - which enables the configuration of a slice of the current nd-array via method chaining.
        -
        default intsliceCount() -
        This method returns the number of slices which have been - created from this nd-array.
        -
        default Tsr<V>softmax() 
        default Tsr<V>softmax​(int axis) 
        default Tsr<V>softmax​(int... axes) -
        Calculates the softmax function along the specified axes.
        -
        default Tsr<V>softplus() -
        This method is functionally identical to the following alternatives:
        -
        default Tsr<V>sqrt() -
        This method is functionally identical to the following alternatives:
        -
        default Tsr<V>sum() -
        Calculate the sum value of all values - within this tensor and returns it - in the form of a scalar tensor.
        -
        default Tsr<V>sum​(int axis) -
        Calculate the sum value of all values - within this tensor along the specified axis and returns it - in the form of a tensor.
        -
        default Tsr<V>sum​(int... axes) -
        Calculate the sum value of all values - within this tensor along the specified axes and returns it - in the form of a tensor.
        -
        default Tsr<V>T() -
        Creates and returns a new Tsr instance which is a transposed twin of this instance.
        - This is a shorter alternative to the functionally identical getT() method.
        -
        default Tsr<V>tanh() -
        This method is functionally identical to the following alternatives:
        -
        default Tsr<V>times​(Tsr<V> other) -
        This is a functionally identical synonym to the multiply(Tsr) method.
        -
        default Tsr<V>times​(V other) 
        default Tsr<V>to​(java.lang.String deviceType) 
        Tsr<V>to​(Device<?> device) -
        This method takes a Device and tries to migrate the contents of this Tsr - instance to that Device!
        -
        default java.lang.StringtoString​(java.lang.String conf) 
        default java.lang.StringtoString​(java.util.function.Consumer<NDPrintSettings> configurator) -
        This allows you to provide a lambda which configures how this nd-array should be - converted to String instances.
        -
        default java.lang.StringtoString​(NDPrintSettings config) -
        Use this to turn this nd-array into a String instance based on the provided - NDPrintSettings instance, which allows you to configure things - like the number of chars per entry, delimiters, the number of items per line, etc.
        -
        default Tsr<V>transpose​(int dim1, - int dim2) -
        Returns a view of the original tensor input with the targeted axes swapped / transposed.
        -
        default booleanupdate​(Component.OwnerChangeRequest<Tsr<V>> changeRequest) -
        Important : Components of type Tsr are simply gradients! - Currently, this method is used only to catch illegal arguments which - is for example the case when trying to attach a gradient with a different shape...
        -
        Tsr<V>withLabel​(java.lang.String label)
        Tsr<V>withLabels​(java.lang.String[]... labels) -
        This method receives a nested String array which - ought to contain a label for the index of this nd-array.
        -
        Tsr<V>withLabels​(java.util.List<java.util.List<java.lang.Object>> labels) -
        This method receives a nested String list which - ought to contain a label for the index of this nd-array.
        -
        Tsr<V>withLabels​(java.util.Map<java.lang.Object,​java.util.List<java.lang.Object>> labels) -
        This method provides the ability to - label not only the indices of the shape of this nd-array, but also - the dimension of the shape.
        -
        default Tsr<V>xor​(double value) -
        This method is a functionally identical synonym to the power(Tsr) method.
        -
        default Tsr<V>xor​(Tsr<V> other) -
        This method is a functionally identical synonym to the power(Tsr) method.
        -

          Methods inherited from interface java.lang.Iterable

          forEach, iterator, spliterator

        Method Detail

        - - - -
          -
        • -

          newInstance

          -
          static Tsr<java.lang.Object> newInstance()
          -
          This static factory method creates and returns a completely empty and undefined tensor which is void of any contents and meaning. The use case for this would be to use the produced Tsr instance as a target for an inline operation which fills the instance with an actual value.
          - An example of this approach would be to call the MutateTsr.putAt(List, Nda) method with an empty list as key. - This will be interpreted as an inline copy of the contents of the - second parameter into this Tsr instance.
          -
          -
          Returns:
          -
          A new and completely empty / uninitialized Tsr instance.
          -
          -
        • -
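          For illustration, a minimal sketch of the inline-fill use case described above, assuming a previously created, compatible tensor named source (a hypothetical placeholder):

               Tsr<Object> target = Tsr.newInstance();   // completely empty / undefined tensor
               // inline copy of the contents of 'source' into 'target' (see MutateTsr.putAt(List, Nda)):
               target.mut().putAt( java.util.Collections.emptyList(), source );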
        - - - -
          -
        • -

          of

          -
          static <T> Tsr<T> of​(Tsr<T> a,
          -                     char o,
          -                     Tsr<T> b)
          -
          Use this to conveniently operate on 2 tensors. - A simple example would be: of(a,'*',b).
          -
          -
          Type Parameters:
          -
          T - The value item type parameter for the involved tensors.
          -
          Parameters:
          -
          a - The left operand.
          -
          o - The operator, which may be '+', '-', '*'...
          -
          b - The right operand.
          -
          Returns:
          -
          The result of the operation defined by the provided character.
          -
          -
        • -
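          A minimal usage sketch, assuming two compatible tensors a and b were created beforehand:

               Tsr<Double> c = Tsr.of( a, '*', b );   // the product of a and b as defined by the '*' operator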
        - - - -
          -
        • -

          of

          -
          static <T> Tsr<T> of​(Tsr<T> a,
          -                     char o1,
          -                     Tsr<T> b,
          -                     char o2,
          -                     Tsr<T> c)
          -
          Use this to conveniently operate on 3 tensors. - A simple example would be: of(a,'*',b,'+',c).
          -
          -
          Type Parameters:
          -
          T - The value item type parameter for the involved tensors.
          -
          Parameters:
          -
          a - The first and left most operand.
          -
          o1 - The first operator, which may be '+', '-', '*'...
          -
          b - The second operand.
          -
          o2 - The second operator, which may also be '+', '-', '*'...
          -
          c - The third and last operand.
          -
          Returns:
          -
          The result of the operations defined by the 2 provided characters.
          -
          -
        • -
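          A minimal usage sketch, assuming three compatible tensors a, b and c were created beforehand:

               Tsr<Double> result = Tsr.of( a, '*', b, '+', c );   // roughly: a * b + c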
        - - - -
          -
        • -

          of

          -
          static <T> Tsr<T> of​(java.lang.String e1,
          -                     Tsr<T> a,
          -                     java.lang.String e2)
          -
          Use this to conveniently operate on a tensor. - A simple example would be: of("sig(tanh(",a,"))").
          -
          -
          Type Parameters:
          -
          T - The value item type parameter for the involved tensor.
          -
          Parameters:
          -
          e1 - The first part of the string expression defining how the provided tensor should be processed.
          -
          a - The tensor which ought to be sent to whatever is defined by the provided expressions.
          -
          e2 - The latter part of the expression defining how the provided tensor should be executed.
          -
          Returns:
          -
          The result of the operation(s) defined by the provided strings.
          -
          -
        • -
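          A minimal usage sketch based on the example above, assuming a tensor a was created beforehand:

               Tsr<Double> y = Tsr.of( "sig(tanh(", a, "))" );   // applies tanh and then sig to 'a'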
        - - - -
          -
        • -

          of

          -
          static <T> Tsr<T> of​(java.lang.String e1,
          -                     Tsr<T> a,
          -                     char o,
          -                     Tsr<T> b,
          -                     java.lang.String e2)
          -
          Use this to conveniently operate on 2 tensors. - A simple example would be: of("relu(",a,'-',b,")*2").
          -
          -
          Type Parameters:
          -
          T - The value item type parameter for the involved tensor.
          -
          Parameters:
          -
          e1 - The first part of the string expression defining how the provided tensor should be processed.
          -
          a - The first tensor which ought to be sent to whatever function is defined by the provided expressions.
          -
          o - An operator combining both a and b to form a result.
          -
          b - The second tensor and right operand which ought to be sent to whatever function is defined by the provided expressions.
          -
          e2 - The latter part of the expression defining how the provided tensor should be executed.
          -
          Returns:
          -
          The result of the operation(s) defined by the provided strings.
          -
          -
        • -
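          A minimal usage sketch based on the example above, assuming two compatible tensors a and b were created beforehand:

               Tsr<Double> y = Tsr.of( "relu(", a, '-', b, ")*2" );   // relu(a - b) * 2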
        - - - -
          -
        • -

          of

          -
          static <T> Tsr<T> of​(java.lang.String e1,
          -                     Tsr<T> a,
          -                     java.lang.String e2,
          -                     Tsr<T> b,
          -                     java.lang.String e3,
          -                     Tsr<T> c,
          -                     java.lang.String e4)
          -
          Use this to conveniently operate on 3 tensors. - A simple example would be: - of("abs((",a,"-",b,") * ",c,")").
          -
          -
          Type Parameters:
          -
          T - The type parameter for the involved tensors.
          -
          Parameters:
          -
          e1 - The first part of the expression which would typically be used to define a function name.
          -
          a - The first argument.
          -
          e2 - The second part of the expression, which might be an operation.
          -
          b - The second argument.
          -
          e3 - The third part of the expression...
          -
          c - The third argument.
          -
          e4 - The last part of the expression which should syntactically match the other expression...
          -
          Returns:
          -
          The result of the calculation defined by the provided expressions and arguments.
          -
          -
        • -
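          A minimal usage sketch based on the example above, assuming three compatible tensors a, b and c were created beforehand:

               Tsr<Double> y = Tsr.of( "abs((", a, "-", b, ") * ", c, ")" );   // abs((a - b) * c)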
        - - - -
          -
        • -

          of

          -
          static <T> Tsr<T> of​(java.lang.Object... args)
          -
          This static Tsr factory method tries to interpret the provided arguments to create the instance the user might want.
          -
          -
          Parameters:
          -
          args - The arguments which ought to be interpreted.
          -
          Returns:
          -
          The result of the interpretation in the form of a Tsr instance of type Object.
          -
          -
        • -
        - - - -
          -
        • -

          of

          -
          static <T> Tsr<T> of​(java.lang.Iterable<T> iterable)
          -
          Constructs a vector of objects based on the provided iterable.
          -
          -
          Parameters:
          -
          iterable - The iterable of objects from which a 1D nd-array ought to be constructed.
          -
          Returns:
          -
          A vector / 1D tensor of objects.
          -
          -
        • -
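          A minimal usage sketch:

               java.util.List<Integer> numbers = java.util.Arrays.asList( 1, 2, 3, 4 );
               Tsr<Integer> vector = Tsr.of( numbers );   // a 1D tensor holding the four list items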
        - - - - - -
          -
        • -

          of

          -
          static <T> Tsr<T> of​(java.util.List<java.lang.Integer> shape,
          -                     T item)
          -
          This is a convenient factory method for creating Tsr instances for values of type T based on a list of integers defining a shape made up of axes sizes as well as a scalar value of type T which will fill out the data array spanned by the provided shape information.
          -
          -
          Parameters:
          -
          shape - A list of integers whose values ought to define the size of the axes of the shape of the new Tsr.
          -
          item - An object of type T which will populate the data array of the new instance.
          -
          Returns:
          -
          A new Tsr instance for the generic type T.
          -
          -
        • -
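          A minimal usage sketch:

               Tsr<Double> filled = Tsr.of( java.util.Arrays.asList( 2, 3 ), 42d );   // a 2x3 tensor filled with 42.0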
        - - - - - -
          -
        • -

          of

          -
          static <T> Tsr<T> of​(Shape shape,
          -                     T value)
          -
          This is a convenient factory method for creating Tsr instances for representing items of type T. The factory method instantiates tensors based on a Shape tuple of integers defining axes sizes, and a scalar item of type T which will fill out the data array spanned by the provided shape information. A simple usage example would be:
          
          -     Tsr.of(Shape.of( 4, 3, 6 ), 42);
          -  
          -
          -
          Parameters:
          -
          shape - An immutable tuple of integers whose values ought to define the size of the axes of the shape of the new Tsr.
          -
          value - An object of type T which will populate the data array of the new instance.
          -
          Returns:
          -
          A new Tsr instance for the generic type T.
          -
          -
        • -
        - - - -
          -
        • -

          of

          -
          static Tsr<java.lang.Double> of​(java.util.List<? extends java.lang.Number> shape,
          -                                java.lang.String seed)
          -
          This factory method will create and return a Tsr instance - based on a list of Number instances whose rounded values will be interpreted as - the shape of this new Tsr instance and a seed which will serve - as a source of pseudo randomness to generate the values for the new instance.
          -
          -
          Parameters:
          -
          shape - A list of Number instances which will be interpreted as a shape array.
          -
          seed - A source of pseudo randomness for the Tsr instance created by this method.
          -
          Returns:
          -
          A new Tsr instance created based on a shape and a seed.
          -
          -
        • -
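          A minimal usage sketch; the seed string "my-seed" is an arbitrary placeholder:

               java.util.List<? extends Number> shape = java.util.Arrays.asList( 3, 4 );
               Tsr<Double> random = Tsr.of( shape, "my-seed" );   // a 3x4 tensor of seeded pseudo-random doubles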
        - - - -
          -
        • -

          of

          -
          static <V> Tsr<V> of​(java.util.List<? extends java.lang.Number> shape,
          -                     java.util.List<V> items)
          -
          Creates a new Tsr instance based on a list of numbers representing the shape, - and a list of values representing the value of the resulting tensor.
          -
          -
          Type Parameters:
          -
          V - The type parameter of the value list and returned tensor.
          -
          Parameters:
          -
          shape - A list of numbers whose integer values will be used to form the shape of the resulting Tsr.
          -
          items - A list of values which will be used to populate the data array of the resulting Tsr.
          -
          Returns:
          -
          A new Tsr instance constructed based on the provided shape and value list.
          -
          -
        • -
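          A minimal usage sketch:

               java.util.List<? extends Number> shape = java.util.Arrays.asList( 2, 3 );
               Tsr<Integer> t = Tsr.of( shape, java.util.Arrays.asList( 1, 2, 3, 4, 5, 6 ) );   // a 2x3 tensor holding 1..6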
        - - - -
          -
        • -

          of

          -
          static <V> Tsr<V> of​(Shape shape,
          -                     java.util.List<V> items)
          -
          Creates a new Tsr instance based on a shape tuple of numbers representing the nd-array shape, - and a list of items representing the value of the resulting tensor.
          - A simple usage example would be: -
          
          -     Tsr.of(Shape.of( 2, 3, 4 ), myListOfItems);
          -  
          -
          -
          Type Parameters:
          -
          V - The type parameter of the value list and returned tensor.
          -
          Parameters:
          -
          shape - A shape tuple of numbers whose integer values will be used to form the shape of the resulting Tsr.
          -
          items - A list of values which will be used to populate the data array of the resulting Tsr.
          -
          Returns:
          -
          A new Tsr instance constructed based on the provided shape and value list.
          -
          -
        • -
        - - - -
          -
        • -

          of

          -
          static Tsr<java.lang.Object> of​(java.util.List<java.lang.Object> conf)
          -
          This factory method will turn a list of values or nested lists of values into a Tsr - instance with the corresponding rank and shape.
          -
          -
          Parameters:
          -
          conf - A list of either values or nested lists, which themselves contain either values or further nested lists.
          -
          Returns:
          -
          A new Tsr instance whose shape and data is based on the provided list structure.
          -
          -
        • -
        - - - -
          -
        • -

          of

          -
          static <T> Tsr<T> of​(java.lang.Class<T> type,
          -                     java.util.List<java.lang.Object> conf)
          -
          This factory method will turn a list of values or nested lists of values into a Tsr - instance with the corresponding rank and shape and whose values - are of the provided type.
          -
          -
          Type Parameters:
          -
          T - The type parameter of the tensor returned by this factory method.
          -
          Parameters:
          -
          type - The type of the tensor produced by this factory method.
          -
          conf - A list of either values or nested lists, which themselves contain either values or further nested lists.
          -
          Returns:
          -
          A new Tsr instance whose shape and data is based on the provided list structure.
          -
          -
        • -
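          A minimal usage sketch; the explicit Object casts are only there to satisfy the List&lt;Object&gt; parameter type:

               java.util.List<Object> rows = java.util.Arrays.asList(
                       (Object) java.util.Arrays.asList( 1d, 2d, 3d ),
                       (Object) java.util.Arrays.asList( 4d, 5d, 6d )
               );
               Tsr<Double> t = Tsr.of( Double.class, rows );   // a 2x3 tensor of doubles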
        - - - -
          -
        • -

          of

          -
          static <V> WithShapeOrScalarOrVectorOnDevice<V> of​(java.lang.Class<V> type)
          -
          This is the entry point to the fluent tensor builder API for building - Tsr instances in a readable and type safe fashion. - The returned WithShapeOrScalarOrVector is the next step in the - fluent Tsr builder API which will lead to the creation - of a tensor storing values defined by the provided type class. - A simple usage example would be: -
          
          -      Tsr.of(Double.class)
          -            .withShape( 2, 3, 4 )
          -            .andFill( 5, 3, 5 )
          -   
          - - It is also possible to define a range using the API to populate the tensor with values: -
          
          -      Tsr.of(Double.class)
          -            .withShape( 2, 3, 4 )
          -            .andFillFrom( 2 ).to( 9 ).step( 2 )
          -   
          - - If one needs a simple scalar then the following shortcut is possible: -
          
          -      Tsr.of(Float.class).scalar( 3f )
          -   
          - - This principle works for vectors as well: -
          
          -       Tsr.of(Byte.class).vector( 2, 5, 6, 7, 8 )
          -   
          - For more fine-grained control over the initialization one can - pass an initialization lambda to the API: -
          
          -       Tsr.of(Byte.class).withShape(2, 3).andWhere( (i, indices) -> i * 5 - 30 )
          -   
          -
          - Consider using the following convenience methods: - ofFloats(), ofDoubles(), ofInts(), ofBytes(), ofShorts()
          -
          -
          Parameters:
          -
          type - The type class of the items stored by the tensor built by the exposed builder API.
          -
          Returns:
          -
          The next step of the Tsr builder API which exposes methods for defining shapes.
          -
          -
        • -
        - - - -
          -
        • -

          ofDoubles

          -
          static WithShapeOrScalarOrVectorOnDevice<java.lang.Double> ofDoubles()
          -
          This is a simple convenience method which is simply calling the of(Class) - method like so: of(Double.class). - The returned WithShapeOrScalarOrVector is the next step in the - fluent Tsr builder API which in this case will lead to the creation - of a tensor storing doubles.
          - A simple usage example would be: -
          
          -     Tsr.ofDoubles()
          -           .withShape( 2, 3, 4 )
          -           .andFill( 5d, 3d, 5d )
          -  
          -
          -
          Returns:
          -
          The next step of the Tsr builder API which exposes methods for defining shapes.
          -
          -
        • -
        - - - -
          -
        • -

          ofFloats

          -
          static WithShapeOrScalarOrVectorOnDevice<java.lang.Float> ofFloats()
          -
          This is a simple convenience method which is simply calling the of(Class) - method like so: of(Float.class). - The returned WithShapeOrScalarOrVector is the next step in the - fluent Tsr builder API which in this case will lead to the creation - of a tensor storing floats.
          - A simple usage example would be: -
          
          -     Tsr.ofFloats()
          -           .withShape( 2, 3, 4 )
          -           .andFill( 5f, 7f, 11f )
          -  
          -
          -
          Returns:
          -
          The next step of the Tsr builder API which exposes methods for defining shapes.
          -
          -
        • -
        - - - -
          -
        • -

          ofInts

          -
          static WithShapeOrScalarOrVectorOnDevice<java.lang.Integer> ofInts()
          -
          This is a simple convenience method which is simply calling the of(Class) - method like so: of(Integer.class). - The returned WithShapeOrScalarOrVector is the next step in the - fluent Tsr builder API which in this case will lead to the creation - of a tensor storing integers.
          -
          -
          Returns:
          -
          The next step of the Tsr builder API which exposes methods for defining shapes.
          -
          -
        • -
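          A minimal usage sketch mirroring the ofDoubles() / ofFloats() examples above:

               Tsr<Integer> t = Tsr.ofInts()
                                   .withShape( 2, 3 )
                                   .andFill( 1, 2, 3 );   // the items repeat to fill the 2x3 shape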
        - - - -
          -
        • -

          ofShorts

          -
          static WithShapeOrScalarOrVectorOnDevice<java.lang.Short> ofShorts()
          -
          This is a simple convenience method which is simply calling the of(Class) - method like so: of(Short.class). - The returned WithShapeOrScalarOrVector is the next step in the - fluent Tsr builder API which in this case will lead to the creation - of a tensor storing shorts.
          -
          -
          Returns:
          -
          The next step of the Tsr builder API which exposes methods for defining shapes.
          -
          -
        • -
        - - - -
          -
        • -

          ofBytes

          -
          static WithShapeOrScalarOrVectorOnDevice<java.lang.Byte> ofBytes()
          -
          This is a simple convenience method which is simply calling the of(Class) - method like so: of(Byte.class). - The returned WithShapeOrScalarOrVector is the next step in the - fluent Tsr builder API which in this case will lead to the creation - of a tensor storing bytes.
          -
          -
          Returns:
          -
          The next step of the Tsr builder API which exposes methods for defining shapes.
          -
          -
        • -
        - - - -
          -
        • -

          of

          -
          static Tsr<java.lang.Double> of​(double... value)
          -
          Constructs a vector of doubles based on the provided array.
          -
          -
          Parameters:
          -
          value - The array of doubles from which a 1D tensor ought to be constructed.
          -
          Returns:
          -
          A vector / 1D tensor of doubles.
          -
          -
        • -
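          A minimal usage sketch:

               Tsr<Double> vector = Tsr.of( new double[]{ 1.0, 2.0, 3.0 } );   // a 1D tensor with shape (3)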
        - - - -
          -
        • -

          of

          -
          static Tsr<java.lang.Double> of​(double value)
          -
          -
          Parameters:
          -
          value - The scalar value which ought to be represented as tensor.
          -
          Returns:
          -
          A scalar double tensor.
          -
          -
        • -
        - - - -
          -
        • -

          of

          -
          static Tsr<java.lang.Float> of​(float... value)
          -
          Constructs a vector of floats based on the provided array.
          -
          -
          Parameters:
          -
          value - The array of floats from which a 1D tensor ought to be constructed.
          -
          Returns:
          -
          A vector / 1D tensor of floats.
          -
          -
        • -
        - - - -
          -
        • -

          of

          -
          static Tsr<java.lang.Float> of​(float value)
          -
          Description copied from interface: Nda
          -
          Constructs a vector of floats based on the provided array.
          -
          -
          Parameters:
          -
          value - The scalar value which ought to be represented as tensor.
          -
          Returns:
          -
          A scalar float tensor.
          -
          -
        • -
        - - - -
          -
        • -

          of

          -
          static Tsr<java.lang.Byte> of​(byte... value)
          -
          Constructs a vector of bytes based on the provided array.
          -
          -
          Parameters:
          -
          value - The array of bytes from which a 1D tensor ought to be constructed.
          -
          Returns:
          -
          A vector / 1D tensor of bytes.
          -
          -
        • -
        - - - -
          -
        • -

          of

          -
          static Tsr<java.lang.Byte> of​(byte value)
          -
          Description copied from interface: Nda
          -
          Constructs a vector of bytes based on the provided array.
          -
          -
          Parameters:
          -
          value - The scalar value which ought to be represented as tensor.
          -
          Returns:
          -
          A scalar byte tensor.
          -
          -
        • -
        - - - -
          -
        • -

          of

          -
          static Tsr<java.lang.Integer> of​(int... value)
          -
          Constructs a vector of ints based on the provided array.
          -
          -
          Parameters:
          -
          value - The array of ints from which a 1D tensor ought to be constructed.
          -
          Returns:
          -
          A vector / 1D tensor of ints.
          -
          -
        • -
        - - - -
          -
        • -

          of

          -
          static Tsr<java.lang.Integer> of​(int value)
          -
          Description copied from interface: Nda
          -
          Constructs a vector of ints based on the provided array.
          -
          -
          Parameters:
          -
          value - The scalar value which ought to be represented as tensor.
          -
          Returns:
          -
          A scalar int tensor.
          -
          -
        • -
        - - - -
          -
        • -

          of

          -
          static Tsr<java.lang.Long> of​(long... value)
          -
          Constructs a vector of longs based on the provided array.
          -
          -
          Parameters:
          -
          value - The array of longs from which a 1D tensor ought to be constructed.
          -
          Returns:
          -
          A vector / 1D tensor of longs.
          -
          -
        • -
        - - - -
          -
        • -

          of

          -
          static Tsr<java.lang.Long> of​(long value)
          -
          Description copied from interface: Nda
          -
          Constructs a vector of longs based on the provided array.
          -
          -
          Parameters:
          -
          value - The scalar value which ought to be represented as tensor.
          -
          Returns:
          -
          A scalar long tensor.
          -
          -
        • -
        - - - -
          -
        • -

          of

          -
          static Tsr<java.lang.Short> of​(short... value)
          -
          Constructs a vector of shorts based on the provided array.
          -
          -
          Parameters:
          -
          value - The array of shorts from which a 1D tensor ought to be constructed.
          -
          Returns:
          -
          A vector / 1D tensor of shorts.
          -
          -
        • -
        - - - -
          -
        • -

          of

          -
          static Tsr<java.lang.Short> of​(short value)
          -
          Description copied from interface: Nda
          -
          Constructs a vector of shorts based on the provided array.
          -
          -
          Parameters:
          -
          value - The scalar value which ought to be represented as tensor.
          -
          Returns:
          -
          A scalar short tensor.
          -
          -
        • -
        - - - -
          -
        • -

          of

          -
          static Tsr<java.lang.Boolean> of​(boolean... value)
          -
          Constructs a vector of booleans based on the provided array.
          -
          -
          Parameters:
          -
          value - The array of booleans from which a 1D tensor ought to be constructed.
          -
          Returns:
          -
          A vector / 1D tensor of booleans.
          -
          -
        • -
        - - - -
          -
        • -

          of

          -
          static <V> Tsr<V> of​(java.lang.Class<V> valueType,
          -                     Shape shape,
          -                     Arg.Seed seed)
          -
          Use this to construct and return a seeded tensor of the specified type.
          -
          -
          Type Parameters:
          -
          V - The type parameter of individual tensor items.
          -
          Parameters:
          -
          valueType - The type class of the items stored by the resulting tensor.
          -
          shape - The shape of the resulting tensor consisting of any number of axis-sizes.
          -
          seed - An arbitrary String whose hash will be used as a seed.
          -
          Returns:
          -
          A newly created and seeded tensor of the provided type and shape.
          -
          -
        • -
        - - - -
          -
        • -

          of

          -
          static Tsr<java.lang.Double> of​(Shape shape,
          -                                double value)
          -
          Use this to construct and return a homogeneously populated double tensor of the specified shape.
          -
          -
          Parameters:
          -
          shape - The shape of the resulting tensor consisting of any number of axis-sizes.
          -
          value - The value which ought to be used to populate the tensor homogeneously.
          -
          Returns:
          -
          A new tensor instance with the provided shape and initial value.
          -
          -
        • -
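          A minimal usage sketch:

               Tsr<Double> t = Tsr.of( Shape.of( 2, 3 ), 0.5 );   // a 2x3 tensor where every item is 0.5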
        - - - -
          -
        • -

          of

          -
          static Tsr<java.lang.Double> of​(Shape shape,
          -                                double[] values)
          -
          Use this to construct and return a double tensor of the specified shape and initial values. The length of the provided array does not have to match the number of elements defined by the provided shape; the tensor will be populated based on repeated iteration over the provided double array.
          -
          -
          Parameters:
          -
          shape - The shape of the resulting tensor consisting of any number of axis-sizes.
          -
          values - The values which ought to be used to populate the tensor.
          -
          -
        • -
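          A minimal usage sketch:

               Tsr<Double> t = Tsr.of( Shape.of( 2, 3 ), new double[]{ 1, 2, 3 } );   // the three values repeat to fill all 6 items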
        - - - -
          -
        • -

          of

          -
          static Tsr<java.lang.Integer> of​(Shape shape,
          -                                 int[] values)
          -
          Use this to construct and return an int tensor of the specified shape and initial values. The length of the provided array does not have to match the number of elements defined by the provided shape; the tensor will be populated based on repeated iteration over the provided int array.
          -
          -
          Parameters:
          -
          shape - The shape of the resulting tensor consisting of any number of axis-sizes.
          -
          values - The values which ought to be used to populate the tensor.
          -
          -
        • -
        - - - -
          -
        • -

          of

          -
          static Tsr<java.lang.Byte> of​(Shape shape,
          -                              byte[] values)
          -
          Use this to construct and return a byte tensor of the specified shape and initial values. The length of the provided array does not have to match the number of elements defined by the provided shape; the tensor will be populated based on repeated iteration over the provided byte array.
          -
          -
          Parameters:
          -
          shape - The shape of the resulting tensor consisting of any number of axis-sizes.
          -
          values - The values which ought to be used to populate the tensor.
          -
          -
        • -
        - - - -
          -
        • -

          of

          -
          static Tsr<java.lang.Long> of​(Shape shape,
          -                              long[] values)
          -
          Use this to construct and return a long tensor of the specified shape and initial values. The length of the provided array does not have to match the number of elements defined by the provided shape; the tensor will be populated based on repeated iteration over the provided long array.
          -
          -
          Parameters:
          -
          shape - The shape of the resulting tensor consisting of any number of axis-sizes.
          -
          values - The values which ought to be used to populate the tensor.
          -
          -
        • -
        - - - -
          -
        • -

          of

          -
          static Tsr<java.lang.Short> of​(Shape shape,
          -                               short[] values)
          -
          Use this to construct and return a short tensor of the specified shape and initial values. The length of the provided array does not have to match the number of elements defined by the provided shape; the tensor will be populated based on repeated iteration over the provided short array.
          -
          -
          Parameters:
          -
          shape - The shape of the resulting tensor consisting of any number of axis-sizes.
          -
          values - The values which ought to be used to populate the tensor.
          -
          -
        • -
        - - - -
          -
        • -

          of

          -
          static Tsr<java.lang.Float> of​(Shape shape,
          -                               float[] values)
          -
          Use this to construct and return a float tensor of the specified shape and initial values. The length of the provided array does not have to match the number of elements defined by the provided shape; the tensor will be populated based on repeated iteration over the provided float array.
          -
          -
          Parameters:
          -
          shape - The shape of the resulting tensor consisting of any number of axis-sizes.
          -
          values - The values which ought to be used to populate the tensor.
          -
          -
        • -
        - - - -
          -
        • -

          of

          -
          static Tsr<java.lang.Float> of​(Shape shape,
          -                               float value)
          -
          Use this to construct and return a homogeneously populated float tensor of the specified shape.
          -
          -
          Parameters:
          -
          shape - The shape of the resulting tensor consisting of any number of axis-sizes.
          -
          value - The value which ought to be used to populate the tensor homogeneously.
          -
          Returns:
          -
          A new tensor instance with the provided shape and initial value.
          -
          -
        • -
        - - - -
          -
        • -

          of

          -
          static Tsr<java.lang.Boolean> of​(Shape shape,
          -                                 boolean[] values)
          -
          Use this to construct and return a boolean tensor of the specified shape and initial values. The length of the provided array does not have to match the number of elements defined by the provided shape; the tensor will be populated by repeated iteration over the provided boolean array.
          -
          -
          Parameters:
          -
          shape - The shape of the resulting tensor consisting of any number of axis-sizes.
          -
          values - The values which ought to be used to populate the tensor.
          -
          -
        • -
        - - - -
          -
        • -

          of

          -
          static <V> Tsr<V> of​(Shape shape,
          -                     Data<V> data)
          -
          Use this to construct and return a tensor of the specified shape and data object.
          - This method is typically used like this:
          -
          
          -      Tsr<Integer> tensor = Tsr.of( Shape.of(2,3), Data.of(1,2,3,4,5,6) );
          -  
          - The resulting tensor will have the shape [2,3] and the values [1,2,3,4,5,6].
          -
          -
          Type Parameters:
          -
          V - The type parameter of individual tensor items.
          -
          Parameters:
          -
          shape - The shape of the resulting tensor consisting of any number of axis-sizes.
          -
          data - The data object which contains the values to be used to populate the tensor.
          -
          Returns:
          -
          A newly created tensor of the provided shape and data.
          -
          -
        • -
        - - - -
          -
        • -

          of

          -
          static <V> Tsr<V> of​(DataType<V> type,
          -                     Shape shape)
          -
          Use this to construct and return a tensor of the specified type and shape.
          -
          -
          Type Parameters:
          -
          V - The type parameter of individual tensor items.
          -
          Parameters:
          -
          type - The type of the items stored by the resulting tensor.
          -
          shape - The shape of the resulting tensor consisting of any number of axis-sizes.
          -
          Returns:
          -
          A newly created tensor of the provided type and shape.
          -
          -
        • -
        - - - -
          -
        • -

          of

          -
          static <V> Tsr<V> of​(java.lang.Class<V> type,
          -                     Shape shape,
          -                     java.lang.Object data)
          -
          Use this to construct and return a tensor of the specified type, shape and data object.
          -
          -
          Type Parameters:
          -
          V - The type parameter of individual tensor items.
          -
          Parameters:
          -
          type - The type of the items stored by the resulting tensor.
          -
          shape - The shape of the resulting tensor consisting of an array of axis-sizes.
          -
          data - The data object which will be used to populate the tensor.
          -
          Returns:
          -
          A newly created tensor of the provided type, shape and data.
          -
          -
        • -
        - - - -
          -
        • -

          of

          -
          static <V> Tsr<V> of​(java.lang.Class<V> type,
          -                     java.util.List<java.lang.Integer> shape,
          -                     java.lang.Object data)
          -
          Use this to construct and return a tensor of the specified type, shape and data object.
          -
          -
          Type Parameters:
          -
          V - The type parameter of individual tensor items.
          -
          Parameters:
          -
          type - The type of the items stored by the resulting tensor.
          -
          shape - The shape of the resulting tensor consisting of list of axis-sizes.
          -
          data - The data object which will be used to populate the tensor.
          -
          Returns:
          -
          A newly created tensor of the provided type, shape and data.
          -
          -
        • -
        - - - -
          -
        • -

          of

          -
          static <V extends java.lang.Number> Tsr<V> of​(java.lang.Class<V> type,
          -                                              Shape shape,
          -                                              java.lang.Number data)
          -
          Use this to construct and return a tensor of the specified type, shape and number.
          -
          -
          Type Parameters:
          -
          V - The type parameter of individual tensor items.
          -
          Parameters:
          -
          type - The type of the items stored by the resulting tensor.
          -
          shape - The shape of the resulting tensor consisting of an immutable tuple of axis-sizes.
          -
          data - The data object which will be used to populate the tensor.
          -
          Returns:
          -
          A newly created tensor of the provided type, shape and data.
          -
          -
        • -
        - - - -
          -
        • -

          ofAny

          -
          static <V> Tsr<V> ofAny​(java.lang.Class<V> type,
          -                        Shape shape,
          -                        java.lang.Object data)
          -
          Use this to construct and return a tensor of the specified type, shape and data object.
          -
          -
          Type Parameters:
          -
          V - The type parameter of individual tensor items.
          -
          Parameters:
          -
          type - The type of the items stored by the resulting tensor.
          -
          shape - The shape of the resulting tensor consisting of an immutable tuple of axis-sizes.
          -
          data - The data object which will be used to populate the tensor.
          -
          Returns:
          -
          A newly created tensor of the provided type, shape and data.
          -
          -
        • -
        - - - -
          -
        • -

          of

          -
          static <V> Tsr<V> of​(java.lang.Class<V> type,
          -                     java.util.List<java.lang.Integer> shape,
          -                     java.util.List<V> data)
          -
          Use this to construct and return a tensor of the specified type, shape and data object.
          -
          -
          Type Parameters:
          -
          V - The type parameter of individual tensor items.
          -
          Parameters:
          -
          type - The type of the items stored by the resulting tensor.
          -
          shape - The shape of the resulting tensor consisting of a list of axis-sizes.
          -
          data - The list of items which will be used to populate the tensor.
          -
          Returns:
          -
          A newly created tensor of the provided type, shape and data.
          -
          -
        • -
        - - - -
          -
        • -

          of

          -
          static <V> Tsr<V> of​(java.lang.Class<V> type,
          -                     Shape shape,
          -                     java.util.List<V> data)
          -
          Use this to construct and return a tensor of the specified type, shape and list of items. Here is a simple usage example:
          -
          
          -      Tsr<Float> tensor = Tsr.of( Float.class, Shape.of(2,3), List.of(1f,2f,3f,4f,5f,6f) );
          -  
          -
          -
          Type Parameters:
          -
          V - The type parameter of individual tensor items.
          -
          Parameters:
          -
          type - The type of the items stored by the resulting tensor.
          -
          shape - The shape of the resulting tensor consisting of an immutable tuple of axis-sizes.
          -
          data - The list of items which will be used to populate the tensor.
          -
          Returns:
          -
          A newly created tensor of the provided type, shape and data.
          -
          -
        • -
        - - - -
          -
        • -

          of

          -
          static <V> Tsr<V> of​(DataType<V> dataType,
          -                     java.util.List<java.lang.Integer> shape,
          -                     java.util.List<V> data)
          -
          Use this to construct and return a tensor of the specified type, shape and data object.
          -
          -
          Type Parameters:
          -
          V - The type parameter of individual tensor items.
          -
          Parameters:
          -
          dataType - The type of the items stored by the resulting tensor.
          -
          shape - The shape of the resulting tensor consisting of a list of axis-sizes.
          -
          data - The data object which will be used to populate the tensor.
          -
          Returns:
          -
          A newly created tensor of the provided type, shape and data.
          -
          -
        • -
        - - - -
          -
        • -

          of

          -
          static <V> Tsr<V> of​(DataType<V> dataType,
          -                     Shape shape,
          -                     java.util.List<V> data)
          -
          Use this to construct and return a tensor of the specified type, shape and a list of items. Here is a simple usage example:
          -
          
          -      Tsr<Float> tensor = Tsr.of( DataType.F32, Shape.of(2,3), List.of(1f,2f,3f,4f,5f,6f) );
          -  
          -
          -
          Type Parameters:
          -
          V - The type parameter of individual tensor items.
          -
          Parameters:
          -
          dataType - The type of the items stored by the resulting tensor.
          -
          shape - The shape of the resulting tensor consisting of an immutable tuple of axis-sizes.
          -
          data - The list of items which will be used to populate the tensor.
          -
          Returns:
          -
          A newly created tensor of the provided type, shape and data.
          -
          -
        • -
        - - - -
          -
        • -

          of

          -
          static <V> Tsr<V> of​(DataType<V> dataType,
          -                     int[] shape,
          -                     java.lang.Object data)
          -
          This factory method is among the most flexible and forgiving ways to create a Tsr instance. It receives a DataType for type safety and to ensure that the produced Tsr instance will contain elements of the correct type, a shape array which stores the sizes of the axes that the instance ought to possess, and finally it receives a data Object which can be anything ranging from a List to an array or simply a single value which ought to fill out the entire Tsr.
          -
          -
          Parameters:
          -
          dataType - The data type of the data represented by Tsr instance created by this method.
          -
          shape - An array of axis sizes describing the dimensionality of the Tsr created by this method.
          -
          data - The data for the Tsr that is about to be created, which can be a list, an array or scalar.
          -
          Returns:
          -
          A new Tsr instance of the specified type, shape and containing the provided data.
          -
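          A minimal sketch of the three kinds of data argument this method accepts (DataType.F64 is used here as elsewhere in this documentation; the values are illustrative):

               Tsr<Double> fromArray  = Tsr.of( DataType.F64, new int[]{2, 3}, new double[]{1, 2, 3, 4, 5, 6} );
               Tsr<Double> fromList   = Tsr.of( DataType.F64, new int[]{2, 3}, java.util.List.of(1d, 2d, 3d, 4d, 5d, 6d) );
               Tsr<Double> fromScalar = Tsr.of( DataType.F64, new int[]{2, 3}, 42d ); // a single value fills the whole tensor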
          -
        • -
        - - - -
          -
        • -

          of

          -
          static <V> Tsr<V> of​(DataType<V> dataType,
          -                     Shape shape,
          -                     java.lang.Object data)
          -
          This factory method is among the most flexible and forgiving ways to create a Tsr instance. - It receives a DataType for type safety and to ensure that the produced Tsr instance - will contain elements of the correct type, and a Shape tuple which stores the sizes of the axes that the - instance ought to possess, and finally it receives a data Object which can be anything ranging from - a List to an array or simply a single value which ought to fill out the entire Tsr.
          -
          -
          Parameters:
          -
          dataType - The data type of the data represented by Tsr instance created by this method.
          -
          shape - An immutable tuple of axis sizes describing the dimensionality of the Tsr created by this method.
          -
          data - The data for the Tsr that is about to be created, which can be a list, an array or scalar.
          -
          Returns:
          -
          A new Tsr instance of the specified type, shape and containing the provided data.
          -
          -
        • -
        - - - -
          -
        • -

          of

          -
          static <V extends N,​N> Tsr<V> of​(DataType<V> dataType,
          -                                       Device<N> device,
          -                                       Shape shape,
          -                                       java.lang.Object data)
          -
          This factory method is among the most flexible and forgiving ways to create a Tsr instance. - It receives a DataType for type safety and to ensure that the produced Tsr instance - will contain elements of the correct type, and a Shape tuple which stores the sizes of the axes that the - instance ought to possess, and finally it receives a data Object which can be anything ranging from - a List to an array or simply a single value which ought to fill out the entire Tsr.
          -
          -
          Parameters:
          -
          dataType - The data type of the data represented by Tsr instance created by this method.
          -
          device - The device on which the tensor will be stored.
          -
          shape - An immutable tuple of axis sizes describing the dimensionality of the Tsr created by this method.
          -
          data - The data for the Tsr that is about to be created, which can be a list, an array or scalar.
          -
          Returns:
          -
          A new Tsr instance of the specified type, shape and containing the provided data.
          -
          -
        • -
        - - - -
          -
        • -

          of

          -
          static <V> Tsr<V> of​(DataType<V> dataType,
          -                     NDConstructor ndConstructor,
          -                     Data<V> data)
          -
          This factory method is a raw tensor constructor which will not perform any type checking or data conversion on the data provided to it. It constructs the tensor expecting that the provided data is of the correct type and that the axis sizes are described by the given NDConstructor.
          -
          -
          Type Parameters:
          -
          V - The type parameter of individual tensor items.
          -
          Parameters:
          -
          dataType - The data type of the data represented by Tsr instance created by this method.
          -
          ndConstructor - The NDConstructor that will be used to construct the Tsr instance.
          -
          data - The data for the Tsr that is about to be created, which is expected to be an array.
          -
          Returns:
          -
          A new Tsr instance of the specified type, shape and containing the provided data.
          -
          -
        • -
        - - - -
          -
        • -

          of

          -
          static <T> Tsr<T> of​(DataType<T> type,
          -                     java.util.List<java.lang.Integer> shape,
          -                     Filler<T> filler)
          -
          This factory method allows the creation of tensors with an additional initialization - lambda for filling the underlying data array with desired values. - Other than regular numeric types it is also possible to initialize the - tensor with regular Objects like String instances or custom data types like complex - numbers for example...
          - Therefore the constructor requires not only a shape as argument but also - the data type which ought to be allocated as well as the initialization - lambda which will be called iteratively.
          -
          -
          Type Parameters:
          -
          T - The type parameter for the actual data array items.
          -
          Parameters:
          -
          type - The data type this tensor ought to have.
          -
          shape - The shape of this new tensor ought to have.
          -
          filler - The lambda Object which ought to fill this tensor with the appropriate data.
          -
          -
        • -
        - - - -
          -
        • -

          of

          -
          static <T> Tsr<T> of​(DataType<T> type,
          -                     Shape shape,
          -                     Filler<T> filler)
          -
          This factory method allows the creation of tensors with an additional initialization - lambda for filling the underlying data array with desired values. - Other than regular numeric types it is also possible to initialize the - tensor with regular Objects like String instances or custom data types like complex - numbers for example...
          Therefore the constructor requires not only a shape as argument but also the data type which ought to be allocated, as well as the initialization lambda which will be called iteratively. Here is a simple usage example:
          -
          
          -      Tsr<Double> tensor = Tsr.of( DataType.F64, Shape.of(2, 3), (i, j) -> i + j );
          -  
          -
          -
          Type Parameters:
          -
          T - The type parameter for the actual data array items.
          -
          Parameters:
          -
          type - The data type this tensor ought to have.
          -
          shape - The shape of this new tensor ought to have.
          -
          filler - The lambda Object which ought to fill this tensor with the appropriate data.
          -
          -
        • -
        - - - -
          -
        • -

          of

          -
          static <T> Tsr<T> of​(DataType<T> type,
          -                     int[] shape,
          -                     Filler<T> filler)
          -
          This factory method allows the creation of tensors with an additional initialization - lambda for filling the underlying data array with desired values. - Besides regular numeric types it is also possible to initialize the - tensor with regular objects like String instances or custom data types like complex - numbers for example...
          - Therefore the constructor requires not only a shape as argument but also - the data type which ought to be allocated as well as the initialization - lambda which will be called iteratively.
          -
          -
          Type Parameters:
          -
          T - The type parameter for the actual data array items.
          -
          Parameters:
          -
          type - The data type this tensor ought to have.
          -
          shape - The shape of this new tensor ought to have.
          -
          filler - The lambda Object which ought to fill this tensor with the appropriate data.
          -
          -
        • -
        - - - - - -
          -
        • -

          of

          -
          @SafeVarargs
          -static <V extends java.lang.Number> Tsr<V> of​(java.lang.String expression,
          -                                              V... inputs)
          -
          This factory method allows for the creation and execution of Function instances without having to instantiate them manually; the result of the execution is returned by this factory method.
          The passed String will be parsed into a Function AST which will be cached using the expression as key, in case it is used in future calls like this one, or elsewhere... The created / retrieved Function will then be called with the supplied inputs in order to trigger an execution, whose result is used to populate the returned tensor.
          An example would be the following:
          -
            -
          • 'var a = Tsr.of( "sin( I[0] ) / I[1]", 12f, -6.34f )'
          • -
          -
          -
          Parameters:
          -
          expression - A String which will be used for parsing a Function AST.
          -
          inputs - An array of inputs which can be tensors or numeric types.
          -
          -
        • -
        - - - -
          -
        • -

          of

          -
          static <V> Tsr<V> of​(java.lang.String expression,
          -                     java.util.List<Tsr<V>> inputs)
          -
          This factory method allows for the creation and execution of Function instances without having to instantiate them manually; the result of the execution is returned by this factory method.
          The passed String will be parsed into a Function AST which will be cached using the expression as key, in case it is used in future calls like this one, or elsewhere... The created / retrieved Function will then be called with the supplied input list in order to trigger an execution, whose result is used to populate the returned tensor.
          An example would be the following:
          -
            -
          • 'var a = Tsr.of( "sin( I[0] ) / I[1]", List.of(b, c) )'
          • -
          -
          -
          Parameters:
          -
          expression - A String which will be used for parsing a Function AST.
          -
          inputs - A list of inputs which can be tensors or numeric types.
          -
          -
        • -
        - - - -
          -
        • -

          of

          -
          static <V> Tsr<V> of​(java.lang.String expression,
          -                     boolean doAD,
          -                     java.util.List<Tsr<V>> tensors)
          -
          This method takes a list of tensors and a String expression describing - operations which ought to be applied to the tensors in said list. - It also receives a boolean flag which determines if the defined function - should be executed with autograd enabled. - The provided expression will be parsed to a Function instance expecting as many inputs - as there are array entries, namely : "I[0]", "I[1]", "I[2]", ...
          - An example would be the following :
          -
            -
          • 'var a = Tsr.of( "sin( I[0] ) / I[1]", true, List.of(b, c) )'
          • -
          This takes the tensors 'b' and 'c' and applies the function "f(x,y) = sin(x) / y" element-wise to produce a new tensor 'a'! Additionally, there is a helpful flag which allows one to specify if the parsed Function instance emerging from the provided expression should also allow the tracking of computations via a computation graph (GraphNode instances). This history tracking then enables auto-differentiation.
          -
          -
          Parameters:
          -
          expression - The expression describing operations applied to the provided tensors.
          -
          doAD - A flag which when set to true commands the creation of a computation graph during operation execution.
          -
          tensors - A list of tensors used as inputs to the Function instance parsed from the provided expression.
          -
          -
        • -
        - - - -
          -
        • -

          of

          -
          static <V> Tsr<V> of​(java.lang.String expression,
          -                     Tsr<V> tensor)
          -
          This method takes a tensor and a String expression describing - operations which ought to be applied to said tensor. - This expression will be parsed to a Function instance expecting one input, - namely : "I[0]"
          - An example would be the following : -
            -
          • 'var a = Tsr.of( "sin( I[0] ) * 2", b )'
          • -
          - - Which takes the tensor 'b' and applies the function "f(x) = sin(x) * 2" - element-wise to produce a new tensor 'a'!
          -
          -
          -
          Parameters:
          -
          tensor - A tensor which serves as input to the Function instance parsed from the given expression.
          -
          expression - The expression describing operations applied to the provided tensor.
          -
          -
        • -
        - - - -
          -
        • -

          of

          -
          @SafeVarargs
          -static <V> Tsr<V> of​(java.lang.String expression,
          -                     Tsr<V>... tensors)
          -
          This method takes an array of tensors and a String expression describing - operations which ought to be applied to the tensors in said array. - This expression will be parsed to a Function instance expecting as many inputs - as there are array entries, namely : "I[0]", "I[1]", "I[2]", ...
          - An example would be the following : -
            -
          • 'var a = Tsr.of( "sin( I[0] ) / I[1]", b, c )'
          • -
          This takes the tensors 'b' and 'c' and applies the function "f(x,y) = sin(x) / y" element-wise to produce a new tensor 'a'!
          -
          -
          Parameters:
          -
          expression - The expression describing operations applied to the provided tensors.
          -
          tensors - An array of tensors used as inputs to the Function instance parsed from the provided expression.
          -
          -
        • -
        - - - -
          -
        • -

          of

          -
          @SafeVarargs
          -static <V> Tsr<V> of​(java.lang.String expression,
          -                     boolean doAD,
          -                     Tsr<V>... tensors)
          -
          This method takes an array of tensors and a String expression describing - operations which ought to be applied to the tensors in said array. - It also receives a boolean flag which determines if the defined function - should be executed with autograd enabled. - The provided expression will be parsed to a Function instance expecting as many inputs - as there are array entries, namely : "I[0]", "I[1]", "I[2]", ...
          - An example would be the following :
          -
            -
          • 'var a = Tsr.of( "sin( I[0] ) / I[1]", true, b, c )'
          • -
          This takes the tensors 'b' and 'c' and applies the function "f(x,y) = sin(x) / y" element-wise to produce a new tensor 'a'! Additionally, there is a helpful flag which allows one to specify if the parsed Function instance emerging from the provided expression should also allow the tracking of computations via a computation graph (GraphNode instances). This history tracking then enables auto-differentiation.
          -
          -
          Parameters:
          -
          expression - The expression describing operations applied to the provided tensors.
          -
          doAD - A flag which when set to true commands the creation of a computation graph during operation execution.
          -
          tensors - An array of tensors used as inputs to the Function instance parsed from the provided expression.
          -
          -
        • -
        - - - -
          -
        • -

          ofRandom

          -
          static <V> Tsr<V> ofRandom​(java.lang.Class<V> valueTypeClass,
          -                           int... shape)
          -
          This factory method produces a randomly populated tensor of the provided type and shape using a hard coded default seed. If the provided type class is representing a floating point number type (like Double or Float) then the random numbers will be gaussian ("normally") distributed values with mean 0.0 and standard deviation 1.0.
          -
          -
          Type Parameters:
          -
          V - The type parameter of the values stored by the returned tensor.
          -
          Parameters:
          -
          valueTypeClass - The type class of the values stored by the returned tensor.
          -
          shape - The shape of the tensor produced by this factory method.
          -
          Returns:
          -
          A randomly filled tensor of the provided type.
          -
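          For example (a minimal sketch):

               // a 2x3 tensor of gaussian ("normally") distributed doubles, produced with the default seed
               Tsr<Double> r = Tsr.ofRandom( Double.class, 2, 3 );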
          -
        • -
        - - - -
          -
        • -

          like

          -
          static <V> IterByOrIterFromOrAllTsr<V> like​(Tsr<V> template)
          -
          Use this factory method to instantiate a new tensor with the same data type, shape - and memory location (Device instance) as the provided template tensor.
          -
          -
          Type Parameters:
          -
          V - The type parameter defining the value type of the provided as well as returned tensor.
          -
          Parameters:
          -
          template - The template tensor whose type, shape and location should be taken to construct a new tensor.
          -
          Returns:
          -
          A new Tsr instance with the same data type, shape and memory location as the provided template.
          -
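          A minimal sketch, assuming the returned fluent handle exposes the all(..) step shown elsewhere in this documentation:

               Tsr<Float> template  = Tsr.of( Shape.of(2, 3), 1f );
               Tsr<Float> zerosLike = Tsr.like( template ).all( 0f ); // same type, shape and device as 'template'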
          -
        • -
        - - - -
          -
        • -

          shaped

          -
          static <T> java.util.stream.Collector<T,​?,​Tsr<T>> shaped​(int... shape)
          -
          Returns a Collector that accumulates the input elements into a - new Tsr with the specified shape.
          - Usage example :
          -
          
          -    var tensor = Stream.of( 1, 2, 3, 4, 5, 6 )
          -                      .collect( Tsr.shaped( 2, 3 ) );
          - 
          -
          -
          Type Parameters:
          -
          T - the type of the input elements
          -
          Parameters:
          -
          shape - The shape of the tensor to be returned.
          -
          Returns:
          -
          a Collector which collects all the input elements into a - Tsr, in encounter order.
          -
          -
        • -
        - - - -
          -
        • -

          shaped

          -
          static <T> java.util.stream.Collector<T,​?,​Tsr<T>> shaped​(Shape shape)
          -
          Returns a Collector that accumulates the input elements into a - new Tsr with the specified shape.
          - Usage example :
          -
          
          -    var tensor = Stream.of( 1, 2, 3, 4, 5, 6 )
          -                      .collect( Tsr.shaped( otherTensor.shape() ) );
          - 
          -
          -
          Type Parameters:
          -
          T - the type of the input elements
          -
          Parameters:
          -
          shape - The shape of the tensor to be returned.
          -
          Returns:
          -
          a Collector which collects all the input elements into a - Tsr, in encounter order.
          -
          -
        • -
        - - - -
          -
        • -

          setRqsGradient

          -
          Tsr<V> setRqsGradient​(boolean rqsGradient)
          -
          Setting this flag to true will tell the autograd system to accumulate gradients at this tensor. This is achieved by allowing for the recording of a computation graph for when this tensor is used in any autograd supporting operations. This allows the autograd / auto-differentiation system to traverse said graph when the backward() method is called on any descendant tensor at the most recent end of the computation graph.
          -
          -
          Parameters:
          -
          rqsGradient - The truth value determining if this tensor ought to receive gradients via - the built-in automatic backpropagation system.
          -
          Returns:
          -
          This very Tsr instance in order to enable method chaining.
          -
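          A minimal sketch of the autograd round trip this flag enables (the shape, values and expression are illustrative assumptions):

               Tsr<Float> w = Tsr.of( Shape.of(2), 3f ).setRqsGradient( true );
               Tsr<Float> y = Tsr.of( "I[0] * I[0]", true, w ); // 'true' records a computation graph
               y.backward();                                    // accumulates dy/dw at 'w'
               w.gradient().ifPresent( System.out::println );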
          -
        • -
        - - - -
          -
        • -

          rqsGradient

          -
          boolean rqsGradient()
          -
          This flag will indirectly trigger the activation of the autograd / auto-differentiation system of this library! - If the flag is set to 'true' and the tensor is used for computation then - it will also receive gradients when the backward() method is being called - on any descendant tensor within the computation graph.
          -
          -
          Returns:
          -
          The truth value determining if this tensor ought to receive gradients via - the built-in automatic backpropagation system.
          -
          -
        • -
        - - - -
          -
        • -

          isIntermediate

          -
          boolean isIntermediate()
          -
          Intermediate tensors are internal non-user tensors which may be eligible - for deletion when further consumed by a Function. - For the casual user of Neureka, this flag should always be false!
          -
          -
          Returns:
          -
          The truth value determining if this tensor is not a user tensor but an internal - tensor which may be eligible for deletion by Functions consuming it.
          -
          -
        • -
        - - - -
          -
        • -

          isOutsourced

          -
          default boolean isOutsourced()
          -
          Outsourced means that the tensor is stored on a Device implementation instance which is not the CPU.
          -
          -
          Returns:
          -
          The truth value determining if the data of this tensor is not actually stored inside it - in the form of a traditional primitive JVM array!
          -
          -
        • -
        - - - -
          -
        • -

          isVirtual

          -
          boolean isVirtual()
          -
          A Virtual tensor is a tensor whose underlying data array is of size 1, holding only a single value.
          - This only makes sense for homogeneously populated tensors. - An example of such a tensor would be:
          - Tsr.ofInts().withShape(x,y).all(n)

          - The reasons for this feature is that it greatly improves performance in certain cases. - In essence this feature is a form of lazy loading. -

          Use MutateTsr.setIsVirtual(boolean) to "actualize" a "virtual" tensor, and vice versa.

          -
          -
          Returns:
          -
          The truth value determining if this tensor is "virtual" or "actual".
          -
          -
        • -
        - - - -
          -
        • -

          isDeleted

          -
          boolean isDeleted()
          -
          This will check if the MutateTsr.delete() method was previously called on this tensor. This means that the tensor data was freed on every device and any references inside the tensor are null (to be eligible for garbage collection).
          -
          -
          Returns:
          -
          The truth value determining if the MutateTsr.delete() method has been called on this instance.
          -
          -
        • -
        - - - -
          -
        • -

          isEmpty

          -
          default boolean isEmpty()
          -
          A tensor is empty if its Data storage is null. This is true for deleted tensors or tensors which have not been initialized yet.
          -
          -
          Returns:
          -
          The truth value determining if this tensor has no Data.
          -
          -
        • -
        - - - -
          -
        • -

          isUndefined

          -
          default boolean isUndefined()
          -
          A tensor is "undefined" if it has either no NDConfiguration implementation instance - or this instance does not have a shape set for this Tsr which is needed for - a tensor to also have a rank and dimensionality...
          -
          -
          Returns:
          -
          The truth value determining if this tensor has an NDConfiguration stored internally.
          -
          -
        • -
        - - - -
          -
        • -

          isSlice

          -
          default boolean isSlice()
          -
          If this nd-array is a slice of a parent nd-array then this method will yield true. - Slices can be created by calling the variations of the "Nda.getAt(int...)" method.
          -
          -
          Specified by:
          -
          isSlice in interface Nda<V>
          -
          Returns:
          -
          The truth value determining if this nd-array is a slice of another nd-array.
          -
          See Also:
          -
          Nda.getAt(int...), -Nda.slice()
          -
          -
        • -
        - - - -
          -
        • -

          isShallowCopy

          -
          default boolean isShallowCopy()
          -
          If this nd-array is a shallow copy of a parent nd-array then this method will yield true. - Shallow copies can be created by calling the "Nda.shallowCopy()" method.
          -
          -
          Specified by:
          -
          isShallowCopy in interface Nda<V>
          -
          Returns:
          -
          The truth value determining if this nd-array is a shallow copy of another nd-array.
          -
          See Also:
          -
          Nda.shallowCopy()
          -
          -
        • -
        - - - -
          -
        • -

          isPartialSlice

          -
          default boolean isPartialSlice()
          -
          If this nd-array is a partial slice of a parent nd-array then this method will yield true. A partial slice is a slice which does not view all of the parent's items. Partial slices can be created by calling the variations of the "Nda.getAt(int...)" method. This is the inverse of Nda.isFullSlice().
          -
          -
          Specified by:
          -
          isPartialSlice in interface Nda<V>
          -
          Returns:
          -
          The truth value determining if this nd-array is a partial slice of another nd-array.
          -
          -
        • -
        - - - -
          -
        • -

          sliceCount

          -
          default int sliceCount()
          -
          This method returns the number of slices which have been - created from this nd-array. - It does so by accessing the Relation component if present - which internally keeps track of slices via weak references.
          -
          -
          Specified by:
          -
          sliceCount in interface Nda<V>
          -
          Returns:
          -
          The number of slices derived from this nd-array.
          -
          -
        • -
        - - - -
          -
        • -

          isSliceParent

          -
          default boolean isSliceParent()
          -
          If slices have been derived from this nd-array then it is a "slice parent". - This is what this method will determine, in which case, it will return true.
          -
          -
          Specified by:
          -
          isSliceParent in interface Nda<V>
          -
          Returns:
          -
          The truth value determining if slices have been derived from this nd-array.
          -
          -
        • -
        - - - -
          -
        • -

          belongsToGraph

          -
          default boolean belongsToGraph()
          -
          Tensors which are used or produced by the autograd system will have a GraphNode component attached to them. - This is because autograd requires recording a computation graph for back-prop traversal. - This autograd system however, will only be triggered by Function implementations which - are not "detached", meaning they have their "Function.isDoingAD()" flags set to true!
          - Detached functions (like those pre-instantiated in Function.Detached.*) will not attach GraphNode - instances to involved tensors which will prevent the formation of a computation graph.
          -
          -
          Returns:
          -
          The truth value determining if this tensor belongs to a recorded computation graph.
          -
          -
        • -
        - - - -
          -
        • -

          isLeave

          -
          default boolean isLeave()
          -
          Tensors which are used or produced by the autograd system will have a GraphNode component attached to them. - This is because autograd requires recording a computation graph for back-prop traversal. - This autograd system however, will only be triggered by Function implementations which - are not "detached", meaning they have their "Function.isDoingAD()" flags set to true!
          A tensor is a leave if it is attached to a computation graph in which it is not an intermediate / branch node but an input / leave node.
          -
          -
          Returns:
          -
          The truth value determining if this tensor is attached to a computation graph as leave node.
          -
          -
        • -
        - - - -
          -
        • -

          isBranch

          -
          default boolean isBranch()
          -
          Tensors which are used or produced by the autograd system will have a GraphNode component attached to them. - This is because autograd requires recording a computation graph for back-prop traversal. - This autograd system however, will only be triggered by Function implementations which - are not "detached", meaning they have their "Function.isDoingAD()" flags set to true!
          - A tensor is a branch if it is attached to a computation graph in which it is not an input / leave node - but intermediate / branch node.
          -
          -
          Returns:
          -
          The truth value determining if this tensor is attached to a computation graph as branch node.
          -
          -
        • -
        - - - -
          -
        • -

          hasGradient

          -
          default boolean hasGradient()
          -
          Tensors can be components of other tensors, which implicitly makes them gradients of their host tensors.
          -
          -
          Returns:
          -
          The truth value determining if this tensor has another tensor attached to it (which is its gradient).
          -
          -
        • -
        - - - -
          -
        • -

          gradientApplyRequested

          -
          boolean gradientApplyRequested()
          -
          This flag works alongside two autograd features which can be enabled inside the library settings. - They will come into effect when flipping their feature flags,
          - namely: 'isApplyingGradientWhenRequested' and 'isApplyingGradientWhenTensorIsUsed'
          - As the first flag name suggests gradients will be applied to their tensors when it is set to true, - however this will only happen when the second flag is set to true as well, because otherwise gradients - wouldn't be applied to their tensors automatically in the first place...
          -
          - Setting both flags to true will inhibit the effect of the second setting 'isApplyingGradientWhenTensorIsUsed' - unless a form of "permission" is being signaled to the autograd system. - This signal comes in the form of a "request" flag which marks a tensor as allowed to - be updated by its gradient.
          -
          -
          -
          Returns:
          -
          The truth value determining if the application of the gradient of this tensor is requested.
          -
          -
        • -
        - - - -
          -
        • -

          setGradientApplyRequested

          -
          Tsr<V> setGradientApplyRequested​(boolean applyRequested)
          -
          This flag works alongside two autograd features which can be enabled inside the library settings. - They will come into effect when flipping their feature flags,
          - namely: 'isApplyingGradientWhenRequested' and 'isApplyingGradientWhenTensorIsUsed'
          - As the first flag name suggests gradients will be applied to their tensors when it is set to true, - however this will only happen when the second flag is set to true as well, because otherwise gradients - wouldn't be applied to their tensors automatically in the first place...
          -
          - Setting both flags to true will inhibit effect of the second setting 'isApplyingGradientWhenTensorIsUsed' - unless a form of "permission" is being signaled to the autograd system. - This signal comes in the form of a "request" flag which marks a tensor as allowed to - be updated by its gradient.
          -
          -
          -
          Parameters:
          -
          applyRequested - The truth value determining if the application of the gradient of this tensor is requested.
          -
          Returns:
          -
          This very tensor instance in order to enable method chaining.
          -
          -
        • -
        - - - -
          -
        • -

          update

          -
          default boolean update​(Component.OwnerChangeRequest<Tsr<V>> changeRequest)
          -
          Important : Components of type Tsr are simply gradients! - Currently, this method is used only to catch illegal arguments which - is for example the case when trying to attach a gradient with a different shape... - (Otherwise the gradient tensor "does not mind" an owner change...)
          -
          -
          Specified by:
          -
          update in interface Component<V>
          -
          Parameters:
          -
          changeRequest - An Component.OwnerChangeRequest implementation instance used to communicate the type of change, context information and the ability to execute the change directly.
          -
          Returns:
          -
          The truth value determining if the state change should be aborted or not.
          -
          -
        • -
        - - - -
          -
        • -

          getVersion

          -
          int getVersion()
          -
          The version number tracks how often this tensor has been mutated. This is especially useful for checking the correctness of autograd!
          -
        • -
        - - - -
          -
        • -

          getDataType

          -
          DataType<V> getDataType()
          -
          This method returns the DataType instance of this Tsr, which is - a wrapper object for the actual type class representing the value items stored inside - the underlying data array of this tensor.
          -
          -
          Returns:
          -
          The DataType instance of this Tsr storing important type information.
          -
          -
        • -
        - - - -
          -
        • -

          getRepresentativeItemClass

          -
          java.lang.Class<?> getRepresentativeItemClass()
          -
          The Class returned by this method is the representative Class of the - value items of a concrete AbstractNda but not necessarily the actual Class of - a given value item, this is especially true for numeric types, which are represented by - implementations of the NumericType interface.
          - For example in the case of a tensor of type Double, this method would - return F64 which is the representative class of Double.
          - Calling the Nda.getItemType() method instead of this method would return the actual value - type class, namely: Double.
          -
          -
          Returns:
          -
          The representative type class of individual value items within this concrete AbstractNda - extension instance which might also be subclasses of the NumericType interface - to model unsigned types or other JVM foreign numeric concepts.
          -
          -
        • -
        - - - -
          -
        • -

          getMut

          -
          MutateTsr<V> getMut()
          -
          This method exposes an API for mutating the state of this tensor. - The usage of methods exposed by this API is generally discouraged - because the exposed state can easily lead to broken tensors and exceptional situations!
          -
          -

          - Only use this if you know what you are doing and - performance is critical!
          -
          - (Like in custom backend extensions for example)

          -
          -
          Specified by:
          -
          getMut in interface Nda<V>
          -
          Returns:
          -
          The unsafe API exposes methods for mutating the state of the tensor.
          -
          -
        • -
        - - - -
          -
        • -

          mut

          -
          default MutateTsr<V> mut()
          -
          This method exposes an API for mutating the state of this tensor. - The usage of methods exposed by this API is generally discouraged - because the exposed state can easily lead to broken tensors and exceptional situations!
          -
          -

          - Only use this if you know what you are doing and - performance is critical!
          -
          - (Like custom backend extensions for example)

          -
          -
          Specified by:
          -
          mut in interface Nda<V>
          -
          Returns:
          -
          The unsafe API exposes methods for mutating the state of the tensor.
          -
          -
        • -
        - - - -
          -
        • -

          reshape

          -
          default Tsr<V> reshape​(int... shape)
          -
          Returns an nd-array with the same data and number of elements as this nd-array, but with the specified shape. When possible, the returned nd-array will be a view of this nd-array. A single dimension may be -1, in which case it is inferred from the remaining dimensions and the number of elements of the input. Keep in mind that the new shape must have the same number of elements as the original shape.
          -
          - This operation supports autograd.
          -
          -
          Specified by:
          -
          reshape in interface Nda<V>
          -
          Parameters:
          -
          shape - The new shape of the returned nd-array.
          -
          Returns:
          -
          A new nd-array instance with the same underlying data (~shallow copy) but with a different shape.
          -
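          For example (a minimal sketch; the values are illustrative):

               Tsr<Float> a = Tsr.of( Shape.of(2, 3), new float[]{1, 2, 3, 4, 5, 6} );
               Tsr<Float> b = a.reshape( 3, -1 ); // -1 is inferred, so 'b' has shape [3, 2]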
          -
        • -
        - - - -
          -
        • -

          permute

          -
          default Tsr<V> permute​(int... dims)
          -
          Returns a view of the original tensor input with its dimensions permuted.
          - Consider a 3-dimensional tensor x with shape (2×3×5), - then calling x.permute(1, 0, 2) will return a 3-dimensional tensor of shape (3×2×5).
          -
          -
          Specified by:
          -
          permute in interface Nda<V>
          -
          Parameters:
          -
          dims - The desired ordering of dimensions
          -
          Returns:
          -
          A new nd-array instance with the same underlying data (~shallow copy) but with a different shape.
          -
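          The (2×3×5) example from the description above, written out as code (a minimal sketch):

               Tsr<Float> x = Tsr.of( Shape.of(2, 3, 5), 0f );
               Tsr<Float> y = x.permute( 1, 0, 2 ); // 'y' has shape [3, 2, 5]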
          -
        • -
        - - - -
          -
        • -

          transpose

          -
          default Tsr<V> transpose​(int dim1,
          -                         int dim2)
          -
          Returns a view of the original tensor input in which the targeted axes are swapped / transposed.
          -
          -
          Specified by:
          -
          transpose in interface Nda<V>
          -
          Parameters:
          -
          dim1 - The first dimension to be swapped.
          -
          dim2 - The second dimension to be swapped.
          -
          Returns:
          -
          A new nd-array instance with the same underlying data (~shallow copy) but with a different shape.
          -
          -
        • -
        - - - -
          -
        • -

          to

          -
          Tsr<V> to​(Device<?> device)
          -
          This method takes a Device and tries to migrate the contents of this Tsr - instance to that Device!
          -
          -
          Parameters:
          -
          device - The Device which should host this Tsr as well as be added to its components list.
          -
          Returns:
          -
          This very class to enable method chaining.
          -
          -
        • -
        - - - -
          -
        • -

          to

          -
          default Tsr<V> to​(java.lang.String deviceType)
          -
          -
          Parameters:
          -
          deviceType - A search key identifying the device onto which this tensor should be stored.
          -
          Returns:
          -
          This very tensor instance in order to enable method chaining.
          -
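          A minimal sketch; "GPU" is an assumed search key and requires a matching backend device (e.g. an OpenCL device) to be available:

               Tsr<Float> t = Tsr.of( Shape.of(2, 3), 1f ).to( "GPU" );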
          -
        • -
        - - - -
          -
        • -

          set

          -
          default Tsr<V> set​(OptimizerFactory optimizerFactory)
          -
          Configures an Optimizer for this tensor based on the given OptimizerFactory - which will be used to create a new Optimizer instance specific to this tensor. - The Optimizer instance will be attached to this tensor as a component - and then called to perform the actual optimization when the applyGradient() method is called. -

          Here is a simple example of how to use this method:

          
          -  var t = Tsr.of( 1.0, 2.0, 3.0 ).set( Optimizer.ADAM );
          -  
          -

          - As you can see, the Optimizer interface exposes various types of popular - optimization algorithm factories which can be used to quickly and conveniently create - an Optimizer instance for a particular tensor.

          -
          -
          Parameters:
          -
          optimizerFactory - The OptimizerFactory which will be used to create a new Optimizer instance.
          -
          Returns:
          -
          This tensor instance to allow for method chaining.
          -
          -
        • -
        - - - -
          -
        • -

          backward

          -
          default Tsr<V> backward​(Tsr<V> error)
          -
          Tensors which are used or produced by the autograd system will have a GraphNode component attached to them. - This is because autograd requires recording a computation graph for back-prop traversal. - If this tensor is part of a computation graph then this method - will traverse an error backward in the recorded history towards tensors which require - the accumulation of gradients.
          -
          -
          Parameters:
          -
          error - A tensor which is back-propagated to gradients. Must match the size of this tensor.
          -
          Returns:
          -
          This tensor, to allow for method chaining.
          -
          -
        • -
        - - - -
          -
        • -

          backward

          -
          default Tsr<V> backward​(double value)
          -
          Tensors which are used or produced by the autograd system will have a GraphNode component attached to them. - This is because autograd requires recording a computation graph for back-prop traversal. - If this tensor is part of a computation graph then this method - will traverse an error backward in the recorded history towards tensors which require - the accumulation of gradients.
          -
          This method takes the given scalar value, turns it into a matching tensor (with the same shape) and then back-propagates it through the recorded computation graph.
          -
          -
          Parameters:
          -
          value - A scalar which is turned into a tensor of the same shape as this tensor and then back-propagated to gradients.
          -
          Returns:
          -
          The tensor, to allow for method chaining.
          -
          -
        • -
        - - - -
          -
        • -

          backward

          -
          default Tsr<V> backward()
          -
          Use this to back-propagate an error signal of 1.0 through the recorded computation graph. - Tensors which are used or produced by operations supporting the autograd system - will have this graph defined by GraphNode components attached to them. - This is because autograd requires recording a computation graph for back-prop traversal. - If this tensor is part of a computation graph then this method - will traverse an error backward in the recorded history towards tensors which require - the accumulation of gradients.
          -
          - This method assumes that the user wants to back-propagate - an error of "1" having the same shape as - this tensor.
          -
          -
          Returns:
          -
          This tensor to allow for method chaining.
          -
          -
        • -
        - - - -
          -
        • -

          getGradient

          -
          default java.util.Optional<Tsr<V>> getGradient()
          -
          -
          Returns:
          -
          The gradient of this tensor which is internally stored as component.
          -
          -
        • -
        - - - -
          -
        • -

          gradient

          -
          default java.util.Optional<Tsr<V>> gradient()
          -
          This is a functionally identical alternative to the getGradient() method.
          -
          -
          Returns:
          -
          The gradient of this tensor which is internally stored as component.
          -
          -
        • -
        - - - -
          -
        • -

          applyGradient

          -
          default void applyGradient()
          -
          If this tensor owns a gradient tensor as component, then it can be applied by this method.
          - "Applying" a gradient to a tensor simply means adding the values inside the gradient element-wise - to the owning host tensor via an inline operation.
          -
        • -
        - - - -
          -
        • -

          getDevice

          -
          default Device<V> getDevice()
          -
          -
          Returns:
          -
          The device on which this tensor is stored or CPU if it is not outsourced.
          -
          -
        • -
        - - - -
          -
        • -

          getGraphNode

          -
          default java.util.Optional<GraphNode<V>> getGraphNode()
          -
          -
          Returns:
          -
          The graph node optional of the computation graph to which this tensor belongs - or an empty optional if not part of a graph.
          -
          -
        • -
        - - - -
          -
        • -

          graphNode

          -
          default java.util.Optional<GraphNode<V>> graphNode()
          -
          This is a functionally identical alternative to getGraphNode().
          -
          -
          Returns:
          -
          The graph node optional of the computation graph to which this tensor belongs - or an empty optional if not part of a graph.
          -
          -
        • -
        - - - -
          -
        • -

          getFrame

          -
          default java.util.Optional<NDFrame<V>> getFrame()
          -
          -
          Returns:
          -
          An instance of the NDFrame component if present.
          -
          -
        • -
        - - - -
          -
        • -

          frame

          -
          default java.util.Optional<NDFrame<V>> frame()
          -
          This is a functionally identical alternative to getFrame().
          -
          -
          Returns:
          -
          An instance of the NDFrame component if present.
          -
          -
        • -
        - - - -
          -
        • -

          detached

          -
          default Tsr<V> detached()
          -
          This method returns a new tensor detached from any underlying computation-graph - or simply does nothing if no graph is present.
          - Nodes within a computation graph are instances of the "GraphNode" class which are also - simple components of the tensors they represent in the graph.
          - Therefore, a "detached" clone of this tensor is - simply a tensor without a GraphNode component.
          -
          -
          Returns:
          -
          This very instance in order to allow for a more streamlined usage of this method.
          -
          -
        • -
        - - - -
          -
        • -

          withLabel

          -
          Tsr<V> withLabel​(java.lang.String label)
          -
          -
          Specified by:
          -
          withLabel in interface Nda<V>
          -
          Returns:
          -
          A new nd-array which is a shallow copy of this nd-array but with a different label.
          -
          -
        • -
        - - - -
          -
        • -

          withLabels

          -
          Tsr<V> withLabels​(java.lang.String[]... labels)
          -
          This method receives a nested String array which ought to contain labels for the indices of this nd-array. The index of a single element of this nd-array would be an array of numbers as long as the rank, where every number is in the range of the corresponding shape dimension... Labeling an index means that for every dimension there must be a label for the elements in this range array!
          - For example the shape (2,3) could be labeled as follows:
          -
          - dim 0 : ["A", "B"]
          - dim 1 : ["1", "2", "3"]
          -
          -
          -
          Specified by:
          -
          withLabels in interface Nda<V>
          -
          Parameters:
          -
          labels - A nested String array containing labels for indexes of the nd-array dimensions.
          -
          Returns:
          -
          This nd-array (method chaining).
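          - A small sketch of the (2,3) labeling example above. The fluent tensor construction (Tsr.of(Double.class).withShape(...).all(...)) is an assumption based on typical Neureka usage; the withLabels call itself mirrors the signature documented here:
          -      var matrix  = Tsr.of(Double.class).withShape(2, 3).all(0);   // assumed creation API
          -      var labeled = matrix.withLabels(
          -              new String[]{"A", "B"},         // labels for dimension 0 (size 2)
          -              new String[]{"1", "2", "3"}     // labels for dimension 1 (size 3)
          -          );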
          -
          -
        • -
        - - - -
          -
        • -

          withLabels

          -
          Tsr<V> withLabels​(java.util.List<java.util.List<java.lang.Object>> labels)
          -
          This method receives a nested String list which ought to contain labels for the indices of this nd-array. The index of a single element of this nd-array would be an array of numbers as long as the rank, where every number is in the range of the corresponding shape dimension... Labeling an index means that for every dimension there must be a label for the elements in this range array!
          - For example the shape (2,3) could be labeled as follows:
          -
          - dim 0 : ["A", "B"]
          - dim 1 : ["1", "2", "3"]
          -
          -
          -
          Specified by:
          -
          withLabels in interface Nda<V>
          -
          Parameters:
          -
          labels - A nested String list containing labels for indexes of the nd-array dimensions.
          -
          Returns:
          -
          This nd-array (method chaining).
          -
          -
        • -
        - - - -
          -
        • -

          withLabels

          -
          Tsr<V> withLabels​(java.util.Map<java.lang.Object,​java.util.List<java.lang.Object>> labels)
          -
          This method provides the ability to - label not only the indices of the shape of this nd-array, but also - the dimension of the shape. - The first and only argument of the method expects a map instance - where keys are the objects which ought to act as dimension labels - and the values are lists of labels for the indices of said dimensions. - For example the shape (2,3) could be labeled as follows:
          - [
          - "dim 0" : ["A", "B"],
          - "dim 1" : ["1", "2", "3"]
          - ]
          -
          -
          -
          Specified by:
          -
          withLabels in interface Nda<V>
          -
          Parameters:
          -
          labels - A map in which the keys are dimension labels and the values are lists of index labels for the dimension.
          -
          Returns:
          -
          This nd-array (method chaining).
          -
          -
        • -
        - - - -
          -
        • -

          is

          -
          boolean is​(java.lang.Class<?> typeClass)
          -
          This method compares the passed class with the underlying data-type of this NDArray. - If the data-type of this NDArray is equivalent to the passed class then the returned - boolean will be true, otherwise the method returns false.
          -
          -
          Parameters:
          -
          typeClass - The class which ought to be compared to the underlying data-type of this NDArray.
          -
          Returns:
          -
          The truth value of the question: Does this NDArray implementation hold the data of the passed type?
          -
          -
        • -
        - - - -
          -
        • -

          plus

          -
          default Tsr<V> plus​(Tsr<V> other)
          -
          This method will produce the addition of two tensors with the same rank (or two ranks which can be made compatible by padding them with ones), where the left operand is this Tsr instance and the right operand is the tensor passed to the method. If the shapes of both of the involved tensors are identical then the result will be a regular element-wise addition. Otherwise, the method will also be able to perform broadcasting, however only if for every pair of shape dimensions the following is true: Either the dimensions have the same size or one of them has size 1.
          - Here is an example of 2 matching shapes: (1, 4, 1) and (3, 4, 1)
          - And here is an example of a mismatch: (2, 4, 1) and (3, 4, 1)
          -
          -
          Parameters:
          -
          other - The right operand of the addition.
          -
          Returns:
          -
          The sum of this instance as the left and the passed Tsr instance as right operand.
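          - A hedged sketch of the broadcasting rule described above, using the matching shapes from the example. The fluent tensor construction is an assumption based on typical Neureka usage:
          -      var a = Tsr.of(Double.class).withShape(1, 4, 1).all(1);   // assumed creation API
          -      var b = Tsr.of(Double.class).withShape(3, 4, 1).all(2);
          -      var c = a.plus(b);   // shapes (1,4,1) and (3,4,1) broadcast to (3,4,1); every item is 3.0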
          -
          -
        • -
        - - - - - -
          -
        • -

          plus

          -
          default Tsr<V> plus​(V value)
          -
          This method will create a new Tsr with the provided scalar value added to all elements of this Tsr. The shape of this tensor is irrelevant as the provided value will simply be broadcast to any possible shape.
          -
          -
          Parameters:
          -
          value - The right operand of the addition.
          -
          Returns:
          -
          The sum between this instance as the left and the passed double as right operand.
          -
          -
        • -
        - - - -
          -
        • -

          minus

          -
          default Tsr<V> minus​(Tsr<V> other)
          -
          Performs subtraction on - two tensors with the same rank (or two ranks which can be made compatible with padding ones), - where the left operand is this Tsr - instance and the right operand is the tensor passed to the method. - If the shapes of both of the involved tensors are identical then - the result will be a regular element-wise subtraction. - Otherwise, the method will also be able to perform broadcasting, however only if - for every pair of shape dimensions the following is true: - Either the dimensions have the same size or one of them has size 1.
          - Here is an example of 2 matching shapes: (1, 4, 1) and (3, 4, 1)
          - And here is an example of a mismatch: (2, 4, 1) and (3, 4, 1)
          -
          -
          Parameters:
          -
          other - The right operand of the subtraction.
          -
          Returns:
          -
          The difference between this instance as the left and the passed Tsr instance as right operand.
          -
          -
        • -
        - - - - - -
          -
        • -

          minus

          -
          default Tsr<V> minus​(V other)
          -
          This method will create a new Tsr with the provided item subtracted from all elements of this Tsr. The shape of this tensor is irrelevant as the provided item will simply be broadcast to all items of this tensor, irrespective of its shape.
          -
          -
          Parameters:
          -
          other - The right operand of the subtraction, which is an item of the same type as this tensor.
          -
          Returns:
          -
          The difference between this instance as the left and the passed item as right operand.
          -
          -
        • -
        - - - -
          -
        • -

          negative

          -
          default Tsr<V> negative()
          -
          -
          Returns:
          -
          A clone of this tensor where the signs of all elements are flipped.
          -
          -
        • -
        - - - -
          -
        • -

          T

          -
          default Tsr<V> T()
          -
          Creates and returns a new Tsr instance which is a transposed twin of this instance.
          - This is a shorter alternative to the functionally identical getT() method.
          -
          -
          Returns:
          -
          A new transposed tensor with the same underlying Data as this tensor.
          -
          -
        • -
        - - - -
          -
        • -

          getT

          -
          default Tsr<V> getT()
          -
          A method which returns a new Tsr instance which is a transposed twin of this instance.
          - This is an alternative to the functionally identical T() method.
          -
          -
          Returns:
          -
          A new transposed tensor with the same underlying Data as this tensor.
          -
          -
        • -
        - - - -
          -
        • -

          mean

          -
          default Tsr<V> mean()
          -
          Calculates the mean of all values within this tensor and returns it in the form of a scalar tensor.
          - This operation supports autograd.
          -
          -
          Returns:
          -
          A scalar tensor which wraps the mean value of all values of this tensor.
          -
          -
        • -
        - - - -
          -
        • -

          sum

          -
          default Tsr<V> sum()
          -
          Calculates the sum of all values within this tensor and returns it in the form of a scalar tensor.
          - This operation supports autograd.
          -
          -
          Returns:
          -
          A scalar tensor which wraps the sum of all values of this tensor.
          -
          -
        • -
        - - - -
          -
        • -

          sum

          -
          default Tsr<V> sum​(int axis)
          -
          Calculates the sum of all values within this tensor along the specified axis and returns it in the form of a tensor.
          - For example, if this tensor has a shape of (2, 3, 4) and the axis is 1, then the result will be a tensor with a shape of (2, 1, 4), because summing along axis 1 collapses that axis to a single value for every combination of the remaining dimensions.
          - This operation supports autograd.
          -
          -
          Parameters:
          -
          axis - The axis along which the sum should be calculated.
          -
          Returns:
          -
          A tensor which wraps the sum of all values of this tensor along the specified axis.
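          - A small sketch of the shape change described above. The fluent tensor construction is an assumption based on typical Neureka usage:
          -      var t = Tsr.of(Double.class).withShape(2, 3, 4).all(1);   // assumed creation API
          -      var s = t.sum(1);   // shape (2, 1, 4); every item is 3.0 because axis 1 has size 3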
          -
          -
        • -
        - - - -
          -
        • -

          sum

          -
          default Tsr<V> sum​(int... axes)
          -
          Calculates the sum of all values within this tensor along the specified axes and returns it in the form of a tensor.
          - For example, if this tensor has a shape of (2, 3, 4) and the axes are 1 and 2, then the result will be a tensor with a shape of (2, 1, 1), because summing along axes 1 and 2 collapses both of them to a single value for every index of the first dimension.
          - This operation supports autograd.
          -
          -
          Parameters:
          -
          axes - The axes along which the sum should be calculated.
          -
          Returns:
          -
          A tensor which wraps the sum of all values of this tensor along the specified axes.
          -
          -
        • -
        - - - -
          -
        • -

          min

          -
          default Tsr<V> min()
          -
          Calculates the minimum of all values within this tensor and returns it in the form of a scalar tensor.
          - This operation supports autograd.
          -
          -
          Returns:
          -
          A scalar tensor which wraps the smallest of all values of this tensor.
          -
          -
        • -
        - - - -
          -
        • -

          max

          -
          default Tsr<V> max()
          -
          Calculates the maximum of all values within this tensor and returns it in the form of a scalar tensor.
          - This operation supports autograd.
          -
          -
          Returns:
          -
          A scalar tensor which wraps the largest of all values of this tensor.
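          - A small sketch of the scalar reductions described in this group of methods (sum, mean, min, max). The fluent tensor construction and the andFill(...) call are assumptions based on typical Neureka usage:
          -      var t = Tsr.of(Double.class).withShape(2, 3).andFill(1d, 2d, 3d, 4d, 5d, 6d);   // assumed creation API
          -      var total   = t.sum();    // scalar tensor wrapping 21.0
          -      var average = t.mean();   // scalar tensor wrapping  3.5
          -      var minimum = t.min();    // scalar tensor wrapping  1.0
          -      var maximum = t.max();    // scalar tensor wrapping  6.0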
          -
          -
        • -
        - - - -
          -
        • -

          convDot

          -
          default Tsr<V> convDot​(Tsr<V> other)
          -
          This method performs a convolutional based dot product between the last dimension of this tensor - and the first dimension of the passed tensor.
          -
          -
          Parameters:
          -
          other - The tensor which is the right part of the dot product operation.
          -
          Returns:
          -
          A new tensor which is the dot product of this tensor and the passed one.
          -
          -
        • -
        - - - -
          -
        • -

          dot

          -
          default Tsr<V> dot​(Tsr<V> other)
          -
          Performs a dot product between the last dimension of this tensor and the first dimension of the provided tensor. However, currently this method can only handle matrices, which means that it is functionally identical to the matMul(Tsr) method.
          -
          -
          Parameters:
          -
          other - The tensor which is the right part of the dot product operation.
          -
          Returns:
          -
          A new tensor which is the dot product of this tensor and the passed one.
          -
          -
        • -
        - - - -
          -
        • -

          matMul

          -
          default Tsr<V> matMul​(Tsr<V> other)
          -
          This will produce the matrix product of - two tensors with rank 2 (matrices), where the left operand is this Tsr - instance and the right operand is the argument passed to the method.
          -
          -
          Parameters:
          -
          other - The right operand of the matrix multiplication.
          -
          Returns:
          -
          The matrix product of this instance as the left and the passed Tsr instance as right operand.
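          - A minimal sketch of a rank-2 matrix product. The fluent tensor construction is an assumption based on typical Neureka usage:
          -      var a = Tsr.of(Double.class).withShape(2, 3).all(1);   // assumed creation API
          -      var b = Tsr.of(Double.class).withShape(3, 4).all(1);
          -      var c = a.matMul(b);   // shape (2, 4); every item is 3.0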
          -
          -
        • -
        - - - -
          -
        • -

          conv

          -
          default Tsr<V> conv​(Tsr<V> other)
          -
          This method performs convolution between this tensor and the one passed as argument. - The convolution is performed by the Function which is registered under the name "conv".
          -
          -
          Parameters:
          -
          other - The tensor which is the right operand of the convolutional operation.
          -
          Returns:
          -
          A new tensor which is the result of the convolutional operation.
          -
          -
        • -
        - - - -
          -
        • -

          dimtrim

          -
          default Tsr<V> dimtrim()
          -
          This creates a new tensor which shares the same underlying Data but whose shape is trimmed. A trimmed shape is simply a shape without preceding and trailing ones.
          - For example the shape (1x4x1x2x1) would be trimmed to (4x1x2). The underlying operation does not remove redundant ones altogether; only ones at the start and the end of the shape will be removed. A scalar tensor will not be affected by this operation.
          -
          -
          Returns:
          -
          A tensor with the same underlying data but possibly trimmed shape without preceding or trailing ones.
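          - A small sketch of the trimming behaviour described above. The fluent tensor construction is an assumption based on typical Neureka usage:
          -      var t       = Tsr.of(Double.class).withShape(1, 4, 1, 2, 1).all(0);   // assumed creation API
          -      var trimmed = t.dimtrim();   // shape (4, 1, 2): only the leading and trailing ones are removed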
          -
          -
        • -
        - - - -
          -
        • -

          isCase

          -
          boolean isCase​(Tsr<V> other)
          -
          This method name translates to the "in" keyword in Groovy! - The same is true for the "contains" method in Kotlin. - Both methods do the exact same thing, however they exist - for better language support.
          -
          -
          Parameters:
          -
          other - The tensor which will be checked.
          -
          Returns:
          -
          The answer to the following question: Is the data of the provided tensor a subset of the data of this tensor?
          -
          -
        • -
        - - - -
          -
        • -

          contains

          -
          default boolean contains​(Tsr<V> other)
          -
          This method name translates to the "in" keyword in Kotlin! - The same is true for the "isCase" method in Groovy. - Both methods do the exact same thing, however they exist - for better language support.
          -
          -
          Parameters:
          -
          other - The tensor which will be checked.
          -
          Returns:
          -
          The answer to the following question: Is the data of the provided tensor a subset of the data of this tensor?
          -
          -
        • -
        - - - -
          -
        • -

          multiply

          -
          default Tsr<V> multiply​(Tsr<V> other)
          -
          This method is synonymous with the times(Tsr) method. Both will produce the product of two tensors with the same rank (or two ranks which can be made compatible by padding them with ones), where the left operand is this Tsr instance and the right operand is the tensor passed to the method. If the shapes of both of the involved tensors are identical then the result will be a regular element-wise product. Otherwise, the method will also be able to perform broadcasting, however only if for every pair of shape dimensions the following is true: Either the dimensions have the same size or one of them has size 1.
          - Here is an example of 2 matching shapes: (1, 4, 1) and (3, 4, 1)
          - And here is an example of a mismatch: (2, 4, 1) and (3, 4, 1)
          -
          -
          Parameters:
          -
          other - The right operand of the multiplication.
          -
          Returns:
          -
          The product of this instance as the left and the passed Tsr instance as right operand.
          -
          -
        • -
        - - - - - -
          -
        • -

          multiply

          -
          default Tsr<V> multiply​(V other)
          -
          -
          Parameters:
          -
          other - The value which should be broadcast to all elements of a clone of this tensor.
          -
          Returns:
          -
          A new tensor where all elements are multiplied by the provided value.
          -
          -
        • -
        - - - -
          -
        • -

          times

          -
          default Tsr<V> times​(Tsr<V> other)
          -
          This is a functionally identical synonym to the multiply(Tsr) method. Both will produce the product of two tensors with the same rank (or two ranks which can be made compatible by padding them with ones), where the left operand is this Tsr instance and the right operand is the tensor passed to the method. If the shapes of both of the involved tensors are identical then the result will be a regular element-wise product. Otherwise, the method will also be able to perform broadcasting, however only if for every pair of shape dimensions the following is true: Either the dimensions have the same size or one of them has size 1.
          - Here is an example of 2 matching shapes: (1, 4, 1) and (3, 4, 1)
          - And here is an example of a mismatch: (2, 4, 1) and (3, 4, 1)
          -
          -
          Parameters:
          -
          other - The right operand of the multiplication.
          -
          Returns:
          -
          The product of this instance as the left and the passed Tsr instance as right operand.
          -
          -
        • -
        - - - - - -
          -
        • -

          times

          -
          default Tsr<V> times​(V other)
          -
          -
          Parameters:
          -
          other - The value which should be broadcast to all elements of a clone of this tensor.
          -
          Returns:
          -
          A new tensor where all elements are multiplied by the provided value.
          -
          -
        • -
        - - - -
          -
        • -

          multiply

          -
          default Tsr<V> multiply​(double value)
          -
          -
          Parameters:
          -
          value - The value which should be broadcast to all elements of a clone of this tensor.
          -
          Returns:
          -
          A new tensor where all elements are multiplied by the provided value.
          -
          -
        • -
        - - - -
          -
        • -

          div

          -
          default Tsr<V> div​(Tsr<V> other)
          -
          This method will produce the quotient of - two tensors with the same rank (or two ranks which can be made compatible with padding ones), - where the left operand is this Tsr - instance and the right operand is the tensor passed to the method. - If the shapes of both of the involved tensors are identical then - the result will be a regular element-wise division. - Otherwise, the method will also be able to perform broadcasting, however only if - for every pair of shape dimensions the following is true: - Either the dimensions have the same size or one of them has size 1.
          - Here is an example of 2 matching shapes: (1, 4, 1) and (3, 4, 1)
          - And here is an example of a mismatch: (2, 4, 1) and (3, 4, 1)
          -
          -
          Parameters:
          -
          other - The right operand of the division.
          -
          Returns:
          -
          The quotient of this instance as the left and the passed Tsr instance as right operand.
          -
          -
        • -
        - - - - - -
          -
        • -

          div

          -
          default Tsr<V> div​(V value)
          -
        • -
        - - - -
          -
        • -

          mod

          -
          default Tsr<V> mod​(Tsr<V> other)
          -
          Produces the modulus of - two tensors with the same rank (or two ranks which can be made compatible with padding ones), - where the left operand is this Tsr - instance and the right operand is the tensor passed to the method. - If the shapes of these 2 tensors are identical then - the result will be a regular element-wise modulo operation. - Otherwise, the method will also be able to perform broadcasting, however only if - for every pair of shape dimensions the following is true: - Either the dimensions have the same size or one of them has size 1.
          - Here is an example of 2 matching shapes: (1, 4, 1) and (3, 4, 1)
          - And here is an example of a mismatch: (2, 4, 1) and (3, 4, 1)
          -
          -
          Parameters:
          -
          other - The right operand of the modulo operation.
          -
          Returns:
          -
          The modulus of this instance as the left and the passed Tsr instance as right operand.
          -
          -
        • -
        - - - -
          -
        • -

          mod

          -
          default Tsr<V> mod​(int other)
          -
          -
          Parameters:
          -
          other - The value which should be broadcast to all elements of a clone of this tensor.
          -
          Returns:
          -
          A new tensor where the modulo operation is applied to all - elements using the provided int as right operand.
          -
          -
        • -
        - - - -
          -
        • -

          rem

          -
          default Tsr<V> rem​(int other)
          -
          This method is synonymous to the mod(int) method.
          -
        • -
        - - - -
          -
        • -

          power

          -
          default Tsr<V> power​(Tsr<V> other)
          -
          This will produce the power of - two tensors with the same rank (or two ranks which can be made compatible with padding ones), - where the left operand is this Tsr - instance and the right operand is the tensor passed to the method. - If the shapes of the involved tensors are identical then - the result will be a regular element-wise exponentiation. - Otherwise, the method will also be able to perform broadcasting, however only if - for every pair of shape dimensions the following is true: - Either the dimensions have the same size or one of them has size 1.
          - Here is an example of 2 matching shapes: (1, 4, 1) and (3, 4, 1)
          - And here is an example of a mismatch: (2, 4, 1) and (3, 4, 1)
          -
          -
          Parameters:
          -
          other - The right operand, also known as exponent, of the exponentiation.
          -
          Returns:
          -
          The power of this instance as the left and the passed Tsr instance as right operand.
          -
          -
        • -
        - - - - - -
          -
        • -

          power

          -
          default Tsr<V> power​(V value)
          -
          Raises all items of this tensor to the power of the provided value. - The returned tensor is a new instance which will have the same shape as this tensor.
          -
          -
          Parameters:
          -
          value - The exponent to which all items of this tensor should be raised.
          -
          Returns:
          -
          A new tensor where all items are raised to the power of the provided value.
          -
          -
        • -
        - - - -
          -
        • -

          xor

          -
          default Tsr<V> xor​(Tsr<V> other)
          -
          This method is a functionally identical synonym to the power(Tsr) method.
          -
        • -
        - - - -
          -
        • -

          xor

          -
          default Tsr<V> xor​(double value)
          -
          This method is a functionally identical synonym to the power(Tsr) method.
          -
        • -
        - - - -
          -
        • -

          sig

          -
          default Tsr<V> sig()
          -
          This method is functionally identical to the following alternatives: -
          
          -      // Pre-instantiated:
          -      var out1 = Neureka.get().backend().getAutogradFunction().sigmoid().call( myTensor );
          -      // Dynamically parsed and instantiated:
          -      var out2 = Function.of("sig(I[0])").call(myTensor);
          -  
          -
          -
          Returns:
          -
          A new tensor whose items are the result of the sigmoid function applied to the items of this tensor.
          -
          -
        • -
        - - - -
          -
        • -

          tanh

          -
          default Tsr<V> tanh()
          -
          This method is functionally identical to the following alternatives: -
          
          -      // Pre-instantiated:
          -      var out1 = Neureka.get().backend().getAutogradFunction().tanh().call( myTensor );
          -      // Dynamically parsed and instantiated:
          -      var out2 = Function.of("tanh(I[0])").call(myTensor);
          -  
          -
          -
          Returns:
          -
          A new tensor whose items are the result of the tanh function applied to the items of this tensor.
          -
          -
        • -
        - - - -
          -
        • -

          relu

          -
          default Tsr<V> relu()
          -
          This method is functionally identical to the following alternatives: -
          
          -      // Pre-instantiated:
          -      var out1 = Neureka.get().backend().getAutogradFunction().relu().call( myTensor );
          -      // Dynamically parsed and instantiated:
          -      var out2 = Function.of("relu(I[0])").call(myTensor);
          -  
          -
          -
          Returns:
          -
          A new tensor whose items are the result of the relu function applied to the items of this tensor.
          -
          -
        • -
        - - - -
          -
        • -

          sin

          -
          default Tsr<V> sin()
          -
          This method is functionally identical to the following alternatives: -
          
          -      // Pre-instantiated:
          -      var out1 = Neureka.get().backend().getAutogradFunction().sin().call( myTensor );
          -      // Dynamically parsed and instantiated:
          -      var out2 = Function.of("sin(I[0])").call(myTensor);
          -  
          -
          -
          Returns:
          -
          A new tensor whose items are the result of the sin function applied to the items of this tensor.
          -
          -
        • -
        - - - -
          -
        • -

          cos

          -
          default Tsr<V> cos()
          -
          This method is functionally identical to the following alternatives: -
          
          -      // Pre-instantiated:
          -      var out1 = Neureka.get().backend().getAutogradFunction().cos().call( myTensor );
          -      // Dynamically parsed and instantiated:
          -      var out2 = Function.of("cos(I[0])").call(myTensor);
          -  
          -
          -
          Returns:
          -
          A new tensor whose items are the result of the cos function applied to the items of this tensor.
          -
          -
        • -
        - - - -
          -
        • -

          ln

          -
          default Tsr<V> ln()
          -
          This method is functionally identical to the following alternatives: -
          
          -      // Pre-instantiated:
          -      var out1 = Neureka.get().backend().getAutogradFunction().ln().call( myTensor );
          -      // Dynamically parsed and instantiated:
          -      var out2 = Function.of("ln(I[0])").call(myTensor);
          -  
          -
          -
          Returns:
          -
          A new tensor whose items are the result of the ln function applied to the items of this tensor.
          -
          -
        • -
        - - - -
          -
        • -

          softplus

          -
          default Tsr<V> softplus()
          -
          This method is functionally identical to the following alternatives: -
          
          -      // Pre-instantiated:
          -      var out1 = Neureka.get().backend().getAutogradFunction().softplus().call( myTensor );
          -      // Dynamically parsed and instantiated:
          -      var out2 = Function.of("softplus(I[0])").call(myTensor);
          -  
          -
          -
          Returns:
          -
          A new tensor whose items are the result of the softplus function applied to the items of this tensor.
          -
          -
        • -
        - - - -
          -
        • -

          exp

          -
          default Tsr<V> exp()
          -
          This method is functionally identical to the following alternatives: -
          
          -      // Pre-instantiated:
          -      var out1 = Neureka.get().backend().getAutogradFunction().exp().call( myTensor );
          -      // Dynamically parsed and instantiated:
          -      var out2 = Function.of("exp(I[0])").call(myTensor);
          -  
          -
          -
          Returns:
          -
          A new tensor whose items are the result of the exp function applied to the items of this tensor.
          -
          -
        • -
        - - - -
          -
        • -

          sqrt

          -
          default Tsr<V> sqrt()
          -
          This method is functionally identical to the following alternatives: -
          
          -      // Pre-instantiated:
          -      var out1 = Neureka.get().backend().getAutogradFunction().sqrt().call( myTensor );
          -      // Dynamically parsed and instantiated:
          -      var out2 = Function.of("sqrt(I[0])").call(myTensor);
          -  
          -
          -
          Returns:
          -
          A new tensor whose items are the result of the sqrt function applied to the items of this tensor.
          -
          -
        • -
        - - - -
          -
        • -

          log10

          -
          default Tsr<V> log10()
          -
          This method is functionally identical to the following alternatives: -
          
          -      // Pre-instantiated:
          -      var out1 = Neureka.get().backend().getAutogradFunction().log10().call( myTensor );
          -      // Dynamically parsed and instantiated:
          -      var out2 = Function.of("log10(I[0])").call(myTensor);
          -  
          -
          -
          Returns:
          -
          A new tensor whose items are the result of the log10 function applied to the items of this tensor.
          -
          -
        • -
        - - - -
          -
        • -

          cbrt

          -
          default Tsr<V> cbrt()
          -
          This method is functionally identical to the following alternatives: -
          
          -      // Pre-instantiated:
          -      var out1 = Neureka.get().backend().getAutogradFunction().cbrt().call( myTensor );
          -      // Dynamically parsed and instantiated:
          -      var out2 = Function.of("cbrt(I[0])").call(myTensor);
          -  
          -
          -
          Returns:
          -
          A new tensor whose items are the result of the cbrt function applied to the items of this tensor.
          -
          -
        • -
        - - - -
          -
        • -

          abs

          -
          default Tsr<V> abs()
          -
          This method is functionally identical to the following alternatives: -
          
          -      // Pre-instantiated:
          -      var out1 = Neureka.get().backend().getAutogradFunction().abs().call( myTensor );
          -      // Dynamically parsed and instantiated:
          -      var out2 = Function.of("abs(I[0])").call(myTensor);
          -  
          -
          -
          Returns:
          -
          A new tensor whose items are the result of the abs function applied to the items of this tensor.
          -
          -
        • -
        - - - -
          -
        • -

          neg

          -
          default Tsr<V> neg()
          -
          This method is functionally identical to the following alternatives: -
          
          -      // Pre-instantiated:
          -      var out1 = Neureka.get().backend().getAutogradFunction().neg().call( myTensor );
          -      // Dynamically parsed and instantiated:
          -      var out2 = Function.of("neg(I[0])").call(myTensor);
          -  
          -
          -
          Returns:
          -
          A new tensor whose items are the result of the neg function applied to the items of this tensor.
          -
          -
        • -
        - - - -
          -
        • -

          softmax

          -
          default Tsr<V> softmax()
          -
          -
          Returns:
          -
          A new tensor whose items are the result of the softmax function applied to the items of this tensor.
          -
          -
        • -
        - - - -
          -
        • -

          softmax

          -
          default Tsr<V> softmax​(int axis)
          -
          -
          Returns:
          -
          A new tensor whose items are the result of the softmax function applied to the items of this tensor.
          -
          -
        • -
        - - - -
          -
        • -

          softmax

          -
          default Tsr<V> softmax​(int... axes)
          -
          Calculates the softmax function along the specified axes.
          - For example, if this tensor has a shape of (2, 3, 4) and the axes 0 and 2 are chosen, then the result will be a tensor of the same shape where all elements summed up along axes 0 and 2 add up to 1. So calling sum(0, 2) on the result would in this example yield a tensor with a shape of (1, 3, 1) where every item is 1.
          - This operation supports autograd.
          -
          -
          Parameters:
          -
          axes - The axes along which the softmax function should be applied.
          -
          Returns:
          -
          A new tensor whose items are the result of the softmax function applied to the items of this tensor.
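          - A small sketch of the multi-axis softmax described above. The fluent tensor construction is an assumption based on typical Neureka usage:
          -      var t = Tsr.of(Double.class).withShape(2, 3, 4).all(1);   // assumed creation API
          -      var s = t.softmax(0, 2);   // same shape (2, 3, 4); every item is 0.125, so summing over axes 0 and 2 yields 1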
          -
          -
        • -
        - - - -
          -
        • -

          sigmoid

          -
          default Tsr<V> sigmoid()
          -
          -
          Returns:
          -
          A new tensor whose items are the result of the sigmoid function applied to the items of this tensor.
          -
          -
        • -
        - - - -
          -
        • -

          slice

          -
          AxisOrGetTsr<V> slice()
          -
          This method returns a SliceBuilder instance exposing a simple builder API - which enables the configuration of a slice of the current nd-array via method chaining.
          - The following code snippet slices a 3-dimensional nd-array into a nd-array of shape (2x1x3)
          -
          
          -  myArray.slice()
          -          .axis(0).from(0).to(1)
          -          .axis(1).at(5) // equivalent to '.from(5).to(5)'
          -          .axis().from(0).to(2)
          -          .get();
          - 
          -
          -
          Specified by:
          -
          slice in interface Nda<V>
          -
          Returns:
          -
          An instance of the SliceBuilder class exposing a readable builder API for creating slices.
          -
          -
        • -
        - - - -
          -
        • -

          concatAt

          -
          default Tsr<V> concatAt​(int axis,
          -                        Nda<V> other,
          -                        Nda<V>... ndArrays)
          -
          This method concatenates the provided nd-arrays together with this nd-array along a specified axis. - The provided nd-arrays must have the same shape and data type as the current nd-array, except for the specified axis.
          -
          -
          Specified by:
          -
          concatAt in interface Nda<V>
          -
          Parameters:
          -
          axis - The axis along which the provided nd-arrays should be concatenated. - The axis must be within the range of the rank of the current nd-array.
          -
          other - The other nd-array which should be concatenated with this nd-array.
          -
          ndArrays - The non-null, non-empty nd-arrays which should be concatenated together with this and the other nd-array. - The nd-arrays all must have the same shape as this nd-array, except for the specified axis. - Also, it must have the same data type as the current nd-array.
          -
          Returns:
          -
          A new nd-array which is the concatenation of the current nd-array and the provided nd-arrays.
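          - A small sketch of concatenation along an axis, using the varargs form documented here. The fluent tensor construction is an assumption based on typical Neureka usage:
          -      var a = Tsr.of(Double.class).withShape(2, 3).all(1);   // assumed creation API
          -      var b = Tsr.of(Double.class).withShape(2, 5).all(2);
          -      var c = Tsr.of(Double.class).withShape(2, 1).all(3);
          -      var d = a.concatAt(1, b, c);   // shape (2, 9): all shapes agree except along axis 1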
          -
          -
        • -
        - - - -
          -
        • -

          concatAt

          -
          default Tsr<V> concatAt​(int axis,
          -                        Nda<V> other)
          -
          This method concatenates the provided nd-array together with this nd-array along a specified axis. - The provided nd-array must have the same shape and data type as this nd-array, except for the specified axis.
          -
          -
          Specified by:
          -
          concatAt in interface Nda<V>
          -
          Parameters:
          -
          axis - The axis along which the provided nd-array should be concatenated. - The axis must be within the range of the rank of the current nd-array.
          -
          other - The other nd-array which should be concatenated with this nd-array.
          -
          Returns:
          -
          A new nd-array which is the concatenation of the current nd-array and the provided nd-array.
          -
          -
        • -
        - - - -
          -
        • -

          getAt

          -
          Tsr<V> getAt​(int... indices)
          -
          The following method enables access to specific scalar elements within the nd-array. - The method name also translates to the subscript operator in Groovy.
          -
          -
          Specified by:
          -
          getAt in interface Nda<V>
          -
          Parameters:
          -
          indices - The index array of the element which should be returned.
          -
          Returns:
          -
          An element located at the provided index.
          -
          -
        • -
        - - - -
          -
        • -

          getAt

          -
          default Tsr<V> getAt​(java.lang.Number i)
          -
          This getter method creates and returns a slice of the original nd-array. - The returned slice is a scalar nd-array wrapping a single value element which - is being targeted by the provided integer index.
          -
          -
          Specified by:
          -
          getAt in interface Nda<V>
          -
          Parameters:
          -
          i - The index of the value item which should be returned as a nd-array instance.
          -
          Returns:
          -
          A nd-array holding a single value element which is internally still residing in the original nd-array.
          -
          -
        • -
        - - - -
          -
        • -

          get

          -
          default Tsr<V> get​(int... indices)
          -
          The following method enables access to specific scalar elements within the nd-array. - The method name also translates to the subscript operator in Groovy.
          -
          -
          Specified by:
          -
          get in interface Nda<V>
          -
          Parameters:
          -
          indices - The index array of the element which should be returned.
          -
          Returns:
          -
          An element located at the provided index.
          -
          -
        • -
        - - - -
          -
        • -

          getAt

          -
          default Tsr<V> getAt​(java.lang.Object... args)
          -
          The following method enables the creation of nd-array slices which access - the same underlying data (possibly from a different view). - The method name also translates to the subscript operator in Groovy.
          -
          -
          Specified by:
          -
          getAt in interface Nda<V>
          -
          Parameters:
          -
          args - An arbitrary number of arguments which can be used for slicing.
          -
          Returns:
          -
          A slice nd-array created based on the passed keys.
          -
          -
        • -
        - - - -
          -
        • -

          get

          -
          default Tsr<V> get​(java.lang.Object... args)
          -
          The following method enables the creation of nd-array slices which access - the same underlying data (possibly from a different view). - The method name also translates to the subscript operator in Groovy.
          -
          -
          Specified by:
          -
          get in interface Nda<V>
          -
          Parameters:
          -
          args - An arbitrary number of arguments which can be used for slicing.
          -
          Returns:
          -
          A slice nd-array created based on the passed keys.
          -
          -
        • -
        - - - -
          -
        • -

          getAt

          -
          default Tsr<V> getAt​(int i)
          -
          This getter method creates and returns a slice of the original nd-array. - The returned slice is a scalar nd-array wrapping a single value element which - is being targeted by the provided integer index.
          -
          -
          Specified by:
          -
          getAt in interface Nda<V>
          -
          Parameters:
          -
          i - The index of the value item which should be returned as a nd-array instance.
          -
          Returns:
          -
          A nd-array holding a single value element which is internally still residing in the original nd-array.
          -
          -
        • -
        - - - -
          -
        • -

          get

          -
          default Tsr<V> get​(int i)
          -
          This getter method creates and returns a slice of the original nd-array. - The returned slice is a scalar nd-array wrapping a single value element which - is being targeted by the provided integer index.
          -
          -
          Specified by:
          -
          get in interface Nda<V>
          -
          Parameters:
          -
          i - The index of the value item which should be returned as a nd-array instance.
          -
          Returns:
          -
          A nd-array holding a single value element which is internally still residing in the original nd-array.
          -
          -
        • -
        - - - -
          -
        • -

          get

          -
          default Tsr<V> get​(java.lang.Number i)
          -
          This getter method creates and returns a slice of the original nd-array. - The returned slice is a scalar nd-array wrapping a single value element which - is being targeted by the provided integer index.
          -
          -
          Specified by:
          -
          get in interface Nda<V>
          -
          Parameters:
          -
          i - The index of the value item which should be returned as a nd-array instance.
          -
          Returns:
          -
          A nd-array holding a single value element which is internally still residing in the original nd-array.
          -
          -
        • -
        - - - -
          -
        • -

          get

          -
          default Tsr<V> get​(java.lang.Object key)
          -
          This method enables nd-array slicing! - It takes a key of various types and configures a slice - nd-array which shares the same underlying data as the original nd-array.
          -
          -
          Specified by:
          -
          get in interface Nda<V>
          -
          Parameters:
          -
          key - This object might be a wide range of objects including maps, lists or arrays...
          -
          Returns:
          -
          A slice nd-array or scalar value.
          -
          -
        • -
        - - - -
          -
        • -

          getAt

          -
          Tsr<V> getAt​(java.util.Map<?,​java.lang.Integer> rangToSteps)
          -
          This method is most useful when used in Groovy - where defining maps is done through square brackets, - making it possible to slice nd-arrays like so:
          -
          
          -      var b = a[[[0..0]:1, [0..0]:1, [0..3]:2]]
          -  
          - Here a single argument with the format '[i..j]:k' is equivalent to Python's 'i:j:k' syntax for indexing! (as in numpy)
          - i... start indexAlias.
          - j... end indexAlias. (inclusive!)
          - k... step size.
          -
          -
          Specified by:
          -
          getAt in interface Nda<V>
          -
          Parameters:
          -
          rangToSteps - A map where the keys define where axes should be sliced and values which define the steps for the specific axis.
          -
          Returns:
          -
          A nd-array slice with an offset based on the provided map keys and - steps based on the provided map values.
          -
          -
        • -
        - - - -
          -
        • -

          getAt

          -
          Tsr<V> getAt​(java.util.List<?> key)
          -
          This method enables nd-array slicing! - It takes a key of various types and configures a slice - nd-array which shares the same underlying data as the original nd-array.
          -
          -
          Specified by:
          -
          getAt in interface Nda<V>
          -
          Parameters:
          -
          key - This object might be a wide range of objects including maps, lists or arrays...
          -
          Returns:
          -
          A slice nd-array or scalar value.
          -
          -
        • -
        - - - -
          -
        • -

          mapTo

          -
          default <T> Tsr<T> mapTo​(java.lang.Class<T> typeClass,
          -                         java.util.function.Function<V,​T> mapper)
          -

          - This is a convenience method for mapping a nd-array to a nd-array of a new type based on a provided target item type and mapping lambda. Here is a simple example: -

          -
          
          -     Nda<String>  a = Nda.of(String.class).vector("1", "2", "3");
          -     Nda<Integer> b = a.mapTo(Integer.class, s -> Integer.parseInt(s));
          - 
          -

          - Note:
          - The provided lambda cannot be executed anywhere else but the CPU. This is a problem if this nd-array lives somewhere other than the JVM. Therefore, this method will temporarily transfer this nd-array from wherever it may reside back to the JVM, execute the mapping lambda, and then transfer the result back to the original location. -

          -
          -
          Specified by:
          -
          mapTo in interface Nda<V>
          -
          Type Parameters:
          -
          T - The type parameter of the items of the returned nd-array.
          -
          Parameters:
          -
          typeClass - The class of the item type to which the items of this nd-array should be mapped.
          -
          mapper - The lambda which maps the items of this nd-array to a new one.
          -
          Returns:
          -
          A new nd-array of type T.
          -
          -
        • -
        - - - -
          -
        • -

          map

          -
          default Tsr<V> map​(java.util.function.Function<V,​V> mapper)
          -

          - This method is a convenience method for mapping the items of this nd-array to another - nd-array of the same type based on the provided lambda function, which will be applied - to all items of this nd-array individually (element-wise). -

          - Here is a simple example: -
          
          -  Nda<String> a = Nda.of(String.class).vector("1", "2", "3");
          -  Nda<String> b = a.map( s -> s + "!" );
          -  
          - Note:
          - The provided lambda cannot be executed anywhere else but the CPU. This is a problem if this nd-array lives somewhere other than the JVM. Therefore, this method will temporarily transfer this nd-array from wherever it may reside back to the JVM, execute the mapping lambda, and then transfer the result back to the original location.
          -
          -
          Specified by:
          -
          map in interface Nda<V>
          -
          Parameters:
          -
          mapper - The lambda which maps the items of this nd-array to a new one.
          -
          Returns:
          -
          A new nd-array of type V.
          -
          -
        • -
        - - - -
          -
        • -

          asImage

          -
          java.awt.image.BufferedImage asImage​(Tsr.ImageType type)
          -
          Turns this tensor into a BufferedImage based on the provided - Tsr.ImageType formatting choice.
          -
          -
          Parameters:
          -
          type - The type of format used to create the buffered image.
          -
          Returns:
          -
          A BufferedImage populated with the contents of this tensor.
          -
          -
        • -
        - - - -
          -
        • -

          asType

          -
          <T> T asType​(java.lang.Class<T> typeClass)
          -
          -
          Type Parameters:
          -
          T - The type parameter of the type that will be returned.
          -
          Parameters:
          -
          typeClass - The class which is the target of the type conversion.
          -
          Returns:
          -
          An instance of the supplied type class.
          -
          -
        • -
        - - - -
          -
        • -

          toString

          -
          default java.lang.String toString​(java.lang.String conf)
          -
        • -
        - - - -
          -
        • -

          toString

          -
          default java.lang.String toString​(NDPrintSettings config)
          -
          Use this to turn this nd-array into a String instance based on the provided - NDPrintSettings instance, which allows you to configure things - like the number of chars per entry, delimiters, the number of items per line, etc.
          -
          -
          Specified by:
          -
          toString in interface Nda<V>
          -
          -
        • -
        - - - -
          -
        • -

          toString

          -
          default java.lang.String toString​(java.util.function.Consumer<NDPrintSettings> configurator)
          -
This allows you to provide a lambda which configures how this nd-array should be converted to String instances. The provided Consumer will receive a NDPrintSettings instance which allows you to change various settings with the help of method chaining.
Here is an example:

      t.toString(it ->
          it.setHasSlimNumbers(false)
            .setIsScientific(true)
            .setIsCellBound(true)
            .setIsMultiline(true)
            .setCellSize(15)
         );
          -
          -
          Specified by:
          -
          toString in interface Nda<V>
          -
          Parameters:
          -
          configurator - A consumer of the NDPrintSettings ready to be configured.
          -
          Returns:
          -
          The String representation of this nd-array.
          -
          -
        • -
        - - - -
          -
        • -

          deepCopy

          -
          Tsr<V> deepCopy()
          -
This method creates and returns a new nd-array instance which is not only a copy of the configuration of this nd-array but also a copy of the underlying data array.
(Note: the underlying nd-array will not be attached to any kind of computation graph)
          -
          -
          Specified by:
          -
          deepCopy in interface Nda<V>
          -
          Returns:
          -
          A new nd-array instance which is a deep copy of this nd-array.
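A small sketch contrasting this with shallowCopy(); the tensor creation call is an assumed factory invocation used only for illustration:

  Tsr<Double> a = Tsr.of(Double.class).vector(1d, 2d, 3d);  // assumed factory call
  Tsr<Double> deep    = a.deepCopy();    // configuration AND data are duplicated
  Tsr<Double> shallow = a.shallowCopy(); // configuration is duplicated, data is shared with 'a'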
          -
          -
        • -
        - - - -
          -
        • -

          shallowCopy

          -
          default Tsr<V> shallowCopy()
          -
          This creates a copy where the underlying data is still the same.
(Note: the underlying nd-array will not be attached to any kind of computation graph)
          -
          -
          Specified by:
          -
          shallowCopy in interface Nda<V>
          -
          Returns:
          -
          A shallow copy where the underlying data is shared with this nd-array.
          -
          -
        • -
        - - - -
          -
        • -

          deepClone

          -
          Tsr<V> deepClone()
          -
This is almost identical to the deepCopy() method except that the returned tensor will have autograd support, meaning that the cloning will be part of the autograd computation graph, and backpropagation will traverse the cloned tensor as well.
          -
          -
          Returns:
          -
          A deep clone of this tensor with autograd support.
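For comparison, a hedged sketch of when one might prefer deepClone() over deepCopy(); the variable 'x' is assumed to be a tensor that requires gradients and is already part of a computation graph:

  Tsr<Double> cloned = x.deepClone(); // stays connected to the autograd graph
  Tsr<Double> copied = x.deepCopy();  // detached: backpropagation will not traverse it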
          -
          -
        • -
        - - - -
          -
        • -

          shallowClone

          -
          default Tsr<V> shallowClone()
          -
          -
          Returns:
          -
          A shallow copy of this tensor with autograd support.
          -
          -
        • -
        -
      • -
      -
      -
    • -
    -
    -
    -
    - -
    - -
    - - diff --git a/docs/jdocs/neureka/autograd/ADAction.html b/docs/jdocs/neureka/autograd/ADAction.html index 1016fa819..f878e9104 100644 --- a/docs/jdocs/neureka/autograd/ADAction.html +++ b/docs/jdocs/neureka/autograd/ADAction.html @@ -1,202 +1,312 @@ - + + - -ADAction (neureka 1.0.0 API) - - - - + +ADAction (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Interface ADAction

    +
    neureka.autograd
    +

    Interface ADAction

    -
    -
    +
    +
    +
      +
    • +
      Functional Interface:
      This is a functional interface and can therefore be used as the assignment target for a lambda expression or method reference.

      -
      @FunctionalInterface -public interface ADAction
      +
      +
      @FunctionalInterface
      +public interface ADAction
This interface is the declaration for lambda actions used by the act(ADTarget) method of the ADAction interface.

Implementations of this perform auto-differentiation forwards or backwards along the computation graph. These differentiation actions are performed through the "act(ADTarget)" method, which is called by instances of the GraphNode class during propagation. An ADAction may also wrap and expose a partial derivative which may or may not be present for certain operations. Said derivative must be tracked and flagged as a derivative by a GraphNode to make sure that it will not be deleted after a forward pass.

Note: Do not access the GraphNode.getPayload() of the GraphNode passed to implementations of this. The payload is weakly referenced, meaning that this method can return null!
      -
    -
    -
      + +
    +
    +
    +
    +
    +
      +
    • -
    • -
      -

      Method Details

      -
        -
      • -
        -

        of

        -
        static ADAction of(ADAction action)
        -
        +
          +
        • + + +

          Method Detail

          + + + + + + + + + + + +
            +
          • +

            act

            +
            Tensor<?> act(ADTarget<?> target)
The auto-differentiation forward or backward pass of an ADAction propagates partial differentiations along the computation graph.
            -
            -
            Parameters:
            -
            target - A wrapper for the GraphNode at which the differentiation ought to +
            +
            Parameters:
            +
            target - A wrapper for the GraphNode at which the differentiation ought to be performed and error which ought to be used for the forward or backward differentiation.
            -
            Returns:
            +
            Returns:
            The result of a forward or backward mode auto differentiation.
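Since ADAction is a functional interface, act(ADTarget) is typically supplied as a lambda. A minimal sketch based only on the methods shown on this page; what the action should really compute depends on the operation being differentiated, and 'someTarget' is a placeholder:

  // Illustrative only: simply passes the incoming error through unchanged.
  ADAction identityAction = target -> target.error();
  Tensor<?> result = identityAction.act( someTarget ); // as a GraphNode would call it during propagation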
            -
    • -
    • -
      -

      findCaptured

      -
      default Tensor<?>[] findCaptured()
      -
      Finds captured Tensor instances in this current action +
    + + + +
      +
    • +

      findCaptured

      +
      default Tensor<?>[] findCaptured()
      +
      Finds captured Tensor instances in this current action using reflection (This is usually a partial derivative).
      -
      -
      Returns:
      -
      The captured Tensor instances.
      +
      +
      Returns:
      +
      The captured Tensor instances.
      -
    • -
    • -
      -

      partialDerivative

      -
      default Optional<Tensor<?>> partialDerivative()
      -
      +
    + + + +
      +
    • +

      partialDerivative

      +
      default java.util.Optional<Tensor<?>> partialDerivative()
      +
    • +
    - - +
    +
    - + + + + diff --git a/docs/jdocs/neureka/autograd/ADTarget.html b/docs/jdocs/neureka/autograd/ADTarget.html index 682836e19..f6187b326 100644 --- a/docs/jdocs/neureka/autograd/ADTarget.html +++ b/docs/jdocs/neureka/autograd/ADTarget.html @@ -1,165 +1,276 @@ - + + - -ADTarget (neureka 1.0.0 API) - - - - + +ADTarget (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class ADTarget<V>

    +
    neureka.autograd
    +

    Class ADTarget<V>

    -
    java.lang.Object -
    neureka.autograd.ADTarget<V>
    -
    -
    -
    -
    Type Parameters:
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.autograd.ADTarget<V>
      • +
      +
    • +
    +
    +
      +
    • +
      +
      Type Parameters:
      V - The data type of the tensor.

      -
      public final class ADTarget<V> -extends Object
      +
      +
      public final class ADTarget<V>
      +extends java.lang.Object
This is simply a wrapper for useful information needed by implementations of the ADAction interface to perform error propagation. The class exposes the targeted index and graph node of the input towards which a provided error should be propagated.
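A hedged sketch of how an ADAction implementation might read the information exposed by this wrapper; the propagation math itself is operation specific and is not shown here:

  ADAction action = target -> {
      int index         = target.inputIndex(); // which input the error targets
      GraphNode<?> node = target.node();       // the graph node to propagate towards
      return target.error();                   // illustrative: forward the error unchanged
  };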
      -
    -
    -
      - -
    • -
      -

      Method Summary

      -
      -
      -
      -
      -
      Modifier and Type
      -
      Method
      -
      Description
      - - -
       
      -
      int
      - -
       
      - - -
       
      -
      -
      +
    • +
    -
    -

    Methods inherited from class java.lang.Object

    -clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
    - +
    +
    +
    +
      +
    • -
    • -
      -

      Method Details

      -
        -
      • -
        -

        inputIndex

        -
        public int inputIndex()
        -
        -
        Returns:
        +
          +
        • + + +

          Method Detail

          + + + +
            +
          • +

            inputIndex

            +
            public int inputIndex()
            +
            +
            Returns:
            The index of the input targeted for propagation.
            -
      • -
      • -
        -

        node

        -
        public GraphNode<V> node()
        -
        -
        Returns:
        +
      + + + +
        +
      • +

        node

        +
        public GraphNode<V> node()
        +
        +
        Returns:
        The targeted graph node of the tensor towards the error should be propagated.
        -
    • -
    • -
      -

      error

      -
      public Tensor<V> error()
      -
      +
    + + + +
      +
    • +

      error

      +
      public Tensor<V> error()
      +
    • +
    - - +
    +
    - + + + + diff --git a/docs/jdocs/neureka/autograd/GraphNode.Print.html b/docs/jdocs/neureka/autograd/GraphNode.Print.html index 84740d6b4..527d7ea12 100644 --- a/docs/jdocs/neureka/autograd/GraphNode.Print.html +++ b/docs/jdocs/neureka/autograd/GraphNode.Print.html @@ -1,229 +1,354 @@ - + + - -GraphNode.Print (neureka 1.0.0 API) - - - - + +GraphNode.Print (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Enum Class GraphNode.Print

    -
    -
    java.lang.Object -
    java.lang.Enum<GraphNode.Print> -
    neureka.autograd.GraphNode.Print
    -
    +
    neureka.autograd
    +

    Enum GraphNode.Print

    -
    -
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • java.lang.Enum<GraphNode.Print>
      • +
      • +
          +
        • neureka.autograd.GraphNode.Print
        • +
        +
      • +
      +
    • +
    +
    +
    -
    -
      - -
    • -
      -

      Nested Class Summary

      -
      -

      Nested classes/interfaces inherited from class java.lang.Enum

      -Enum.EnumDesc<E extends Enum<E>>
      -
      +
      +
      public static enum GraphNode.Print
      +extends java.lang.Enum<GraphNode.Print>
    • - -
    • -
      -

      Enum Constant Summary

      -
      Enum Constants
      -
      -
      Enum Constant
      -
      Description
      - -
       
      - -
       
      - -
       
      +
    - +
    + - -
    -
      +
    +
    + -
  • -
    -

    Method Details

    -
      -
    • -
      -

      values

      -
      public static GraphNode.Print[] values()
      -
      Returns an array containing the constants of this enum class, in -the order they are declared.
      -
      -
      Returns:
      -
      an array containing the constants of this enum class, in the order they are declared
      +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          values

          +
          public static GraphNode.Print[] values()
          +
          Returns an array containing the constants of this enum type, in +the order they are declared. This method may be used to iterate +over the constants as follows: +
          +for (GraphNode.Print c : GraphNode.Print.values())
          +    System.out.println(c);
          +
          +
          +
          Returns:
          +
          an array containing the constants of this enum type, in the order they are declared
          -
    • -
    • -
      -

      valueOf

      -
      public static GraphNode.Print valueOf(String name)
      -
      Returns the enum constant of this class with the specified name. +
    + + + +
      +
    • +

      valueOf

      +
      public static GraphNode.Print valueOf(java.lang.String name)
      +
      Returns the enum constant of this type with the specified name. The string must match exactly an identifier used to declare an -enum constant in this class. (Extraneous whitespace characters are +enum constant in this type. (Extraneous whitespace characters are not permitted.)
      -
      -
      Parameters:
      +
      +
      Parameters:
      name - the name of the enum constant to be returned.
      -
      Returns:
      +
      Returns:
      the enum constant with the specified name
      -
      Throws:
      -
      IllegalArgumentException - if this enum class has no constant with the specified name
      -
      NullPointerException - if the argument is null
      +
      Throws:
      +
      java.lang.IllegalArgumentException - if this enum type has no constant with the specified name
      +
      java.lang.NullPointerException - if the argument is null
      -
  • - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/autograd/GraphNode.html b/docs/jdocs/neureka/autograd/GraphNode.html index 6ad0acc33..9e1c34fb6 100644 --- a/docs/jdocs/neureka/autograd/GraphNode.html +++ b/docs/jdocs/neureka/autograd/GraphNode.html @@ -1,91 +1,122 @@ - + + - -GraphNode (neureka 1.0.0 API) - - - - + +GraphNode (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class GraphNode<V>

    +
    neureka.autograd
    +

    Class GraphNode<V>

    -
    java.lang.Object -
    neureka.autograd.GraphNode<V>
    -
    -
    -
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.autograd.GraphNode<V>
      • +
      +
    • +
    +
    +
      +
    • +
      All Implemented Interfaces:
      -
      Component<Tensor<V>>
      +
      Component<Tensor<V>>

      -
      public class GraphNode<V> -extends Object -implements Component<Tensor<V>>
      -
      Instances of the GraphNode class are components of tensors (Tensor instances) +
      +
      public class GraphNode<V>
      +extends java.lang.Object
      +implements Component<Tensor<V>>
      +
Instances of the GraphNode class are components of tensors (Tensor instances) which model and record computations / operations between them. GraphNodes form a computation graph when operations are applied to tensors. This graph can then later on be used for traversal by an important algorithm implemented inside this class, namely: backpropagation. This algorithm is more generally known as reverse mode auto differentiation.
@@ -95,311 +126,388 @@

      Class GraphNode<V>

      Children are weakly referenced so that abandoned / detached graph branches (child nodes) can be garbage collected... ...whereas parents are strongly referenced in order to grant successful traversal.
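A hedged sketch of inspecting such a graph using only methods listed further down on this page; 'node' is assumed to be a GraphNode<Double> taken from some tensor (how it is obtained is not shown in this excerpt):

  if ( node.usesAD() ) {
      node.getParents().forEach( parent ->             // strongly referenced source nodes
          System.out.println( parent.type() )
      );
      node.forEachDerivative( (targetNode, action) ->  // stored target / AD-action pairs
          System.out.println( "AD path towards: " + targetNode.type() )
      );
  }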
      -
    -
    -
      - -
    • -
      -

      Nested Class Summary

      -
      Nested Classes
      -
      -
      Modifier and Type
      -
      Class
      -
      Description
      -
      static enum 
      - -
       
      +
    • +
    -
    -

    Nested classes/interfaces inherited from interface neureka.common.composition.Component

    -Component.IsBeing, Component.OwnerChangeRequest<O>
    - +
    +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        GraphNode

        -
        public GraphNode(Function function, - ExecutionCall<Device<?>> call, - Supplier<Result> payloadSupplier)
        -
        -
        Parameters:
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            GraphNode

            +
            public GraphNode(Function function,
            +                 ExecutionCall<Device<?>> call,
            +                 java.util.function.Supplier<Result> payloadSupplier)
            +
            +
            Parameters:
            function - Is the function that lead to the creation of this node.
            call - The execution call, or null if the node is not a result of an execution call (a leave).
            payloadSupplier - Provides the payload of this node.
            -
      -
    • +
    -
  • -
    -

    Method Details

    -
      -
    • -
      -

      usesAD

      -
      public boolean usesAD()
      +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          usesAD

          +
          public boolean usesAD()
          This gradient node is involved in auto-differentiation.
          -
          -
          Returns:
          +
          +
          Returns:
          boolean
          -
    • -
    • -
      -

      usesForwardAD

      -
      public boolean usesForwardAD()
      +
    + + + +
      +
    • +

      usesForwardAD

      +
      public boolean usesForwardAD()
      This node propagates forward.
      -
      -
      Returns:
      +
      +
      Returns:
      boolean
      -
  • -
  • -
    -

    usesReverseAD

    -
    public boolean usesReverseAD()
    + + + + +
      +
    • +

      usesReverseAD

      +
      public boolean usesReverseAD()
      This node propagates _backward.
      -
      -
      Returns:
      +
      +
      Returns:
      boolean
      -
  • -
  • -
    -

    isLeave

    -
    public boolean isLeave()
    + + + + +
      +
    • +

      isLeave

      +
      public boolean isLeave()
      This node (and the corresponding tensor) was not created by a function! (it's a leave tensor)
      -
      -
      Returns:
      +
      +
      Returns:
      boolean
      -
  • -
  • -
    -

    isGraphLeave

    -
    public boolean isGraphLeave()
    -
    + + + + +
      +
    • +

      isGraphLeave

      +
      public boolean isGraphLeave()
    • -
    • -
      -

      getPayloadShape

      -
      public List<Integer> getPayloadShape()
      +
    + + + +
      +
    • +

      getPayloadShape

      +
      public java.util.List<java.lang.Integer> getPayloadShape()
      Note: This method will never return null even if the actual payload tensor was garbage collected. - This is because the GraphNode will remember the shape of the tensor.
      -
      -
      Returns:
      -
      The shape of the payload tensor represented by this GraphNode.
      + This is because the GraphNode will remember the shape of the tensor.
  • +
    +
    Returns:
    +
    The shape of the payload tensor represented by this GraphNode.
    - -
  • -
    -

    getPayload

    -
    public Optional<Tensor<V>> getPayload()
    + + + + +
      +
    • +

      getPayload

      +
      public java.util.Optional<Tensor<V>> getPayload()
      The value of a graph node is the tensor to which it belongs (is a component of).

Meaning it is the tensor owning this GraphNode component. It is referenced weakly because it might not be needed anymore (not referenced inside an AD-Agent for example) and can therefore be garbage collected.

      Warning: This method might return null because the payload is weakly referenced! Meaning that it might get garbage collected.

      -
      -
      Returns:
      +
      +
      Returns:
      The tensor payload of this graph-node.
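Because the payload is weakly referenced, the returned Optional should always be checked. A small sketch ('node' is a placeholder GraphNode<Double>):

  // If the payload tensor was already garbage collected, nothing is printed.
  node.getPayload().ifPresent( payload ->
      System.out.println( "payload still reachable, shape: " + node.getPayloadShape() )
  );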
      -
  • -
  • -
    -

    update

    -
    public boolean update(Component.OwnerChangeRequest<Tensor<V>> changeRequest)
    -
    Description copied from interface: Component
    + + + + +
      +
    • +

      update

      +
      public boolean update(Component.OwnerChangeRequest<Tensor<V>> changeRequest)
      +
      Description copied from interface: Component
      Components are not the slaves of their owners. If the owner registers any state changes related to a given component, then said component will be informed by the owner about the change as well as receive @@ -409,53 +517,62 @@

      update

      is being added to, or removed from, its current owner. If components hold references to their owners then this method gives them the ability to update said reference when a new owner takes over the components of an old one. - The Component.OwnerChangeRequest implementation instance passed to this method - informs this component about the current state change and its type (Component.OwnerChangeRequest.type()). + The Component.OwnerChangeRequest implementation instance passed to this method + informs this component about the current state change and its type (Component.OwnerChangeRequest.type()). If this method returns false then this means that this component rejects the proposed update. The component owner will then abort the proposed change.
      -
      -
      Specified by:
      -
      update in interface Component<V>
      -
      Parameters:
      -
      changeRequest - An Component.OwnerChangeRequest implementation instance used to communicate the type of change, context information and the ability to execute the change directly.
      -
      Returns:
      +
      +
      Specified by:
      +
      update in interface Component<Tensor<V>>
      +
      Parameters:
      +
      changeRequest - An Component.OwnerChangeRequest implementation instance used to communicate the type of change, context information and the ability to execute the change directly.
      +
      Returns:
      The truth value determining if the state change should be aborted or not.
      -
  • -
  • -
    -

    getAndRemovePendingError

    -
    public neureka.autograd.PendingError<V> getAndRemovePendingError()
    + + + + +
      +
    • +

      getAndRemovePendingError

      +
      public neureka.autograd.PendingError<V> getAndRemovePendingError()
      This method is called by the JITProp component. A pending should only ever be retrieved from a GraphNode once because afterward the accumulated error is about to be back-propagated. Therefore, this method nulls the reference when returning the PendingError instance.
      -
      -
      Returns:
      +
      +
      Returns:
      Returns an instance of the PendingError class containing a error accumulation.
      -
  • -
  • -
    -

    backward

    -
    public void backward(Tensor<V> error)
    + + + + +
      +
    • +

      backward

      +
      public void backward(Tensor<V> error)
      This method is the entry-point for the back-propagation process. It sets up a key/value map which stores nodes and their intermediate error accumulations. Accumulation occurs inside the private '_backward' method which traverses the computation graph recursively, halts when errors can be accumulated, adds a PendingError and returns to the method below! Here all the nodes and error values will then be carried (propagated) to the gradients!
      -
      -
      Parameters:
      +
      +
      Parameters:
      error - The current error which is created by multiplying it with current size and traversing it.
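A hedged sketch of triggering backpropagation from a node; both 'node' and 'error' are placeholders, and constructing a suitable error tensor is not shown in this excerpt:

  // 'node' is a GraphNode<Double> and 'error' a Tensor<Double> of matching shape (assumptions).
  node.backward( error );   // propagates 'error' through the recorded computation graph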
      -
  • -
  • -
    -

    backwardJIT

    -
    public void backwardJIT(Tensor<V> error)
    + + + + +
      +
    • +

      backwardJIT

      +
      public void backwardJIT(Tensor<V> error)
      This method is called only when JITProp is active. If an error has accumulated inside a JITProp component and the component is triggered to continue pending backward calls @@ -465,87 +582,105 @@

      backwardJIT

      in order to mark this error source as 'done' so that other JITProp components do not propagate this 'source' node multiple times.
      -
      -
      Parameters:
      +
      +
      Parameters:
      error - The error which ought to be back-propagated just-in-time.
      -
  • -
  • -
    -

    has

    -
    public boolean has(GraphNode<V> target)
    + + + + +
      +
    • +

      has

      +
      public boolean has(GraphNode<V> target)
      This method checks if a given graph node is an AD target of this node. This would mean that this node contains an AD-action for the given GraphNode (target).
      -
      -
      Parameters:
      +
      +
      Parameters:
      target - The targeted derivation graph node reference.
      -
      Returns:
      +
      Returns:
      boolean
      -
  • -
  • -
    -

    size

    -
    public int size()
    + + + + +
      +
    • +

      size

      +
      public int size()
      This is the number of AD-actions stored inside this node. It can be interpreted as the 'number of AD paths'.
      -
      -
      Returns:
      +
      +
      Returns:
      int
      -
  • -
  • -
    -

    forEachDerivative

    -
    public void forEachDerivative(BiConsumer<GraphNode<V>,ADAction> action)
    -
    -
    Parameters:
    + + + + +
      +
    • +

      forEachDerivative

      +
      public void forEachDerivative(java.util.function.BiConsumer<GraphNode<V>,ADAction> action)
      +
      +
      Parameters:
      action - The lambda performing an action on all targeted nodes and their agents.
      -
  • -
  • -
    -

    forEachTarget

    -
    public void forEachTarget(Consumer<GraphNode<V>> action)
    -
    -
    Parameters:
    + + + + +
      +
    • +

      forEachTarget

      +
      public void forEachTarget(java.util.function.Consumer<GraphNode<V>> action)
      +
      +
      Parameters:
      action - An action which should be applied to the graph nodes of all the partial derivatives.
      -
  • -
  • -
    -

    hasDerivatives

    -
    public boolean hasDerivatives()
    -
    -
    Returns:
    + + + + +
      +
    • +

      hasDerivatives

      +
      public boolean hasDerivatives()
      +
      +
      Returns:
      Checks if this node stores target / AD-action (usually derivatives) pairs.
      -
  • -
  • -
    -

    getMode

    -
    public int getMode()
    -
    This is the getter for an important GraphNode property which + + + + +
      +
    • +

      getMode

      +
      public int getMode()
      +
      This is the getter for an important GraphNode property which holds the auto-differentiation mode used by this instance to decide if a given error should be forward propagated backward propagated or not propagated at all. - If the mode is greater than 0, then this means this GraphNode + If the mode is greater than 0, then this means this GraphNode will perform forward propagation. In this case the mode number is also the cumulative number of forward propagation steps - in the tree of source GraphNode instances. + in the tree of source GraphNode instances. If the mode is below 0, then this means this instance will perform reverse mode differentiation (back-propagation). The absolute of a negative mode represents the number of referenced source nodes which have a mode state other than zero. This means that they directly or indirectly reference - a GraphNode instance which represents a Tensor instance - having the Tensor.rqsGradient() flag set to true! + a GraphNode instance which represents a Tensor instance + having the Tensor.rqsGradient() flag set to true!
Mode state meaning:
-----------------------------------------------------------
| mode equals  0 | no Auto-Differentiation
-----------------------------------------------------------
| mode greater 0 | forward Auto-Differentiation
-----------------------------------------------------------
| mode lesser  0 | backward Auto-Differentiation
-----------------------------------------------------------

      -
      -
      Returns:
      +
      +
      Returns:
      The differentiation mode represented as an integer which encodes 3 distinct states.
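The returned integer can be decoded as described above. A small illustrative helper, hypothetical and not part of the GraphNode API:

  // Hypothetical helper, shown only to make the three states explicit:
  static String describeMode( int mode ) {
      if ( mode > 0 ) return "forward AD (" + mode + " forward steps among the sources)";
      if ( mode < 0 ) return "backward AD (" + (-mode) + " sources with a non-zero mode)";
      return "no auto-differentiation";
  }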
      -
  • -
  • -
    -

    isReliesOnJustInTimeProp

    -
    public boolean isReliesOnJustInTimeProp()
    + + + + +
      +
    • +

      isReliesOnJustInTimeProp

      +
      public boolean isReliesOnJustInTimeProp()
      This flag is used for a performance optimization feature namely 'Just In Time Propagation'. This feature accumulates errors and continues propagation as soon as they are needed. (At the end of 'backward()' or when the tensor is used again). - If the flag Neureka.Settings.AutoGrad.isRetainingPendingErrorForJITProp() is set to true + If the flag Neureka.Settings.AutoGrad#isRetainingPendingErrorForJITProp() is set to true then error values will accumulate whenever it makes sense. This technique however uses more memory but will improve performance for some networks substantially.

      All nodes between a Pending-Error and those requiring gradients will be marked with '_relies_on_JIPProp=true'!

      -
      -
      Returns:
      +
      +
      Returns:
      The truth value determining if this graph node relies on just in time propagation.
      -
  • -
  • -
    -

    getPendingError

    -
    public Optional<neureka.autograd.PendingError<V>> getPendingError()
    + + + + +
      +
    • +

      getPendingError

      +
      public java.util.Optional<neureka.autograd.PendingError<V>> getPendingError()
      Used by the Just-In-Time back-prop component.
      -
  • -
  • -
    -

    isUsedAsDerivative

    -
    public boolean isUsedAsDerivative()
    + + + + +
      +
    • +

      isUsedAsDerivative

      +
      public boolean isUsedAsDerivative()
      The chain-rule states that the derivative of f(x) = h(g(x)) with respect to x is: g'(x) * h'(g(x)) An example would be: f(x) = ((x*y)*z) f'(x) = (1*y) * (1*z) = z*y The values z,y or z*y must not be deleted as they are needed for back-propagation!
      -
  • -
  • -
    -

    getFunction

    -
    public Optional<Function> getFunction()
    -
    Recorded Function which produced this GraphNode.
    -
    + + + + +
      +
    • +

      getFunction

      +
      public java.util.Optional<Function> getFunction()
      +
      Recorded Function which produced this GraphNode.
    • -
    • -
      -

      getParents

      -
      public List<GraphNode<V>> getParents()
      -
      +
    + + + +
      +
    • +

      getParents

      +
      public java.util.List<GraphNode<V>> getParents()
    • -
    • -
      -

      getPayloadReferenceVersion

      -
      public int getPayloadReferenceVersion()
      +
    + + + +
      +
    • +

      getPayloadReferenceVersion

      +
      public int getPayloadReferenceVersion()
      This variable holds a copy of the version of the payload tensor recorded when this GraphNode instance is instantiated. It must be treated as final and should never be modified. However, it can be read freely in order to check that the version of the payload hasn't changed.
      -
    • -
    • -
      -

      getPayloadDataType

      -
      public DataType<V> getPayloadDataType()
      -
      +
    + + + + + + + +
      +
    • +

      getChildren

      +
      public java.util.List<java.lang.ref.WeakReference<GraphNode<V>>> getChildren()
      +
      The children are GraphNode instances which represent computations + involving the payload of this very GraphNode instance.
    • -
    • -
      -

      canBeDeleted

      -
      public boolean canBeDeleted()
      -
      +
    + + + +
      +
    • +

      canBeDeleted

      +
      public boolean canBeDeleted()
    • -
    • -
      -

      type

      -
      public String type()
      -
      -
      Returns:
      +
    + + + +
      +
    • +

      type

      +
      public java.lang.String type()
      +
      +
      Returns:
      Returns the type of the node as descriptive String in capital letters.
      -
    • -
    • -
      -

      toString

      -
      public String toString()
      -
      -
      Overrides:
      -
      toString in class Object
      +
    + + + +
      +
    • +

      toString

      +
      public java.lang.String toString()
      +
      +
      Overrides:
      +
      toString in class java.lang.Object
      -
    • -
    • -
      -

      toString

      -
      public String toString(GraphNode.Print mode)
      -
      -
      Parameters:
      +
    + + + +
      +
    • +

      toString

      +
      public java.lang.String toString(GraphNode.Print mode)
      +
      +
      Parameters:
      mode - The format of the string representation.
      -
      Returns:
      +
      Returns:
      Returns a String representation of this node.
      -
    -
  • - + + +
    + - + + + + diff --git a/docs/jdocs/neureka/autograd/JITProp.html b/docs/jdocs/neureka/autograd/JITProp.html index c73c86fac..fa5ff11b1 100644 --- a/docs/jdocs/neureka/autograd/JITProp.html +++ b/docs/jdocs/neureka/autograd/JITProp.html @@ -1,262 +1,406 @@ - + + - -JITProp (neureka 1.0.0 API) - - - - + +JITProp (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class JITProp<V>

    +
    neureka.autograd
    +

    Class JITProp<V>

    -
    java.lang.Object -
    neureka.autograd.JITProp<V>
    -
    -
    -
    -
    Type Parameters:
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.autograd.JITProp<V>
      • +
      +
    • +
    +
    +
      +
    • +
      +
      Type Parameters:
      V - The type parameter of the involved tensors.
      -
      +
      All Implemented Interfaces:
      -
      Component<Tensor<V>>
      +
      Component<Tensor<V>>

      -
      public final class JITProp<V> -extends Object -implements Component<Tensor<V>>
      +
      +
      public final class JITProp<V>
      +extends java.lang.Object
      +implements Component<Tensor<V>>
      This class keeps track of graph nodes which require back-propagation in order to be able to continue the process at a later point in time (based on some configurable conditions).
      -
    -
    -
      + +
    +
    +
    + - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        JITProp

        -
        public JITProp(Set<GraphNode<V>> pending)
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            JITProp

            +
            public JITProp(java.util.Set<GraphNode<V>> pending)
          -
    • +
    -
  • -
    -

    Method Details

    -
      -
    • -
      -

      addPending

      -
      public void addPending(Set<GraphNode<V>> pending)
      -
      -
      Parameters:
      +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          addPending

          +
          public void addPending(java.util.Set<GraphNode<V>> pending)
          +
          +
          Parameters:
          pending - A set of GraphNode<V> instance which are saved for future backprop continuation.
          -
    • -
    • -
      -

      noteFinished

      -
      public void noteFinished(GraphNode<V> finishedJITProps)
      -
      -
      Parameters:
      +
    + + + +
      +
    • +

      noteFinished

      +
      public void noteFinished(GraphNode<V> finishedJITProps)
      +
      +
      Parameters:
      finishedJITProps - The reference to a GraphNote which has finished (JITed) backpropation.
      -
  • -
  • -
    -

    finishedCount

    -
    public int finishedCount()
    -
    + + + + +
      +
    • +

      finishedCount

      +
      public int finishedCount()
    • -
    • -
      -

      pendingCount

      -
      public int pendingCount()
      -
      +
    + + + +
      +
    • +

      pendingCount

      +
      public int pendingCount()
    • -
    • -
      -

      execute

      -
      public void execute()
      +
    + + + +
      +
    • +

      execute

      +
      public void execute()
      This method triggers the continuation of the back-propagation which has been put on hold by saving the pending graph nodes inside this class.
      The execution request happens when gradients are immediately required by a tensor, which is the case when the tensor is about to apply its gradients.
      However because the gradient has not yet been fully calculated this method will be called first (assuming the tensor has a JITProp component stored).
      -
    • -
    • -
      -

      isDone

      -
      public boolean isDone()
      -
      -
      Returns:
      +
    + + + +
      +
    • +

      isDone

      +
      public boolean isDone()
      +
      +
      Returns:
      The truth value determining if the back-propagation has been completed.
      -
    • -
    • -
      -

      toString

      -
      public String toString()
      -
      -
      Overrides:
      -
      toString in class Object
      +
    + + + +
      +
    • +

      toString

      +
      public java.lang.String toString()
      +
      +
      Overrides:
      +
      toString in class java.lang.Object
      -
    -
  • - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/autograd/package-frame.html b/docs/jdocs/neureka/autograd/package-frame.html new file mode 100644 index 000000000..0631549a8 --- /dev/null +++ b/docs/jdocs/neureka/autograd/package-frame.html @@ -0,0 +1,29 @@ + + + + + +neureka.autograd (neureka 1.0.1 API) + + + + +

    neureka.autograd

    +
    +

    Interfaces

    + +

    Classes

    + +

    Enums

    + +
    + + diff --git a/docs/jdocs/neureka/autograd/package-summary.html b/docs/jdocs/neureka/autograd/package-summary.html index 85d4c6b43..fefa65846 100644 --- a/docs/jdocs/neureka/autograd/package-summary.html +++ b/docs/jdocs/neureka/autograd/package-summary.html @@ -1,121 +1,191 @@ - + + - -neureka.autograd (neureka 1.0.0 API) - - - - + +neureka.autograd (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    -

    Package neureka.autograd

    -
    -
    -
    package neureka.autograd
    -
    -
      -
    • - -
    • -
    • -
      -
      -
      -
      -
      Class
      -
      Description
      - -
      +

      Package neureka.autograd

      +
      +
      +
        +
      • + + + + + + + + + + + + +
        Interface Summary 
        InterfaceDescription
        ADAction
        This interface is the declaration for - lambda actions for both the ADAction.act(ADTarget) method of the ADAction interface.
        - - -
        + lambda actions for both the ADAction.act(ADTarget) method of the ADAction interface.
        +
        +
      • +
      • + + + + + + + + + + + + + + + + + + + + +
        Class Summary 
        ClassDescription
        ADTarget<V>
        This is simply a wrapper for useful information needed by implementations of - the ADAction and ADAction interfaces to perform error propagation.
        - - -
        -
        Instances of the GraphNode class are components of tensors (Tensor instances) + the ADAction and ADAction interfaces to perform error propagation.
        +
        GraphNode<V> +
        Instances of the GraphNode class are components of tensors (Tensor instances) which model and record computations / operations between them.
        - - -
         
        - -
        +
        JITProp<V>
        This class keeps track of graph nodes which require back-propagation in order to be able to continue the process at a later point in time (based on some configurable conditions).
        - - - - +
        +
      • +
      • + + + + + + + + + + + + +
        Enum Summary 
        EnumDescription
        GraphNode.Print 
      -
    -
    + + + + diff --git a/docs/jdocs/neureka/autograd/package-tree.html b/docs/jdocs/neureka/autograd/package-tree.html index 4f10b7819..b8078208a 100644 --- a/docs/jdocs/neureka/autograd/package-tree.html +++ b/docs/jdocs/neureka/autograd/package-tree.html @@ -1,93 +1,152 @@ - + + - -neureka.autograd Class Hierarchy (neureka 1.0.0 API) - - - - + +neureka.autograd Class Hierarchy (neureka 1.0.1 API) - - - - - - + + -
    - -
    -

    Hierarchy For Package neureka.autograd

    -Package Hierarchies: +Package Hierarchies:
    -
    +

    Class Hierarchy

    -
    -

    Interface Hierarchy

    -
    -
    -

    Enum Class Hierarchy

    +

    Enum Hierarchy

    -
    -
    + + + + diff --git a/docs/jdocs/neureka/backend/api/Algorithm.html b/docs/jdocs/neureka/backend/api/Algorithm.html index 2e2073467..bb7499d53 100644 --- a/docs/jdocs/neureka/backend/api/Algorithm.html +++ b/docs/jdocs/neureka/backend/api/Algorithm.html @@ -1,196 +1,312 @@ - + + - -Algorithm (neureka 1.0.0 API) - - - - + +Algorithm (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Interface Algorithm

    +
    neureka.backend.api
    +

    Interface Algorithm

    -
    -
    +
    +
    +
    -
    -
      + +
    +
    +
    + - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Method Details

      -
        -
      • -
        -

        withName

        -
        static FunAlgorithm withName(String name)
        -
        This is a factory method for creating a new instance of this FunAlgorithm class.
        -
        -
        Parameters:
        +
          +
        • + + +

          Method Detail

          + + + +
            +
          • +

            withName

            +
            static FunAlgorithm withName(java.lang.String name)
            +
            This is a factory method for creating a new instance of this FunAlgorithm class.
            +
            +
            Parameters:
            name - The name of the functional algorithm.
            -
            Returns:
            -
            A new FunAlgorithm with the provided name.
            +
            Returns:
            +
            A new FunAlgorithm with the provided name.
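A minimal sketch of this factory; what one would typically configure on the returned FunAlgorithm is beyond this excerpt, and the name below is an arbitrary example:

  FunAlgorithm algorithm = Algorithm.withName( "my_custom_algorithm" );
  String name = algorithm.getName();   // "my_custom_algorithm"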
            -
      • -
      • -
        -

        getName

        -
        String getName()
        -
        The name of an Algorithm may be used for OpenCL kernel compilation or simply +
      + + + +
        +
      • +

        getName

        +
        java.lang.String getName()
        +
        The name of an Algorithm may be used for OpenCL kernel compilation or simply for debugging purposes to identify which type of algorithm is being executed at any given time...
        -
        -
        Returns:
        -
        The name of this Algorithm.
        +
        +
        Returns:
        +
        The name of this Algorithm.
        -
    - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/api/AutoDiffMode.html b/docs/jdocs/neureka/backend/api/AutoDiffMode.html index 0d3fcc7aa..636bc7ea2 100644 --- a/docs/jdocs/neureka/backend/api/AutoDiffMode.html +++ b/docs/jdocs/neureka/backend/api/AutoDiffMode.html @@ -1,251 +1,388 @@ - + + - -AutoDiffMode (neureka 1.0.0 API) - - - - + +AutoDiffMode (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Enum Class AutoDiffMode

    -
    -
    java.lang.Object -
    java.lang.Enum<AutoDiffMode> -
    neureka.backend.api.AutoDiffMode
    -
    +
    neureka.backend.api
    +

    Enum AutoDiffMode

    -
    -
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • java.lang.Enum<AutoDiffMode>
      • +
      • +
          +
        • neureka.backend.api.AutoDiffMode
        • +
        +
      • +
      +
    • +
    +
    +
    -
    -
    - +
    +
      +
    • + + -
    • -
      -

      Method Summary

      -
      -
      -
      -
      -
      Modifier and Type
      -
      Method
      -
      Description
      -
      boolean
      - -
       
      -
      boolean
      - -
       
      - - -
      -
      Returns the enum constant of this class with the specified name.
      -
      -
      static AutoDiffMode[]
      - -
      -
      Returns an array containing the constants of this enum class, in +
        +
      • + + +

        Method Summary

        + + + + + + + + + + + + + + + + + + + + + + +
        All Methods Static Methods Instance Methods Concrete Methods 
        Modifier and TypeMethod and Description
        booleanallowsBackward() 
        booleanallowsForward() 
        static AutoDiffModevalueOf(java.lang.String name) +
        Returns the enum constant of this type with the specified name.
        +
        static AutoDiffMode[]values() +
        Returns an array containing the constants of this enum type, in the order they are declared.
        +
        +
          +
        • + + +

          Methods inherited from class java.lang.Enum

          +clone, compareTo, equals, finalize, getDeclaringClass, hashCode, name, ordinal, toString, valueOf
        • +
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +getClass, notify, notifyAll, wait, wait, wait
        • +
        +
      • +
      +
    • +
    -
    -
    -
    - -
    -

    Methods inherited from class java.lang.Object

    -getClass, notify, notifyAll, wait, wait, wait
    - - - - -
    -
      +
      +
        +
      • -
      • -
        -

        Enum Constant Details

        -
          -
        • -
          -

          FORWARD_ONLY

          -
          public static final AutoDiffMode FORWARD_ONLY
          -
          +
            +
          • + + +

            Enum Constant Detail

            + + + +
              +
            • +

              FORWARD_ONLY

              +
              public static final AutoDiffMode FORWARD_ONLY
            • -
            • -
              -

              BACKWARD_ONLY

              -
              public static final AutoDiffMode BACKWARD_ONLY
              -
              +
            + + + +
              +
            • +

              BACKWARD_ONLY

              +
              public static final AutoDiffMode BACKWARD_ONLY
            • -
            • -
              -

              FORWARD_AND_BACKWARD

              -
              public static final AutoDiffMode FORWARD_AND_BACKWARD
              -
              +
            + + + +
              +
            • +

              FORWARD_AND_BACKWARD

              +
              public static final AutoDiffMode FORWARD_AND_BACKWARD
            • -
            • -
              -

              NOT_SUPPORTED

              -
              public static final AutoDiffMode NOT_SUPPORTED
              -
              +
            + + + +
              +
            • +

              NOT_SUPPORTED

              +
              public static final AutoDiffMode NOT_SUPPORTED
            -
      • +
      -
    • -
      -

      Method Details

      -
        -
      • -
        -

        values

        -
        public static AutoDiffMode[] values()
        -
        Returns an array containing the constants of this enum class, in -the order they are declared.
        -
        -
        Returns:
        -
        an array containing the constants of this enum class, in the order they are declared
        +
          +
        • + + +

          Method Detail

          + + + +
            +
          • +

            values

            +
            public static AutoDiffMode[] values()
            +
            Returns an array containing the constants of this enum type, in +the order they are declared. This method may be used to iterate +over the constants as follows: +
            +for (AutoDiffMode c : AutoDiffMode.values())
            +    System.out.println(c);
            +
            +
            +
            Returns:
            +
            an array containing the constants of this enum type, in the order they are declared
            -
      • -
      • -
        -

        valueOf

        -
        public static AutoDiffMode valueOf(String name)
        -
        Returns the enum constant of this class with the specified name. +
      + + + +
        +
      • +

        valueOf

        +
        public static AutoDiffMode valueOf(java.lang.String name)
        +
        Returns the enum constant of this type with the specified name. The string must match exactly an identifier used to declare an -enum constant in this class. (Extraneous whitespace characters are +enum constant in this type. (Extraneous whitespace characters are not permitted.)
        -
        -
        Parameters:
        +
        +
        Parameters:
        name - the name of the enum constant to be returned.
        -
        Returns:
        +
        Returns:
        the enum constant with the specified name
        -
        Throws:
        -
        IllegalArgumentException - if this enum class has no constant with the specified name
        -
        NullPointerException - if the argument is null
        +
        Throws:
        +
        java.lang.IllegalArgumentException - if this enum type has no constant with the specified name
        +
        java.lang.NullPointerException - if the argument is null
        -
    • -
    • -
      -

      allowsForward

      -
      public boolean allowsForward()
      -
      +
    + + + +
      +
    • +

      allowsForward

      +
      public boolean allowsForward()
    • -
    • -
      -

      allowsBackward

      -
      public boolean allowsBackward()
      -
      +
    + + + +
      +
    • +

      allowsBackward

      +
      public boolean allowsBackward()
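A small sketch of querying these flags, using one of the enum constants listed above:

  AutoDiffMode mode = AutoDiffMode.FORWARD_AND_BACKWARD;
  if ( mode.allowsForward() && mode.allowsBackward() )
      System.out.println( "both propagation directions are supported" );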
      +
    • +
    -
    - + + - + + + + diff --git a/docs/jdocs/neureka/backend/api/BackendContext.Runner.html b/docs/jdocs/neureka/backend/api/BackendContext.Runner.html index 2845b05dd..fc2585595 100644 --- a/docs/jdocs/neureka/backend/api/BackendContext.Runner.html +++ b/docs/jdocs/neureka/backend/api/BackendContext.Runner.html @@ -1,244 +1,359 @@ - + + - -BackendContext.Runner (neureka 1.0.0 API) - - - - + +BackendContext.Runner (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class BackendContext.Runner

    -
    -
    java.lang.Object -
    neureka.backend.api.BackendContext.Runner
    +
    neureka.backend.api
    +

    Class BackendContext.Runner

    -
    -
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.backend.api.BackendContext.Runner
      • +
      +
    • +
    +
    +
      +
    • +
      Enclosing class:
      -
      BackendContext
      +
      BackendContext

      -
      public static class BackendContext.Runner -extends Object
      +
      +
      public static class BackendContext.Runner
      +extends java.lang.Object
This is a very simple class with a single purpose, namely it exposes methods which receive lambda instances in order to then execute them in a given BackendContext, just to then switch back to the original context again. Switching a context simply means that the BackendContext which produced this BackendContext.Runner will temporarily be set as execution context for the current thread-local Neureka instance.

A BackendContext.Runner wraps both the called context as well as the context of the caller in order to perform this temporary context switching throughout the execution of the lambdas passed to the BackendContext.Runner. After a given lambda was executed, the original context will be restored in the current thread-local Neureka instance through the Neureka.setBackend(BackendContext) method.
    -
    -
    -
    -

    Methods inherited from class java.lang.Object

    -clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
    - +
    +
      +
    • + +
        +
      • + + +

        Method Summary

        + + + + + + + + + + + + + + + + + + + + + + +
        All Methods Instance Methods Concrete Methods 
        Modifier and TypeMethod and Description
        <T> Tcall(java.util.function.Supplier<T> contextSpecificAction) +
        Use this method to supply a lambda which will be executed in the BackendContext + which produced this very BackendContext.Runner instance.
        +
        <T> Tinvoke(java.util.function.Supplier<T> contextSpecificAction) +
        Use this method to supply a lambda which will be executed in the BackendContext + which produced this very BackendContext.Runner instance.
        +
        BackendContext.Runnerrun(java.lang.Runnable contextSpecificAction) +
        Use this method to supply a lambda which will be executed in the BackendContext + which produced this very BackendContext.Runner instance.
        +
        <T> TrunAndGet(java.util.function.Supplier<T> contextSpecificAction) +
        Use this method to supply a lambda which will be executed in the BackendContext + which produced this very BackendContext.Runner instance.
        +
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
        • +
        +
      • +
    - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Method Details

      - + + + +
        +
      • +

        runAndGet

        +
        public <T> T runAndGet(java.util.function.Supplier<T> contextSpecificAction)
        +
Use this method to supply a lambda which will be executed in the BackendContext which produced this very BackendContext.Runner instance. After the lambda finished execution successfully the original BackendContext will be restored. This method distinguishes itself from the run(Runnable) method because the lambda supplied to this method is expected to return something. What may be returned is up to the user; one might want to return the result of a tensor operation which might be exclusively available in the used context.
        -
        -
        Type Parameters:
        +
        +
        Type Parameters:
        T - The return type of the supplied context action which will also be returned by this method.
        -
        Parameters:
        -
        contextSpecificAction - The context specific action which will be execute in the BackendContext which produced this BackendContext.Runner.
        -
        Returns:
        +
        Parameters:
        +
        contextSpecificAction - The context specific action which will be execute in the BackendContext which produced this BackendContext.Runner.
        +
        Returns:
        The result of the supplied context action.
        -
    • -
    • -
      -

      call

      -
      public <T> T call(Supplier<T> contextSpecificAction)
      -
      Use this method to supply a lambda which will be executed in the BackendContext - which produced this very BackendContext.Runner instance. - After the lambda finished execution successfully the original BackendContext will be restored. - This method distinguishes itself from the run(Runnable) method because the +
    + + + +
      +
    • +

      call

      +
      public <T> T call(java.util.function.Supplier<T> contextSpecificAction)
      +
      Use this method to supply a lambda which will be executed in the BackendContext + which produced this very BackendContext.Runner instance. + After the lambda finished execution successfully the original BackendContext will be restored. + This method distinguishes itself from the run(Runnable) method because the lambda supplied to this method is expected to return something.
      What may be returned is up to the user, one might want to return the result of a tensor operation which might be exclusively available in the used context. - This method is doing the exact same thing as the runAndGet(Supplier) method, + This method is doing the exact same thing as the runAndGet(Supplier) method, however its name is shorter and it can even be omitted entirely when using Groovy.

      -
      -
      Type Parameters:
      +
      +
      Type Parameters:
      T - The return type of the supplied context action which will also be returned by this method.
      -
      Parameters:
      -
      contextSpecificAction - The context specific action which will be execute in the BackendContext which produced this BackendContext.Runner.
      -
      Returns:
      +
      Parameters:
      +
      contextSpecificAction - The context specific action which will be execute in the BackendContext which produced this BackendContext.Runner.
      +
      Returns:
      The result of the supplied context action.
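A hedged usage sketch; how the Runner is obtained from a BackendContext is not shown in this excerpt, so 'runner' and 'someTensorOperation()' are placeholders:

  // 'runner' is assumed to be a BackendContext.Runner produced by some BackendContext.
  Tensor<?> result = runner.call( () -> someTensorOperation() );           // executed inside that context
  runner.run( () -> System.out.println( "also executed inside that context" ) );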
      -
    • -
    • -
      -

      invoke

      -
      public <T> T invoke(Supplier<T> contextSpecificAction)
      -
      Use this method to supply a lambda which will be executed in the BackendContext - which produced this very BackendContext.Runner instance. - After the lambda finished execution successfully the original BackendContext will be restored. - This method distinguishes itself from the run(Runnable) method because the +
    + + + +
      +
    • +

      invoke

      +
      public <T> T invoke(java.util.function.Supplier<T> contextSpecificAction)
      +
      Use this method to supply a lambda which will be executed in the BackendContext + which produced this very BackendContext.Runner instance. + After the lambda has finished executing successfully, the original BackendContext will be restored. + This method distinguishes itself from the run(Runnable) method because the lambda supplied to this method is expected to return something.
      What may be returned is up to the user; one might want to return the result of a tensor operation which might be exclusively available in the used context. - This method does exactly the same thing as the runAndGet(Supplier) method, + This method does exactly the same thing as the runAndGet(Supplier) method, however its name is shorter and it can even be omitted entirely when using Kotlin.

      -
      -
      Type Parameters:
      +
      +
      Type Parameters:
      T - The return type of the supplied context action which will also be returned by this method.
      -
      Parameters:
      -
      contextSpecificAction - The context specific action which will be executed in the BackendContext which produced this BackendContext.Runner.
      -
      Returns:
      +
      Parameters:
      +
      contextSpecificAction - The context specific action which will be executed in the BackendContext which produced this BackendContext.Runner.
      +
      Returns:
      The result of the supplied context action.
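      For illustration, a minimal sketch of the context switching described above. It assumes the caller's context is obtained via Neureka.get().backend() and a BackendContext.Runner via a runner() method on the context; both accessors are assumptions here, while clone(), call(Supplier) and size() are taken from this documentation.

          BackendContext isolated = Neureka.get().backend().clone(); // assumed accessor; clone() avoids side effects
          int opCount = isolated.runner().call( () -> {              // runner() is an assumption
              // inside this lambda the cloned context is the active backend;
              // once the lambda returns, the caller's original context is restored
              return Neureka.get().backend().size();                 // number of registered operations
          });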
      -
    - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/api/BackendContext.html b/docs/jdocs/neureka/backend/api/BackendContext.html index 6dcdc15e8..0e172556b 100644 --- a/docs/jdocs/neureka/backend/api/BackendContext.html +++ b/docs/jdocs/neureka/backend/api/BackendContext.html @@ -1,523 +1,709 @@ - + + - -BackendContext (neureka 1.0.0 API) - - - - + +BackendContext (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class BackendContext

    +
    neureka.backend.api
    +

    Class BackendContext

    -
    java.lang.Object -
    neureka.backend.api.BackendContext
    -
    -
    -
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.backend.api.BackendContext
      • +
      +
    • +
    +
    +
      +
    • +
      All Implemented Interfaces:
      -
      Cloneable
      +
      java.lang.Cloneable

      -
      public final class BackendContext -extends Object -implements Cloneable
      -
      Instances of this class are execution contexts hosting Operation instances which receive Tensor +
      +
      public final class BackendContext
      +extends java.lang.Object
      +implements java.lang.Cloneable
      +
      Instances of this class are execution contexts hosting Operation instances which receive Tensor instances for execution. - BackendContexts are managed by Neureka, a (thread-local) Singleton / Multiton library context.
      + BackendContexts are managed by Neureka, a (thread-local) Singleton / Multiton library context.
      Contexts are cloneable for testing purposes and to enable extending the backend dynamically. - A given instance also hosts a reference to a Functions instance which exposes commonly used - pre-instantiated Function implementation instances. + A given instance also hosts a reference to a Functions instance which exposes commonly used + pre-instantiated Function implementation instances.

      - The BackendContext initializes and stores Operation instances in various data structures - for fast access and querying (Mostly used by the ParseUtil and FunctionParser). + The BackendContext initializes and stores Operation instances in various data structures + for fast access and querying (Mostly used by the ParseUtil and FunctionParser).
      - Operations are stored in simple list and map collections, + Operations are stored in simple list and map collections, namely:
      The "_instances" list and the "_lookup" map as declared below.

      - During class initialization concrete classes extending the Operation class - are being instantiated in the static block below via a ServiceLoader. - BackendContext instances expose a useful class called BackendContext.Runner, + During class initialization concrete classes extending the Operation class + are being instantiated in the static block below via a ServiceLoader. + BackendContext instances expose a useful class called BackendContext.Runner, which performs temporary context switching between the caller's context and this context during the execution of provided lambdas.
      -
    -
    -
      + +
    +
    +
    + + + + +
      +
    • +

      getOperationLookupMap

      +
      public java.util.Map<java.lang.String,Operation> getOperationLookupMap()
      +
      This method returns an unmodifiable view of the mapping between the Operation.getIdentifier() / Operation.getOperator() properties + and the Operation implementation instances to which they belong. Query operations on the returned map "read through" to the specified map, and attempts to modify the returned map, whether direct or via its collection views, - result in an UnsupportedOperationException.
      -
      -
      Returns:
      -
      An unmodifiable mapping of Operation properties to the Operation instances to which they belong.
      + result in an UnsupportedOperationException.
    +
    +
    Returns:
    +
    An unmodifiable mapping of Operation properties to the Operation instances to which they belong.
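    As a hedged usage sketch of the unmodifiable view described above (the operator key "+" is an assumption about which identifiers are registered, and 'context' stands for some BackendContext):

        java.util.Map<String, Operation> lookup = context.getOperationLookupMap();
        Operation plus = lookup.get("+");       // read access "reads through" to the backing map
        try {
            lookup.put("noop", plus);           // any attempt to modify the view...
        } catch ( UnsupportedOperationException e ) {
            // ...fails, as documented above
        }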
    - -
  • -
    -

    getOperations

    -
    public List<Operation> getOperations()
    + + + + +
      +
    • +

      getOperations

      +
      public java.util.List<Operation> getOperations()
      This method returns an unmodifiable view of the - list of Operation implementation instances managed by this context. Query operations on the returned list "read through" to the underlying list, and attempts to modify the returned list, whether direct or via its collection views, - result in an UnsupportedOperationException.
      -
      -
      Returns:
      -
      An unmodifiable view of the list of Operation implementation instances managed by this context
      + result in an UnsupportedOperationException.
  • +
    +
    Returns:
    +
    An unmodifiable view of the list of Operation implementation instances managed by this context
    - -
  • -
    -

    size

    -
    public int size()
    -
    -
    Returns:
    -
    The number of Operation instances stored on this BackendContext.
    + + + + +
  • -
  • -
    -

    getFunctionCache

    -
    public FunctionCache getFunctionCache()
    -
    -
    Returns:
    -
    The Function and Tensor cache of this BackendContext
    + + + + +
  • -
  • -
    -

    getFunction

    -
    public Functions getFunction()
    -
    This method returns a Functions instance which wraps pre-instantiated - Function instances which are configured to not track their computational history. + + + + +
      +
    • +

      getFunction

      +
      public Functions getFunction()
      +
      This method returns a Functions instance which wraps pre-instantiated + Function instances which are configured to not track their computational history. This means that no computation graph will be built by these instances. - ( Computation graphs in Neureka are made of instances of the GraphNode class... )
      -
    + ( Computation graphs in Neureka are made of instances of the GraphNode class... )
  • -
  • -
    -

    getAutogradFunction

    -
    public Functions getAutogradFunction()
    -
    This method returns a Functions instance which wraps pre-instantiated - Function instances which are configured to track their computational history. + + + + +
      +
    • +

      getAutogradFunction

      +
      public Functions getAutogradFunction()
      +
      This method returns a Functions instance which wraps pre-instantiated + Function instances which are configured to track their computational history. This means that a computation graph will be built by these instances. - ( Computation graphs in Neureka are made of instances of the GraphNode class... )
      -
      -
      Returns:
      + ( Computation graphs in Neureka are made of instances of the GraphNode class... )
    +
    +
    Returns:
    A container object which exposes various types of functions with autograd support.
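    A rough sketch of the difference between the two containers described above; the add() getter on Functions and the call(..) method on Function are assumptions made purely for illustration, and the tensors a and b are placeholders:

        Functions plain    = context.getFunction();          // functions that do NOT build a computation graph
        Functions autograd = context.getAutogradFunction();   // functions that record GraphNode based history
        Tensor<Double> y1 = plain.add().call( a, b );         // assumed API: no autograd history recorded
        Tensor<Double> y2 = autograd.add().call( a, b );      // assumed API: usable for backpropagation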
    -
  • -
  • -
    -

    addOperation

    -
    public BackendContext addOperation(Operation operation)
    -
    This method registers Operation implementation instances in this BackendContext - which is the thread local execution context receiving and processing Tensor instances...

    -
    -
    Parameters:
    -
    operation - The Operation instance which ought to be registered as part of this execution context.
    -
    Returns:
    + + + + +
      +
    • +

      addOperation

      +
      public BackendContext addOperation(Operation operation)
      +
      This method registers Operation implementation instances in this BackendContext + which is the thread local execution context receiving and processing Tensor instances...

      +
      +
      Parameters:
      +
      operation - The Operation instance which ought to be registered as part of this execution context.
      +
      Returns:
      This very context instance to allow for method chaining.
      -
  • -
  • -
    -

    hasOperation

    -
    public boolean hasOperation(Operation operation)
    -
    -
    Parameters:
    -
    operation - The Operation which may or may not be part of this BackendContext.
    -
    Returns:
    -
    The truth value determining if the provided Operation is part of this BackendContext.
    + + + + +
  • -
  • -
    -

    hasOperation

    -
    public boolean hasOperation(String operationIdentifier)
    -
    -
    Parameters:
    -
    operationIdentifier - The Operation identifier which may be the function name or operator if present.
    -
    Returns:
    -
    The truth value determining if the provided Operation is part of this BackendContext.
    + + + + +
      +
    • +

      hasOperation

      +
      public boolean hasOperation(java.lang.String operationIdentifier)
      +
      +
      Parameters:
      +
      operationIdentifier - The Operation identifier which may be the function name or operator if present.
      +
      Returns:
      +
      The truth value determining if the provided Operation is part of this BackendContext.
      -
  • -
  • -
    -

    getOperation

    -
    public Operation getOperation(int index)
    -
    This method queries the operations in this BackendContext - by a provided index integer targeting an entry in the list of Operation implementation instances + + + + +
      +
    • +

      getOperation

      +
      public Operation getOperation(int index)
      +
      This method queries the operations in this BackendContext + by a provided index integer targeting an entry in the list of Operation implementation instances sitting in this execution context.
      -
      -
      Parameters:
      +
      +
      Parameters:
      index - The index of the operation.
      -
      Returns:
      +
      Returns:
      The found Operation instance or null.
      -
  • -
  • -
    -

    getOperation

    -
    public Operation getOperation(String identifier)
    + + + + +
      +
    • +

      getOperation

      +
      public Operation getOperation(java.lang.String identifier)
      This method queries the operations in this BackendContext by a provided identifier which has to match the name of an existing operation.
      -
      -
      Parameters:
      +
      +
      Parameters:
      identifier - The operation identifier, aka: its name.
      -
      Returns:
      +
      Returns:
      The requested Operation or null.
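      A hedged sketch of registering and querying an operation; createMyCustomOperation() is a hypothetical factory and Neureka.get().backend() is an assumed accessor for the current context:

          BackendContext extended = Neureka.get().backend().clone();          // clone to avoid side effects
          Operation custom = createMyCustomOperation();                        // hypothetical custom Operation
          extended.addOperation( custom );                                     // returns the context for chaining
          boolean present = extended.hasOperation( custom );                   // query by instance
          Operation found = extended.getOperation( custom.getIdentifier() );   // by identifier, may be null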
      -
  • -
  • -
    -

    clone

    -
    public BackendContext clone()
    -
    This method produces a shallow copy of this BackendContext. + + + + +
      +
    • +

      clone

      +
      public BackendContext clone()
      +
      This method produces a shallow copy of this BackendContext. This is useful for debugging, testing and extending contexts during runtime without side effects!
      -
      -
      Overrides:
      -
      clone in class Object
      -
      Returns:
      +
      +
      Overrides:
      +
      clone in class java.lang.Object
      +
      Returns:
      A shallow copy of this operation / execution context.
      -
  • -
  • -
    -

    toString

    -
    public String toString()
    -
    -
    Overrides:
    -
    toString in class Object
    + + + + +
      +
    • +

      toString

      +
      public java.lang.String toString()
      +
      +
      Overrides:
      +
      toString in class java.lang.Object
      -
  • -
  • -
    -

    has

    -
    public <E extends BackendExtension> boolean has(Class<E> extensionClass)
    -
    Checks if this context has an instance of the provided BackendExtension type.
    -
    -
    Type Parameters:
    + + + + +
      +
    • +

      has

      +
      public <E extends BackendExtension> boolean has(java.lang.Class<E> extensionClass)
      +
      Checks if this context has an instance of the provided BackendExtension type.
      +
      +
      Type Parameters:
      E - The type parameter of the provided type class which requires the type to be an extension.
      -
      Parameters:
      +
      Parameters:
      extensionClass - The type class of the extension whose presence should be checked.
      -
      Returns:
      +
      Returns:
      The truth value determining if the provided type is present.
      -
  • -
  • -
    -

    find

    -
    public <E extends BackendExtension> Optional<E> find(Class<E> componentClass)
    -
    Returns an Optional instance of the provided BackendExtension type - or an empty Optional if no extension of that type was found.
    -
    + + + + +
      +
    • +

      find

      +
      public <E extends BackendExtension> java.util.Optional<E> find(java.lang.Class<E> componentClass)
      +
      Returns an Optional instance of the provided BackendExtension type + or an empty Optional if no extension of that type was found.
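      For example, a hedged sketch of querying an extension; CLBackend is assumed here to be the OpenCL BackendExtension registered on the context, and 'context' stands for some BackendContext:

          if ( context.has( CLBackend.class ) ) {
              java.util.Optional<CLBackend> ocl = context.find( CLBackend.class );
              ocl.ifPresent( extension -> {
                  // configure or inspect the OpenCL extension here
              });
          }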
    • -
    • -
      -

      getExtensions

      -
      public List<BackendExtension> getExtensions()
      -
      -
      Returns:
      -
      A list of all BackendExtension instances.
      +
    + + + + + + + + -
  • - + + + + - + + + + diff --git a/docs/jdocs/neureka/backend/api/BackendExtension.DeviceOption.html b/docs/jdocs/neureka/backend/api/BackendExtension.DeviceOption.html index b8e96c606..5b954d40f 100644 --- a/docs/jdocs/neureka/backend/api/BackendExtension.DeviceOption.html +++ b/docs/jdocs/neureka/backend/api/BackendExtension.DeviceOption.html @@ -1,184 +1,298 @@ - + + - -BackendExtension.DeviceOption (neureka 1.0.0 API) - - - - + +BackendExtension.DeviceOption (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class BackendExtension.DeviceOption

    +
    neureka.backend.api
    +

    Class BackendExtension.DeviceOption

    -
    java.lang.Object -
    neureka.backend.api.BackendExtension.DeviceOption
    -
    -
    -
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.backend.api.BackendExtension.DeviceOption
      • +
      +
    • +
    +
    +
    -
    -
      - -
    • -
      -

      Constructor Summary

      -
      Constructors
      -
      -
      Constructor
      -
      Description
      -
      DeviceOption(Device<?> device, - double confidence)
      -
       
      +
      +
      public static class BackendExtension.DeviceOption
      +extends java.lang.Object
      +
      This class describes an available Device implementation found for a given BackendExtension. + It exists because a typical BackendExtension will most likely also have a + custom Device implementation exposing a specific API for executing tensors on it...
      +
    • +
    - +
    + - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        DeviceOption

        -
        public DeviceOption(Device<?> device, - double confidence)
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            DeviceOption

            +
            public DeviceOption(Device<?> device,
            +                    double confidence)
          -
    • +
    -
  • -
    -

    Method Details

    -
      -
    • -
      -

      device

      -
      public Device<?> device()
      -
      -
      Returns:
      +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          device

          +
          public Device<?> device()
          +
          +
          Returns:
          The device which fits a given key word best.
          -
    • -
    • -
      -

      confidence

      -
      public double confidence()
      -
      -
      Returns:
      +
    + + + +
      +
    • +

      confidence

      +
      public double confidence()
      +
      +
      Returns:
      The confidence level determining how well a given search key matches the wrapped device.
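      A hedged sketch of how a caller might consume such an option; the extension reference and the "gpu" search key are assumptions:

          BackendExtension.DeviceOption option = extension.find( "gpu" );
          Device<?> chosen = ( option != null && option.confidence() > 0.5 )
                                 ? option.device()  // the best match for the key
                                 : null;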
      -
  • - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/api/BackendExtension.html b/docs/jdocs/neureka/backend/api/BackendExtension.html index 77215cb8e..d66680a83 100644 --- a/docs/jdocs/neureka/backend/api/BackendExtension.html +++ b/docs/jdocs/neureka/backend/api/BackendExtension.html @@ -1,209 +1,327 @@ - + + - -BackendExtension (neureka 1.0.0 API) - - - - + +BackendExtension (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Interface BackendExtension

    +
    neureka.backend.api
    +

    Interface BackendExtension

    -
    -
    +
    +
    +
    -
    -
    -
    -

    Nested classes/interfaces inherited from interface neureka.common.composition.Component

    -Component.IsBeing, Component.OwnerChangeRequest<O>
    - +
    + - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Method Details

      -
        -
      • -
        -

        find

        - -
        The BackendContext does not handle Device instances directly. - Instead, the task of instantiating and exposing Device implementations - should be carried by BackendExtension implementations. +
          +
        • + + +

          Method Detail

          + + + +
            +
          • +

            find

            +
            BackendExtension.DeviceOption find(java.lang.String searchKey)
            +
            The BackendContext does not handle Device instances directly. - Instead, the task of instantiating and exposing Device implementations - should be carried out by BackendExtension implementations. One extension might implement CUDA operations, - therefore, the extension should also deal with some sort of CUDADevice implementation.
            -
            -
            Parameters:
            -
            searchKey - The search key used to find a suitable Device implementation in this extension.
            -
            Returns:
            -
            A suitable BackendExtension.DeviceOption or null if nothing was found.
            + therefore, the extension should also deal with some sort of CUDADevice implementation.
        +
        +
        Parameters:
        +
        searchKey - The search key used to find a suitable Device implementation in this extension.
        +
        Returns:
        +
        A suitable BackendExtension.DeviceOption or null if nothing was found.
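        On the implementation side, a minimal hedged sketch of what a find(String) override might look like; the myDevice field and the keyword matching are purely illustrative assumptions:

            @Override
            public BackendExtension.DeviceOption find( String searchKey ) {
                double confidence = searchKey.toLowerCase().contains( "gpu" ) ? 0.9 : 0.1;
                return new BackendExtension.DeviceOption( myDevice, confidence ); // myDevice: the extension's own Device
            }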
        -
      • -
      • -
        -

        reset

        -
        default void reset()
        -
        This will indirectly be called through the Neureka.reset() method, +
      + + + +
        +
      • +

        reset

        +
        default void reset()
        +
        This will indirectly be called through the Neureka.reset() method, which is responsible for resetting the library settings.
        -
    • -
    • -
      -

      dispose

      -
      void dispose()
      +
    + + + +
      +
    • +

      dispose

      +
      void dispose()
      Tells this extension to dispose itself. - One should not use a BackendExtension after it was disposed!
      - + One should not use a BackendExtension after it was disposed!
    + + + + + + - - +
    + - + + + + diff --git a/docs/jdocs/neureka/backend/api/Call.Builder.html b/docs/jdocs/neureka/backend/api/Call.Builder.html index 2fde64b67..47cdcbd5e 100644 --- a/docs/jdocs/neureka/backend/api/Call.Builder.html +++ b/docs/jdocs/neureka/backend/api/Call.Builder.html @@ -1,163 +1,278 @@ - + + - -Call.Builder (neureka 1.0.0 API) - - - - + +Call.Builder (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class Call.Builder<V,T extends Device<V>>

    +
    neureka.backend.api
    +

    Class Call.Builder<V,T extends Device<V>>

    -
    java.lang.Object -
    neureka.backend.api.Call.Builder<V,T>
    -
    -
    -
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.backend.api.Call.Builder<V,T>
      • +
      +
    • +
    +
    +
      +
    • +
      Enclosing class:
      -
      Call<D>
      +
      Call<D>

      -
      public static class Call.Builder<V,T extends Device<V>> -extends Object
      -
    -
    -
    -
    -

    Methods inherited from class java.lang.Object

    -clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
    - +
    + - -
    -
      +
    +
    + - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/api/Call.DeviceCondition.html b/docs/jdocs/neureka/backend/api/Call.DeviceCondition.html index 6bea9c787..bf794dc7b 100644 --- a/docs/jdocs/neureka/backend/api/Call.DeviceCondition.html +++ b/docs/jdocs/neureka/backend/api/Call.DeviceCondition.html @@ -1,128 +1,222 @@ - + + - -Call.DeviceCondition (neureka 1.0.0 API) - - - - + +Call.DeviceCondition (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Interface Call.DeviceCondition

    +
    neureka.backend.api
    +

    Interface Call.DeviceCondition

    -
    -
    +
    +
    +
      +
    • +
      Enclosing class:
      -
      Call<D>
      +
      Call<D>

      -
      public static interface Call.DeviceCondition
      -
    -
    -
      - -
    • -
      -

      Method Summary

      -
      -
      -
      -
      -
      Modifier and Type
      -
      Method
      -
      Description
      -
      boolean
      -
      check(Device<?> device)
      -
       
      -
      -
      +
      +
      public static interface Call.DeviceCondition
      +
    • +
    - +
    + - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Method Details

      -
        -
      • -
        -

        check

        -
        boolean check(Device<?> device)
        -
        +
          +
        • + + +

          Method Detail

          + + + +
            +
          • +

            check

            +
            boolean check(Device<?> device)
            +
          • +
        -
    - +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/api/Call.Else.html b/docs/jdocs/neureka/backend/api/Call.Else.html index 00683a9ce..de1c28a64 100644 --- a/docs/jdocs/neureka/backend/api/Call.Else.html +++ b/docs/jdocs/neureka/backend/api/Call.Else.html @@ -1,128 +1,224 @@ - + + - -Call.Else (neureka 1.0.0 API) - - - - + +Call.Else (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Interface Call.Else<T>

    +
    neureka.backend.api
    +

    Interface Call.Else<T>

    -
    -
    +
    +
    +
      +
    • +
      Enclosing class:
      -
      Call<D>
      +
      Call<D>

      -
      public static interface Call.Else<T>
      -
    -
    -
      - -
    • -
      -

      Method Summary

      -
      -
      -
      -
      -
      Modifier and Type
      -
      Method
      -
      Description
      - -
      orElse(T value)
      -
       
      -
      -
      +
      +
      public static interface Call.Else<T>
      +
    • +
    - +
    + - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Method Details

      -
        -
      • -
        -

        orElse

        -
        T orElse(T value)
        -
        + -
    - +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/api/Call.OperationCondition.html b/docs/jdocs/neureka/backend/api/Call.OperationCondition.html index 1902fa85e..4b4f6ac7b 100644 --- a/docs/jdocs/neureka/backend/api/Call.OperationCondition.html +++ b/docs/jdocs/neureka/backend/api/Call.OperationCondition.html @@ -1,128 +1,222 @@ - + + - -Call.OperationCondition (neureka 1.0.0 API) - - - - + +Call.OperationCondition (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Interface Call.OperationCondition

    +
    neureka.backend.api
    +

    Interface Call.OperationCondition

    -
    -
    +
    +
    +
      +
    • +
      Enclosing class:
      -
      Call<D>
      +
      Call<D>

      -
      public static interface Call.OperationCondition
      -
    -
    -
      - -
    • -
      -

      Method Summary

      -
      -
      -
      -
      -
      Modifier and Type
      -
      Method
      -
      Description
      -
      boolean
      - -
       
      -
      -
      +
      +
      public static interface Call.OperationCondition
      +
    • +
    - +
    + - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Method Details

      -
    - +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/api/Call.TensorCompare.html b/docs/jdocs/neureka/backend/api/Call.TensorCompare.html index 4bbf7791f..1887c02f6 100644 --- a/docs/jdocs/neureka/backend/api/Call.TensorCompare.html +++ b/docs/jdocs/neureka/backend/api/Call.TensorCompare.html @@ -1,130 +1,224 @@ - + + - -Call.TensorCompare (neureka 1.0.0 API) - - - - + +Call.TensorCompare (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Interface Call.TensorCompare

    +
    neureka.backend.api
    +

    Interface Call.TensorCompare

    -
    -
    +
    +
    +
      +
    • +
      Enclosing class:
      -
      Call<D>
      +
      Call<D>

      -
      public static interface Call.TensorCompare
      -
    -
    -
      - -
    • -
      -

      Method Summary

      -
      -
      -
      -
      -
      Modifier and Type
      -
      Method
      -
      Description
      -
      boolean
      -
      check(Tensor<?> first, - Tensor<?> second)
      -
       
      -
      -
      +
      +
      public static interface Call.TensorCompare
      +
    • +
    - +
    + - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Method Details

      -
        -
      • -
        -

        check

        -
        boolean check(Tensor<?> first, - Tensor<?> second)
        -
        + -
    - +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/api/Call.TensorCondition.html b/docs/jdocs/neureka/backend/api/Call.TensorCondition.html index d49f5dd6c..61342cef0 100644 --- a/docs/jdocs/neureka/backend/api/Call.TensorCondition.html +++ b/docs/jdocs/neureka/backend/api/Call.TensorCondition.html @@ -1,128 +1,222 @@ - + + - -Call.TensorCondition (neureka 1.0.0 API) - - - - + +Call.TensorCondition (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Interface Call.TensorCondition

    +
    neureka.backend.api
    +

    Interface Call.TensorCondition

    -
    -
    +
    +
    +
      +
    • +
      Enclosing class:
      -
      Call<D>
      +
      Call<D>

      -
      public static interface Call.TensorCondition
      -
    -
    -
      - -
    • -
      -

      Method Summary

      -
      -
      -
      -
      -
      Modifier and Type
      -
      Method
      -
      Description
      -
      boolean
      -
      check(Tensor<?> tensor)
      -
       
      -
      -
      +
      +
      public static interface Call.TensorCondition
      +
    • +
    - +
    + - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Method Details

      -
        -
      • -
        -

        check

        -
        boolean check(Tensor<?> tensor)
        -
        +
          +
        • + + +

          Method Detail

          + + + +
            +
          • +

            check

            +
            boolean check(Tensor<?> tensor)
            +
          • +
        -
    - +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/api/Call.TensorProperty.html b/docs/jdocs/neureka/backend/api/Call.TensorProperty.html index 4123ec386..dd0d62795 100644 --- a/docs/jdocs/neureka/backend/api/Call.TensorProperty.html +++ b/docs/jdocs/neureka/backend/api/Call.TensorProperty.html @@ -1,128 +1,222 @@ - + + - -Call.TensorProperty (neureka 1.0.0 API) - - - - + +Call.TensorProperty (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Interface Call.TensorProperty

    +
    neureka.backend.api
    +

    Interface Call.TensorProperty

    -
    -
    +
    +
    +
      +
    • +
      Enclosing class:
      -
      Call<D>
      +
      Call<D>

      -
      public static interface Call.TensorProperty
      -
    -
    -
      - -
    • -
      -

      Method Summary

      -
      -
      -
      -
      -
      Modifier and Type
      -
      Method
      -
      Description
      - -
      propertyOf(Tensor<?> tensor)
      -
       
      -
      -
      +
      +
      public static interface Call.TensorProperty
      +
    • +
    - +
    + - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Method Details

      -
        -
      • -
        -

        propertyOf

        -
        Object propertyOf(Tensor<?> tensor)
        -
        +
          +
        • + + +

          Method Detail

          + + + +
            +
          • +

            propertyOf

            +
            java.lang.Object propertyOf(Tensor<?> tensor)
            +
          • +
        -
    - +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/api/Call.TensorsCondition.html b/docs/jdocs/neureka/backend/api/Call.TensorsCondition.html index 0640b7203..c98b222b1 100644 --- a/docs/jdocs/neureka/backend/api/Call.TensorsCondition.html +++ b/docs/jdocs/neureka/backend/api/Call.TensorsCondition.html @@ -1,128 +1,222 @@ - + + - -Call.TensorsCondition (neureka 1.0.0 API) - - - - + +Call.TensorsCondition (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Interface Call.TensorsCondition

    +
    neureka.backend.api
    +

    Interface Call.TensorsCondition

    -
    -
    +
    +
    +
      +
    • +
      Enclosing class:
      -
      Call<D>
      +
      Call<D>

      -
      public static interface Call.TensorsCondition
      -
    -
    -
      - -
    • -
      -

      Method Summary

      -
      -
      -
      -
      -
      Modifier and Type
      -
      Method
      -
      Description
      -
      boolean
      -
      check(Tensor<?>[] tensors)
      -
       
      -
      -
      +
      +
      public static interface Call.TensorsCondition
      +
    • +
    - +
    + - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Method Details

      -
        -
      • -
        -

        check

        -
        boolean check(Tensor<?>[] tensors)
        -
        +
          +
        • + + +

          Method Detail

          + + + +
            +
          • +

            check

            +
            boolean check(Tensor<?>[] tensors)
            +
          • +
        -
    - +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/api/Call.Validator.Estimator.html b/docs/jdocs/neureka/backend/api/Call.Validator.Estimator.html index 7efe29cd6..88e49a672 100644 --- a/docs/jdocs/neureka/backend/api/Call.Validator.Estimator.html +++ b/docs/jdocs/neureka/backend/api/Call.Validator.Estimator.html @@ -1,234 +1,376 @@ - + + - -Call.Validator.Estimator (neureka 1.0.0 API) - - - - + +Call.Validator.Estimator (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class Call.Validator.Estimator

    +
    neureka.backend.api
    +

    Class Call.Validator.Estimator

    -
    java.lang.Object -
    neureka.backend.api.Call.Validator.Estimator
    -
    -
    -
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.backend.api.Call.Validator.Estimator
      • +
      +
    • +
    +
    +
    -
    -
      - -
    • -
      -

      Constructor Summary

      -
      Constructors
      -
      -
      Constructor
      -
      Description
      -
      Estimator(boolean isValid)
      -
       
      +
      +
      public class Call.Validator.Estimator
      +extends java.lang.Object
      +
    • +
    - +
    +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        Estimator

        -
        public Estimator(boolean isValid)
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            Estimator

            +
            public Estimator(boolean isValid)
          -
    • +
    -
  • -
    -

    Method Details

    -
  • - +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/api/Call.Validator.html b/docs/jdocs/neureka/backend/api/Call.Validator.html index 696e22c91..8a9162d4d 100644 --- a/docs/jdocs/neureka/backend/api/Call.Validator.html +++ b/docs/jdocs/neureka/backend/api/Call.Validator.html @@ -1,319 +1,491 @@ - + + - -Call.Validator (neureka 1.0.0 API) - - - - + +Call.Validator (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class Call.Validator

    -
    -
    java.lang.Object -
    neureka.backend.api.Call.Validator
    +
    neureka.backend.api
    +

    Class Call.Validator

    -
    -
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.backend.api.Call.Validator
      • +
      +
    • +
    +
    +
      +
    • +
      Enclosing class:
      -
      Call<D>
      +
      Call<D>

      -
      public class Call.Validator -extends Object
      +
      +
      public class Call.Validator
      +extends java.lang.Object
      This is a simple nested class offering various lambda-based methods - for validating the tensor arguments stored inside this ExecutionCall. It is a useful tool for readable as well as concise validation of a given request for execution, and it is primarily used inside implementations of the middle - layer of the backend-API architecture (SuitabilityPredicate.isSuitableFor(ExecutionCall)).
      -
    -
    -
    - +
    +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        Validator

        -
        public Validator()
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            Validator

            +
            public Validator()
          -
    • +
    -
  • -
    -

    Method Details

    -
      -
    • -
      -

      isValid

      -
      public boolean isValid()
      -
      +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          isValid

          +
          public boolean isValid()
        • -
        • -
          -

          ifValid

          -
          public <T> Call.Else<T> ifValid(T value)
          -
          +
        + + + + + +
          +
        • +

          ifValid

          +
          public <T> Call.Else<T> ifValid(T value)
        • -
        • -
          -

          basicSuitability

          -
          public float basicSuitability()
          +
        + + + +
          +
        • +

          basicSuitability

          +
          public float basicSuitability()
          The validity as a float, being >0/true or 0/false. - If the Call is valid then a suitability estimation of 0.9f + If the Call is valid then a suitability estimation of 0.9f will be returned, simply because a suitability of 1 would mean that no other algorithm could ever compete with this one even if it were faster or simply better suited!
          -
          -
          Returns:
          +
          +
          Returns:
          The current validity of this Validator as float value.
          -
  • -
  • -
    -

    suitabilityIfValid

    -
    public float suitabilityIfValid(float estimationIfValid)
    -
    + + + + +
      +
    • +

      suitabilityIfValid

      +
      public float suitabilityIfValid(float estimationIfValid)
    • -
    • -
      -

      getEstimator

      -
      public Call<D>.Validator.Estimator getEstimator()
      -
      +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
      +
    • +

      allShare

      +
      public <T> Call.Validator allShare(java.util.function.Function<Tensor<?>,T> propertyProvider)
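      A hedged sketch of how these validator methods might be combined inside a suitability check; only methods listed in this documentation are used, Tensor::getDevice is an assumption about the Tensor API, and the "all inputs share one device" semantics of allShare is inferred from its name:

          float suitability = call.validate()                    // 'call' is some ExecutionCall received by the algorithm
                                  .allShare( Tensor::getDevice ) // presumably: all inputs must share the same device
                                  .basicSuitability();           // 0.9f if still valid, 0f otherwise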
    -
  • - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/api/Call.html b/docs/jdocs/neureka/backend/api/Call.html index f7d27e517..5afad2e1e 100644 --- a/docs/jdocs/neureka/backend/api/Call.html +++ b/docs/jdocs/neureka/backend/api/Call.html @@ -1,415 +1,597 @@ - + + - -Call (neureka 1.0.0 API) - - - - + +Call (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class Call<D>

    -
    -
    java.lang.Object -
    neureka.backend.api.Call<D>
    +
    neureka.backend.api
    +

    Class Call<D>

    -
    -
    -
    Type Parameters:
    -
    D - The type parameter which defines the Device targeted by this Call.
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.backend.api.Call<D>
      • +
      +
    • +
    +
    +
      +
    • +
      +
      Type Parameters:
      +
      D - The type parameter which defines the Device targeted by this Call.
      -
      +
      Direct Known Subclasses:
      -
      ExecutionCall
      +
      ExecutionCall

      -
      public class Call<D> -extends Object
      +
      +
      public class Call<D>
      +extends java.lang.Object
      Instances of this class model simple execution calls to the backend. - They can be passed to Function instances in order to get full - control over the execution via the use of call Args. - This class is the precursor class of ExecutionCall which is a more complete + They can be passed to Function instances in order to get full + control over the execution via the use of call Args. + This class is the precursor class of ExecutionCall which is a more complete execution state bundle used inside the backend.
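      As a small hedged sketch of how the read accessors documented below might be used once a Call has reached a backend implementation (the variable call stands for some received Call<?> instance):

          Device<?> target = call.getDevice();           // device targeted for execution
          int argCount     = call.arity();               // number of input tensors
          Tensor<?> first  = call.input( 0 );            // usually, but not necessarily, the output tensor
          int derivIdx     = call.getDerivativeIndex();  // meta argument: index for partial derivatives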
      -
    -
    -
      + +
    +
    +
    + - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Field Details

      -
        -
      • -
        -

        _device

        -
        protected final D _device
        +
          +
        • + + +

          Field Detail

          + + + +
            +
          • +

            _device

            +
            protected final D _device
            This field references the device on which this ExecutionCall should be executed.
            -
      • -
      • -
        -

        _arguments

        -
        protected final Args _arguments
        +
      + + + +
        +
      • +

        _arguments

        +
        protected final Args _arguments
        Meta arguments which are usually specific to certain operations.
        -
    • -
    • -
      -

      _inputs

      -
      protected final Tensor<?>[] _inputs
      +
    + + + +
      +
    • +

      _inputs

      +
      protected final Tensor<?>[] _inputs
      The tensor arguments from which an operation will either read or to which it will write.
      The first entry of this array is usually containing the output tensor, however this is not a necessity. Some operation algorithms might use multiple argument entries as output tensors.
      -
    - + -
  • -
    -

    Constructor Details

    -
      -
    • -
      -

      Call

      -
      protected Call(Tensor<?>[] tensors, - D device, - List<Arg> arguments)
      -
      +
        +
      • + + +

        Constructor Detail

        + + + + + +
          +
        • +

          Call

          +
          protected Call(Tensor<?>[] tensors,
          +               D device,
          +               java.util.List<Arg> arguments)
        -
  • + -
  • -
    -

    Method Details

    -
      -
    • -
      -

      to

      -
      public static <V, -T extends Device<V>> -Call.Builder<V,T> to(T device)
      -
      +
        +
      • + + +

        Method Detail

        + + + + + +
          +
        • +

          to

          +
          public static <V,T extends Device<V>> Call.Builder<V,T> to(T device)
        • -
        • -
          -

          getDevice

          -
          public D getDevice()
          -
          -
          Returns:
          +
        + + + +
          +
        • +

          getDevice

          +
          public D getDevice()
          +
          +
          Returns:
          The device targeted by this call for execution.
          -
  • -
  • -
    -

    inputs

    -
    public Tensor<?>[] inputs()
    -
    -
    Returns:
    -
    The Tensor parameters of this Call for execution.
    + + + + +
      +
    • +

      inputs

      +
      public Tensor<?>[] inputs()
      +
      +
      Returns:
      +
      The Tensor parameters of this Call for execution.
      -
  • -
  • -
    -

    arity

    -
    public int arity()
    -
    -
    Returns:
    + + + + +
      +
    • +

      arity

      +
      public int arity()
      +
      +
      Returns:
      The number of input tensors.
      -
  • -
  • -
    -

    input

    -
    public Tensor<?> input(int i)
    -
    -
    Parameters:
    + + + + +
      +
    • +

      input

      +
      public Tensor<?> input(int i)
      +
      +
      Parameters:
      i - The index of the tensor argument which should be returned.
      -
      Returns:
      -
      The i'th Tensor parameter of this Call for execution.
      +
      Returns:
      +
      The i'th Tensor parameter of this Call for execution.
      -
  • -
  • -
    -

    rearrangeInputs

    -
    public void rearrangeInputs(int... indices)
    -
    + + + + +
      +
    • +

      rearrangeInputs

      +
      public void rearrangeInputs(int... indices)
    • -
    • -
      -

      getDeviceFor

      -
      public <T> Device<T> getDeviceFor(Class<T> supportCheck)
      -
      +
    + + + +
      +
    • +

      getDeviceFor

      +
      public <T> Device<T> getDeviceFor(java.lang.Class<T> supportCheck)
    • -
    • -
      -

      allMetaArgs

      -
      public List<Arg> allMetaArgs()
      -
      +
    + + + +
      +
    • +

      allMetaArgs

      +
      public java.util.List<Arg> allMetaArgs()
    • -
    • -
      -

      get

      -
      public <V, -T extends Arg<V>> T get(Class<T> argumentClass)
      -
      +
    + + + +
      +
    • +

      get

      +
      public <V,T extends Arg<V>> T get(java.lang.Class<T> argumentClass)
    • -
    • -
      -

      getValOf

      -
      public <V, -T extends Arg<V>> V getValOf(Class<T> argumentClass)
      -
      +
    + + + +
      +
    • +

      getValOf

      +
      public <V,T extends Arg<V>> V getValOf(java.lang.Class<T> argumentClass)
    • -
    • -
      -

      getDerivativeIndex

      -
      public int getDerivativeIndex()
      -
      +
    + + + +
      +
    • +

      getDerivativeIndex

      +
      public int getDerivativeIndex()
    • -
    • -
      -

      input

      -
      public <V> Tensor<V> input(Class<V> valueTypeClass, - int i)
      -
      +
    + + + +
      +
    • +

      input

      +
      public <V> Tensor<V> input(java.lang.Class<V> valueTypeClass,
      +                           int i)
    • -
    • -
      -

      validate

      -
      public Call<D>.Validator validate()
      -
      +
    + + + + -
  • - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/api/DeviceAlgorithm.html b/docs/jdocs/neureka/backend/api/DeviceAlgorithm.html index 4f4f197b7..7af9dd6dd 100644 --- a/docs/jdocs/neureka/backend/api/DeviceAlgorithm.html +++ b/docs/jdocs/neureka/backend/api/DeviceAlgorithm.html @@ -1,258 +1,395 @@ - + + - -DeviceAlgorithm (neureka 1.0.0 API) - - - - + +DeviceAlgorithm (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Interface DeviceAlgorithm<C extends DeviceAlgorithm<C>>

    +
    neureka.backend.api
    +

    Interface DeviceAlgorithm<C extends DeviceAlgorithm<C>>

    -
    -
    -
    Type Parameters:
    +
    -
    - +
    +
    +
    +
    +
      +
    • -
    • -
      -

      Method Details

      - + + + + + +
        +
      • +

        setImplementationFor

        +
        <D extends Device<?>,I extends ImplementationFor<D>> C setImplementationFor(java.lang.Class<D> deviceClass,
        +                                                                            I implementation)
        +
        Implementations of the DeviceAlgorithm interface ought to express a compositional design pattern.
        This means that concrete implementations of an algorithm for a device do not extend an Algorithm; they are components of it instead.
        These components can be stored on an Algorithm by passing a Device class as key and an ImplementationFor instance as value.
        -
        -
        Type Parameters:
        -
        D - The type parameter of the Device type for which - an implementation should be set in this Device.
        -
        I - The type of the ImplementationFor the provided Device type.
        -
        Parameters:
        -
        deviceClass - The class of the Device for which an implementation should be set.
        -
        implementation - The ImplementationFor the provided Device type.
        -
        Returns:
        -
        This very Algorithm instance to allow for method chaining.
        +
        +
        Type Parameters:
        +
        D - The type parameter of the Device type for which + an implementation should be set in this Device.
        +
        I - The type of the ImplementationFor the provided Device type.
        +
        Parameters:
        +
        deviceClass - The class of the Device for which an implementation should be set.
        +
        implementation - The ImplementationFor the provided Device type.
        +
        Returns:
        +
        This very Algorithm instance to allow for method chaining.
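        A hedged sketch of the compositional pattern described above; myAlgorithm stands for some DeviceAlgorithm, and the two ImplementationFor instances as well as the concrete device classes (CPU, OpenCLDevice) are assumptions:

            myAlgorithm
                .setImplementationFor( CPU.class,          myCpuImplementation ) // component handling the CPU device
                .setImplementationFor( OpenCLDevice.class, myClImplementation  ); // component handling the OpenCL device

            ImplementationFor<CPU> chosen = myAlgorithm.getImplementationFor( CPU.class );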
        -
    • -
    • -
      -

      getImplementationFor

      -
      <D extends Device<?>> ImplementationFor<D> getImplementationFor(Class<D> deviceClass)
      -
      An ImplementationFor a specific Device can be accessed by passing the class of - the Device for which an implementation should be returned. - An Algorithm instance ought to contain a collection of these Device specific +
    + + + +
      +
    • +

      getImplementationFor

      +
      <D extends Device<?>> ImplementationFor<D> getImplementationFor(java.lang.Class<D> deviceClass)
      +
      An ImplementationFor a specific Device can be accessed by passing the class of + the Device for which an implementation should be returned. + An Algorithm instance ought to contain a collection of these Device specific implementations...
      -
      -
      Type Parameters:
      +
      +
      Type Parameters:
      D - The type parameter which has to be a class extending the Device interface.
      -
      Parameters:
      +
      Parameters:
      deviceClass - The class of the device for which the stored algorithm implementation should be returned.
      -
      Returns:
      +
      Returns:
      The implementation for the passed device type class.
      -
    • -
    • -
      -

      getImplementationFor

      -
      default <D extends Device<?>> ImplementationFor<D> getImplementationFor(D device)
      -
      An ImplementationFor a specific Device can be accessed by passing - the Device for which an implementation should be returned. - An Algorithm instance ought to contain a collection of these Device specific +
    + + + + + +
      +
    • +

      getImplementationFor

      +
      default <D extends Device<?>> ImplementationFor<D> getImplementationFor(D device)
      +
      An ImplementationFor a specific Device can be accessed by passing + the Device for which an implementation should be returned. + An Algorithm instance ought to contain a collection of these Device specific implementations...
      -
      -
      Type Parameters:
      +
      +
      Type Parameters:
      D - type parameter which has to be a class extending the Device interface.
      -
      Parameters:
      +
      Parameters:
      device - The device for which the stored algorithm implementation should be returned.
      -
      Returns:
      +
      Returns:
      The implementation for the passed device type class.
      -
    • -
    • -
      -

      hasImplementationFor

      -
      default <D extends Device<?>> boolean hasImplementationFor(D device)
      -
      +
    + + + + + +
      +
    • +

      hasImplementationFor

      +
      default <D extends Device<?>> boolean hasImplementationFor(D device)
    - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/api/ExecutionCall.Builder.html b/docs/jdocs/neureka/backend/api/ExecutionCall.Builder.html index 536470bf1..228ee06bb 100644 --- a/docs/jdocs/neureka/backend/api/ExecutionCall.Builder.html +++ b/docs/jdocs/neureka/backend/api/ExecutionCall.Builder.html @@ -1,166 +1,283 @@ - + + - -ExecutionCall.Builder (neureka 1.0.0 API) - - - - + +ExecutionCall.Builder (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class ExecutionCall.Builder<D extends Device<?>>

    +
    neureka.backend.api
    +

    Class ExecutionCall.Builder<D extends Device<?>>

    -
    java.lang.Object -
    neureka.backend.api.ExecutionCall.Builder<D>
    -
    -
    -
    -
    Type Parameters:
    -
    D - The type parameter for the device targeted by the ExecutionCall built by this builder.
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.backend.api.ExecutionCall.Builder<D>
      • +
      +
    • +
    +
    +
    -
    -
    -
    -

    Methods inherited from class java.lang.Object

    -clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
    - +
    + - -
    -
      +
    +
    + - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/api/ExecutionCall.html b/docs/jdocs/neureka/backend/api/ExecutionCall.html index 9d0c388b7..9685e0d85 100644 --- a/docs/jdocs/neureka/backend/api/ExecutionCall.html +++ b/docs/jdocs/neureka/backend/api/ExecutionCall.html @@ -1,92 +1,126 @@ - + + - -ExecutionCall (neureka 1.0.0 API) - - - - + +ExecutionCall (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class ExecutionCall<D extends Device<?>>

    -
    -
    java.lang.Object -
    neureka.backend.api.Call<D> -
    neureka.backend.api.ExecutionCall<D>
    -
    +
    neureka.backend.api
    +

    Class ExecutionCall<D extends Device<?>>

    -
    -
    -
    Type Parameters:
    +
    + +
    +
      +
    • +
      +
      Type Parameters:
      D - The Device implementation targeted by an instance of this ExecutionCall!

      -
      public class ExecutionCall<D extends Device<?>> -extends Call<D>
      +
      +
      public class ExecutionCall<D extends Device<?>>
      +extends Call<D>
      This class is a simple container holding references to a targeted - Device, Operation and maybe some case specific - meta Args needed to execute + Device, Operation and maybe some case specific + meta Args needed to execute an array of input tensors which are also wrapped by this.

      This class is technically immutable, however the contents @@ -94,253 +128,386 @@

      Class ExecutionCall<D extends < The meta arguments wrapped by this are responsible for storing operation specific variables like for example an input index for calculating a partial derivative. Certain operations might require other unique types of arguments...

      -
    -
    -
      - -
    • -
      -

      Nested Class Summary

      -
      Nested Classes
      -
      -
      Modifier and Type
      -
      Class
      -
      Description
      -
      static class 
      - -
       
      +
    • +
    - - +
    +
    +
    +
      +
    • -
    • -
      -

      Method Details

      - + + + +
        +
      • +

        checkArity

        +
        public void checkArity()
      • -
      • -
        -

        getOperation

        -
        public Operation getOperation()
        +
      + + + +
        +
      • +

        getOperation

        +
        public Operation getOperation()
        This returns the operation which will ultimately process this execution call. It contains multiple algorithms and device specific implementations, one of which might be applicable to this call...

        -
        -
        Returns:
        -
        The Operation targeted by this call for execution.
        +
        +
        Returns:
        +
        The Operation targeted by this call for execution.
        -
    • -
    • -
      -

      withInputs

      -
      public ExecutionCall<D> withInputs(Tensor<?>... inputs)
      +
    + + + +
      +
    • +

      withInputs

      +
      public ExecutionCall<D> withInputs(Tensor<?>... inputs)
      Use this to produce a clone with a new array of input tensors.
      -
      -
      Parameters:
      -
      inputs - The new array of input tensors for the new ExecutionCall returned by this.
      -
      Returns:
      -
      A new ExecutionCall instance with the provided array of input tensors.
      +
      +
      Parameters:
      +
      inputs - The new array of input tensors for the new ExecutionCall returned by this.
      +
      Returns:
      +
      A new ExecutionCall instance with the provided array of input tensors.
      -
    • -
    • -
      -

      withAddedInputAt

      -
      public ExecutionCall<D> withAddedInputAt(int index, - Tensor<?> added)
      -
      +
    + + + + + + + +
      +
    • +

      withInputAt

      +
      public ExecutionCall<D> withInputAt(int index,
      +                                    Tensor<?> replacement)
    • -
    • -
      -

      withRemovedInputAt

      -
      public ExecutionCall<D> withRemovedInputAt(int index)
      -
      +
    + + + + + + + + + + + +
      +
    • +

      withArgs

      +
      public ExecutionCall<D> withArgs(Arg<?>... args)
      Use this to produce a clone with a new set of meta arguments.
      -
      -
      Parameters:
      -
      args - The new set of meta args for the new ExecutionCall returned by this.
      -
      Returns:
      -
      A new ExecutionCall instance with the provided set of meta arguments.
      +
      +
      Parameters:
      +
      args - The new set of meta args for the new ExecutionCall returned by this.
      +
      Returns:
      +
      A new ExecutionCall instance with the provided set of meta arguments.
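      A hedged sketch of the with* copy methods described above: each call produces a new ExecutionCall while the original stays untouched. Arg.DerivIdx.of(0) is an assumption about the Arg API, and originalCall, out, a and b are placeholders:

          ExecutionCall<?> modified = originalCall
                                          .withInputs( out, a, b )           // replace the input array
                                          .withArgs( Arg.DerivIdx.of( 0 ) ); // replace the meta arguments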
      -
    • -
    • -
      -

      getAlgorithm

      -
      public Algorithm getAlgorithm()
      -
      An ExecutionCall will either already have a targeted Algorithm defined - at instantiation or otherwise it will query the associated Operation - for an Algorithm best suitable for the state of this ExecutionCall. +
    + + + +
      +
    • +

      getAlgorithm

      +
      public Algorithm getAlgorithm()
      +
      An ExecutionCall will either already have a targeted Algorithm defined + at instantiation or otherwise it will query the associated Operation + for an Algorithm best suited to the state of this ExecutionCall. Generally speaking, this method should only very rarely return null; however, if it does, then this most definitely means that there is no backend support for this call for execution...
      -
      -
      Returns:
      -
      The Algorithm suitable for this ExecutionCall.
      +
      +
      Returns:
      +
      The Algorithm suitable for this ExecutionCall.
      -
    • -
    • -
      -

      autogradMode

      -
      public AutoDiffMode autogradMode()
      -
      This method queries the underlying Operation for a suitable Algorithm - for this ExecutionCall to see what kind of auto differentiation can be performed.
      -
      -
      Returns:
      -
      The AutoDiffMode for this call.
      +
      +
    • +

      toString

      +
      public java.lang.String toString()
      +
      +
      Overrides:
      +
      toString in class java.lang.Object
      -
    - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/api/Extensions.html b/docs/jdocs/neureka/backend/api/Extensions.html index 9d5e11d1d..cf5052598 100644 --- a/docs/jdocs/neureka/backend/api/Extensions.html +++ b/docs/jdocs/neureka/backend/api/Extensions.html @@ -1,173 +1,234 @@ - + + - -Extensions (neureka 1.0.0 API) - - - - + +Extensions (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class Extensions

    -
    -
    java.lang.Object -
    neureka.common.composition.AbstractComponentOwner<Extensions> -
    neureka.backend.api.Extensions
    +
    neureka.backend.api
    +

    Class Extensions

    -
    -
    -
    +
    + +
    +
    -
    -
    - +
    + - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        Extensions

        -
        public Extensions()
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            Extensions

            +
            public Extensions()
          -
    • +
    -
  • -
    -

    Method Details

    - + + + + + +
      +
    • +

      _removeOrReject

      +
      protected <T extends Component<Extensions>> T _removeOrReject(T newComponent)
      +
      Description copied from class: AbstractComponentOwner
      An implementation of this method checks if the passed component should be removed from the component collection of this class or its removal should be "rejected". Rejection in this case simply means that it returns null instead of the passed component.
      -
      -
      Specified by:
      -
      _removeOrReject in class AbstractComponentOwner<Extensions>
      -
      Parameters:
      +
      +
      Specified by:
      +
      _removeOrReject in class AbstractComponentOwner<Extensions>
      +
      Parameters:
      newComponent - The component which should be removed from the components list.
      -
      Returns:
      +
      Returns:
      The same component or null if its removal has been rejected.
      -
  • - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/api/ImplementationFor.html b/docs/jdocs/neureka/backend/api/ImplementationFor.html index d0b5480be..91647f025 100644 --- a/docs/jdocs/neureka/backend/api/ImplementationFor.html +++ b/docs/jdocs/neureka/backend/api/ImplementationFor.html @@ -1,157 +1,251 @@ - + + - -ImplementationFor (neureka 1.0.0 API) - - - - + +ImplementationFor (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Interface ImplementationFor<D extends Device<?>>

    +
    neureka.backend.api
    +

    Interface ImplementationFor<D extends Device<?>>

    -
    -
    -
    Type Parameters:
    +
    +
    +
    -
    -
      + of an execution procedure tailored to a specific Device (interface) instance + and Algorithm (interface) instance! + Instances of implementations of the ImplementationFor interface are components + of instances of implementations of the Algorithm interface, + which themselves are components of Operation implementation instances.
    + + +
    +
    +
      +
    • -
    • -
      -

      Method Summary

      -
      -
      -
      -
      -
      Modifier and Type
      -
      Method
      -
      Description
      - - -
      +
        +
      • + + +

        Method Summary

        + + + + + + + + + + +
        All Methods Instance Methods Abstract Methods 
        Modifier and TypeMethod and Description
        Tensor<?>run(ExecutionCall<D> call)
        This method is the entrypoint for a concrete implementation of the algorithm to which instances of this interface belong and the device on which this is implemented.
        - - - - - +
        +
      • +
    - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Method Details

      -
        -
      • -
        -

        run

        -
        Tensor<?> run(ExecutionCall<D> call)
        +
          +
        • + + +

          Method Detail

          + + + +
            +
          • +

            run

            +
            Tensor<?> run(ExecutionCall<D> call)
            This method is the entrypoint for a concrete implementation of the algorithm to which instances of this interface belong and the device on which this is implemented. One has to keep in mind that the implementation details with respect to the target device are specific to the methods of the "TargetDevice" type on which this call should run...
            -
            -
            Parameters:
            +
            +
            Parameters:
            call - The call which ought to be executed on this implementation.
            -
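Because this interface declares only a single abstract method, a device specific implementation can be sketched as a lambda or anonymous class. The body below is a placeholder only, not how any real neureka backend is implemented (CPU is again assumed to be the host Device type):

    ImplementationFor<CPU> sketch = new ImplementationFor<CPU>() {
        @Override
        public Tensor<?> run( ExecutionCall<CPU> call ) {
            // Device specific work (CPU loops, kernel dispatch, ...) would happen here.
            return null; // placeholder result
        }
    };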
      -
    - + + +
    + - + + + + diff --git a/docs/jdocs/neureka/backend/api/LazyRef.html b/docs/jdocs/neureka/backend/api/LazyRef.html index 32aa43fde..120ee0898 100644 --- a/docs/jdocs/neureka/backend/api/LazyRef.html +++ b/docs/jdocs/neureka/backend/api/LazyRef.html @@ -1,161 +1,272 @@ - + + - -LazyRef (neureka 1.0.0 API) - - - - + +LazyRef (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class LazyRef<V>

    +
    neureka.backend.api
    +

    Class LazyRef<V>

    -
    java.lang.Object -
    neureka.backend.api.LazyRef<V>
    -
    -
    -
    -
    Type Parameters:
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.backend.api.LazyRef<V>
      • +
      +
    • +
    +
    +
      +
    • +
      +
      Type Parameters:
      V - The value type parameter of the thing wrapped by this.

      -
      public final class LazyRef<V> -extends Object
      +
      +
      public final class LazyRef<V>
      +extends java.lang.Object
      This will simply fetch a variable from a lambda once and then continuously return this one value. In a sense it is a lazy reference! This is an internal class, do not depend on this outside this package.
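Based on the of(Supplier) and get() methods listed below, usage looks roughly like this; the supplier is evaluated once and later calls reuse the cached value:

    LazyRef<String> greeting = LazyRef.of( () -> "Hello " + "World" ); // nothing is computed yet
    String first  = greeting.get(); // the supplier runs here
    String second = greeting.get(); // same value again, the supplier is not called a second time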
      -
    -
    -
      - -
    • -
      -

      Method Summary

      -
      -
      -
      -
      -
      Modifier and Type
      -
      Method
      -
      Description
      - -
      get()
      -
       
      -
      static <V> LazyRef<V>
      -
      of(Supplier<V> source)
      -
       
      - - -
       
      -
      -
      +
    • +
    -
    -

    Methods inherited from class java.lang.Object

    -clone, equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait
    - +
    +
      +
    • + +
        +
      • + + +

        Method Summary

        + + + + + + + + + + + + + + + + + + +
        All Methods Static Methods Instance Methods Concrete Methods 
        Modifier and TypeMethod and Description
        Vget() 
        static <V> LazyRef<V>of(java.util.function.Supplier<V> source) 
        java.lang.StringtoString() 
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait
        • +
      - -
      -
        + +
      +
    +
    +
      +
    • -
    • -
      -

      Method Details

      -
        -
      • -
        -

        of

        -
        public static <V> LazyRef<V> of(Supplier<V> source)
        -
        +
          +
        • + + +

          Method Detail

          + + + +
            +
          • +

            of

            +
            public static <V> LazyRef<V> of(java.util.function.Supplier<V> source)
          • -
          • -
            -

            get

            -
            public V get()
            -
            +
          + + + +
            +
          • +

            get

            +
            public V get()
          • -
          • -
            -

            toString

            -
            public String toString()
            -
            -
            Overrides:
            -
            toString in class Object
            +
          + + + +
            +
          • +

            toString

            +
            public java.lang.String toString()
            +
            +
            Overrides:
            +
            toString in class java.lang.Object
            -
    - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/api/Operation.html b/docs/jdocs/neureka/backend/api/Operation.html index 68b3ca0a3..88e8ae92f 100644 --- a/docs/jdocs/neureka/backend/api/Operation.html +++ b/docs/jdocs/neureka/backend/api/Operation.html @@ -1,443 +1,554 @@ - + + - -Operation (neureka 1.0.0 API) - - - - + +Operation (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Interface Operation

    +
    neureka.backend.api
    +

    Interface Operation

    -
    -
    +
    +
    +
    -
    -
      - -
    • -
      -

      Method Summary

      -
      -
      -
      -
      -
      Modifier and Type
      -
      Method
      -
      Description
      - -
      asDerivative(Function[] children, - int derivationIndex)
      -
      -
      Operation implementations and Function implementations are in a tight relationship - where the Function describes an abstract syntax tree based on the syntactic information provided - by the Operation (through methods like getOperator() or getIdentifier()).
      + and optionally also an operator in the form of String instances. + Alongside there must be an implementation of the stringify(String[]) method, + which ought to generate a String view as part of a Function-AST.
      +
    • +
    - - -
     
    -
    double
    -
    calculate(double[] inputs, - int j, - int d, - Function[] src)
    -
    +
    + - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Method Details

      -
        -
      • -
        -

        builder

        -
        static OperationBuilder builder()
        -
        +
          +
        • + + +

          Method Detail

          + + + + + + + +
            +
          • +

            getAllAlgorithms

            +
            Algorithm[] getAllAlgorithms()
          • -
          • -
            -

            getAlgorithmFor

            -
            Algorithm getAlgorithmFor(ExecutionCall<?> call)
            -
            Alongside a component system made up of Algorithm instances, implementations - of this interface also ought to express a routing mechanism which finds the best Algorithm - for a given ExecutionCall instance. +
          + + + +
            +
          • +

            getAlgorithmFor

            +
            Algorithm getAlgorithmFor(ExecutionCall<?> call)
            +
            Alongside a component system made up of Algorithm instances, implementations + of this interface also ought to express a routing mechanism which finds the best Algorithm + for a given ExecutionCall instance. This method signature describes this requirement.
            -
            -
            Parameters:
            -
            call - The ExecutionCall instance which needs the best Algorithm for execution.
            -
            Returns:
            -
            The chosen Algorithm which ought to be fir for execution the provided call.
            +
            +
            Parameters:
            +
            call - The ExecutionCall instance which needs the best Algorithm for execution.
            +
            Returns:
            +
The chosen Algorithm which ought to be fit for executing the provided call.
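As a sketch of this routing (with 'operation' and 'call' assumed to already exist):

    Algorithm chosen = operation.getAlgorithmFor( call ); // the operation picks its most suitable Algorithm component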
            -
    • -
    • -
      -

      setAlgorithm

      -
      <T extends Algorithm> Operation setAlgorithm(Class<T> type, - T instance)
      -
      Operation implementations embody a component system hosting unique Algorithm instances. - For a given class implementing the Algorithm class, there can only be a single - instance of it referenced (aka supported) by a given Operation instance. - This method enables the registration of Algorithm types in the component system of this Operation.
      -
      -
      Type Parameters:
      -
      T - The type parameter of the Algorithm type class.
      -
      Parameters:
      -
      type - The class of the type which implements Algorithm as key for the provided instance.
      -
      instance - The instance of the provided type class which ought to be referenced (supported) by this Operation.
      -
      Returns:
      -
      This very Operation instance to enable method chaining on it.
      +
    + + + + + +
      +
    • +

      setAlgorithm

      +
      <T extends AlgorithmOperation setAlgorithm(java.lang.Class<T> type,
      +                                             T instance)
      +
      Operation implementations embody a component system hosting unique Algorithm instances. + For a given class implementing the Algorithm class, there can only be a single + instance of it referenced (aka supported) by a given Operation instance. + This method enables the registration of Algorithm types in the component system of this Operation.
      +
      +
      Type Parameters:
      +
      T - The type parameter of the Algorithm type class.
      +
      Parameters:
      +
      type - The class of the type which implements Algorithm as key for the provided instance.
      +
      instance - The instance of the provided type class which ought to be referenced (supported) by this Operation.
      +
      Returns:
      +
      This very Operation instance to enable method chaining on it.
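A registration sketch; MyAlgorithm and MyOtherAlgorithm are hypothetical Algorithm implementations, not library classes:

    operation
        .setAlgorithm( MyAlgorithm.class, new MyAlgorithm() ) // at most one instance per Algorithm class
        .setAlgorithm( new MyOtherAlgorithm() );               // convenience overload documented below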
      -
    • -
    • -
      -

      setAlgorithm

      -
      default <T extends Algorithm> Operation setAlgorithm(T instance)
      -
      +
    + + + + + +
      +
    • +

      setAlgorithm

      +
      default <T extends AlgorithmOperation setAlgorithm(T instance)
    • -
    • -
      -

      getAlgorithm

      -
      <T extends Algorithm> T getAlgorithm(Class<T> type)
      -
      Operation implementations embody a component system hosting unique Algorithm instances. - For a given class implementing the Algorithm class, there can only be a single - instance of it referenced (aka supported) by a given Operation instance. +
    + + + +
      +
    • +

      getAlgorithm

      +
      <T extends Algorithm> T getAlgorithm(java.lang.Class<T> type)
      +
      Operation implementations embody a component system hosting unique Algorithm instances. + For a given class implementing the Algorithm class, there can only be a single + instance of it referenced (aka supported) by a given Operation instance. This method ensures this in terms of read access by returning only a single instance or null - based on the provided class instance whose type extends the Algorithm interface.
      -
      -
      Type Parameters:
      -
      T - The type parameter of the Algorithm type class.
      -
      Parameters:
      -
      type - The class of the type which implements Algorithm as a key to get an existing instance.
      -
      Returns:
      -
      The instance of the specified type if any exists within this Operation.
      + based on the provided class instance whose type extends the Algorithm interface.
    +
    +
    Type Parameters:
    +
    T - The type parameter of the Algorithm type class.
    +
    Parameters:
    +
    type - The class of the type which implements Algorithm as a key to get an existing instance.
    +
    Returns:
    +
    The instance of the specified type if any exists within this Operation.
    - -
  • -
    -

    supportsAlgorithm

    -
    <T extends Algorithm> boolean supportsAlgorithm(Class<T> type)
    -
    This method checks if this Operation contains an instance of the - Algorithm implementation specified via its type class.
    -
    -
    Type Parameters:
    -
    T - The type parameter of the Algorithm type class.
    -
    Parameters:
    -
    type - The class of the type which implements Algorithm.
    -
    Returns:
    -
    The truth value determining if this Operation contains an instance of the specified Algorithm type.
    + + + + +
      +
    • +

      supportsAlgorithm

      +
      <T extends Algorithm> boolean supportsAlgorithm(java.lang.Class<T> type)
      +
      This method checks if this Operation contains an instance of the + Algorithm implementation specified via its type class.
      +
      +
      Type Parameters:
      +
      T - The type parameter of the Algorithm type class.
      +
      Parameters:
      +
      type - The class of the type which implements Algorithm.
      +
      Returns:
      +
      The truth value determining if this Operation contains an instance of the specified Algorithm type.
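Read access mirrors the single-instance-per-type contract described above (MyAlgorithm is again a hypothetical Algorithm implementation):

    if ( operation.supportsAlgorithm( MyAlgorithm.class ) ) {
        MyAlgorithm found = operation.getAlgorithm( MyAlgorithm.class ); // never more than one such instance
        // ... use 'found' ...
    }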
      -
  • -
  • -
    -

    getIdentifier

    -
    String getIdentifier()
    -
    Concrete Operation types ought to be representable by a function name. + + + + +
      +
    • +

      getIdentifier

      +
      java.lang.String getIdentifier()
      +
      Concrete Operation types ought to be representable by a function name. The following ensures that this contract is met when overriding the method.
      -
      -
      Returns:
      -
      the function name which serves as identifier when parsing Function instances.
      +
      +
      Returns:
      +
      the function name which serves as identifier when parsing Function instances.
      -
  • -
  • -
    -

    stringify

    -
    String stringify(String[] children)
    -
    + + + + +
      +
    • +

      stringify

      +
      java.lang.String stringify(java.lang.String[] children)
    • -
    • -
      -

      asDerivative

      -
      String asDerivative(Function[] children, - int derivationIndex)
      -
      Operation implementations and Function implementations are in a tight relationship - where the Function describes an abstract syntax tree based on the syntactic information provided - by the Operation (through methods like getOperator() or getIdentifier()). - One important feature of the Function is the ability to create - derivatives by calling the Function.getDerivative(int) method. - Implementations of this Function method ought to call the method defined below in order to - form the derivation based on the child nodes of the abstract syntax tree of the given Function node.
      -
      -
      Parameters:
      +
    + + + +
      +
    • +

      asDerivative

      +
      java.lang.String asDerivative(Function[] children,
      +                              int derivationIndex)
      +
      Operation implementations and Function implementations are in a tight relationship + where the Function describes an abstract syntax tree based on the syntactic information provided + by the Operation (through methods like getOperator() or getIdentifier()). + One important feature of the Function is the ability to create + derivatives by calling the Function.getDerivative(int) method. + Implementations of this Function method ought to call the method defined below in order to + form the derivation based on the child nodes of the abstract syntax tree of the given Function node.
      +
      +
      Parameters:
      children - The child nodes of a AST node referencing this operation.
      derivationIndex - The index of the input node which ought to be derived.
      -
      Returns:
      -
      The derivative as a String which should be parsable into yet another AST.
      +
      Returns:
      +
      The derivative as a String which should be parsable into yet another AST.
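Purely as a call-shape sketch ('children' stands for the Function child nodes of the AST node owning this operation; building them is not shown on this page):

    String derivative = operation.asDerivative( children, 0 ); // derive with respect to input 0; the result is parsable again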
      -
    • -
    • -
      -

      getOperator

      -
      String getOperator()
      -
      +
    + + + +
      +
    • +

      getOperator

      +
      java.lang.String getOperator()
    • -
    • -
      -

      getArity

      -
      int getArity()
      +
    + + + +
      +
    • +

      getArity

      +
      int getArity()
      Arity is the number of arguments or operands that this function or operation takes.
      -
      -
      Returns:
      +
      +
      Returns:
      The number of arguments expected by this operation, or -1 if an arbitrary number is accepted.
      -
    • -
    • -
      -

      isOperator

      -
      boolean isOperator()
      +
    + + + +
      +
    • +

      isOperator

      +
      boolean isOperator()
      An operator is an alternative to a function like "sum()" or "prod()".
      Examples would be "+, -, * ..."!
      -
      -
      Returns:
      +
      +
      Returns:
      If this operation can be represented as operator like "+, -, * ..."!
      -
    • -
    • -
      -

      isIndexer

      -
      boolean isIndexer()
      -
      This boolean property tell the Function implementations that this Operation +
    + + + +
      +
    • +

      isIndexer

      +
      boolean isIndexer()
      +
This boolean property tells the Function implementations that this Operation ought to be viewed as something to be indexed. The Function will use this information to iterate over all the provided inputs and then execute the function while also passing the index to the function AST. The resulting array will then be available to this Operation as an argument list. This feature works alongside the Function implementation found in FunctionVariable, which represents an input indexed by the identifier 'j'!
      -
      -
      Returns:
      +
      +
      Returns:
      If this operation is an indexer.
      -
    • -
    • -
      -

      isDifferentiable

      -
      @Deprecated -boolean isDifferentiable()
      -
      Deprecated.
      -
      +
    + + + +
      +
    • +

      isDifferentiable

      +
      @Deprecated
      +boolean isDifferentiable()
      +
      Deprecated. 
    • -
    • -
      -

      isInline

      -
      boolean isInline()
      -
      This flag indicates that the implementation of this Operation +
    + + + +
      +
    • +

      isInline

      +
      boolean isInline()
      +
      This flag indicates that the implementation of this Operation performs an operation which modifies the inputs to that operation. An example of this would be an assignment operation which copies the contents of one nd-array / tensor into another tensor. This second tensor will then have changed its state. This can be dangerous when auto-differentiation is involved.
      -
      -
      Returns:
      -
      The truth value determining if this Operation changes the contents of inputs.
      +
      +
      Returns:
      +
      The truth value determining if this Operation changes the contents of inputs.
      -
    • -
    • -
      -

      supports

      -
      <T extends Algorithm> boolean supports(Class<T> implementation)
      -
      +
    + + + + + + + + + + + +
      +
    • +

      calculate

      +
      double calculate(double[] inputs,
      +                 int j,
      +                 int d,
      +                 Function[] src)
      This method mainly ought to serve as a reference- and fallback- implementation for tensor backends and also as the backend for handling the calculation of scalar inputs passed to a given abstract syntax tree of - Function instances...
      + Function instances...
      ( (almost) every Function instance contains an OperationType reference to which it passes scalar executions... )

      This is also the reason why the last parameter of this method is a list of Function objects : @@ -453,25 +564,84 @@

      calculate

      Depending on this variable and also the nature of the operation, the execution calls to the child nodes of this node change considerably!

      -
      -
      Parameters:
      +
      +
      Parameters:
      inputs - An array of scalar input variables.
      j - The index variable for indexed execution on the input array. (-1 if no indexing should occur)
      d - The index of the variable of which a derivative ought to be calculated.
      src - The child nodes of the Function node to which this very OperationType belongs.
      -
      Returns:
      +
      Returns:
      The result of the calculation.
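A hedged call-shape sketch: the meaning of j = -1 is taken from the parameter description above, treating d = -1 as "no derivative" is an assumption, and 'children' again stands for Function child nodes obtained elsewhere:

    double[] inputs = { 3.0, 4.0 };
    double result = someOperation.calculate( inputs, -1, -1, children ); // scalar fallback execution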
      -
    -
  • - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/api/Result.html b/docs/jdocs/neureka/backend/api/Result.html index 43bfd92ad..756dcd0c4 100644 --- a/docs/jdocs/neureka/backend/api/Result.html +++ b/docs/jdocs/neureka/backend/api/Result.html @@ -1,169 +1,288 @@ - + + - -Result (neureka 1.0.0 API) - - - - + +Result (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class Result

    -
    -
    java.lang.Object -
    neureka.backend.api.Result
    +
    neureka.backend.api
    +

    Class Result

    -
    -
    -
    public final class Result -extends Object
    -
An immutable wrapper for a tensor as a result of an Execution as well as an ADActionSupplier for providing auto-differentiation support.
    -
    -
    -
    +
    + - -
    -
      +
    +
    + - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/api/fun/ADActionSupplier.html b/docs/jdocs/neureka/backend/api/fun/ADActionSupplier.html index a20b30333..baee4db35 100644 --- a/docs/jdocs/neureka/backend/api/fun/ADActionSupplier.html +++ b/docs/jdocs/neureka/backend/api/fun/ADActionSupplier.html @@ -1,162 +1,256 @@ - + + - -ADActionSupplier (neureka 1.0.0 API) - - - - + +ADActionSupplier (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Interface ADActionSupplier

    +
    neureka.backend.api.fun
    +

    Interface ADActionSupplier

    -
    -
    +
    +
    +
      +
    • +
      All Known Implementing Classes:
      -
      FallbackAlgorithm
      +
      FallbackAlgorithm
      -
      +
      Functional Interface:
      This is a functional interface and can therefore be used as the assignment target for a lambda expression or method reference.

      -
      @FunctionalInterface -public interface ADActionSupplier
      +
      +
      @FunctionalInterface
      +public interface ADActionSupplier
      Implementations of this functional interface ought to return a new instance - of the ADAction class responsible for performing automatic differentiation + of the ADAction class responsible for performing automatic differentiation both for forward and backward mode differentiation.
      - Therefore an ADAction exposes 2 different procedures.
      + Therefore an ADAction exposes 2 different procedures.
      One is the forward mode differentiation, and the other one
      is the backward mode differentiation which is more commonly known as back-propagation...
      Besides that it may also contain context information used
      to perform said procedures.
      -
    -
    -
      + +
    +
    +
    + - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Method Details

      -
        -
      • -
        -

        supplyADActionFor

        -
        ADAction supplyADActionFor(Function function, - ExecutionCall<? extends Device<?>> call)
        +
          +
        • + + +

          Method Detail

          + + + +
            +
          • +

            supplyADActionFor

            +
            ADAction supplyADActionFor(Function function,
            +                           ExecutionCall<? extends Device<?>> call)
This method ought to return a new instance of the ADAction class responsible for performing automatic differentiation, both for forward and backward mode differentiation.
            - Therefore an ADAction exposes 2 different procedures.
            + Therefore an ADAction exposes 2 different procedures.
            One is the forward mode differentiation, and the other one
            is the backward mode differentiation which is more commonly known as back-propagation...
            Besides that it may also contain context information used
            to perform said procedures.
            -
            -
            Parameters:
            +
            +
            Parameters:
            function - The function from where the request for auto differentiation originates.
            call - The execution call of the current execution which requires auto differentiation support.
            -
            Returns:
            -
            The resulting ADAction.
            +
            Returns:
            +
            The resulting ADAction.
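Since this is a functional interface, a supplier can be written as a lambda; 'prebuiltAction' is a hypothetical, already constructed ADAction, because building one is not covered on this page:

    ADActionSupplier supplier = ( function, call ) -> prebuiltAction;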
            -
      -
    - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/api/fun/ADSupportPredicate.html b/docs/jdocs/neureka/backend/api/fun/ADSupportPredicate.html index 04a38ca0e..2a575f9b5 100644 --- a/docs/jdocs/neureka/backend/api/fun/ADSupportPredicate.html +++ b/docs/jdocs/neureka/backend/api/fun/ADSupportPredicate.html @@ -1,151 +1,245 @@ - + + - -ADSupportPredicate (neureka 1.0.0 API) - - - - + +ADSupportPredicate (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Interface ADSupportPredicate

    +
    neureka.backend.api.fun
    +

    Interface ADSupportPredicate

    -
    -
    +
    +
    +
    -
    -
      +
      +
      @FunctionalInterface
      +public interface ADSupportPredicate
      +
An ADSupportPredicate lambda checks which auto differentiation mode can be performed for a given ExecutionCall. The analyzer returns an AutoDiffMode enum instance.
      + +
    +
    +
    + - -
    -
      +
    +
    +
    +
    +
    Parameters:
    +
    call - The ExecutionCall which should be checked.
    +
    Returns:
    +
An AutoDiffMode enum instance describing what kind of differentiation can be performed.
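A lambda sketch; BACKWARD_ONLY is assumed here to be one of the AutoDiffMode constants:

    ADSupportPredicate backwardOnly = call -> AutoDiffMode.BACKWARD_ONLY;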
    - - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/api/fun/Execution.html b/docs/jdocs/neureka/backend/api/fun/Execution.html index 96aa5979d..5e43a8b96 100644 --- a/docs/jdocs/neureka/backend/api/fun/Execution.html +++ b/docs/jdocs/neureka/backend/api/fun/Execution.html @@ -1,162 +1,256 @@ - + + - -Execution (neureka 1.0.0 API) - - - - + +Execution (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Interface Execution

    +
    neureka.backend.api.fun
    +

    Interface Execution

    -
    -
    +
    +
    +
    -
    -
    - +
    + - -
    -
      +
    +
    + - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/api/fun/ExecutionPreparation.html b/docs/jdocs/neureka/backend/api/fun/ExecutionPreparation.html index fc98fd979..39454bba7 100644 --- a/docs/jdocs/neureka/backend/api/fun/ExecutionPreparation.html +++ b/docs/jdocs/neureka/backend/api/fun/ExecutionPreparation.html @@ -1,91 +1,117 @@ - + + - -ExecutionPreparation (neureka 1.0.0 API) - - - - + +ExecutionPreparation (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Interface ExecutionPreparation

    +
    neureka.backend.api.fun
    +

    Interface ExecutionPreparation

    -
    -
    +
    +
    +
    -
    -
    - +
    + - -
    -
      +
    +
    + - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/api/fun/SuitabilityPredicate.html b/docs/jdocs/neureka/backend/api/fun/SuitabilityPredicate.html index e8c0869f8..44e2bc07e 100644 --- a/docs/jdocs/neureka/backend/api/fun/SuitabilityPredicate.html +++ b/docs/jdocs/neureka/backend/api/fun/SuitabilityPredicate.html @@ -1,337 +1,434 @@ - + + - -SuitabilityPredicate (neureka 1.0.0 API) - - - - + +SuitabilityPredicate (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Interface SuitabilityPredicate

    +
    neureka.backend.api.fun
    +

    Interface SuitabilityPredicate

    -
    -
    +
    +
    +
    -
    -
      - -
    • -
      -

      Field Summary

      -
      Fields
      -
      -
      Modifier and Type
      -
      Field
      -
      Description
      -
      static final float
      - -
       
      -
      static final float
      - -
       
      -
      static final float
      - -
       
      -
      static final float
      - -
       
      -
      static final float
      - -
       
      -
      static final float
      - -
       
      -
      static final float
      - -
       
      -
      static final float
      - -
       
      -
      static final float
      - -
       
      +
    • +
    - +
    +
      +
    • + +
        +
      • + + +

        Field Summary

        + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        Fields 
        Modifier and TypeField and Description
        static floatBAD 
        static floatEXCELLENT 
        static floatGOOD 
        static floatNOT_GOOD 
        static floatOKAY 
        static floatPERFECT 
        static floatTERRIBLE 
        static floatUNSUITABLE 
        static floatVERY_GOOD 
      • +
      -
    • -
      -

      Method Summary

      -
      -
      -
      -
      -
      Modifier and Type
      -
      Method
      -
      Description
      -
      float
      -
      isSuitableFor(ExecutionCall<? extends Device<?>> call)
      -
      -
When an ExecutionCall instance has been formed, it will be routed by
the given Operation instance to its components, namely:
Algorithm instances!
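A minimal predicate sketch using one of the constants from the field summary above (a higher score presumably makes this algorithm more likely to be chosen):

    SuitabilityPredicate preferMe = call -> SuitabilityPredicate.GOOD;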
      + -
      -
      -
        - -
      • -
        -

        Field Details

        -
    +
    + - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/api/fun/package-frame.html b/docs/jdocs/neureka/backend/api/fun/package-frame.html new file mode 100644 index 000000000..509c4116c --- /dev/null +++ b/docs/jdocs/neureka/backend/api/fun/package-frame.html @@ -0,0 +1,23 @@ + + + + + +neureka.backend.api.fun (neureka 1.0.1 API) + + + + +

    neureka.backend.api.fun

    + + + diff --git a/docs/jdocs/neureka/backend/api/fun/package-summary.html b/docs/jdocs/neureka/backend/api/fun/package-summary.html index 35f4cc096..b1fd9a3d7 100644 --- a/docs/jdocs/neureka/backend/api/fun/package-summary.html +++ b/docs/jdocs/neureka/backend/api/fun/package-summary.html @@ -1,120 +1,172 @@ - + + - -neureka.backend.api.fun (neureka 1.0.0 API) - - - - + +neureka.backend.api.fun (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    -

    Package neureka.backend.api.fun

    -
    -
    -
    package neureka.backend.api.fun
    -
    -
    -
    + + + + diff --git a/docs/jdocs/neureka/backend/api/fun/package-tree.html b/docs/jdocs/neureka/backend/api/fun/package-tree.html index 6fe60f1a3..8538f54e9 100644 --- a/docs/jdocs/neureka/backend/api/fun/package-tree.html +++ b/docs/jdocs/neureka/backend/api/fun/package-tree.html @@ -1,71 +1,134 @@ - + + - -neureka.backend.api.fun Class Hierarchy (neureka 1.0.0 API) - - - - + +neureka.backend.api.fun Class Hierarchy (neureka 1.0.1 API) - - - - - - + + -
    - -
    -

    Hierarchy For Package neureka.backend.api.fun

    -Package Hierarchies: +Package Hierarchies:
    -
    +

    Interface Hierarchy

    -
    -
    + + + + diff --git a/docs/jdocs/neureka/backend/api/ini/BackendLoader.html b/docs/jdocs/neureka/backend/api/ini/BackendLoader.html index 6e0d5ce54..9a2a828e6 100644 --- a/docs/jdocs/neureka/backend/api/ini/BackendLoader.html +++ b/docs/jdocs/neureka/backend/api/ini/BackendLoader.html @@ -1,124 +1,218 @@ - + + - -BackendLoader (neureka 1.0.0 API) - - - - + +BackendLoader (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Interface BackendLoader

    +
    neureka.backend.api.ini
    +

    Interface BackendLoader

    -
    +
    +
    +
      +

    • -
      public interface BackendLoader
      -
    -
    -
      - -
    • -
      -

      Method Summary

      -
      -
      -
      -
      -
      Modifier and Type
      -
      Method
      -
      Description
      -
      void
      - -
       
      -
      -
      +
      +
      public interface BackendLoader
      +
    • +
    - +
    + - -
    -
      +
    +
    + - +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/api/ini/BackendRegistry.html b/docs/jdocs/neureka/backend/api/ini/BackendRegistry.html index a8997e4c9..a680f448e 100644 --- a/docs/jdocs/neureka/backend/api/ini/BackendRegistry.html +++ b/docs/jdocs/neureka/backend/api/ini/BackendRegistry.html @@ -1,140 +1,247 @@ - + + - -BackendRegistry (neureka 1.0.0 API) - - - - + +BackendRegistry (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class BackendRegistry

    +
    neureka.backend.api.ini
    +

    Class BackendRegistry

    -
    java.lang.Object -
    neureka.backend.api.ini.BackendRegistry
    -
    -
    -
    -
    public final class BackendRegistry -extends Object
    -
    -
    -
    +
    +
    +
    + - +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/api/ini/ImplementationReceiver.html b/docs/jdocs/neureka/backend/api/ini/ImplementationReceiver.html index 1819906e6..054363e0a 100644 --- a/docs/jdocs/neureka/backend/api/ini/ImplementationReceiver.html +++ b/docs/jdocs/neureka/backend/api/ini/ImplementationReceiver.html @@ -1,130 +1,224 @@ - + + - -ImplementationReceiver (neureka 1.0.0 API) - - - - + +ImplementationReceiver (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Interface ImplementationReceiver

    +
    neureka.backend.api.ini
    +

    Interface ImplementationReceiver

    -
    +
    +
    +
      +

    • -
      public interface ImplementationReceiver
      -
    -
    -
    - +
    + - -
    -
      +
    +
    + - +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/api/ini/LoadingContext.html b/docs/jdocs/neureka/backend/api/ini/LoadingContext.html index dcdc87b0e..0fbb73cde 100644 --- a/docs/jdocs/neureka/backend/api/ini/LoadingContext.html +++ b/docs/jdocs/neureka/backend/api/ini/LoadingContext.html @@ -1,133 +1,231 @@ - + + - -LoadingContext (neureka 1.0.0 API) - - - - + +LoadingContext (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Interface LoadingContext

    +
    neureka.backend.api.ini
    +

    Interface LoadingContext

    -
    +
    +
    +
      +

    • -
      public interface LoadingContext
      -
    -
    -
    - +
    +
    +
    +
      +
    • -
    • -
      -

      Method Details

      -
        -
      • -
        -

        getAlgorithmName

        -
        String getAlgorithmName()
        -
        +
          +
        • + + +

          Method Detail

          + + + +
            +
          • +

            getAlgorithmName

            +
            java.lang.String getAlgorithmName()
          • -
          • -
            -

            getOperationIdentidier

            -
            String getOperationIdentidier()
            -
            +
          + + + +
            +
          • +

            getOperationIdentidier

            +
            java.lang.String getOperationIdentidier()
            +
          • +
        -
    - +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/api/ini/ReceiveForDevice.html b/docs/jdocs/neureka/backend/api/ini/ReceiveForDevice.html index 61c56234d..0a2ab8d66 100644 --- a/docs/jdocs/neureka/backend/api/ini/ReceiveForDevice.html +++ b/docs/jdocs/neureka/backend/api/ini/ReceiveForDevice.html @@ -1,137 +1,235 @@ - + + - -ReceiveForDevice (neureka 1.0.0 API) - - - - + +ReceiveForDevice (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Interface ReceiveForDevice<D extends Device<?>>

    +
    neureka.backend.api.ini
    +

    Interface ReceiveForDevice<D extends Device<?>>

    -
    +
    +
    +
      +

    • -
      public interface ReceiveForDevice<D extends Device<?>>
      -
    -
    -
    - +
    +
    +
    + - +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/api/ini/ReceiveForOperation.html b/docs/jdocs/neureka/backend/api/ini/ReceiveForOperation.html index f4db42a5e..bff7abffa 100644 --- a/docs/jdocs/neureka/backend/api/ini/ReceiveForOperation.html +++ b/docs/jdocs/neureka/backend/api/ini/ReceiveForOperation.html @@ -1,126 +1,220 @@ - + + - -ReceiveForOperation (neureka 1.0.0 API) - - - - + +ReceiveForOperation (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Interface ReceiveForOperation<D extends Device<?>>

    +
    neureka.backend.api.ini
    +

    Interface ReceiveForOperation<D extends Device<?>>

    -
    +
    +
    +
      +

    • -
      public interface ReceiveForOperation<D extends Device<?>>
      -
    -
    -
    - +
    + - -
    -
      +
    +
    + - +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/api/ini/package-frame.html b/docs/jdocs/neureka/backend/api/ini/package-frame.html new file mode 100644 index 000000000..a7721808c --- /dev/null +++ b/docs/jdocs/neureka/backend/api/ini/package-frame.html @@ -0,0 +1,27 @@ + + + + + +neureka.backend.api.ini (neureka 1.0.1 API) + + + + +

    neureka.backend.api.ini

    + + + diff --git a/docs/jdocs/neureka/backend/api/ini/package-summary.html b/docs/jdocs/neureka/backend/api/ini/package-summary.html index af68692df..4dc70a950 100644 --- a/docs/jdocs/neureka/backend/api/ini/package-summary.html +++ b/docs/jdocs/neureka/backend/api/ini/package-summary.html @@ -1,111 +1,170 @@ - + + - -neureka.backend.api.ini (neureka 1.0.0 API) - - - - + +neureka.backend.api.ini (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    -

    Package neureka.backend.api.ini

    -
    -
    -
    package neureka.backend.api.ini
    -
    -
    -
    + + + + diff --git a/docs/jdocs/neureka/backend/api/ini/package-tree.html b/docs/jdocs/neureka/backend/api/ini/package-tree.html index 5b478f974..0e3e12201 100644 --- a/docs/jdocs/neureka/backend/api/ini/package-tree.html +++ b/docs/jdocs/neureka/backend/api/ini/package-tree.html @@ -1,81 +1,142 @@ - + + - -neureka.backend.api.ini Class Hierarchy (neureka 1.0.0 API) - - - - + +neureka.backend.api.ini Class Hierarchy (neureka 1.0.1 API) - - - - - - + + -
    - -
    -

    Hierarchy For Package neureka.backend.api.ini

    -Package Hierarchies: +Package Hierarchies:
    -
    +

    Class Hierarchy

    -
    -

    Interface Hierarchy

    -
    -
    + + + + diff --git a/docs/jdocs/neureka/backend/api/package-frame.html b/docs/jdocs/neureka/backend/api/package-frame.html new file mode 100644 index 000000000..ece69ff17 --- /dev/null +++ b/docs/jdocs/neureka/backend/api/package-frame.html @@ -0,0 +1,47 @@ + + + + + +neureka.backend.api (neureka 1.0.1 API) + + + + +

    neureka.backend.api

    + + + diff --git a/docs/jdocs/neureka/backend/api/package-summary.html b/docs/jdocs/neureka/backend/api/package-summary.html index 552ffbfd0..e9d858074 100644 --- a/docs/jdocs/neureka/backend/api/package-summary.html +++ b/docs/jdocs/neureka/backend/api/package-summary.html @@ -1,187 +1,291 @@ - + + - -neureka.backend.api (neureka 1.0.0 API) - - - - + +neureka.backend.api (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    -

    Package neureka.backend.api

    +

    Package neureka.backend.api

    -
    -
    package neureka.backend.api
    -
    -
    -
    + + + + diff --git a/docs/jdocs/neureka/backend/api/package-tree.html b/docs/jdocs/neureka/backend/api/package-tree.html index fbbb10ddb..2e507799c 100644 --- a/docs/jdocs/neureka/backend/api/package-tree.html +++ b/docs/jdocs/neureka/backend/api/package-tree.html @@ -1,149 +1,208 @@ - + + - -neureka.backend.api Class Hierarchy (neureka 1.0.0 API) - - - - + +neureka.backend.api Class Hierarchy (neureka 1.0.1 API) - - - - - - + + -
    - -
    -

    Hierarchy For Package neureka.backend.api

    -Package Hierarchies: +Package Hierarchies:
    -
    +

    Class Hierarchy

    -
    -

    Interface Hierarchy

    -
    -
    -

    Enum Class Hierarchy

    +

    Enum Hierarchy

    -
    -
    + + + + diff --git a/docs/jdocs/neureka/backend/api/template/algorithms/AbstractDeviceAlgorithm.html b/docs/jdocs/neureka/backend/api/template/algorithms/AbstractDeviceAlgorithm.html index e00571ab6..95780d19a 100644 --- a/docs/jdocs/neureka/backend/api/template/algorithms/AbstractDeviceAlgorithm.html +++ b/docs/jdocs/neureka/backend/api/template/algorithms/AbstractDeviceAlgorithm.html @@ -1,386 +1,564 @@ - + + - -AbstractDeviceAlgorithm (neureka 1.0.0 API) - - - - + +AbstractDeviceAlgorithm (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class AbstractDeviceAlgorithm<C extends DeviceAlgorithm<C>>

    -
    -
    java.lang.Object -
    neureka.backend.api.template.algorithms.AbstractDeviceAlgorithm<C>
    +
    neureka.backend.api.template.algorithms
    +

    Class AbstractDeviceAlgorithm<C extends DeviceAlgorithm<C>>

    -
    -
    -
    Type Parameters:
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.backend.api.template.algorithms.AbstractDeviceAlgorithm<C>
      • +
      +
    • +
    +
    +
    -
    -
    -
    -

    Fields inherited from interface neureka.backend.api.fun.SuitabilityPredicate

    -BAD, EXCELLENT, GOOD, NOT_GOOD, OKAY, PERFECT, TERRIBLE, UNSUITABLE, VERY_GOOD
    - +
    +
    +
    + -
  • -
    -

    Constructor Details

    -
      -
    • -
      -

      AbstractDeviceAlgorithm

      -
      public AbstractDeviceAlgorithm(String name)
      -
      +
        +
      • + + +

        Constructor Detail

        + + + +
          +
        • +

          AbstractDeviceAlgorithm

          +
          public AbstractDeviceAlgorithm(java.lang.String name)
        -
  • + -
  • -
    -

    Method Details

    -
      -
    • -
      -

      setImplementationFor

      -
      public <D extends Device<?>, -E extends ImplementationFor<D>> -C setImplementationFor(Class<D> deviceClass, - E implementation)
      -
      Description copied from interface: DeviceAlgorithm
      -
      Implementations of the DeviceAlgorithm interface ought to express a compositional design pattern.
      +
        +
      • + + +

        Method Detail

        + + + + + +
          +
        • +

          setImplementationFor

          +
          public <D extends Device<?>,E extends ImplementationFor<D>> C setImplementationFor(java.lang.Class<D> deviceClass,
          +                                                                                   E implementation)
          +
          Description copied from interface: DeviceAlgorithm
          +
          Implementations of the DeviceAlgorithm interface ought to express a compositional design pattern.
          This means that concrete implementations of an algorithm for a device are not extending an Algorithm, they are components of it instead.
          These components can be stored on an Algorithm by passing a Device class as key and an ImplementationFor instance as value.
          -
          -
          Specified by:
          -
          setImplementationFor in interface DeviceAlgorithm<C extends DeviceAlgorithm<C>>
          -
          Type Parameters:
          -
          D - The type parameter of the Device type for which - an implementation should be set in this Device.
          -
          E - The type of the ImplementationFor the provided Device type.
          -
          Parameters:
          -
          deviceClass - The class of the Device for which an implementation should be set.
          -
          implementation - The ImplementationFor the provided Device type.
          -
          Returns:
          -
          This very Algorithm instance to allow for method chaining.
          +
          +
          Specified by:
          +
          setImplementationFor in interface DeviceAlgorithm<C extends DeviceAlgorithm<C>>
          +
          Type Parameters:
          +
          D - The type parameter of the Device type for which + an implementation should be set in this Device.
          +
          E - The type of the ImplementationFor the provided Device type.
          +
          Parameters:
          +
          deviceClass - The class of the Device for which an implementation should be set.
          +
          implementation - The ImplementationFor the provided Device type.
          +
          Returns:
          +
          This very Algorithm instance to allow for method chaining.
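A registration sketch matching the compositional pattern described above; CPU is assumed to be neureka's host Device type and 'cpuImplementation' an ImplementationFor<CPU> built elsewhere:

    algorithm.setImplementationFor( CPU.class, cpuImplementation );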
          -
    • -
    • -
      -

      getImplementationFor

      -
      public <D extends Device<?>> ImplementationFor<D> getImplementationFor(Class<D> deviceClass)
      -
      Description copied from interface: DeviceAlgorithm
      -
      An ImplementationFor a specific Device can be accessed by passing the class of - the Device for which an implementation should be returned. - An Algorithm instance ought to contain a collection of these Device specific +
    + + + +
      +
    • +

      getImplementationFor

      +
      public <D extends Device<?>> ImplementationFor<D> getImplementationFor(java.lang.Class<D> deviceClass)
      +
      Description copied from interface: DeviceAlgorithm
      +
      An ImplementationFor a specific Device can be accessed by passing the class of + the Device for which an implementation should be returned. + An Algorithm instance ought to contain a collection of these Device specific implementations...
      -
      -
      Specified by:
      -
      getImplementationFor in interface DeviceAlgorithm<C extends DeviceAlgorithm<C>>
      -
      Type Parameters:
      +
      +
      Specified by:
      +
      getImplementationFor in interface DeviceAlgorithm<C extends DeviceAlgorithm<C>>
      +
      Type Parameters:
      D - The type parameter which has to be a class extending the Device interface.
      -
      Parameters:
      +
      Parameters:
      deviceClass - The class of the device for which the stored algorithm implementation should be returned.
      -
      Returns:
      +
      Returns:
      The implementation for the passed device type class.
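And the read-access counterpart to the registration sketch above:

    ImplementationFor<CPU> forCpu = algorithm.getImplementationFor( CPU.class );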
      -
  • -
  • -
    -

    toString

    -
    public String toString()
    -
    -
    Overrides:
    -
    toString in class Object
    + + + + +
      +
    • +

      toString

      +
      public java.lang.String toString()
      +
      +
      Overrides:
      +
      toString in class java.lang.Object
      -
  • -
  • -
    -

    executeFor

    -
    public static Tensor<?> executeFor(Function caller, - ExecutionCall<? extends Device<?>> call, - FinalExecutor executor)
    -
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
      +
    • +

      executeOnCommonDevice

      +
      public static <R> R executeOnCommonDevice(ExecutionCall<?> call,
      +                                          java.util.function.Supplier<R> execution)
    • -
    • -
      -

      getName

      -
      public String getName()
      -
      This method returns the name of this Algorithm +
    + + + +
      +
    • +

      getName

      +
      public java.lang.String getName()
      +
      This method returns the name of this Algorithm which may be used as variable names in OpenCL kernels or other backends. Therefore, this name is expected to be void of any spaces or non-numeric and alphabetic characters.
      -
      -
      Specified by:
      -
      getName in interface Algorithm
      -
      Returns:
      -
      The name of this Algorithm.
      +
      +
      Specified by:
      +
      getName in interface Algorithm
      +
      Returns:
      +
      The name of this Algorithm.
      -
    -
  • - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/api/template/algorithms/AbstractFunAlgorithm.html b/docs/jdocs/neureka/backend/api/template/algorithms/AbstractFunAlgorithm.html index 0861b20d8..0014d7a71 100644 --- a/docs/jdocs/neureka/backend/api/template/algorithms/AbstractFunAlgorithm.html +++ b/docs/jdocs/neureka/backend/api/template/algorithms/AbstractFunAlgorithm.html @@ -1,323 +1,474 @@ - + + - -AbstractFunAlgorithm (neureka 1.0.0 API) - - - - + +AbstractFunAlgorithm (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class AbstractFunAlgorithm

    +
    neureka.backend.api.template.algorithms
    +

    Class AbstractFunAlgorithm

    -
    java.lang.Object -
    neureka.backend.api.template.algorithms.AbstractFunAlgorithm
    -
    -
    -
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.backend.api.template.algorithms.AbstractFunAlgorithm
      • +
      +
    • +
    +
    +
    -
    -
      +
      +
      public class AbstractFunAlgorithm
      +extends java.lang.Object
      + +
    +
    +
    + - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        AbstractFunAlgorithm

        -
        protected AbstractFunAlgorithm(String name)
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            AbstractFunAlgorithm

            +
            protected AbstractFunAlgorithm(java.lang.String name)
          -
    • +
    -
  • -
    -

    Method Details

    - + + + +
  • -
  • -
    -

    setIsSuitableFor

    -
    public final AbstractFunAlgorithm setIsSuitableFor(SuitabilityPredicate isSuitableFor)
    -
    The SuitabilityPredicate received by this method - checks if a given instance of an ExecutionCall is - suitable to be executed in ImplementationFor instances - residing in this Algorithm as components. - The lambda will be called by the isSuitableFor(ExecutionCall) method - by any given Operation instances this algorithm belongs to.
    -
    -
    Parameters:
    + + + + +
  • -
  • -
    -

    setAutogradModeFor

    -
    public final AbstractFunAlgorithm setAutogradModeFor(ADSupportPredicate autogradModeFor)
    -
An ADSupportPredicate lambda checks what kind of auto differentiation mode an Algorithm supports for a given ExecutionCall. The lambda will be called by the autoDiffModeFrom(ExecutionCall) method by any Operation instances this algorithm belongs to.
    -
    -
    Parameters:
    + + + + +
  • -
  • -
    -

    autoDiffModeFrom

    -
    public AutoDiffMode autoDiffModeFrom(ExecutionCall<? extends Device<?>> call)
    -
    Description copied from interface: ADSupportPredicate
    + + + + +
  • +
    +
    Parameters:
    +
    call - The ExecutionCall which should be checked.
    +
    Returns:
    +
An AutoDiffMode enum instance describing what kind of differentiation can be performed.
    - -
  • -
    -

    setExecution

    -
    public final AbstractFunAlgorithm setExecution(Execution execution)
    -
    + + + + + + + + + + + + +
      +
    • +

      getName

      +
      public java.lang.String getName()
      +
      This method returns the name of this Algorithm which may be used as variable names in OpenCL kernels or other backends. Therefore, this name is expected to be void of any spaces or non-numeric and alphabetic characters.
      -
      -
      Specified by:
      -
      getName in interface Algorithm
      -
      Returns:
      -
      The name of this Algorithm.
      +
      +
      Specified by:
      +
      getName in interface Algorithm
      +
      Returns:
      +
      The name of this Algorithm.
      -
    -
  • - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/api/template/algorithms/AbstractFunDeviceAlgorithm.html b/docs/jdocs/neureka/backend/api/template/algorithms/AbstractFunDeviceAlgorithm.html index 7573b95a1..af88aa0df 100644 --- a/docs/jdocs/neureka/backend/api/template/algorithms/AbstractFunDeviceAlgorithm.html +++ b/docs/jdocs/neureka/backend/api/template/algorithms/AbstractFunDeviceAlgorithm.html @@ -1,335 +1,436 @@ - + + - -AbstractFunDeviceAlgorithm (neureka 1.0.0 API) - - - - + +AbstractFunDeviceAlgorithm (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class AbstractFunDeviceAlgorithm<C extends DeviceAlgorithm<C>>

    -
    -
    java.lang.Object -
    neureka.backend.api.template.algorithms.AbstractDeviceAlgorithm<C> -
    neureka.backend.api.template.algorithms.AbstractFunDeviceAlgorithm<C>
    -
    +
    neureka.backend.api.template.algorithms
    +

    Class AbstractFunDeviceAlgorithm<C extends DeviceAlgorithm<C>>

    -
    -
    -
    Type Parameters:
    +
    + +
    +
    -
    -
      + +
    +
    +
    + - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        AbstractFunDeviceAlgorithm

        -
        public AbstractFunDeviceAlgorithm(String name)
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            AbstractFunDeviceAlgorithm

            +
            public AbstractFunDeviceAlgorithm(java.lang.String name)
          -
    • +
    -
  • -
    -

    Method Details

    - + + + +
  • -
  • -
    -

    buildFunAlgorithm

    -
    public final C buildFunAlgorithm()
    -
    -
    Returns:
    -
    A new concrete implementation of the AbstractFunDeviceAlgorithm which - is fully built and ready to be used as an Operation component.
    + + + + +
      +
    • +

      buildFunAlgorithm

      +
      public final C buildFunAlgorithm()
      +
      +
      Returns:
      +
      A new concrete implementation of the AbstractFunDeviceAlgorithm which + is fully built and ready to be used as an Operation component.
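For orientation, here is a minimal, hypothetical sketch of the fluent style this page documents: the setters below are taken from the signatures shown on this page, the FunDeviceAlgorithm constructor is documented further down, and the AutoDiffMode constant name is an assumption.

    // Sketch only: assemble a device algorithm through the documented setters,
    // then finalize it with buildFunAlgorithm().
    FunDeviceAlgorithm algorithm =
        new FunDeviceAlgorithm("my_custom_algorithm")
            .setIsSuitableFor( call -> 0.9f )                                 // SuitabilityPredicate
            .setAutogradModeFor( call -> AutoDiffMode.FORWARD_AND_BACKWARD )  // assumed enum constant
            .buildFunAlgorithm();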
      -
  • -
  • -
    -

    setIsSuitableFor

    -
    public final AbstractFunDeviceAlgorithm<C> setIsSuitableFor(SuitabilityPredicate isSuitableFor)
    -
The SuitabilityPredicate received by this method checks if a given instance of an ExecutionCall is suitable to be executed by the ImplementationFor instances residing in this Algorithm as components. The lambda will be called by the isSuitableFor(ExecutionCall) method of any Operation instance this algorithm belongs to.
    -
    -
    Parameters:
    + + + + +
  • -
  • -
    -

    setSupplyADActionFor

    -
    public final AbstractFunDeviceAlgorithm<C> setSupplyADActionFor(ADActionSupplier supplyADActionFor)
    -
This method receives an ADActionSupplier which will supply ADAction instances that can perform backward and forward auto differentiation.
    -
    -
    Parameters:
    -
    supplyADActionFor - A supplier for an ADAction containing implementation details for autograd.
    -
    Returns:
    + + + + +
      +
    • +

      setSupplyADActionFor

      +
      public final AbstractFunDeviceAlgorithm<C> setSupplyADActionFor(ADActionSupplier supplyADActionFor)
      +
This method receives an ADActionSupplier which will supply ADAction instances that can perform backward and forward auto differentiation.
      +
      +
      Parameters:
      +
      supplyADActionFor - A supplier for an ADAction containing implementation details for autograd.
      +
      Returns:
      This very instance to enable method chaining.
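A deliberately simplified sketch of such a supplier follows (the ADAction construction is left as a placeholder because it is operation specific and not part of what this page documents):

    // Sketch only: the supplier receives the calling Function and the current ExecutionCall
    // and hands back an ADAction that knows how to propagate derivatives for this operation.
    algorithm.setSupplyADActionFor( (function, call) -> {
        ADAction action = null; // placeholder: build or look up the ADAction for this operation here
        return action;
    });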
      -
  • -
  • -
    -

    setCallPreparation

    -
    public final AbstractFunDeviceAlgorithm<C> setCallPreparation(ExecutionPreparation instantiateNewTensorsForExecutionIn)
    -
    An Algorithm will produce a Result when executing an ExecutionCall. + + + + +
      +
    • +

      setCallPreparation

      +
      public final AbstractFunDeviceAlgorithm<C> setCallPreparation(ExecutionPreparation instantiateNewTensorsForExecutionIn)
      +
An Algorithm will produce a Result when executing an ExecutionCall. This result must be created somehow. An ExecutionPreparation implementation instance will do just that... Oftentimes the first entry in the array of tensors stored inside the call will be null, to serve as a position for the output to be placed at. The creation of this output tensor is of course highly dependent on the type
@@ -337,95 +438,169 @@

      setCallPreparation

Element-wise operations for example will require the creation of an output tensor with the shape of the provided input tensors, whereas the execution of a linear operation like for example a broadcast operation will require a very different approach... The lambda passed to this will be called by the prepare(ExecutionCall) method of any Operation instance this algorithm belongs to.
      -
      -
      Parameters:
+ The lambda passed to this will be called by the prepare(ExecutionCall) method of any Operation instance this algorithm belongs to.
    +
    +
    Parameters:
    instantiateNewTensorsForExecutionIn - A lambda which prepares the provided execution call (usually output instantiation).
    -
    Returns:
    +
    Returns:
    This very instance to enable method chaining.
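The following is a hedged sketch of what such a preparation lambda tends to look like; the inspection and augmentation API of ExecutionCall is intentionally omitted, and the assumption that the lambda hands the (possibly augmented) call back is labeled in the comments:

    // Sketch only: prepare the call before execution, typically by instantiating the output tensor.
    algorithm.setCallPreparation( call -> {
        // If the tensor at position 0 is null, a real implementation would create the output
        // tensor here (with a shape derived from the inputs) and register it in the call.
        return call; // placeholder: assumed to return the prepared/augmented ExecutionCall
    });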
    -
  • -
  • -
    -

    setAutogradModeFor

    -
    public final AbstractFunDeviceAlgorithm<C> setAutogradModeFor(ADSupportPredicate autogradModeFor)
    -
An ADSupportPredicate lambda checks what kind of auto differentiation mode an Algorithm supports for a given ExecutionCall. The lambda will be called by the autoDiffModeFrom(ExecutionCall) method of any Operation instance this algorithm belongs to.
    -
    -
    Parameters:
    + + + + +
  • -
  • -
    -

    autoDiffModeFrom

    -
    public AutoDiffMode autoDiffModeFrom(ExecutionCall<? extends Device<?>> call)
    -
    Description copied from interface: ADSupportPredicate
    + + + + +
  • +
    +
    Specified by:
    +
    autoDiffModeFrom in interface ADSupportPredicate
    +
    Parameters:
    +
    call - The ExecutionCall which should be checked.
    +
    Returns:
    +
An AutoDiffMode enum instance describing what kind of differentiation can be performed.
    - -
  • -
    -

    setExecution

    -
    public AbstractFunDeviceAlgorithm<C> setExecution(Execution execution)
    -
    + + + + + + + + + + + + +
      +
    • +

      getName

      +
      public java.lang.String getName()
      +
This method returns the name of this Algorithm, which may be used as a variable name in OpenCL kernels or other backends. Therefore, this name is expected to be free of spaces and of any characters which are not alphanumeric.
      -
      -
      Specified by:
      -
      getName in interface Algorithm
      -
      Returns:
      -
      The name of this Algorithm.
      +
      +
      Specified by:
      +
      getName in interface Algorithm
      +
      Returns:
      +
      The name of this Algorithm.
      -
    -
  • - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/api/template/algorithms/FallbackAlgorithm.html b/docs/jdocs/neureka/backend/api/template/algorithms/FallbackAlgorithm.html index ee007661d..ca66ad08c 100644 --- a/docs/jdocs/neureka/backend/api/template/algorithms/FallbackAlgorithm.html +++ b/docs/jdocs/neureka/backend/api/template/algorithms/FallbackAlgorithm.html @@ -1,353 +1,516 @@ - + + - -FallbackAlgorithm (neureka 1.0.0 API) - - - - + +FallbackAlgorithm (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class FallbackAlgorithm

    -
    -
    java.lang.Object -
    neureka.backend.api.template.algorithms.AbstractDeviceAlgorithm<FallbackAlgorithm> -
    neureka.backend.api.template.algorithms.FallbackAlgorithm
    -
    +
    neureka.backend.api.template.algorithms
    +

    Class FallbackAlgorithm

    -
    -
    +
    -
    - +
    +
    +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        FallbackAlgorithm

        -
        public FallbackAlgorithm(String name, - int arity, - Operation type)
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            FallbackAlgorithm

            +
            public FallbackAlgorithm(java.lang.String name,
            +                         int arity,
            +                         Operation type)
          -
    • +
    -
  • -
    -

    Method Details

    -
      -
    • -
      -

      isSuitableFor

      -
      public float isSuitableFor(ExecutionCall<? extends Device<?>> call)
      -
      Description copied from interface: SuitabilityPredicate
      -
      When an ExecutionCall instance has been formed then it will be routed by
- the given Operation instance to its components, namely:
- Algorithm instances!
      +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          isSuitableFor

          +
          public float isSuitableFor(ExecutionCall<? extends Device<?>> call)
          +
          Description copied from interface: SuitabilityPredicate
          +
          When an ExecutionCall instance has been formed then it will be routed by
+ the given Operation instance to its components, namely:
+ Algorithm instances!
          - The ability to decide which algorithm is suitable for a given ExecutionCall instance
          + The ability to decide which algorithm is suitable for a given ExecutionCall instance
          is being granted by implementations of the following method.
          It returns a float representing the suitability of a given call.
          The float is expected to be between 0 and 1, where 0 means
that the implementation is not suitable at all and 1 means
          that it fits the call best!
          -
          -
          Specified by:
          -
          isSuitableFor in interface SuitabilityPredicate
          -
          Parameters:
          -
          call - The ExecutionCall whose suitability for execution on this Algorithm ought to be determined.
          -
          Returns:
          +
          +
          Specified by:
          +
          isSuitableFor in interface SuitabilityPredicate
          +
          Parameters:
          +
          call - The ExecutionCall whose suitability for execution on this Algorithm ought to be determined.
          +
          Returns:
          The suitability degree expressed by a float value between 0 and 1, where 0 means not suitable and 1 means suitable.
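Purely to illustrate the 0-to-1 contract stated above (this is not the actual FallbackAlgorithm logic), a suitability predicate could grade calls along these lines; the two boolean checks are placeholders for real inspections of the call:

    // Sketch only: grade an ExecutionCall, where a higher float means a better fit.
    SuitabilityPredicate predicate = call -> {
        boolean meetsBasicRequirements = true;  // placeholder for a real check on 'call'
        boolean isIdealMatch           = false; // placeholder for a real check on 'call'
        if ( !meetsBasicRequirements ) return 0.0f; // not suitable at all
        if (  isIdealMatch           ) return 1.0f; // fits the call best
        return 0.5f;                                // usable, but another algorithm may fit better
    };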
          -
    • -
    • -
      -

      supplyADActionFor

      -
      public ADAction supplyADActionFor(Function function, - ExecutionCall<? extends Device<?>> call)
      -
      Description copied from interface: ADActionSupplier
      +
    + + + +
      +
    • +

      supplyADActionFor

      +
      public ADAction supplyADActionFor(Function function,
      +                                  ExecutionCall<? extends Device<?>> call)
      +
      Description copied from interface: ADActionSupplier
This method ought to return a new instance of the ADAction class responsible for performing automatic differentiation, both for forward and backward mode differentiation.
      - Therefore an ADAction exposes 2 different procedures.
      + Therefore an ADAction exposes 2 different procedures.
      One is the forward mode differentiation, and the other one
      is the backward mode differentiation which is more commonly known as back-propagation...
      Besides that it may also contain context information used
      to perform said procedures.
      -
      -
      Specified by:
      -
      supplyADActionFor in interface ADActionSupplier
      -
      Parameters:
      +
      +
      Specified by:
      +
      supplyADActionFor in interface ADActionSupplier
      +
      Parameters:
      function - The function from where the request for auto differentiation originates.
      call - The execution call of the current execution which requires auto differentiation support.
      -
      Returns:
      -
      The resulting ADAction.
      +
      Returns:
      +
      The resulting ADAction.
      -
  • -
  • -
    -

    ADAction

    -
    public static ADAction ADAction(Function function, - ExecutionCall<? extends Device<?>> call)
    -
    + + + + + + + + + + + + + + + + +
  • +
    +
    Specified by:
    +
    autoDiffModeFrom in interface ADSupportPredicate
    +
    Parameters:
    +
    call - The ExecutionCall which should be checked.
    +
    Returns:
    +
An AutoDiffMode enum instance describing what kind of differentiation can be performed.
    - -
  • -
    -

    execute

    -
    public Result execute(Function caller, - ExecutionCall<? extends Device<?>> call)
    -
    -
    Specified by:
    -
    execute in interface Execution
    -
    Parameters:
    -
    caller - The caller Function from which the request for execution originated.
    -
    call - The ExecutionCall which should be executed.
    -
    Returns:
    -
    A Result instance wrapping a Tensor and optionally also an ADActionSupplier.
    + + + + +
  • -
  • -
    -

    getName

    -
    public String getName()
    -
    This method returns the name of this Algorithm + + + + +
      +
    • +

      getName

      +
      public java.lang.String getName()
      +
This method returns the name of this Algorithm, which may be used as a variable name in OpenCL kernels or other backends. Therefore, this name is expected to be free of spaces and of any characters which are not alphanumeric.
      -
      -
      Specified by:
      -
      getName in interface Algorithm
      -
      Returns:
      -
      The name of this Algorithm.
      +
      +
      Specified by:
      +
      getName in interface Algorithm
      +
      Returns:
      +
      The name of this Algorithm.
      -
  • - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/api/template/algorithms/FunAlgorithm.html b/docs/jdocs/neureka/backend/api/template/algorithms/FunAlgorithm.html index 49f942842..8b238f281 100644 --- a/docs/jdocs/neureka/backend/api/template/algorithms/FunAlgorithm.html +++ b/docs/jdocs/neureka/backend/api/template/algorithms/FunAlgorithm.html @@ -1,189 +1,319 @@ - + + - -FunAlgorithm (neureka 1.0.0 API) - - - - + +FunAlgorithm (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class FunAlgorithm

    +
    neureka.backend.api.template.algorithms
    +

    Class FunAlgorithm

    -
    java.lang.Object -
    neureka.backend.api.template.algorithms.AbstractFunAlgorithm -
    neureka.backend.api.template.algorithms.FunAlgorithm
    -
    -
    -
    -
    +
    + +
    +
    -
    - +
    +
    + - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        FunAlgorithm

        -
        public FunAlgorithm(String name)
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            FunAlgorithm

            +
            public FunAlgorithm(java.lang.String name)
          -
    • +
    -
  • -
    -

    Method Details

    -
      -
    • -
      -

      getName

      -
      public String getName()
      -
      This method returns the name of this Algorithm +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          getName

          +
          public java.lang.String getName()
          +
This method returns the name of this Algorithm, which may be used as a variable name in OpenCL kernels or other backends. Therefore, this name is expected to be free of spaces and of any characters which are not alphanumeric.
          -
          -
          Specified by:
          -
          getName in interface Algorithm
          -
          Returns:
          -
          The name of this Algorithm.
          +
          +
          Specified by:
          +
          getName in interface Algorithm
          +
          Returns:
          +
          The name of this Algorithm.
          -
    -
  • - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/api/template/algorithms/FunDeviceAlgorithm.html b/docs/jdocs/neureka/backend/api/template/algorithms/FunDeviceAlgorithm.html index 2f313b604..f411797c0 100644 --- a/docs/jdocs/neureka/backend/api/template/algorithms/FunDeviceAlgorithm.html +++ b/docs/jdocs/neureka/backend/api/template/algorithms/FunDeviceAlgorithm.html @@ -1,203 +1,345 @@ - + + - -FunDeviceAlgorithm (neureka 1.0.0 API) - - - - + +FunDeviceAlgorithm (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class FunDeviceAlgorithm

    -
    -
    java.lang.Object -
    neureka.backend.api.template.algorithms.AbstractDeviceAlgorithm<C> -
    neureka.backend.api.template.algorithms.AbstractFunDeviceAlgorithm<FunDeviceAlgorithm> -
    neureka.backend.api.template.algorithms.FunDeviceAlgorithm
    -
    +
    neureka.backend.api.template.algorithms
    +

    Class FunDeviceAlgorithm

    -
    -
    -
    +
    -
    - +
    +
    +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        FunDeviceAlgorithm

        -
        public FunDeviceAlgorithm(String name)
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            FunDeviceAlgorithm

            +
            public FunDeviceAlgorithm(java.lang.String name)
          -
    • +
    -
  • -
    -

    Method Details

    -
      -
    • -
      -

      getName

      -
      public String getName()
      -
      This method returns the name of this Algorithm +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          getName

          +
          public java.lang.String getName()
          +
This method returns the name of this Algorithm, which may be used as a variable name in OpenCL kernels or other backends. Therefore, this name is expected to be free of spaces and of any characters which are not alphanumeric.
          -
          -
          Specified by:
          -
          getName in interface Algorithm
          -
          Returns:
          -
          The name of this Algorithm.
          +
          +
          Specified by:
          +
          getName in interface Algorithm
          +
          Returns:
          +
          The name of this Algorithm.
          -
    -
  • - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/api/template/algorithms/package-frame.html b/docs/jdocs/neureka/backend/api/template/algorithms/package-frame.html new file mode 100644 index 000000000..34f7b9d30 --- /dev/null +++ b/docs/jdocs/neureka/backend/api/template/algorithms/package-frame.html @@ -0,0 +1,24 @@ + + + + + +neureka.backend.api.template.algorithms (neureka 1.0.1 API) + + + + +

    neureka.backend.api.template.algorithms

    + + + diff --git a/docs/jdocs/neureka/backend/api/template/algorithms/package-summary.html b/docs/jdocs/neureka/backend/api/template/algorithms/package-summary.html index e517eb85b..6f68ab429 100644 --- a/docs/jdocs/neureka/backend/api/template/algorithms/package-summary.html +++ b/docs/jdocs/neureka/backend/api/template/algorithms/package-summary.html @@ -1,97 +1,164 @@ - + + - -neureka.backend.api.template.algorithms (neureka 1.0.0 API) - - - - + +neureka.backend.api.template.algorithms (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    -

    Package neureka.backend.api.template.algorithms

    -
    -
    -
    package neureka.backend.api.template.algorithms
    -
    -
    -
    + + + + diff --git a/docs/jdocs/neureka/backend/api/template/algorithms/package-tree.html b/docs/jdocs/neureka/backend/api/template/algorithms/package-tree.html index 72ce05245..74752500f 100644 --- a/docs/jdocs/neureka/backend/api/template/algorithms/package-tree.html +++ b/docs/jdocs/neureka/backend/api/template/algorithms/package-tree.html @@ -1,85 +1,148 @@ - + + - -neureka.backend.api.template.algorithms Class Hierarchy (neureka 1.0.0 API) - - - - + +neureka.backend.api.template.algorithms Class Hierarchy (neureka 1.0.1 API) - - - - - - + + -
    - -
    -

    Hierarchy For Package neureka.backend.api.template.algorithms

    -Package Hierarchies: +Package Hierarchies:
    -
    +

    Class Hierarchy

    -
    -
    + + + + diff --git a/docs/jdocs/neureka/backend/api/template/implementations/AbstractImplementationFor.html b/docs/jdocs/neureka/backend/api/template/implementations/AbstractImplementationFor.html index 61c0d4e99..3c4a1322a 100644 --- a/docs/jdocs/neureka/backend/api/template/implementations/AbstractImplementationFor.html +++ b/docs/jdocs/neureka/backend/api/template/implementations/AbstractImplementationFor.html @@ -1,186 +1,296 @@ - + + - -AbstractImplementationFor (neureka 1.0.0 API) - - - - + +AbstractImplementationFor (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class AbstractImplementationFor<D extends Device<?>>

    +
    neureka.backend.api.template.implementations
    +

    Class AbstractImplementationFor<D extends Device<?>>

    -
    java.lang.Object -
    neureka.backend.api.template.implementations.AbstractImplementationFor<D>
    -
    -
    -
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.backend.api.template.implementations.AbstractImplementationFor<D>
      • +
      +
    • +
    +
    +
    -
    -
    - +
    +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        AbstractImplementationFor

        -
        public AbstractImplementationFor(ImplementationFor<D> implementationLambda, - int arity)
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            AbstractImplementationFor

            +
            public AbstractImplementationFor(ImplementationFor<D> implementationLambda,
            +                                 int arity)
          -
    • +
    -
  • -
    -

    Method Details

    -
      -
    • -
      -

      run

      -
      public Tensor<?> run(ExecutionCall<D> call)
      -
      Description copied from interface: ImplementationFor
      +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          run

          +
          public Tensor<?> run(ExecutionCall<D> call)
          +
          Description copied from interface: ImplementationFor
          This method is the entrypoint for a concrete implementation of the algorithm to which instances of this interface belong and the device on which this is implemented. One has to keep in mind that the implementation details with respect to the target device are specific to the methods of the "TargetDevice" type on which this call should run...
          -
          -
          Specified by:
          -
          run in interface ImplementationFor<D extends Device<?>>
          -
          Parameters:
          +
          +
          Specified by:
          +
          run in interface ImplementationFor<D extends Device<?>>
          +
          Parameters:
          call - The call which ought to be executed on this implementation.
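To make the constructor and run contract above more concrete, here is a hedged sketch of a concrete implementation built on AbstractImplementationFor; using CPU as the device type parameter and returning null from the lambda are assumptions made purely for illustration.

    // Sketch only: a device-specific implementation wrapping an ImplementationFor lambda.
    class MyCpuImplementation extends AbstractImplementationFor<CPU> {
        MyCpuImplementation() {
            super(
                call -> {
                    // ... execute the CPU specific kernel for 'call' here ...
                    return null; // placeholder: a real implementation returns the output Tensor<?>
                },
                2 // arity: this implementation expects two tensor arguments
            );
        }
    }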
          -
    -
  • - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/api/template/implementations/package-frame.html b/docs/jdocs/neureka/backend/api/template/implementations/package-frame.html new file mode 100644 index 000000000..3b9f226f9 --- /dev/null +++ b/docs/jdocs/neureka/backend/api/template/implementations/package-frame.html @@ -0,0 +1,19 @@ + + + + + +neureka.backend.api.template.implementations (neureka 1.0.1 API) + + + + +

    neureka.backend.api.template.implementations

    +
    +

    Classes

    + +
    + + diff --git a/docs/jdocs/neureka/backend/api/template/implementations/package-summary.html b/docs/jdocs/neureka/backend/api/template/implementations/package-summary.html index 2b4c1d8ef..e0d33a1c1 100644 --- a/docs/jdocs/neureka/backend/api/template/implementations/package-summary.html +++ b/docs/jdocs/neureka/backend/api/template/implementations/package-summary.html @@ -1,82 +1,139 @@ - + + - -neureka.backend.api.template.implementations (neureka 1.0.0 API) - - - - + +neureka.backend.api.template.implementations (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    -

    Package neureka.backend.api.template.implementations

    -
    -
    -
    package neureka.backend.api.template.implementations
    -
    -
    -
    + + + + diff --git a/docs/jdocs/neureka/backend/api/template/implementations/package-tree.html b/docs/jdocs/neureka/backend/api/template/implementations/package-tree.html index 3a57b6b40..b122388f0 100644 --- a/docs/jdocs/neureka/backend/api/template/implementations/package-tree.html +++ b/docs/jdocs/neureka/backend/api/template/implementations/package-tree.html @@ -1,71 +1,134 @@ - + + - -neureka.backend.api.template.implementations Class Hierarchy (neureka 1.0.0 API) - - - - + +neureka.backend.api.template.implementations Class Hierarchy (neureka 1.0.1 API) - - - - - - + + -
    - -
    -

    Hierarchy For Package neureka.backend.api.template.implementations

    -Package Hierarchies: +Package Hierarchies:
    -
    +

    Class Hierarchy

    -
    -
    + + + + diff --git a/docs/jdocs/neureka/backend/api/template/operations/AbstractOperation.html b/docs/jdocs/neureka/backend/api/template/operations/AbstractOperation.html index 5f3d3dd85..6db8ff1df 100644 --- a/docs/jdocs/neureka/backend/api/template/operations/AbstractOperation.html +++ b/docs/jdocs/neureka/backend/api/template/operations/AbstractOperation.html @@ -1,283 +1,362 @@ - + + - -AbstractOperation (neureka 1.0.0 API) - - - - + +AbstractOperation (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class AbstractOperation

    -
    -
    java.lang.Object -
    neureka.backend.api.template.operations.AbstractOperation
    +
    neureka.backend.api.template.operations
    +

    Class AbstractOperation

    -
    -
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.backend.api.template.operations.AbstractOperation
      • +
      +
    • +
    +
    +
    -
    -
      + +
    +
    +
    +
    -
    final <T extends Algorithm>
    Operation
    -
    setAlgorithm(Class<T> type, - T instance)
    -
    -
    Operation implementations embody a component system hosting unique Algorithm instances.
    -
    - -
    stringify(String[] children)
    -
    -
    final <T extends Algorithm>
    boolean
    -
    supports(Class<T> implementation)
    -
     
    -
    final <T extends Algorithm>
    boolean
    - -
    -
    This method checks if this Operation contains an instance of the - Algorithm implementation specified via its type class.
    -
    -
    final String
    - -
     
    -
    -
    -
    -
    -

    Methods inherited from class java.lang.Object

    -clone, equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait
    -
    -

    Methods inherited from interface neureka.backend.api.Operation

    -calculate, execute, setAlgorithm
    - - - - -
    -
      +
      +
        +
      • -
      • -
        -

        Field Details

        -
          -
        • -
          -

          _function

          -
          protected final String _function
          +
            +
          • + + +

            Field Detail

            + + + +
              +
            • +

              _function

              +
              protected final java.lang.String _function
              An operation may have two ways in which it can describe itself as String within a Function AST. The first one is an operator style of representation and the second one a classical function. So for the 'Addition' operation the following two representations exist:
              @@ -286,12 +365,15 @@

              _function

            • Function: 'add'; Example: 'add( I[0], 3, 5*I[1] )'
            The following String is the latter way of representing the operation, namely: a functional way.
      -
    -
  • -
    -

    _operator

    -
    protected final String _operator
    + + + + +
      +
    • +

      _operator

      +
      protected final java.lang.String _operator
      An operation may have two ways in which it can describe itself as String within a Function AST. The first one is an operator style of representation and the second one a classical function. So for the 'Addition' operation the following two representations exist:
      @@ -300,346 +382,479 @@

      _operator

    • Function: 'add'; Example: 'add( I[0], 3, 5*I[1] )'
    The following String is the primary way of representing the operation, namely: as an operator.
  • - -
  • -
    -

    _arity

    -
    protected final int _arity
    + + + + +
      +
    • +

      _arity

      +
      protected final int _arity
      Arity is the number of arguments or operands that this function or operation takes.
      -
  • -
  • -
    -

    _isIndexer

    -
    protected final boolean _isIndexer
    + + + + +
      +
    • +

      _isIndexer

      +
      protected final boolean _isIndexer
This flag determines if this operation is auto-indexing passed input arguments. Auto-indexing inputs means that, for a given array of input arguments, the wrapping Function instance will call its child nodes targeted via an index incrementally. The variable 'j' in a Function's expression containing 'I[j]' will then be resolved to an actual input for a given indexer...
      -
  • -
  • -
    -

    _isDifferentiable

    -
    protected final boolean _isDifferentiable
    + + + + +
      +
    • +

      _isDifferentiable

      +
      protected final boolean _isDifferentiable
Certain operations are not differentiable, meaning they can participate in neither forward nor reverse mode differentiation. In order to avoid error-prone behaviour, trying to involve non-differentiable operations will yield proper exceptions.
      -
  • -
  • -
    -

    _isInline

    -
    protected final boolean _isInline
    + + + + +
      +
    • +

      _isInline

      +
      protected final boolean _isInline
      Inline operations are operations which change the state of the arguments passed to them.
      -
  • -
  • -
    -

    _isOperator

    -
    protected final boolean _isOperator
    -
    + + + + +
      +
    • +

      _isOperator

      +
      protected final boolean _isOperator
    -
  • + -
  • -
    -

    Constructor Details

    -
  • + -
  • -
    -

    Method Details

    - + + + +
      +
    • +

      getAlgorithm

      +
      public final <T extends Algorithm> T getAlgorithm(java.lang.Class<T> type)
      +
Operation implementations embody a component system hosting unique Algorithm instances. For a given class implementing the Algorithm interface, there can only be a single instance of it referenced (aka supported) by a given Operation instance. This method ensures this in terms of read access by returning only a single instance or null, based on the provided class instance whose type extends the Algorithm interface.
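A hedged sketch of this component access (read and write) follows; operation stands for an arbitrary Operation instance, and BiElementwise, which is documented further down in these pages, merely serves as an example key type.

    // Sketch only: register an Algorithm component if missing, then read it back.
    if ( !operation.supportsAlgorithm( BiElementwise.class ) )
        operation.setAlgorithm( BiElementwise.class, new BiElementwise() );
    BiElementwise elementwise = operation.getAlgorithm( BiElementwise.class );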
      -
      -
      Specified by:
      -
      getAlgorithm in interface Operation
      -
      Type Parameters:
      -
      T - The type parameter of the Algorithm type class.
      -
      Parameters:
      -
      type - The class of the type which implements Algorithm as a key to get an existing instance.
      -
      Returns:
      -
      The instance of the specified type if any exists within this Operation.
      + based on the provided class instance whose type extends the Algorithm interface.
  • +
    +
    Specified by:
    +
    getAlgorithm in interface Operation
    +
    Type Parameters:
    +
    T - The type parameter of the Algorithm type class.
    +
    Parameters:
    +
    type - The class of the type which implements Algorithm as a key to get an existing instance.
    +
    Returns:
    +
    The instance of the specified type if any exists within this Operation.
    - -
  • -
    -

    supportsAlgorithm

    -
    public final <T extends Algorithm> boolean supportsAlgorithm(Class<T> type)
    -
    This method checks if this Operation contains an instance of the - Algorithm implementation specified via its type class.
    -
    -
    Specified by:
    -
    supportsAlgorithm in interface Operation
    -
    Type Parameters:
    -
    T - The type parameter of the Algorithm type class.
    -
    Parameters:
    -
    type - The class of the type which implements Algorithm.
    -
    Returns:
    -
    The truth value determining if this Operation contains an instance of the specified Algorithm type.
    + + + + +
      +
    • +

      supportsAlgorithm

      +
      public final <T extends Algorithm> boolean supportsAlgorithm(java.lang.Class<T> type)
      +
      This method checks if this Operation contains an instance of the + Algorithm implementation specified via its type class.
      +
      +
      Specified by:
      +
      supportsAlgorithm in interface Operation
      +
      Type Parameters:
      +
      T - The type parameter of the Algorithm type class.
      +
      Parameters:
      +
      type - The class of the type which implements Algorithm.
      +
      Returns:
      +
      The truth value determining if this Operation contains an instance of the specified Algorithm type.
      -
  • -
  • -
    -

    setAlgorithm

    -
    public final <T extends Algorithm> Operation setAlgorithm(Class<T> type, - T instance)
    -
    Operation implementations embody a component system hosting unique Algorithm instances. - For a given class implementing the Algorithm class, there can only be a single - instance of it referenced (aka supported) by a given Operation instance. - This method enables the registration of Algorithm types in the component system of this Operation.
    -
    -
    Specified by:
    -
    setAlgorithm in interface Operation
    -
    Type Parameters:
    -
    T - The type parameter of the Algorithm type class.
    -
    Parameters:
    -
    type - The class of the type which implements Algorithm as key for the provided instance.
    -
    instance - The instance of the provided type class which ought to be referenced (supported) by this Operation.
    -
    Returns:
    -
    This very Operation instance to enable method chaining on it.
    + + + + + + +
      +
    • +

      setAlgorithm

      +
      public final <T extends AlgorithmOperation setAlgorithm(java.lang.Class<T> type,
      +                                                          T instance)
      +
      Operation implementations embody a component system hosting unique Algorithm instances. + For a given class implementing the Algorithm class, there can only be a single + instance of it referenced (aka supported) by a given Operation instance. + This method enables the registration of Algorithm types in the component system of this Operation.
      +
      +
      Specified by:
      +
      setAlgorithm in interface Operation
      +
      Type Parameters:
      +
      T - The type parameter of the Algorithm type class.
      +
      Parameters:
      +
      type - The class of the type which implements Algorithm as key for the provided instance.
      +
      instance - The instance of the provided type class which ought to be referenced (supported) by this Operation.
      +
      Returns:
      +
      This very Operation instance to enable method chaining on it.
      -
  • -
  • -
    -

    getAlgorithmFor

    -
    public final Algorithm getAlgorithmFor(ExecutionCall<?> call)
    -
    Description copied from interface: Operation
    -
    Alongside a component system made up of Algorithm instances, implementations - of this interface also ought to express a routing mechanism which finds the best Algorithm - for a given ExecutionCall instance. + + + + +
      +
    • +

      getAlgorithmFor

      +
      public final Algorithm getAlgorithmFor(ExecutionCall<?> call)
      +
      Description copied from interface: Operation
      +
      Alongside a component system made up of Algorithm instances, implementations + of this interface also ought to express a routing mechanism which finds the best Algorithm + for a given ExecutionCall instance. This method signature describes this requirement.
      -
      -
      Specified by:
      -
      getAlgorithmFor in interface Operation
      -
      Parameters:
      -
      call - The ExecutionCall instance which needs the best Algorithm for execution.
      -
      Returns:
      -
The chosen Algorithm which ought to be fit for executing the provided call.
      +
      +
      Specified by:
      +
      getAlgorithmFor in interface Operation
      +
      Parameters:
      +
      call - The ExecutionCall instance which needs the best Algorithm for execution.
      +
      Returns:
      +
The chosen Algorithm which ought to be fit for executing the provided call.
      -
  • -
  • -
    -

    supports

    -
    public final <T extends Algorithm> boolean supports(Class<T> implementation)
    -
    -
    Specified by:
    -
    supports in interface Operation
    + + + + +
      +
    • +

      supports

      +
      public final <T extends Algorithm> boolean supports(java.lang.Class<T> implementation)
      +
      +
      Specified by:
      +
      supports in interface Operation
      -
  • -
  • -
    -

    isOperator

    -
    public final boolean isOperator()
    -
    Description copied from interface: Operation
    + + + + +
      +
    • +

      isOperator

      +
      public final boolean isOperator()
      +
      Description copied from interface: Operation
      An operator is an alternative to a function like "sum()" or "prod()".
      Examples would be "+, -, * ..."!
      -
      -
      Specified by:
      -
      isOperator in interface Operation
      -
      Returns:
      +
      +
      Specified by:
      +
      isOperator in interface Operation
      +
      Returns:
      If this operation can be represented as operator like "+, -, * ..."!
      -
  • -
  • -
    -

    getIdentifier

    -
    public String getIdentifier()
    -
    Description copied from interface: Operation
    -
    Concrete Operation types ought to be representable by a function name. + + + + +
      +
    • +

      getIdentifier

      +
      public java.lang.String getIdentifier()
      +
      Description copied from interface: Operation
      +
      Concrete Operation types ought to be representable by a function name. The following ensures that this contract is met when overriding the method.
      -
      -
      Specified by:
      -
      getIdentifier in interface Operation
      -
      Returns:
      -
      the function name which serves as identifier when parsing Function instances.
      +
      +
      Specified by:
      +
      getIdentifier in interface Operation
      +
      Returns:
      +
      the function name which serves as identifier when parsing Function instances.
      -
  • -
  • -
    -

    getOperator

    -
    public final String getOperator()
    -
    -
    Specified by:
    -
    getOperator in interface Operation
    + + + + +
      +
    • +

      getOperator

      +
      public final java.lang.String getOperator()
      +
      +
      Specified by:
      +
      getOperator in interface Operation
      -
  • -
  • -
    -

    getArity

    -
    public final int getArity()
    -
    Description copied from interface: Operation
    + + + + +
      +
    • +

      getArity

      +
      public final int getArity()
      +
      Description copied from interface: Operation
      Arity is the number of arguments or operands that this function or operation takes.
      -
      -
      Specified by:
      -
      getArity in interface Operation
      -
      Returns:
      +
      +
      Specified by:
      +
      getArity in interface Operation
      +
      Returns:
      The number of arguments expected by this operation, or -1 if an arbitrary number is accepted.
      -
  • -
  • -
    -

    isIndexer

    -
    public final boolean isIndexer()
    -
    Description copied from interface: Operation
    -
    This boolean property tell the Function implementations that this Operation + + + + +
      +
    • +

      isIndexer

      +
      public final boolean isIndexer()
      +
      Description copied from interface: Operation
      +
This boolean property tells the Function implementations that this Operation ought to be viewed as something to be indexed. The Function will use this information to iterate over all the provided inputs and then execute the function while also passing the index to the function AST. The resulting array will then be available to this Operation as an argument list. This feature works alongside the Function implementation found in FunctionVariable, which represents an input indexed by the identifier 'j'!
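As a small, hedged illustration of what auto-indexing means in practice: the 'I[j]' expression syntax below follows the description above, while Function.of(..) is assumed to be the usual entry point for parsing such an expression.

    // Sketch only: an indexer expression that is resolved against every provided input.
    Function sumOfAllInputs = Function.of("sum( I[j] )"); // 'Function.of' assumed as the parser entry point
    // For n provided inputs, 'I[j]' is resolved to each input in turn (j = 0..n-1),
    // and the indexer operation then receives the resulting values as its argument list.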
      -
      -
      Specified by:
      -
      isIndexer in interface Operation
      -
      Returns:
      +
      +
      Specified by:
      +
      isIndexer in interface Operation
      +
      Returns:
      If this operation is an indexer.
      -
  • -
  • -
    -

    isDifferentiable

    -
    public final boolean isDifferentiable()
    -
    -
    Specified by:
    -
    isDifferentiable in interface Operation
    + + + + +
      +
    • +

      isDifferentiable

      +
      public final boolean isDifferentiable()
      +
      +
      Specified by:
      +
      isDifferentiable in interface Operation
      -
  • -
  • -
    -

    isInline

    -
    public boolean isInline()
    -
    Description copied from interface: Operation
    -
    This flag indicates that the implementation of this Operation + + + + +
      +
    • +

      isInline

      +
      public boolean isInline()
      +
      Description copied from interface: Operation
      +
      This flag indicates that the implementation of this Operation performs an operation which modifies the inputs to that operation. An example of this would be an assignment operation which copies the contents of one nd-array / tensor into another tensor. This second tensor will then have changed its state. This can be dangerous when auto-differentiation is involved.
      -
      -
      Specified by:
      -
      isInline in interface Operation
      -
      Returns:
      -
      The truth value determining if this Operation changes the contents of inputs.
      +
      +
      Specified by:
      +
      isInline in interface Operation
      +
      Returns:
      +
      The truth value determining if this Operation changes the contents of inputs.
      -
  • -
  • -
    -

    getDefaultAlgorithm

    -
    public final FallbackAlgorithm getDefaultAlgorithm()
    -
    + + + + + + + + +
      +
    • +

      asDerivative

      +
      public java.lang.String asDerivative(Function[] children,
      +                                     int derivationIndex)
      +
Operation implementations and Function implementations are in a tight relationship where the Function describes an abstract syntax tree based on the syntactic information provided by the Operation (through methods like Operation.getOperator() or Operation.getIdentifier()). One important feature of the Function is the ability to create derivatives by calling the Function.getDerivative(int) method. Implementations of this Function method ought to call the method defined below in order to form the derivation based on the child nodes of the abstract syntax tree of the given Function node.
      +
      +
      Specified by:
      +
      asDerivative in interface Operation
      +
      Parameters:
      children - The child nodes of a AST node referencing this operation.
      derivationIndex - The index of the input node which ought to be derived.
      -
      Returns:
      -
      The derivative as a String which should be parsable into yet another AST.
      +
      Returns:
      +
      The derivative as a String which should be parsable into yet another AST.
      -
    • -
    • -
      -

      stringify

      -
      public String stringify(String[] children)
      -
      -
      Specified by:
      -
      stringify in interface Operation
      +
    + + + +
      +
    • +

      stringify

      +
      public java.lang.String stringify(java.lang.String[] children)
      +
      +
      Specified by:
      +
      stringify in interface Operation
      -
    • -
    • -
      -

      toString

      -
      public final String toString()
      -
      -
      Overrides:
      -
      toString in class Object
      +
    + + + +
      +
    • +

      toString

      +
      public final java.lang.String toString()
      +
      +
      Overrides:
      +
      toString in class java.lang.Object
      -
    • -
    • -
      -

      operationName

      -
      protected String operationName()
      +
    + + + +
      +
    • +

      operationName

      +
      protected java.lang.String operationName()
      Override this if you want your operation to have a string representation with a custom prefix which is something other than the simple class name!
      -
      -
      Returns:
      +
      +
      Returns:
      The simple class name, or something else if overridden.
      -
    -
  • - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/api/template/operations/OperationBuilder.Derivation.html b/docs/jdocs/neureka/backend/api/template/operations/OperationBuilder.Derivation.html index 2b16e8d46..10ea53a85 100644 --- a/docs/jdocs/neureka/backend/api/template/operations/OperationBuilder.Derivation.html +++ b/docs/jdocs/neureka/backend/api/template/operations/OperationBuilder.Derivation.html @@ -1,130 +1,224 @@ - + + - -OperationBuilder.Derivation (neureka 1.0.0 API) - - - - + +OperationBuilder.Derivation (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Interface OperationBuilder.Derivation

    +
    neureka.backend.api.template.operations
    +

    Interface OperationBuilder.Derivation

    -
    -
    +
    +
    +
    -
    -
      - -
    • -
      -

      Method Summary

      -
      -
      -
      -
      -
      Modifier and Type
      -
      Method
      -
      Description
      - -
      asDerivative(Function[] children, - int d)
      -
       
      -
      -
      +
      +
      public static interface OperationBuilder.Derivation
      +
    • +
    - +
    + - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Method Details

      -
        -
      • -
        -

        asDerivative

        -
        String asDerivative(Function[] children, - int d)
        -
        +
          +
        • + + +

          Method Detail

          + + + +
            +
          • +

            asDerivative

            +
            java.lang.String asDerivative(Function[] children,
            +                              int d)
            +
          • +
        -
    - +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/api/template/operations/OperationBuilder.Stringifier.html b/docs/jdocs/neureka/backend/api/template/operations/OperationBuilder.Stringifier.html index ce5845391..6d8984282 100644 --- a/docs/jdocs/neureka/backend/api/template/operations/OperationBuilder.Stringifier.html +++ b/docs/jdocs/neureka/backend/api/template/operations/OperationBuilder.Stringifier.html @@ -1,128 +1,222 @@ - + + - -OperationBuilder.Stringifier (neureka 1.0.0 API) - - - - + +OperationBuilder.Stringifier (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Interface OperationBuilder.Stringifier

    +
    neureka.backend.api.template.operations
    +

    Interface OperationBuilder.Stringifier

    -
    -
    +
    +
    +
    -
    -
      - -
    • -
      -

      Method Summary

      -
      -
      -
      -
      -
      Modifier and Type
      -
      Method
      -
      Description
      - -
      stringify(String[] children)
      -
       
      -
      -
      +
      +
      public static interface OperationBuilder.Stringifier
      +
    • +
    - +
    + - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Method Details

      -
        -
      • -
        -

        stringify

        -
        String stringify(String[] children)
        -
        +
          +
        • + + +

          Method Detail

          + + + +
            +
          • +

            stringify

            +
            java.lang.String stringify(java.lang.String[] children)
            +
          • +
        -
    - +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/api/template/operations/OperationBuilder.html b/docs/jdocs/neureka/backend/api/template/operations/OperationBuilder.html index 29ff36c39..68d040b57 100644 --- a/docs/jdocs/neureka/backend/api/template/operations/OperationBuilder.html +++ b/docs/jdocs/neureka/backend/api/template/operations/OperationBuilder.html @@ -1,353 +1,544 @@ - + + - -OperationBuilder (neureka 1.0.0 API) - - - - + +OperationBuilder (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class OperationBuilder

    -
    -
    java.lang.Object -
    neureka.backend.api.template.operations.OperationBuilder
    +
    neureka.backend.api.template.operations
    +

    Class OperationBuilder

    -
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.backend.api.template.operations.OperationBuilder
      • +
      +
    • +
    +
    +
      +

    • -
      public final class OperationBuilder -extends Object
      -
      This builder class builds instances of the Operation interface. - Implementing the Operation interface manually can result in a lot of boilerplate code. - A builder class is the perfect fit for the Operation because the interface mostly +
      +
      public final class OperationBuilder
      +extends java.lang.Object
      +
      This builder class builds instances of the Operation interface. + Implementing the Operation interface manually can result in a lot of boilerplate code. + A builder class is the perfect fit for the Operation because the interface mostly defines simple properties...
In order to ensure that all necessary properties have been set, the builder keeps track of the passed parameters. If not all properties have been set, the builder will throw an exception.
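For orientation, here is a hedged sketch of the two nested lambda contracts documented on the OperationBuilder.Stringifier and OperationBuilder.Derivation pages above; the derivative body is a placeholder and not real differentiation logic.

    // Sketch only: the lambdas an OperationBuilder-based operation typically needs.
    OperationBuilder.Stringifier stringifier =
        children -> "add(" + String.join(", ", children) + ")"; // render the AST node as a function call
    OperationBuilder.Derivation derivation =
        (children, d) -> children[d].toString(); // placeholder: real code would apply differentiation rules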
      -
    -
    -
    - +
    + - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        OperationBuilder

        -
        public OperationBuilder()
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            OperationBuilder

            +
            public OperationBuilder()
          -
    • +
    -
  • -
    -

    Method Details

    -
  • - +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/api/template/operations/package-frame.html b/docs/jdocs/neureka/backend/api/template/operations/package-frame.html new file mode 100644 index 000000000..450ae7a44 --- /dev/null +++ b/docs/jdocs/neureka/backend/api/template/operations/package-frame.html @@ -0,0 +1,25 @@ + + + + + +neureka.backend.api.template.operations (neureka 1.0.1 API) + + + + +

    neureka.backend.api.template.operations

    + + + diff --git a/docs/jdocs/neureka/backend/api/template/operations/package-summary.html b/docs/jdocs/neureka/backend/api/template/operations/package-summary.html index cd2053ba2..63818635a 100644 --- a/docs/jdocs/neureka/backend/api/template/operations/package-summary.html +++ b/docs/jdocs/neureka/backend/api/template/operations/package-summary.html @@ -1,98 +1,166 @@ - + + - -neureka.backend.api.template.operations (neureka 1.0.0 API) - - - - + +neureka.backend.api.template.operations (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    -

    Package neureka.backend.api.template.operations

    -
    -
    -
    package neureka.backend.api.template.operations
    -
    -
    -
    + + + + diff --git a/docs/jdocs/neureka/backend/api/template/operations/package-tree.html b/docs/jdocs/neureka/backend/api/template/operations/package-tree.html index 687d13f21..914b62aea 100644 --- a/docs/jdocs/neureka/backend/api/template/operations/package-tree.html +++ b/docs/jdocs/neureka/backend/api/template/operations/package-tree.html @@ -1,79 +1,140 @@ - + + - -neureka.backend.api.template.operations Class Hierarchy (neureka 1.0.0 API) - - - - + +neureka.backend.api.template.operations Class Hierarchy (neureka 1.0.1 API) - - - - - - + + -
    - -
    -

    Hierarchy For Package neureka.backend.api.template.operations

    -Package Hierarchies: +Package Hierarchies:
    -
    +

    Class Hierarchy

    -
    -

    Interface Hierarchy

    -
    -
    + + + + diff --git a/docs/jdocs/neureka/backend/cpu/CPUBackend.html b/docs/jdocs/neureka/backend/cpu/CPUBackend.html index 2c1757772..3ef947015 100644 --- a/docs/jdocs/neureka/backend/cpu/CPUBackend.html +++ b/docs/jdocs/neureka/backend/cpu/CPUBackend.html @@ -1,229 +1,365 @@ - + + - -CPUBackend (neureka 1.0.0 API) - - - - + +CPUBackend (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class CPUBackend

    -
    -
    java.lang.Object -
    neureka.backend.cpu.CPUBackend
    +
    neureka.backend.cpu
    +

    Class CPUBackend

    -
    -
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.backend.cpu.CPUBackend
      • +
      +
    • +
    +
    +
    -
    -
      + +
    +
    +
    + - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        CPUBackend

        -
        public CPUBackend()
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            CPUBackend

            +
            public CPUBackend()
          -
    • +
    -
  • -
    -

    Method Details

    - + + + +
  • +
    +
    Specified by:
    +
    dispose in interface BackendExtension
    - -
  • -
    -

    getLoader

    -
    public BackendLoader getLoader()
    -
    -
    Specified by:
    -
    getLoader in interface BackendExtension
    + + + + +
  • - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/cpu/package-frame.html b/docs/jdocs/neureka/backend/cpu/package-frame.html new file mode 100644 index 000000000..caa883d4c --- /dev/null +++ b/docs/jdocs/neureka/backend/cpu/package-frame.html @@ -0,0 +1,19 @@ + + + + + +neureka.backend.cpu (neureka 1.0.1 API) + + + + +

    neureka.backend.cpu

    +
    +

    Classes

    + +
    + + diff --git a/docs/jdocs/neureka/backend/cpu/package-summary.html b/docs/jdocs/neureka/backend/cpu/package-summary.html index 85f18cb1a..097f32620 100644 --- a/docs/jdocs/neureka/backend/cpu/package-summary.html +++ b/docs/jdocs/neureka/backend/cpu/package-summary.html @@ -1,84 +1,141 @@ - + + - -neureka.backend.cpu (neureka 1.0.0 API) - - - - + +neureka.backend.cpu (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    -

    Package neureka.backend.cpu

    +

    Package neureka.backend.cpu

    -
    -
    package neureka.backend.cpu
    -
    -
      -
    • -
      -
      Classes
      -
      -
      Class
      -
      Description
      - -
      +
      +
        +
      • + + + + + + + + + + + + +
        Class Summary 
        ClassDescription
        CPUBackend
        This class loads the CPU operations into the Neureka library context.
        - - - +
      -
    -
    + + + + diff --git a/docs/jdocs/neureka/backend/cpu/package-tree.html b/docs/jdocs/neureka/backend/cpu/package-tree.html index 57d5fc07b..fca3ff32c 100644 --- a/docs/jdocs/neureka/backend/cpu/package-tree.html +++ b/docs/jdocs/neureka/backend/cpu/package-tree.html @@ -1,71 +1,134 @@ - + + - -neureka.backend.cpu Class Hierarchy (neureka 1.0.0 API) - - - - + +neureka.backend.cpu Class Hierarchy (neureka 1.0.1 API) - - - - - - + + -
    - -
    -

    Hierarchy For Package neureka.backend.cpu

    -Package Hierarchies: +Package Hierarchies:
    -
    +

    Class Hierarchy

    -
    -
    diff --git a/docs/jdocs/neureka/backend/main/algorithms/BiElementwise.html b/docs/jdocs/neureka/backend/main/algorithms/BiElementwise.html
    [index b7ea06654..8d282a94c; class page regenerated: title bumped from "BiElementwise (neureka 1.0.0 API)" to "BiElementwise (neureka 1.0.1 API)", navigation markup rebuilt, "Constructor Details"/"Method Details" re-emitted as "Constructor Detail"/"Method Detail", and signatures now printed with fully qualified types. Recoverable content:]

    Class BiElementwise
    (listed in the hierarchy under java.lang.Object and neureka.backend.api.template.algorithms.AbstractDeviceAlgorithm<C>)

    Constructor Detail
        public BiElementwise()

    Method Detail
        getName
        public java.lang.String getName()      (previously rendered as: public String getName())
        This method returns the name of this Algorithm which may be used as variable names in
        OpenCL kernels or other backends. Therefore, this name is expected to be void of any
        spaces or non-numeric and alphabetic characters.
        Specified by: getName in interface Algorithm
        Returns: The name of this Algorithm.
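    The naming rule quoted above (letters and digits only, no spaces, because the value may end up as a variable name inside an OpenCL kernel) is easy to check mechanically. The snippet below is an illustrative sketch and not part of the library; the class name, the regex, and the sample inputs are assumptions chosen only to demonstrate the rule.

        // Hypothetical helper enforcing the Algorithm.getName() contract described above.
        final class AlgorithmNameCheck {
            // only ASCII letters and digits, at least one character
            static boolean isValid(String name) {
                return name != null && name.matches("[a-zA-Z0-9]+");
            }
        }
        // AlgorithmNameCheck.isValid("biElementwise")   -> true   (usable as a kernel variable name)
        // AlgorithmNameCheck.isValid("bi elementwise")  -> false  (contains a space)
        // AlgorithmNameCheck.isValid("bi-elementwise")  -> false  (contains a non-alphanumeric character)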
    [The remaining class pages of package neureka.backend.main.algorithms carry the same pattern of changes as BiElementwise.html above: title bumped from "neureka 1.0.0 API" to "neureka 1.0.1 API", navigation markup rebuilt, "Constructor Details"/"Method Details" re-emitted as "Constructor Detail"/"Method Detail", and getName() now printed as public java.lang.String getName() together with the identical Algorithm javadoc. Per page, the recoverable signatures are:]

    diff --git a/docs/jdocs/neureka/backend/main/algorithms/BiScalarBroadcast.html b/docs/jdocs/neureka/backend/main/algorithms/BiScalarBroadcast.html
    [index 34e7e1ea4..811af2328; constructor: public BiScalarBroadcast()]

    diff --git a/docs/jdocs/neureka/backend/main/algorithms/Broadcast.html b/docs/jdocs/neureka/backend/main/algorithms/Broadcast.html
    [index 69f3e3bd2..469156840; constructor: public Broadcast()]

    diff --git a/docs/jdocs/neureka/backend/main/algorithms/DotProductAlgorithm.html b/docs/jdocs/neureka/backend/main/algorithms/DotProductAlgorithm.html
    [index 4d4056b9f..3b253b42e; constructor: public DotProductAlgorithm(); hierarchy: java.lang.Object, AbstractDeviceAlgorithm<C>, AbstractFunDeviceAlgorithm<DotProductAlgorithm>, DotProductAlgorithm]

    diff --git a/docs/jdocs/neureka/backend/main/algorithms/ElementwiseAlgorithm.html b/docs/jdocs/neureka/backend/main/algorithms/ElementwiseAlgorithm.html
    [index ca2bb9a68..57422027b; constructor: public ElementwiseAlgorithm(); hierarchy: java.lang.Object, AbstractDeviceAlgorithm<C>, AbstractFunDeviceAlgorithm<ElementwiseAlgorithm>, ElementwiseAlgorithm]

    diff --git a/docs/jdocs/neureka/backend/main/algorithms/MatMulAlgorithm.html b/docs/jdocs/neureka/backend/main/algorithms/MatMulAlgorithm.html
    [index 8ccb22d0f..e9e224a1c; constructor: public MatMulAlgorithm()]

    diff --git a/docs/jdocs/neureka/backend/main/algorithms/NDConvolution.html b/docs/jdocs/neureka/backend/main/algorithms/NDConvolution.html
    [index d355aa2b7..b96d75bee; constructor: public NDConvolution()]

    diff --git a/docs/jdocs/neureka/backend/main/algorithms/ScalarAlgorithm.html b/docs/jdocs/neureka/backend/main/algorithms/ScalarAlgorithm.html
    [index a6a64e2df..b32599ebb; constructor: public ScalarAlgorithm()]

    diff --git a/docs/jdocs/neureka/backend/main/algorithms/ScalarBroadcast.html b/docs/jdocs/neureka/backend/main/algorithms/ScalarBroadcast.html
    [index c6283ee0c..db1c4ef7b; constructor: public ScalarBroadcast(ScalarFun fun)]

    diff --git a/docs/jdocs/neureka/backend/main/algorithms/ScalarSumAlgorithm.html b/docs/jdocs/neureka/backend/main/algorithms/ScalarSumAlgorithm.html
    [index 52b47fd3e..ff5cc6f3e; constructor: public ScalarSumAlgorithm(); hierarchy: java.lang.Object, AbstractFunAlgorithm, ScalarSumAlgorithm]

    diff --git a/docs/jdocs/neureka/backend/main/algorithms/SumAlgorithm.html b/docs/jdocs/neureka/backend/main/algorithms/SumAlgorithm.html
    [index 420904f74..e804b7bd2; constructor: public SumAlgorithm()]
    diff --git a/docs/jdocs/neureka/backend/main/algorithms/Util.html b/docs/jdocs/neureka/backend/main/algorithms/Util.html
    [index 9130a91ed..56dc65792; class page regenerated for 1.0.1 with the same title bump and rebuilt markup. Recoverable content:]

    Class Util
    java.lang.Object
      neureka.backend.main.algorithms.Util

    public class Util extends java.lang.Object

    Constructor Detail
        public Util()

    Method Detail
        transpose
        public static <T> Tensor<T> transpose(Tensor<T> t)
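    The page gives no description for transpose, so the sketch below only illustrates the shape of a call to it; the fluent Tensor.of(...).withShape(...).andFill(...) builder used to create the input is an assumption about the surrounding API and is not documented here.

        // Hypothetical usage of the static helper shown above.
        Tensor<Double> matrix = Tensor.of(Double.class)       // assumed builder API
                                      .withShape(2, 3)
                                      .andFill(1d, 2d, 3d, 4d, 5d, 6d);

        Tensor<Double> transposed = Util.transpose(matrix);   // expected shape: (3, 2)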
    diff --git a/docs/jdocs/neureka/backend/main/algorithms/internal/AndBackward.html b/docs/jdocs/neureka/backend/main/algorithms/internal/AndBackward.html
    [index 46fccfa40..75b74ee76; interface page regenerated for 1.0.1. Recoverable content:]

    Interface AndBackward<F>
    @FunctionalInterface
    public interface AndBackward<F>
    Functional Interface: This is a functional interface and can therefore be used as the
    assignment target for a lambda expression or method reference.
    Method Summary: and(F backward)

    diff --git a/docs/jdocs/neureka/backend/main/algorithms/internal/WithForward.html b/docs/jdocs/neureka/backend/main/algorithms/internal/WithForward.html
    [index 3d5ff65a5..a610d7526; interface page regenerated for 1.0.1. Recoverable content:]

    Interface WithForward<F>
    @FunctionalInterface
    public interface WithForward<F>
    Functional Interface: This is a functional interface and can therefore be used as the
    assignment target for a lambda expression or method reference.
    Method Summary: with(F forward)
    diff --git a/docs/jdocs/neureka/backend/main/algorithms/internal/package-frame.html b/docs/jdocs/neureka/backend/main/algorithms/internal/package-frame.html
    [new file, index 000000000..1760866f4; frame page titled "neureka.backend.main.algorithms.internal (neureka 1.0.1 API)" listing the package's interfaces]

    diff --git a/docs/jdocs/neureka/backend/main/algorithms/internal/package-summary.html b/docs/jdocs/neureka/backend/main/algorithms/internal/package-summary.html
    [index 4e78f7736..5603a64ed; title bumped from "neureka 1.0.0 API" to "neureka 1.0.1 API" and markup rebuilt]

    diff --git a/docs/jdocs/neureka/backend/main/algorithms/internal/package-tree.html b/docs/jdocs/neureka/backend/main/algorithms/internal/package-tree.html
    [index e1d3df887..ae382aea5; "Hierarchy For Package neureka.backend.main.algorithms.internal" regenerated; the Interface Hierarchy lists neureka.backend.main.algorithms.internal.AndBackward<F> and neureka.backend.main.algorithms.internal.WithForward<F>]

    diff --git a/docs/jdocs/neureka/backend/main/algorithms/package-frame.html b/docs/jdocs/neureka/backend/main/algorithms/package-frame.html
    [new file, index 000000000..be89a779c; frame page titled "neureka.backend.main.algorithms (neureka 1.0.1 API)"]

    diff --git a/docs/jdocs/neureka/backend/main/algorithms/package-summary.html b/docs/jdocs/neureka/backend/main/algorithms/package-summary.html
    [index b18eb4d07..1d9aace7d; title bumped to 1.0.1 and the package description re-emitted: "Everything in this package should be considered library-private! DO NOT DEPEND ON CLASSES INSIDE THIS PACKAGE! Code inside this package or any sub-packages might change frequently..."]

    diff --git a/docs/jdocs/neureka/backend/main/algorithms/package-tree.html b/docs/jdocs/neureka/backend/main/algorithms/package-tree.html
    [index dc6707be4..5814419b0; "Hierarchy For Package neureka.backend.main.algorithms" regenerated with the same title bump and a rebuilt Class Hierarchy section]
    diff --git a/docs/jdocs/neureka/backend/main/implementations/CLImplementation.html b/docs/jdocs/neureka/backend/main/implementations/CLImplementation.html
    [index 81023ebbd..0850d2ac6; class page for neureka.backend.main.implementations.CLImplementation regenerated with the 1.0.0 to 1.0.1 title bump and rebuilt markup]

    diff --git a/docs/jdocs/neureka/backend/main/implementations/CPUImplementation.AndImplementation.html b/docs/jdocs/neureka/backend/main/implementations/CPUImplementation.AndImplementation.html
    [index 3c422d59c..51aad7f45; page for the nested interface CPUImplementation.AndImplementation regenerated in the same way]

    diff --git a/docs/jdocs/neureka/backend/main/implementations/CPUImplementation.html b/docs/jdocs/neureka/backend/main/implementations/CPUImplementation.html
    [index ea15e0c63..99e86ab74; hierarchy: java.lang.Object, neureka.backend.api.template.implementations.AbstractImplementationFor<CPU>, neureka.backend.main.implementations.CPUImplementation]

    diff --git a/docs/jdocs/neureka/backend/main/implementations/ParsedCLImplementation.html b/docs/jdocs/neureka/backend/main/implementations/ParsedCLImplementation.html
    [index 72621744f..4ade62fdb; hierarchy: java.lang.Object, AbstractImplementationFor<OpenCLDevice>, CLImplementation, ParsedCLImplementation]
    diff --git a/docs/jdocs/neureka/backend/main/implementations/SimpleCLImplementation.html b/docs/jdocs/neureka/backend/main/implementations/SimpleCLImplementation.html
    [index 6c15a074d..b41d12071; class page regenerated for 1.0.1. Recoverable content:]

    Class SimpleCLImplementation
    java.lang.Object
      neureka.backend.api.template.implementations.AbstractImplementationFor<OpenCLDevice>
        neureka.backend.main.implementations.CLImplementation
          neureka.backend.main.implementations.SimpleCLImplementation

    Constructor Detail
        protected SimpleCLImplementation(ImplementationFor<OpenCLDevice> execution,
                                         int arity,
                                         java.lang.String kernelName,
                                         java.lang.String kernelSource)
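    Since the constructor above is protected, it is only reachable from a subclass. The sketch below shows the call shape only: the subclass name, the placeholder lambda passed as the execution step (assuming ImplementationFor exposes a single run method, as documented further down for CPUBroadcast), the arity, the kernel name, and the kernel source are all invented values, not taken from the library.

        // Hypothetical subclass; everything except the constructor signature is assumed.
        class ExampleClImplementation extends SimpleCLImplementation {
            ExampleClImplementation() {
                super(
                    call -> null,                            // execution step (placeholder)
                    2,                                       // arity: number of tensor arguments
                    "example_kernel",                        // kernelName (placeholder)
                    "__kernel void example_kernel() { }"     // kernelSource (placeholder)
                );
            }
        }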
    diff --git a/docs/jdocs/neureka/backend/main/implementations/broadcast/CLBroadcast.html b/docs/jdocs/neureka/backend/main/implementations/broadcast/CLBroadcast.html
    [index 05ccc689a..b75c953ae; class page regenerated for 1.0.1. Recoverable content:]

    Class CLBroadcast
    java.lang.Object
      neureka.backend.api.template.implementations.AbstractImplementationFor<OpenCLDevice>
        neureka.backend.main.implementations.CLImplementation
          neureka.backend.main.implementations.ParsedCLImplementation
            neureka.backend.main.implementations.broadcast.CLBroadcast

    Constructor Detail
        protected CLBroadcast(java.lang.String postfix,
                              java.lang.String forward,
                              java.lang.String backward)
    [The six concrete OpenCL broadcast pages below were regenerated with the same 1.0.0 to 1.0.1 changes; each documents a single public constructor taking an operation id string:]

    diff --git a/docs/jdocs/neureka/backend/main/implementations/broadcast/CLBroadcastAddition.html b/docs/jdocs/neureka/backend/main/implementations/broadcast/CLBroadcastAddition.html
    [index 4ba589027..af53862f1; public CLBroadcastAddition(java.lang.String id)]

    diff --git a/docs/jdocs/neureka/backend/main/implementations/broadcast/CLBroadcastDivision.html b/docs/jdocs/neureka/backend/main/implementations/broadcast/CLBroadcastDivision.html
    [index 85e84cb70..cc70e8062; public CLBroadcastDivision(java.lang.String id)]

    diff --git a/docs/jdocs/neureka/backend/main/implementations/broadcast/CLBroadcastModulo.html b/docs/jdocs/neureka/backend/main/implementations/broadcast/CLBroadcastModulo.html
    [index 7f0b7e623..8e2544150; public CLBroadcastModulo(java.lang.String id)]

    diff --git a/docs/jdocs/neureka/backend/main/implementations/broadcast/CLBroadcastMultiplication.html b/docs/jdocs/neureka/backend/main/implementations/broadcast/CLBroadcastMultiplication.html
    [index 4334ad2cd..ffb22f2d7; public CLBroadcastMultiplication(java.lang.String id)]

    diff --git a/docs/jdocs/neureka/backend/main/implementations/broadcast/CLBroadcastPower.html b/docs/jdocs/neureka/backend/main/implementations/broadcast/CLBroadcastPower.html
    [index 15f62f0d7..455160e8b; public CLBroadcastPower(java.lang.String id)]

    diff --git a/docs/jdocs/neureka/backend/main/implementations/broadcast/CLBroadcastSubtraction.html b/docs/jdocs/neureka/backend/main/implementations/broadcast/CLBroadcastSubtraction.html
    [index 3abc6c343..0bb5e4e67; public CLBroadcastSubtraction(java.lang.String id)]
    diff --git a/docs/jdocs/neureka/backend/main/implementations/broadcast/CLScalarBroadcast.html b/docs/jdocs/neureka/backend/main/implementations/broadcast/CLScalarBroadcast.html
    [index c0eb7c40b..ebdfa5764; class page regenerated for 1.0.1. Recoverable content:]

    Class CLScalarBroadcast
    java.lang.Object
      neureka.backend.api.template.implementations.AbstractImplementationFor<OpenCLDevice>
        neureka.backend.main.implementations.CLImplementation
          neureka.backend.main.implementations.ParsedCLImplementation
            neureka.backend.main.implementations.broadcast.CLScalarBroadcast

    public class CLScalarBroadcast extends ParsedCLImplementation

    Field Summary
        a protected static final String constant

    Constructor Detail
        public CLScalarBroadcast(java.lang.String postfix,
                                 java.lang.String activation,
                                 java.lang.String derivation)
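    Like SimpleCLImplementation above, this constructor takes raw source snippets as strings. The call below only illustrates its shape; the postfix and the two snippet strings are invented, and the exact template syntax such snippets must follow is not documented on this page.

        // Hypothetical instantiation; all three argument values are placeholders.
        CLScalarBroadcast example = new CLScalarBroadcast(
            "example",                      // postfix (placeholder)
            "output = input1 + value;",     // activation snippet (assumed format)
            "output = 1;"                   // derivation snippet (assumed format)
        );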
    [The seven scalar OpenCL broadcast pages below were regenerated with the same 1.0.0 to 1.0.1 changes; each documents a single public constructor taking an operation id string:]

    diff --git a/docs/jdocs/neureka/backend/main/implementations/broadcast/CLScalarBroadcastAddition.html b/docs/jdocs/neureka/backend/main/implementations/broadcast/CLScalarBroadcastAddition.html
    [index 7849821ed..bc2fdbc99; public CLScalarBroadcastAddition(java.lang.String id)]

    diff --git a/docs/jdocs/neureka/backend/main/implementations/broadcast/CLScalarBroadcastDivision.html b/docs/jdocs/neureka/backend/main/implementations/broadcast/CLScalarBroadcastDivision.html
    [index f84bb0d29..0fa93eee7; public CLScalarBroadcastDivision(java.lang.String id)]

    diff --git a/docs/jdocs/neureka/backend/main/implementations/broadcast/CLScalarBroadcastIdentity.html b/docs/jdocs/neureka/backend/main/implementations/broadcast/CLScalarBroadcastIdentity.html
    [index f53f4b3d5..11009e502; public CLScalarBroadcastIdentity(java.lang.String id)]

    diff --git a/docs/jdocs/neureka/backend/main/implementations/broadcast/CLScalarBroadcastModulo.html b/docs/jdocs/neureka/backend/main/implementations/broadcast/CLScalarBroadcastModulo.html
    [index b78cd5b29..caa6baf78; public CLScalarBroadcastModulo(java.lang.String id)]

    diff --git a/docs/jdocs/neureka/backend/main/implementations/broadcast/CLScalarBroadcastMultiplication.html b/docs/jdocs/neureka/backend/main/implementations/broadcast/CLScalarBroadcastMultiplication.html
    [index ce1492257..c40c79f42; public CLScalarBroadcastMultiplication(java.lang.String id); hierarchy: java.lang.Object, AbstractImplementationFor<OpenCLDevice>, CLImplementation, ParsedCLImplementation, CLScalarBroadcast, CLScalarBroadcastMultiplication]

    diff --git a/docs/jdocs/neureka/backend/main/implementations/broadcast/CLScalarBroadcastPower.html b/docs/jdocs/neureka/backend/main/implementations/broadcast/CLScalarBroadcastPower.html
    [index 6f610be15..2a60eb311; public CLScalarBroadcastPower(java.lang.String id)]

    diff --git a/docs/jdocs/neureka/backend/main/implementations/broadcast/CLScalarBroadcastSubtraction.html b/docs/jdocs/neureka/backend/main/implementations/broadcast/CLScalarBroadcastSubtraction.html
    [index 453309bf1..ea099db5f; public CLScalarBroadcastSubtraction(java.lang.String id)]
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/implementations/broadcast/CPUBroadcast.html b/docs/jdocs/neureka/backend/main/implementations/broadcast/CPUBroadcast.html index da090e558..c70693334 100644 --- a/docs/jdocs/neureka/backend/main/implementations/broadcast/CPUBroadcast.html +++ b/docs/jdocs/neureka/backend/main/implementations/broadcast/CPUBroadcast.html @@ -1,213 +1,335 @@ - + + - -CPUBroadcast (neureka 1.0.0 API) - - - - + +CPUBroadcast (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class CPUBroadcast

    -
    -
    java.lang.Object -
    neureka.backend.main.implementations.broadcast.CPUBroadcast
    +
    neureka.backend.main.implementations.broadcast
    +

    Class CPUBroadcast

    -
    -
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.backend.main.implementations.broadcast.CPUBroadcast
      • +
      +
    • +
    +
    -
    -
      - -
    • -
      -

      Constructor Summary

      -
      Constructors
      -
      -
      Modifier
      -
      Constructor
      -
      Description
      -
      protected
      - -
       
      +
      +
      public abstract class CPUBroadcast
      +extends java.lang.Object
      +implements ImplementationFor<CPU>
      +
    • +
    - +
    + - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        CPUBroadcast

        -
        protected CPUBroadcast()
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            CPUBroadcast

            +
            protected CPUBroadcast()
          -
    • +
    -
  • -
    -

    Method Details

    -
      -
    • -
      -

      _getFun

      -
      protected abstract CPUBiFun _getFun()
      -
      +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          _getFun

          +
          protected abstract CPUBiFun _getFun()
        • -
        • -
          -

          _getDeriveAt0

          -
          protected abstract CPUBiFun _getDeriveAt0()
          -
          +
        + + + +
          +
        • +

          _getDeriveAt0

          +
          protected abstract CPUBiFun _getDeriveAt0()
        • -
        • -
          -

          _getDeriveAt1

          -
          protected abstract CPUBiFun _getDeriveAt1()
          -
          +
        + + + + + + + +
          +
        • +

          run

          +
          public Tensor<?> run(ExecutionCall<CPU> call)
          +
          Description copied from interface: ImplementationFor
          This method is the entrypoint for a concrete implementation of the algorithm to which instances of this interface belong and the device on which this is implemented. One has to keep in mind that the implementation details with respect to the target device are specific to the methods of the "TargetDevice" type on which this call should run...
          -
          -
          Specified by:
          -
          run in interface ImplementationFor<CPU>
          -
          Parameters:
          +
          +
          Specified by:
          +
          run in interface ImplementationFor<CPU>
          +
          Parameters:
          call - The call which ought to be executed on this implementation.
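The hunk above documents CPUBroadcast's three abstract hooks (_getFun, _getDeriveAt0, _getDeriveAt1, each returning a CPUBiFun) next to the run(ExecutionCall<CPU>) entrypoint it implements for ImplementationFor<CPU>. As a rough illustration only, a concrete subclass could look like the sketch below, assuming those three hooks are the only abstract members and that CPUBiFun lives under neureka.backend.main.implementations.fun.api (both assumptions; the class name and constructor are invented for illustration, not taken from this diff).

    // Hypothetical sketch, not part of this diff: a concrete CPUBroadcast subclass.
    // Only members documented above are relied on; the class name, constructor
    // parameters and the CPUBiFun import path are assumptions.
    import neureka.backend.main.implementations.broadcast.CPUBroadcast;
    import neureka.backend.main.implementations.fun.api.CPUBiFun;

    public class MyBroadcastOp extends CPUBroadcast {

        private final CPUBiFun fun;        // f(a, b), applied with broadcasting
        private final CPUBiFun deriveAt0;  // partial derivative w.r.t. the first operand
        private final CPUBiFun deriveAt1;  // partial derivative w.r.t. the second operand

        public MyBroadcastOp(CPUBiFun fun, CPUBiFun deriveAt0, CPUBiFun deriveAt1) {
            this.fun = fun;
            this.deriveAt0 = deriveAt0;
            this.deriveAt1 = deriveAt1;
        }

        @Override protected CPUBiFun _getFun()       { return fun; }
        @Override protected CPUBiFun _getDeriveAt0() { return deriveAt0; }
        @Override protected CPUBiFun _getDeriveAt1() { return deriveAt1; }
    }

The inherited run(...) then consumes whatever the three hooks return, so the subclass itself stays free of device-specific loop code (as far as this diff shows).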
          -
  • - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/implementations/broadcast/CPUBroadcastAddition.html b/docs/jdocs/neureka/backend/main/implementations/broadcast/CPUBroadcastAddition.html index 10ad3e57c..47114b436 100644 --- a/docs/jdocs/neureka/backend/main/implementations/broadcast/CPUBroadcastAddition.html +++ b/docs/jdocs/neureka/backend/main/implementations/broadcast/CPUBroadcastAddition.html @@ -1,197 +1,322 @@ - + + - -CPUBroadcastAddition (neureka 1.0.0 API) - - - - + +CPUBroadcastAddition (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class CPUBroadcastAddition

    -
    -
    java.lang.Object -
    neureka.backend.main.implementations.broadcast.CPUBroadcast -
    neureka.backend.main.implementations.broadcast.CPUBroadcastAddition
    +
    neureka.backend.main.implementations.broadcast
    +

    Class CPUBroadcastAddition

    -
    -
    -
    +
    + +
    +
    -
    -
      - -
    • -
      -

      Constructor Summary

      -
      Constructors
      -
      -
      Constructor
      -
      Description
      - -
       
      +
      +
      public class CPUBroadcastAddition
      +extends CPUBroadcast
      +
    • +
    - +
    +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        CPUBroadcastAddition

        -
        public CPUBroadcastAddition()
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            CPUBroadcastAddition

            +
            public CPUBroadcastAddition()
          -
    • +
    -
  • -
    -

    Method Details

    - + + + +
  • -
  • -
    -

    _getDeriveAt1

    -
    protected CPUBiFun _getDeriveAt1()
    -
    -
    Specified by:
    -
    _getDeriveAt1 in class CPUBroadcast
    + + + + +
  • - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/implementations/broadcast/CPUBroadcastDivision.html b/docs/jdocs/neureka/backend/main/implementations/broadcast/CPUBroadcastDivision.html index 983217c59..bb3aaf8cd 100644 --- a/docs/jdocs/neureka/backend/main/implementations/broadcast/CPUBroadcastDivision.html +++ b/docs/jdocs/neureka/backend/main/implementations/broadcast/CPUBroadcastDivision.html @@ -1,197 +1,322 @@ - + + - -CPUBroadcastDivision (neureka 1.0.0 API) - - - - + +CPUBroadcastDivision (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class CPUBroadcastDivision

    -
    -
    java.lang.Object -
    neureka.backend.main.implementations.broadcast.CPUBroadcast -
    neureka.backend.main.implementations.broadcast.CPUBroadcastDivision
    +
    neureka.backend.main.implementations.broadcast
    +

    Class CPUBroadcastDivision

    -
    -
    -
    +
    + +
    +
    -
    -
      - -
    • -
      -

      Constructor Summary

      -
      Constructors
      -
      -
      Constructor
      -
      Description
      - -
       
      +
      +
      public class CPUBroadcastDivision
      +extends CPUBroadcast
      +
    • +
    - +
    +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        CPUBroadcastDivision

        -
        public CPUBroadcastDivision()
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            CPUBroadcastDivision

            +
            public CPUBroadcastDivision()
          -
    • +
    -
  • -
    -

    Method Details

    - + + + +
  • -
  • -
    -

    _getDeriveAt1

    -
    protected CPUBiFun _getDeriveAt1()
    -
    -
    Specified by:
    -
    _getDeriveAt1 in class CPUBroadcast
    + + + + +
  • - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/implementations/broadcast/CPUBroadcastModulo.html b/docs/jdocs/neureka/backend/main/implementations/broadcast/CPUBroadcastModulo.html index 4739a6dce..526f909ae 100644 --- a/docs/jdocs/neureka/backend/main/implementations/broadcast/CPUBroadcastModulo.html +++ b/docs/jdocs/neureka/backend/main/implementations/broadcast/CPUBroadcastModulo.html @@ -1,197 +1,322 @@ - + + - -CPUBroadcastModulo (neureka 1.0.0 API) - - - - + +CPUBroadcastModulo (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class CPUBroadcastModulo

    -
    -
    java.lang.Object -
    neureka.backend.main.implementations.broadcast.CPUBroadcast -
    neureka.backend.main.implementations.broadcast.CPUBroadcastModulo
    +
    neureka.backend.main.implementations.broadcast
    +

    Class CPUBroadcastModulo

    -
    -
    -
    +
    + +
    +
    -
    -
      - -
    • -
      -

      Constructor Summary

      -
      Constructors
      -
      -
      Constructor
      -
      Description
      - -
       
      +
      +
      public class CPUBroadcastModulo
      +extends CPUBroadcast
      +
    • +
    - +
    +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        CPUBroadcastModulo

        -
        public CPUBroadcastModulo()
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            CPUBroadcastModulo

            +
            public CPUBroadcastModulo()
          -
    • +
    -
  • -
    -

    Method Details

    - + + + +
  • -
  • -
    -

    _getDeriveAt1

    -
    protected CPUBiFun _getDeriveAt1()
    -
    -
    Specified by:
    -
    _getDeriveAt1 in class CPUBroadcast
    + + + + +
  • - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/implementations/broadcast/CPUBroadcastMultiplication.html b/docs/jdocs/neureka/backend/main/implementations/broadcast/CPUBroadcastMultiplication.html index 172af7618..2d726b911 100644 --- a/docs/jdocs/neureka/backend/main/implementations/broadcast/CPUBroadcastMultiplication.html +++ b/docs/jdocs/neureka/backend/main/implementations/broadcast/CPUBroadcastMultiplication.html @@ -1,197 +1,322 @@ - + + - -CPUBroadcastMultiplication (neureka 1.0.0 API) - - - - + +CPUBroadcastMultiplication (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class CPUBroadcastMultiplication

    -
    -
    java.lang.Object -
    neureka.backend.main.implementations.broadcast.CPUBroadcast -
    neureka.backend.main.implementations.broadcast.CPUBroadcastMultiplication
    +
    neureka.backend.main.implementations.broadcast
    +

    Class CPUBroadcastMultiplication

    -
    -
    -
    +
    + +
    +
    -
    -
    - +
    +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        CPUBroadcastMultiplication

        -
        public CPUBroadcastMultiplication()
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            CPUBroadcastMultiplication

            +
            public CPUBroadcastMultiplication()
          -
    • +
    -
  • -
    -

    Method Details

    - + + + +
  • -
  • -
    -

    _getDeriveAt1

    -
    protected CPUBiFun _getDeriveAt1()
    -
    -
    Specified by:
    -
    _getDeriveAt1 in class CPUBroadcast
    + + + + +
  • - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/implementations/broadcast/CPUBroadcastPower.html b/docs/jdocs/neureka/backend/main/implementations/broadcast/CPUBroadcastPower.html index 0f7873e28..806c43959 100644 --- a/docs/jdocs/neureka/backend/main/implementations/broadcast/CPUBroadcastPower.html +++ b/docs/jdocs/neureka/backend/main/implementations/broadcast/CPUBroadcastPower.html @@ -1,197 +1,322 @@ - + + - -CPUBroadcastPower (neureka 1.0.0 API) - - - - + +CPUBroadcastPower (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class CPUBroadcastPower

    -
    -
    java.lang.Object -
    neureka.backend.main.implementations.broadcast.CPUBroadcast -
    neureka.backend.main.implementations.broadcast.CPUBroadcastPower
    +
    neureka.backend.main.implementations.broadcast
    +

    Class CPUBroadcastPower

    -
    -
    -
    +
    + +
    +
    -
    -
      - -
    • -
      -

      Constructor Summary

      -
      Constructors
      -
      -
      Constructor
      -
      Description
      - -
       
      +
      +
      public class CPUBroadcastPower
      +extends CPUBroadcast
      +
    • +
    - +
    +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        CPUBroadcastPower

        -
        public CPUBroadcastPower()
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            CPUBroadcastPower

            +
            public CPUBroadcastPower()
          -
    • +
    -
  • -
    -

    Method Details

    - + + + +
  • -
  • -
    -

    _getDeriveAt1

    -
    protected CPUBiFun _getDeriveAt1()
    -
    -
    Specified by:
    -
    _getDeriveAt1 in class CPUBroadcast
    + + + + +
  • - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/implementations/broadcast/CPUBroadcastSubtraction.html b/docs/jdocs/neureka/backend/main/implementations/broadcast/CPUBroadcastSubtraction.html index 100d219cf..691135e95 100644 --- a/docs/jdocs/neureka/backend/main/implementations/broadcast/CPUBroadcastSubtraction.html +++ b/docs/jdocs/neureka/backend/main/implementations/broadcast/CPUBroadcastSubtraction.html @@ -1,197 +1,322 @@ - + + - -CPUBroadcastSubtraction (neureka 1.0.0 API) - - - - + +CPUBroadcastSubtraction (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class CPUBroadcastSubtraction

    -
    -
    java.lang.Object -
    neureka.backend.main.implementations.broadcast.CPUBroadcast -
    neureka.backend.main.implementations.broadcast.CPUBroadcastSubtraction
    +
    neureka.backend.main.implementations.broadcast
    +

    Class CPUBroadcastSubtraction

    -
    -
    -
    +
    + +
    +
    -
    -
    - +
    +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        CPUBroadcastSubtraction

        -
        public CPUBroadcastSubtraction()
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            CPUBroadcastSubtraction

            +
            public CPUBroadcastSubtraction()
          -
    • +
    -
  • -
    -

    Method Details

    - + + + +
  • -
  • -
    -

    _getDeriveAt1

    -
    protected CPUBiFun _getDeriveAt1()
    -
    -
    Specified by:
    -
    _getDeriveAt1 in class CPUBroadcast
    + + + + +
  • - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/implementations/broadcast/CPUBroadcastSummation.html b/docs/jdocs/neureka/backend/main/implementations/broadcast/CPUBroadcastSummation.html index 9ef01a4fd..dbbdfbe4a 100644 --- a/docs/jdocs/neureka/backend/main/implementations/broadcast/CPUBroadcastSummation.html +++ b/docs/jdocs/neureka/backend/main/implementations/broadcast/CPUBroadcastSummation.html @@ -1,197 +1,322 @@ - + + - -CPUBroadcastSummation (neureka 1.0.0 API) - - - - + +CPUBroadcastSummation (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class CPUBroadcastSummation

    -
    -
    java.lang.Object -
    neureka.backend.main.implementations.broadcast.CPUBroadcast -
    neureka.backend.main.implementations.broadcast.CPUBroadcastSummation
    +
    neureka.backend.main.implementations.broadcast
    +

    Class CPUBroadcastSummation

    -
    -
    -
    +
    + +
    +
    -
    -
    - +
    +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        CPUBroadcastSummation

        -
        public CPUBroadcastSummation()
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            CPUBroadcastSummation

            +
            public CPUBroadcastSummation()
          -
    • +
    -
  • -
    -

    Method Details

    - + + + +
  • -
  • -
    -

    _getDeriveAt1

    -
    protected CPUBiFun _getDeriveAt1()
    -
    -
    Specified by:
    -
    _getDeriveAt1 in class CPUBroadcast
    + + + + +
  • - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/implementations/broadcast/CPUScalaBroadcastPower.html b/docs/jdocs/neureka/backend/main/implementations/broadcast/CPUScalaBroadcastPower.html index 9d5241b70..fe1b0c035 100644 --- a/docs/jdocs/neureka/backend/main/implementations/broadcast/CPUScalaBroadcastPower.html +++ b/docs/jdocs/neureka/backend/main/implementations/broadcast/CPUScalaBroadcastPower.html @@ -1,197 +1,322 @@ - + + - -CPUScalaBroadcastPower (neureka 1.0.0 API) - - - - + +CPUScalaBroadcastPower (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class CPUScalaBroadcastPower

    -
    -
    java.lang.Object -
    neureka.backend.main.implementations.broadcast.CPUScalarBroadcast -
    neureka.backend.main.implementations.broadcast.CPUScalaBroadcastPower
    +
    neureka.backend.main.implementations.broadcast
    +

    Class CPUScalaBroadcastPower

    -
    -
    -
    +
    + +
    +
    -
    -
    - +
    +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        CPUScalaBroadcastPower

        -
        public CPUScalaBroadcastPower()
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            CPUScalaBroadcastPower

            +
            public CPUScalaBroadcastPower()
          -
    • +
    -
  • -
    -

    Method Details

    - + + + +
  • -
  • -
    -

    _getDeriveAt1

    -
    protected CPUBiFun _getDeriveAt1()
    -
    -
    Specified by:
    -
    _getDeriveAt1 in class CPUScalarBroadcast
    + + + + +
  • - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/implementations/broadcast/CPUScalarBroadcast.html b/docs/jdocs/neureka/backend/main/implementations/broadcast/CPUScalarBroadcast.html index 21207a12a..b3c397526 100644 --- a/docs/jdocs/neureka/backend/main/implementations/broadcast/CPUScalarBroadcast.html +++ b/docs/jdocs/neureka/backend/main/implementations/broadcast/CPUScalarBroadcast.html @@ -1,220 +1,346 @@ - + + - -CPUScalarBroadcast (neureka 1.0.0 API) - - - - + +CPUScalarBroadcast (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class CPUScalarBroadcast

    -
    -
    java.lang.Object -
    neureka.backend.main.implementations.broadcast.CPUScalarBroadcast
    +
    neureka.backend.main.implementations.broadcast
    +

    Class CPUScalarBroadcast

    -
    -
    +
    -
    -
      - -
    • -
      -

      Constructor Summary

      -
      Constructors
      -
      -
      Constructor
      -
      Description
      - -
       
      +
      +
      public abstract class CPUScalarBroadcast
      +extends java.lang.Object
      +implements ImplementationFor<CPU>
      +
    • +
    - +
    +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        CPUScalarBroadcast

        -
        public CPUScalarBroadcast()
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            CPUScalarBroadcast

            +
            public CPUScalarBroadcast()
          -
    • +
    -
  • -
    -

    Method Details

    -
      -
    • -
      -

      _getFun

      -
      protected abstract CPUBiFun _getFun()
      -
      +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          _getFun

          +
          protected abstract CPUBiFun _getFun()
        • -
        • -
          -

          _getDeriveAt0

          -
          protected abstract CPUBiFun _getDeriveAt0()
          -
          +
        + + + +
          +
        • +

          _getDeriveAt0

          +
          protected abstract CPUBiFun _getDeriveAt0()
        • -
        • -
          -

          _getDeriveAt1

          -
          protected abstract CPUBiFun _getDeriveAt1()
          -
          +
        + + + + + + + +
          +
        • +

          run

          +
          public Tensor<?> run(ExecutionCall<CPU> call)
          +
          Description copied from interface: ImplementationFor
          This method is the entrypoint for a concrete implementation of the algorithm to which instances of this interface belong and the device on which this is implemented. One has to keep in mind that the implementation details with respect to the target device are specific to the methods of the "TargetDevice" type on which this call should run...
          -
          -
          Specified by:
          -
          run in interface ImplementationFor<CPU>
          -
          Parameters:
          +
          +
          Specified by:
          +
          run in interface ImplementationFor<CPU>
          +
          Parameters:
          call - The call which ought to be executed on this implementation.
          -
  • -
  • -
    -

    _workloadFor

    -
    public CPU.RangeWorkload _workloadFor(ExecutionCall<CPU> call)
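The run(ExecutionCall<CPU>) entrypoint above is the whole contract an implementation exposes to the backend; _workloadFor additionally packages the scalar-broadcast loop as a CPU.RangeWorkload, which suggests the CPU device executes it over index ranges (an inference, not stated in this diff). Seen from the calling side, the contract reduces to handing a prepared call to the implementation and taking back the result tensor; a minimal caller-side sketch follows, with every import path assumed rather than taken from this diff.

    // Hypothetical caller-side sketch: a prepared ExecutionCall is handed to an
    // implementation and the resulting tensor is returned. Only the run(...)
    // signature is taken from the docs above; import paths are assumptions.
    import neureka.Tensor;
    import neureka.backend.api.ExecutionCall;
    import neureka.backend.api.ImplementationFor;
    import neureka.devices.host.CPU;

    public final class RunContractSketch {
        static Tensor<?> execute(ImplementationFor<CPU> impl, ExecutionCall<CPU> call) {
            return impl.run(call); // the entrypoint documented above
        }
    }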
    -
    + + + + +
  • - - +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/implementations/broadcast/CPUScalarBroadcastAddition.html b/docs/jdocs/neureka/backend/main/implementations/broadcast/CPUScalarBroadcastAddition.html index 2aad865de..6eb673225 100644 --- a/docs/jdocs/neureka/backend/main/implementations/broadcast/CPUScalarBroadcastAddition.html +++ b/docs/jdocs/neureka/backend/main/implementations/broadcast/CPUScalarBroadcastAddition.html @@ -1,225 +1,354 @@ - + + - -CPUScalarBroadcastAddition (neureka 1.0.0 API) - - - - + +CPUScalarBroadcastAddition (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class CPUScalarBroadcastAddition

    -
    -
    java.lang.Object -
    neureka.backend.main.implementations.broadcast.CPUScalarBroadcast -
    neureka.backend.main.implementations.broadcast.CPUScalarBroadcastAddition
    -
    +
    neureka.backend.main.implementations.broadcast
    +

    Class CPUScalarBroadcastAddition

    -
    -
    +
    + +
    +
    -
    -
    - +
    +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        CPUScalarBroadcastAddition

        -
        public CPUScalarBroadcastAddition()
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            CPUScalarBroadcastAddition

            +
            public CPUScalarBroadcastAddition()
          -
    • +
    -
  • -
    -

    Method Details

    - + + + +
  • -
  • -
    -

    _getDeriveAt0

    -
    protected CPUBiFun _getDeriveAt0()
    -
    -
    Specified by:
    -
    _getDeriveAt0 in class CPUScalarBroadcast
    + + + + +
  • -
  • -
    -

    _getDeriveAt1

    -
    protected CPUBiFun _getDeriveAt1()
    -
    -
    Specified by:
    -
    _getDeriveAt1 in class CPUScalarBroadcast
    + + + + +
  • - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/implementations/broadcast/CPUScalarBroadcastDivision.html b/docs/jdocs/neureka/backend/main/implementations/broadcast/CPUScalarBroadcastDivision.html index 7ed754bac..45af6b272 100644 --- a/docs/jdocs/neureka/backend/main/implementations/broadcast/CPUScalarBroadcastDivision.html +++ b/docs/jdocs/neureka/backend/main/implementations/broadcast/CPUScalarBroadcastDivision.html @@ -1,197 +1,322 @@ - + + - -CPUScalarBroadcastDivision (neureka 1.0.0 API) - - - - + +CPUScalarBroadcastDivision (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class CPUScalarBroadcastDivision

    -
    -
    java.lang.Object -
    neureka.backend.main.implementations.broadcast.CPUScalarBroadcast -
    neureka.backend.main.implementations.broadcast.CPUScalarBroadcastDivision
    +
    neureka.backend.main.implementations.broadcast
    +

    Class CPUScalarBroadcastDivision

    -
    -
    -
    +
    + +
    +
    -
    -
    - +
    +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        CPUScalarBroadcastDivision

        -
        public CPUScalarBroadcastDivision()
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            CPUScalarBroadcastDivision

            +
            public CPUScalarBroadcastDivision()
          -
    • +
    -
  • -
    -

    Method Details

    - + + + +
  • -
  • -
    -

    _getDeriveAt1

    -
    protected CPUBiFun _getDeriveAt1()
    -
    -
    Specified by:
    -
    _getDeriveAt1 in class CPUScalarBroadcast
    + + + + +
  • - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/implementations/broadcast/CPUScalarBroadcastIdentity.html b/docs/jdocs/neureka/backend/main/implementations/broadcast/CPUScalarBroadcastIdentity.html index e100a3f99..cb0ecf3c4 100644 --- a/docs/jdocs/neureka/backend/main/implementations/broadcast/CPUScalarBroadcastIdentity.html +++ b/docs/jdocs/neureka/backend/main/implementations/broadcast/CPUScalarBroadcastIdentity.html @@ -1,197 +1,322 @@ - + + - -CPUScalarBroadcastIdentity (neureka 1.0.0 API) - - - - + +CPUScalarBroadcastIdentity (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class CPUScalarBroadcastIdentity

    -
    -
    java.lang.Object -
    neureka.backend.main.implementations.broadcast.CPUScalarBroadcast -
    neureka.backend.main.implementations.broadcast.CPUScalarBroadcastIdentity
    +
    neureka.backend.main.implementations.broadcast
    +

    Class CPUScalarBroadcastIdentity

    -
    -
    -
    +
    + +
    +
    -
    -
    - +
    +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        CPUScalarBroadcastIdentity

        -
        public CPUScalarBroadcastIdentity()
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            CPUScalarBroadcastIdentity

            +
            public CPUScalarBroadcastIdentity()
          -
    • +
    -
  • -
    -

    Method Details

    - + + + +
  • -
  • -
    -

    _getDeriveAt1

    -
    protected CPUBiFun _getDeriveAt1()
    -
    -
    Specified by:
    -
    _getDeriveAt1 in class CPUScalarBroadcast
    + + + + +
  • - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/implementations/broadcast/CPUScalarBroadcastModulo.html b/docs/jdocs/neureka/backend/main/implementations/broadcast/CPUScalarBroadcastModulo.html index e1fad5b46..2464aa4fc 100644 --- a/docs/jdocs/neureka/backend/main/implementations/broadcast/CPUScalarBroadcastModulo.html +++ b/docs/jdocs/neureka/backend/main/implementations/broadcast/CPUScalarBroadcastModulo.html @@ -1,197 +1,322 @@ - + + - -CPUScalarBroadcastModulo (neureka 1.0.0 API) - - - - + +CPUScalarBroadcastModulo (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class CPUScalarBroadcastModulo

    -
    -
    java.lang.Object -
    neureka.backend.main.implementations.broadcast.CPUScalarBroadcast -
    neureka.backend.main.implementations.broadcast.CPUScalarBroadcastModulo
    +
    neureka.backend.main.implementations.broadcast
    +

    Class CPUScalarBroadcastModulo

    -
    -
    -
    +
    + +
    +
    -
    -
    - +
    +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        CPUScalarBroadcastModulo

        -
        public CPUScalarBroadcastModulo()
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            CPUScalarBroadcastModulo

            +
            public CPUScalarBroadcastModulo()
          -
    • +
    -
  • -
    -

    Method Details

    - + + + +
  • -
  • -
    -

    _getDeriveAt1

    -
    protected CPUBiFun _getDeriveAt1()
    -
    -
    Specified by:
    -
    _getDeriveAt1 in class CPUScalarBroadcast
    + + + + +
  • - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/implementations/broadcast/CPUScalarBroadcastMultiplication.html b/docs/jdocs/neureka/backend/main/implementations/broadcast/CPUScalarBroadcastMultiplication.html index 6058c126c..54bded20c 100644 --- a/docs/jdocs/neureka/backend/main/implementations/broadcast/CPUScalarBroadcastMultiplication.html +++ b/docs/jdocs/neureka/backend/main/implementations/broadcast/CPUScalarBroadcastMultiplication.html @@ -1,225 +1,354 @@ - + + - -CPUScalarBroadcastMultiplication (neureka 1.0.0 API) - - - - + +CPUScalarBroadcastMultiplication (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class CPUScalarBroadcastMultiplication

    -
    -
    java.lang.Object -
    neureka.backend.main.implementations.broadcast.CPUScalarBroadcast -
    neureka.backend.main.implementations.broadcast.CPUScalarBroadcastMultiplication
    -
    +
    neureka.backend.main.implementations.broadcast
    +

    Class CPUScalarBroadcastMultiplication

    -
    -
    +
    + +
    +
    -
    -
    - +
    +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        CPUScalarBroadcastMultiplication

        -
        public CPUScalarBroadcastMultiplication()
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            CPUScalarBroadcastMultiplication

            +
            public CPUScalarBroadcastMultiplication()
          -
    • +
    -
  • -
    -

    Method Details

    - + + + +
  • -
  • -
    -

    _getDeriveAt0

    -
    protected CPUBiFun _getDeriveAt0()
    -
    -
    Specified by:
    -
    _getDeriveAt0 in class CPUScalarBroadcast
    + + + + +
  • -
  • -
    -

    _getDeriveAt1

    -
    protected CPUBiFun _getDeriveAt1()
    -
    -
    Specified by:
    -
    _getDeriveAt1 in class CPUScalarBroadcast
    + + + + +
  • - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/implementations/broadcast/CPUScalarBroadcastSubtraction.html b/docs/jdocs/neureka/backend/main/implementations/broadcast/CPUScalarBroadcastSubtraction.html index 8841ea120..11e38fee7 100644 --- a/docs/jdocs/neureka/backend/main/implementations/broadcast/CPUScalarBroadcastSubtraction.html +++ b/docs/jdocs/neureka/backend/main/implementations/broadcast/CPUScalarBroadcastSubtraction.html @@ -1,197 +1,322 @@ - + + - -CPUScalarBroadcastSubtraction (neureka 1.0.0 API) - - - - + +CPUScalarBroadcastSubtraction (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class CPUScalarBroadcastSubtraction

    -
    -
    java.lang.Object -
    neureka.backend.main.implementations.broadcast.CPUScalarBroadcast -
    neureka.backend.main.implementations.broadcast.CPUScalarBroadcastSubtraction
    +
    neureka.backend.main.implementations.broadcast
    +

    Class CPUScalarBroadcastSubtraction

    -
    -
    -
    +
    + +
    +
    -
    -
    - +
    +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        CPUScalarBroadcastSubtraction

        -
        public CPUScalarBroadcastSubtraction()
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            CPUScalarBroadcastSubtraction

            +
            public CPUScalarBroadcastSubtraction()
          -
    • +
    -
  • -
    -

    Method Details

    - + + + +
  • -
  • -
    -

    _getDeriveAt1

    -
    protected CPUBiFun _getDeriveAt1()
    -
    -
    Specified by:
    -
    _getDeriveAt1 in class CPUScalarBroadcast
    + + + + +
  • - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/implementations/broadcast/package-frame.html b/docs/jdocs/neureka/backend/main/implementations/broadcast/package-frame.html new file mode 100644 index 000000000..f9413f7fa --- /dev/null +++ b/docs/jdocs/neureka/backend/main/implementations/broadcast/package-frame.html @@ -0,0 +1,49 @@ + + + + + +neureka.backend.main.implementations.broadcast (neureka 1.0.1 API) + + + + +

    neureka.backend.main.implementations.broadcast

    + + + diff --git a/docs/jdocs/neureka/backend/main/implementations/broadcast/package-summary.html b/docs/jdocs/neureka/backend/main/implementations/broadcast/package-summary.html index be9721541..ad2b88527 100644 --- a/docs/jdocs/neureka/backend/main/implementations/broadcast/package-summary.html +++ b/docs/jdocs/neureka/backend/main/implementations/broadcast/package-summary.html @@ -1,157 +1,259 @@ - + + - -neureka.backend.main.implementations.broadcast (neureka 1.0.0 API) - - - - + +neureka.backend.main.implementations.broadcast (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    -

    Package neureka.backend.main.implementations.broadcast

    -
    -
    -
    package neureka.backend.main.implementations.broadcast
    -
    -
    -
    + + diff --git a/docs/jdocs/neureka/backend/main/implementations/broadcast/package-tree.html b/docs/jdocs/neureka/backend/main/implementations/broadcast/package-tree.html index 45d3fb720..c35825480 100644 --- a/docs/jdocs/neureka/backend/main/implementations/broadcast/package-tree.html +++ b/docs/jdocs/neureka/backend/main/implementations/broadcast/package-tree.html @@ -1,89 +1,109 @@ - + + - -neureka.backend.main.implementations.broadcast Class Hierarchy (neureka 1.0.0 API) - - - - + +neureka.backend.main.implementations.broadcast Class Hierarchy (neureka 1.0.1 API) - - - - - - + + -
    - -
    -

    Hierarchy For Package neureka.backend.main.implementations.broadcast

-Package Hierarchies:
+Package Hierarchies:
    -
    +

    Class Hierarchy

    -
    + + + + diff --git a/docs/jdocs/neureka/backend/main/implementations/convolution/AbstractCPUConvolution.html b/docs/jdocs/neureka/backend/main/implementations/convolution/AbstractCPUConvolution.html index e138db95a..33a734e46 100644 --- a/docs/jdocs/neureka/backend/main/implementations/convolution/AbstractCPUConvolution.html +++ b/docs/jdocs/neureka/backend/main/implementations/convolution/AbstractCPUConvolution.html @@ -1,193 +1,307 @@ - + + - -AbstractCPUConvolution (neureka 1.0.0 API) - - - - + +AbstractCPUConvolution (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class AbstractCPUConvolution

    +
    neureka.backend.main.implementations.convolution
    +

    Class AbstractCPUConvolution

    -
    java.lang.Object -
    neureka.backend.main.implementations.convolution.AbstractCPUConvolution
    -
    -
    -
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.backend.main.implementations.convolution.AbstractCPUConvolution
      • +
      +
    • +
    +
    +
    -
    -
      - -
    • -
      -

      Constructor Summary

      -
      Constructors
      -
      -
      Constructor
      -
      Description
      - -
       
      +
      +
      public abstract class AbstractCPUConvolution
      +extends java.lang.Object
      +implements ImplementationFor<CPU>
      +
    • +
    - +
    +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        AbstractCPUConvolution

        -
        public AbstractCPUConvolution()
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            AbstractCPUConvolution

            +
            public AbstractCPUConvolution()
          -
    • +
    -
  • -
    -

    Method Details

    -
      -
    • -
      -

      _getFun

      -
      protected abstract CPUBiFun _getFun()
      -
      +
        +
      • + + +

        Method Detail

        + + + + + + + +
          +
        • +

          run

          +
          public Tensor<?> run(ExecutionCall<CPU> call)
          +
          Description copied from interface: ImplementationFor
          This method is the entrypoint for a concrete implementation of the algorithm to which instances of this interface belong and the device on which this is implemented. One has to keep in mind that the implementation details with respect to the target device are specific to the methods of the "TargetDevice" type on which this call should run...
          -
          -
          Specified by:
          -
          run in interface ImplementationFor<CPU>
          -
          Parameters:
          +
          +
          Specified by:
          +
          run in interface ImplementationFor<CPU>
          +
          Parameters:
          call - The call which ought to be executed on this implementation.
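AbstractCPUConvolution narrows the same pattern down to a single hook: _getFun() supplies the CPUBiFun that the inherited run(ExecutionCall<CPU>) presumably applies while traversing the operands (the traversal itself is handled by the base class, as far as this diff shows). A hypothetical subclass sketch, with the names and the CPUBiFun import path assumed:

    // Hypothetical sketch: a convolution variant only has to provide the
    // element-pairing function; run(...) is inherited from the base class.
    import neureka.backend.main.implementations.convolution.AbstractCPUConvolution;
    import neureka.backend.main.implementations.fun.api.CPUBiFun;

    public class MyConvolution extends AbstractCPUConvolution {

        private final CPUBiFun fun; // combines an input element with a kernel element

        public MyConvolution(CPUBiFun fun) { this.fun = fun; }

        @Override protected CPUBiFun _getFun() { return fun; }
    }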
          -
  • - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/implementations/convolution/CLConvolution.html b/docs/jdocs/neureka/backend/main/implementations/convolution/CLConvolution.html index 84cd5bd2a..2d376f96b 100644 --- a/docs/jdocs/neureka/backend/main/implementations/convolution/CLConvolution.html +++ b/docs/jdocs/neureka/backend/main/implementations/convolution/CLConvolution.html @@ -1,143 +1,267 @@ - + + - -CLConvolution (neureka 1.0.0 API) - - - - + +CLConvolution (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class CLConvolution

    -
    -
    java.lang.Object -
    neureka.backend.api.template.implementations.AbstractImplementationFor<OpenCLDevice> -
    neureka.backend.main.implementations.CLImplementation -
    neureka.backend.main.implementations.ParsedCLImplementation -
    neureka.backend.main.implementations.convolution.CLConvolution
    +
    neureka.backend.main.implementations.convolution
    +

    Class CLConvolution

    -
    -
    -
    -
    -
    +
    -
    -
    - +
    + - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        CLConvolution

        -
        public CLConvolution(String id)
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            CLConvolution

            +
            public CLConvolution(java.lang.String id)
          -
    - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/implementations/convolution/CPUConvolution.html b/docs/jdocs/neureka/backend/main/implementations/convolution/CPUConvolution.html index f38be8bb4..c1af3d44e 100644 --- a/docs/jdocs/neureka/backend/main/implementations/convolution/CPUConvolution.html +++ b/docs/jdocs/neureka/backend/main/implementations/convolution/CPUConvolution.html @@ -1,171 +1,288 @@ - + + - -CPUConvolution (neureka 1.0.0 API) - - - - + +CPUConvolution (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class CPUConvolution

    -
    -
    java.lang.Object -
    neureka.backend.main.implementations.convolution.AbstractCPUConvolution -
    neureka.backend.main.implementations.convolution.CPUConvolution
    +
    neureka.backend.main.implementations.convolution
    +

    Class CPUConvolution

    -
    -
    -
    +
    + +
    +
    -
    -
    - +
    + - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        CPUConvolution

        -
        public CPUConvolution()
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            CPUConvolution

            +
            public CPUConvolution()
          -
    • +
    -
  • -
    -

    Method Details

    - -
  • - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/implementations/convolution/package-frame.html b/docs/jdocs/neureka/backend/main/implementations/convolution/package-frame.html new file mode 100644 index 000000000..7a6dd7899 --- /dev/null +++ b/docs/jdocs/neureka/backend/main/implementations/convolution/package-frame.html @@ -0,0 +1,21 @@ + + + + + +neureka.backend.main.implementations.convolution (neureka 1.0.1 API) + + + + +

    neureka.backend.main.implementations.convolution

    + + + diff --git a/docs/jdocs/neureka/backend/main/implementations/convolution/package-summary.html b/docs/jdocs/neureka/backend/main/implementations/convolution/package-summary.html index 6f2c96303..0b2b78a97 100644 --- a/docs/jdocs/neureka/backend/main/implementations/convolution/package-summary.html +++ b/docs/jdocs/neureka/backend/main/implementations/convolution/package-summary.html @@ -1,101 +1,147 @@ - + + - -neureka.backend.main.implementations.convolution (neureka 1.0.0 API) - - - - + +neureka.backend.main.implementations.convolution (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    -

    Package neureka.backend.main.implementations.convolution

    -
    -
    -
    package neureka.backend.main.implementations.convolution
    -
    -
    -
    + + diff --git a/docs/jdocs/neureka/backend/main/implementations/convolution/package-tree.html b/docs/jdocs/neureka/backend/main/implementations/convolution/package-tree.html index 868493d86..ebf6365a4 100644 --- a/docs/jdocs/neureka/backend/main/implementations/convolution/package-tree.html +++ b/docs/jdocs/neureka/backend/main/implementations/convolution/package-tree.html @@ -1,76 +1,96 @@ - + + - -neureka.backend.main.implementations.convolution Class Hierarchy (neureka 1.0.0 API) - - - - + +neureka.backend.main.implementations.convolution Class Hierarchy (neureka 1.0.1 API) - - - - - - + + -
    - -
    -

    Hierarchy For Package neureka.backend.main.implementations.convolution

-Package Hierarchies:
+Package Hierarchies:
    -
    +

    Class Hierarchy

    -
    + + + + diff --git a/docs/jdocs/neureka/backend/main/implementations/elementwise/CLBiElementwise.html b/docs/jdocs/neureka/backend/main/implementations/elementwise/CLBiElementwise.html index 354f010bf..e951e54a3 100644 --- a/docs/jdocs/neureka/backend/main/implementations/elementwise/CLBiElementwise.html +++ b/docs/jdocs/neureka/backend/main/implementations/elementwise/CLBiElementwise.html @@ -1,151 +1,275 @@ - + + - -CLBiElementwise (neureka 1.0.0 API) - - - - + +CLBiElementwise (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class CLBiElementwise

    -
    -
    java.lang.Object -
    neureka.backend.api.template.implementations.AbstractImplementationFor<OpenCLDevice> -
    neureka.backend.main.implementations.CLImplementation -
    neureka.backend.main.implementations.ParsedCLImplementation -
    neureka.backend.main.implementations.elementwise.CLBiElementwise
    +
    neureka.backend.main.implementations.elementwise
    +

    Class CLBiElementwise

    -
    -
    -
    -
    -
    +
    -
    -
    - +
    + - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        CLBiElementwise

        -
public CLBiElementwise(String postfix,
                       String activationSource,
                       String differentiationSource)
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            CLBiElementwise

            +
            public CLBiElementwise(java.lang.String postfix,
            +                       java.lang.String activationSource,
            +                       java.lang.String differentiationSource)
          -
    - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/implementations/elementwise/CLBiElementwiseAddition.html b/docs/jdocs/neureka/backend/main/implementations/elementwise/CLBiElementwiseAddition.html index d6e0dcab2..118d333b1 100644 --- a/docs/jdocs/neureka/backend/main/implementations/elementwise/CLBiElementwiseAddition.html +++ b/docs/jdocs/neureka/backend/main/implementations/elementwise/CLBiElementwiseAddition.html @@ -1,145 +1,272 @@ - + + - -CLBiElementwiseAddition (neureka 1.0.0 API) - - - - + +CLBiElementwiseAddition (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class CLBiElementwiseAddition

    -
    - -
    -
    +
    -
    -
    - +
    +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        CLBiElementwiseAddition

        -
        public CLBiElementwiseAddition(String postfix)
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            CLBiElementwiseAddition

            +
            public CLBiElementwiseAddition(java.lang.String postfix)
            +
          • +
        -
    - +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/implementations/elementwise/CLBiElementwiseDivision.html b/docs/jdocs/neureka/backend/main/implementations/elementwise/CLBiElementwiseDivision.html index 7db1a75f7..e2672bc87 100644 --- a/docs/jdocs/neureka/backend/main/implementations/elementwise/CLBiElementwiseDivision.html +++ b/docs/jdocs/neureka/backend/main/implementations/elementwise/CLBiElementwiseDivision.html @@ -1,145 +1,272 @@ - + + - -CLBiElementwiseDivision (neureka 1.0.0 API) - - - - + +CLBiElementwiseDivision (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class CLBiElementwiseDivision

    -
    - -
    -
    +
    -
    -
    - +
    +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        CLBiElementwiseDivision

        -
        public CLBiElementwiseDivision(String postfix)
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            CLBiElementwiseDivision

            +
            public CLBiElementwiseDivision(java.lang.String postfix)
            +
          • +
        -
    - +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/implementations/elementwise/CLBiElementwiseModulo.html b/docs/jdocs/neureka/backend/main/implementations/elementwise/CLBiElementwiseModulo.html index b5fe70e30..67ccbd59a 100644 --- a/docs/jdocs/neureka/backend/main/implementations/elementwise/CLBiElementwiseModulo.html +++ b/docs/jdocs/neureka/backend/main/implementations/elementwise/CLBiElementwiseModulo.html @@ -1,145 +1,272 @@ - + + - -CLBiElementwiseModulo (neureka 1.0.0 API) - - - - + +CLBiElementwiseModulo (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class CLBiElementwiseModulo

    -
    - -
    -
    +
    -
    -
    - +
    +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        CLBiElementwiseModulo

        -
        public CLBiElementwiseModulo(String postfix)
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            CLBiElementwiseModulo

            +
            public CLBiElementwiseModulo(java.lang.String postfix)
            +
          • +
        -
    - +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/implementations/elementwise/CLBiElementwiseMultiplication.html b/docs/jdocs/neureka/backend/main/implementations/elementwise/CLBiElementwiseMultiplication.html index cbbfa7779..6c489f54b 100644 --- a/docs/jdocs/neureka/backend/main/implementations/elementwise/CLBiElementwiseMultiplication.html +++ b/docs/jdocs/neureka/backend/main/implementations/elementwise/CLBiElementwiseMultiplication.html @@ -1,145 +1,272 @@ - + + - -CLBiElementwiseMultiplication (neureka 1.0.0 API) - - - - + +CLBiElementwiseMultiplication (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class CLBiElementwiseMultiplication

    -
    -
    java.lang.Object -
    neureka.backend.api.template.implementations.AbstractImplementationFor<OpenCLDevice> -
    neureka.backend.main.implementations.CLImplementation -
    neureka.backend.main.implementations.ParsedCLImplementation -
    neureka.backend.main.implementations.elementwise.CLBiElementwise -
    neureka.backend.main.implementations.elementwise.CLBiElementwiseMultiplication
    -
    +
    neureka.backend.main.implementations.elementwise
    +

    Class CLBiElementwiseMultiplication

    -
    -
    -
    -
    -
    +
    -
    -
    - +
    +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        CLBiElementwiseMultiplication

        -
        public CLBiElementwiseMultiplication(String postfix)
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            CLBiElementwiseMultiplication

            +
            public CLBiElementwiseMultiplication(java.lang.String postfix)
            +
          • +
        -
    - +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/implementations/elementwise/CLBiElementwisePower.html b/docs/jdocs/neureka/backend/main/implementations/elementwise/CLBiElementwisePower.html index d041ef50d..c8e344110 100644 --- a/docs/jdocs/neureka/backend/main/implementations/elementwise/CLBiElementwisePower.html +++ b/docs/jdocs/neureka/backend/main/implementations/elementwise/CLBiElementwisePower.html @@ -1,145 +1,272 @@ - + + - -CLBiElementwisePower (neureka 1.0.0 API) - - - - + +CLBiElementwisePower (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class CLBiElementwisePower

    -
    - -
    -
    +
    -
    -
    - +
    +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        CLBiElementwisePower

        -
        public CLBiElementwisePower(String postfix)
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            CLBiElementwisePower

            +
            public CLBiElementwisePower(java.lang.String postfix)
            +
          • +
        -
    - +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/implementations/elementwise/CLBiElementwiseSubtraction.html b/docs/jdocs/neureka/backend/main/implementations/elementwise/CLBiElementwiseSubtraction.html index 0eec26950..af880e58d 100644 --- a/docs/jdocs/neureka/backend/main/implementations/elementwise/CLBiElementwiseSubtraction.html +++ b/docs/jdocs/neureka/backend/main/implementations/elementwise/CLBiElementwiseSubtraction.html @@ -1,145 +1,272 @@ - + + - -CLBiElementwiseSubtraction (neureka 1.0.0 API) - - - - + +CLBiElementwiseSubtraction (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class CLBiElementwiseSubtraction

    -
    - -
    -
    +
    -
    -
    - +
    +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        CLBiElementwiseSubtraction

        -
        public CLBiElementwiseSubtraction(String postfix)
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            CLBiElementwiseSubtraction

            +
            public CLBiElementwiseSubtraction(java.lang.String postfix)
            +
          • +
        -
    - +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/implementations/elementwise/CLElementwiseFunction.html b/docs/jdocs/neureka/backend/main/implementations/elementwise/CLElementwiseFunction.html index f4ed7d4cd..cbec34835 100644 --- a/docs/jdocs/neureka/backend/main/implementations/elementwise/CLElementwiseFunction.html +++ b/docs/jdocs/neureka/backend/main/implementations/elementwise/CLElementwiseFunction.html @@ -1,143 +1,267 @@ - + + - -CLElementwiseFunction (neureka 1.0.0 API) - - - - + +CLElementwiseFunction (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class CLElementwiseFunction

    -
    -
    java.lang.Object -
    neureka.backend.api.template.implementations.AbstractImplementationFor<OpenCLDevice> -
    neureka.backend.main.implementations.CLImplementation -
    neureka.backend.main.implementations.ParsedCLImplementation -
    neureka.backend.main.implementations.elementwise.CLElementwiseFunction
    +
    neureka.backend.main.implementations.elementwise
    +

    Class CLElementwiseFunction

    -
    -
    -
    -
    -
    +
    + +
    +
    -
    -
    - +
    + - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        CLElementwiseFunction

        -
        public CLElementwiseFunction(ScalarFun fun)
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            CLElementwiseFunction

            +
            public CLElementwiseFunction(ScalarFun fun)
          -
    - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/implementations/elementwise/CLRandomization.html b/docs/jdocs/neureka/backend/main/implementations/elementwise/CLRandomization.html index cb675d065..da36dc44a 100644 --- a/docs/jdocs/neureka/backend/main/implementations/elementwise/CLRandomization.html +++ b/docs/jdocs/neureka/backend/main/implementations/elementwise/CLRandomization.html @@ -1,180 +1,290 @@ - + + - -CLRandomization (neureka 1.0.0 API) - - - - + +CLRandomization (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class CLRandomization

    +
    neureka.backend.main.implementations.elementwise
    +

    Class CLRandomization

    -
    java.lang.Object -
    neureka.backend.main.implementations.elementwise.CLRandomization
    -
    -
    -
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.backend.main.implementations.elementwise.CLRandomization
      • +
      +
    • +
    +
    +
    -
    -
    - +
    +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        CLRandomization

        -
        public CLRandomization()
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            CLRandomization

            +
            public CLRandomization()
          -
    • +
    -
  • -
    -

    Method Details

    -
      -
    • -
      -

      run

      -
      public Tensor<?> run(ExecutionCall<OpenCLDevice> call)
      -
      Description copied from interface: ImplementationFor
      +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          run

          +
          public Tensor<?> run(ExecutionCall<OpenCLDevice> call)
          +
          Description copied from interface: ImplementationFor
          This method is the entrypoint for a concrete implementation of the algorithm to which instances of this interface belong and the device on which this is implemented. One has to keep in mind that the implementation details with respect to the target device are specific to the methods of the "TargetDevice" type on which this call should run...
          -
          -
          Specified by:
          -
          run in interface ImplementationFor<OpenCLDevice>
          -
          Parameters:
          +
          +
          Specified by:
          +
          run in interface ImplementationFor<OpenCLDevice>
          +
          Parameters:
          call - The call which ought to be executed on this implementation.
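CLRandomization implements the same ImplementationFor entrypoint, just against OpenCLDevice instead of CPU. For orientation, implementing the interface directly (without any of the helper base classes) could look roughly like the stub below; it assumes run(...) is the interface's only abstract method and that the import paths match the package layout of the generated docs, and it deliberately does no real work.

    // Hypothetical, non-functional stub: a direct ImplementationFor for OpenCL.
    // All import paths are assumptions; only the run(...) signature is documented above.
    import neureka.Tensor;
    import neureka.backend.api.ExecutionCall;
    import neureka.backend.api.ImplementationFor;
    import neureka.devices.opencl.OpenCLDevice;

    public class MyOpenClImplementation implements ImplementationFor<OpenCLDevice> {
        @Override
        public Tensor<?> run(ExecutionCall<OpenCLDevice> call) {
            // A real implementation would enqueue an OpenCL kernel for the
            // call's operands here and return the resulting tensor.
            throw new UnsupportedOperationException("sketch only");
        }
    }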
          -
    -
  • - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/implementations/elementwise/CPUBiElementWise.html b/docs/jdocs/neureka/backend/main/implementations/elementwise/CPUBiElementWise.html index b0560588c..0fcdc1459 100644 --- a/docs/jdocs/neureka/backend/main/implementations/elementwise/CPUBiElementWise.html +++ b/docs/jdocs/neureka/backend/main/implementations/elementwise/CPUBiElementWise.html @@ -1,211 +1,333 @@ - + + - -CPUBiElementWise (neureka 1.0.0 API) - - - - + +CPUBiElementWise (neureka 1.0.1 API) - - - - - - + + -
Class CPUBiElementWise

java.lang.Object
  neureka.backend.main.implementations.elementwise.CPUBiElementWise

public abstract class CPUBiElementWise
extends java.lang.Object
implements ImplementationFor<CPU>

Constructor Detail

CPUBiElementWise
public CPUBiElementWise()

Method Detail

_getFun
protected abstract CPUBiFun _getFun()

_getDeriveAt0
protected abstract CPUBiFun _getDeriveAt0()

_getDeriveAt1
protected abstract CPUBiFun _getDeriveAt1()

run
public Tensor<?> run(ExecutionCall<CPU> call)
Description copied from interface: ImplementationFor (the entrypoint description quoted above).
Specified by:
run in interface ImplementationFor<CPU>
Parameters:
call - The call which ought to be executed on this implementation.
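Judging from the three abstract hooks listed above, concrete subclasses only have to supply the elementwise function and its two partial derivatives, while run is provided by this base class. The following is a hedged sketch of such a subclass, not library code: CPUBiFun is treated as an opaque type because the diff does not show how one is constructed, its import path is assumed, and the method bodies are placeholders.

    import neureka.backend.main.implementations.elementwise.CPUBiElementWise;
    import neureka.backend.main.implementations.fun.api.CPUBiFun; // import path assumed

    // Hypothetical subclass illustrating the documented shape of CPUBiElementWise:
    // the base class asks for the elementwise function f(a, b) and its derivatives
    // with respect to the first and the second operand.
    public class MyBiElementWiseOp extends CPUBiElementWise {

        @Override
        protected CPUBiFun _getFun() {
            // would return the CPUBiFun computing f(a, b), e.g. a + b for addition
            throw new UnsupportedOperationException("placeholder");
        }

        @Override
        protected CPUBiFun _getDeriveAt0() {
            // would return the derivative of f(a, b) with respect to a
            throw new UnsupportedOperationException("placeholder");
        }

        @Override
        protected CPUBiFun _getDeriveAt1() {
            // would return the derivative of f(a, b) with respect to b
            throw new UnsupportedOperationException("placeholder");
        }
    }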
diff --git a/docs/jdocs/neureka/backend/main/implementations/elementwise/CPUBiElementWiseAddition.html b/docs/jdocs/neureka/backend/main/implementations/elementwise/CPUBiElementWiseAddition.html
index 92b5bc3c4..009eb9ad0 100644
-CPUBiElementWiseAddition (neureka 1.0.0 API)
+CPUBiElementWiseAddition (neureka 1.0.1 API)
Class CPUBiElementWiseAddition (java.lang.Object > CPUBiElementWise > CPUBiElementWiseAddition).
Constructor: public CPUBiElementWiseAddition(). Documented method: protected CPUBiFun _getDeriveAt1(), specified by _getDeriveAt1 in class CPUBiElementWise.
diff --git a/docs/jdocs/neureka/backend/main/implementations/elementwise/CPUBiElementWiseDivision.html b/docs/jdocs/neureka/backend/main/implementations/elementwise/CPUBiElementWiseDivision.html
index 11cc2a58b..d4b624598 100644
-CPUBiElementWiseDivision (neureka 1.0.0 API)
+CPUBiElementWiseDivision (neureka 1.0.1 API)
Class CPUBiElementWiseDivision (java.lang.Object > CPUBiElementWise > CPUBiElementWiseDivision).
Constructor: public CPUBiElementWiseDivision(). Documented method: protected CPUBiFun _getDeriveAt1(), specified by _getDeriveAt1 in class CPUBiElementWise.
diff --git a/docs/jdocs/neureka/backend/main/implementations/elementwise/CPUBiElementWiseModulo.html b/docs/jdocs/neureka/backend/main/implementations/elementwise/CPUBiElementWiseModulo.html
index b0f652756..d3acfef34 100644
-CPUBiElementWiseModulo (neureka 1.0.0 API)
+CPUBiElementWiseModulo (neureka 1.0.1 API)
Class CPUBiElementWiseModulo (java.lang.Object > CPUBiElementWise > CPUBiElementWiseModulo).
Constructor: public CPUBiElementWiseModulo(). Documented method: protected CPUBiFun _getDeriveAt1(), specified by _getDeriveAt1 in class CPUBiElementWise.
diff --git a/docs/jdocs/neureka/backend/main/implementations/elementwise/CPUBiElementWiseMultiplication.html b/docs/jdocs/neureka/backend/main/implementations/elementwise/CPUBiElementWiseMultiplication.html
index a5691e1d7..0cd639997 100644
-CPUBiElementWiseMultiplication (neureka 1.0.0 API)
+CPUBiElementWiseMultiplication (neureka 1.0.1 API)
Class CPUBiElementWiseMultiplication (java.lang.Object > CPUBiElementWise > CPUBiElementWiseMultiplication).
Constructor: public CPUBiElementWiseMultiplication(). Documented method: protected CPUBiFun _getDeriveAt1(), specified by _getDeriveAt1 in class CPUBiElementWise.
diff --git a/docs/jdocs/neureka/backend/main/implementations/elementwise/CPUBiElementWisePower.html b/docs/jdocs/neureka/backend/main/implementations/elementwise/CPUBiElementWisePower.html
index ddd3762aa..c2d7d4753 100644
-CPUBiElementWisePower (neureka 1.0.0 API)
+CPUBiElementWisePower (neureka 1.0.1 API)
Class CPUBiElementWisePower (java.lang.Object > CPUBiElementWise > CPUBiElementWisePower).
Constructor: public CPUBiElementWisePower(). Documented method: protected CPUBiFun _getDeriveAt1(), specified by _getDeriveAt1 in class CPUBiElementWise.
diff --git a/docs/jdocs/neureka/backend/main/implementations/elementwise/CPUBiElementWiseSubtraction.html b/docs/jdocs/neureka/backend/main/implementations/elementwise/CPUBiElementWiseSubtraction.html
index 402d9dc2b..7da26315d 100644
-CPUBiElementWiseSubtraction (neureka 1.0.0 API)
+CPUBiElementWiseSubtraction (neureka 1.0.1 API)
Class CPUBiElementWiseSubtraction (java.lang.Object > CPUBiElementWise > CPUBiElementWiseSubtraction).
Constructor: public CPUBiElementWiseSubtraction(). Documented method: protected CPUBiFun _getDeriveAt1(), specified by _getDeriveAt1 in class CPUBiElementWise.
diff --git a/docs/jdocs/neureka/backend/main/implementations/elementwise/CPUElementwiseAssignFun.html b/docs/jdocs/neureka/backend/main/implementations/elementwise/CPUElementwiseAssignFun.html
index fd0ba1fdb..7fd94f9b0 100644
-CPUElementwiseAssignFun (neureka 1.0.0 API)
+CPUElementwiseAssignFun (neureka 1.0.1 API)
Class CPUElementwiseAssignFun (java.lang.Object > CPUElementwiseFunction > CPUElementwiseAssignFun).
Constructor: public CPUElementwiseAssignFun().
Method: public Tensor<?> run(ExecutionCall<CPU> call), specified by run in interface ImplementationFor<CPU>, overrides run in class CPUElementwiseFunction; call - The call which ought to be executed on this implementation.
diff --git a/docs/jdocs/neureka/backend/main/implementations/elementwise/CPUElementwiseFunction.html b/docs/jdocs/neureka/backend/main/implementations/elementwise/CPUElementwiseFunction.html
index f7889d127..e0c430e53 100644
-CPUElementwiseFunction (neureka 1.0.0 API)
+CPUElementwiseFunction (neureka 1.0.1 API)
Class CPUElementwiseFunction (java.lang.Object > CPUElementwiseFunction).
Constructor: public CPUElementwiseFunction(ScalarFun fun).
Method: public Tensor<?> run(ExecutionCall<CPU> call), specified by run in interface ImplementationFor<CPU>; call - The call which ought to be executed on this implementation.
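Since the only documented constructor takes a ScalarFun, an elementwise CPU implementation is apparently assembled by handing it one of the scalar function objects documented further below. A small, hedged wiring sketch follows; the helper class, the choice of ScalarIdentity, and the assumption that CPUElementwiseFunction is directly instantiable are illustrative, not taken from the library.

    import neureka.backend.api.ImplementationFor;
    import neureka.backend.main.implementations.elementwise.CPUElementwiseFunction;
    import neureka.backend.main.implementations.fun.ScalarIdentity;
    import neureka.devices.host.CPU;

    public class ElementwiseWiringSketch {
        // Hypothetical helper: builds a CPU elementwise implementation from a
        // ScalarFun, here the ScalarIdentity documented later in this diff.
        static ImplementationFor<CPU> identityOnCpu() {
            return new CPUElementwiseFunction(new ScalarIdentity());
        }
    }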
diff --git a/docs/jdocs/neureka/backend/main/implementations/elementwise/CPURandomization.html b/docs/jdocs/neureka/backend/main/implementations/elementwise/CPURandomization.html
index bfe19acb6..1e650bd13 100644
-CPURandomization (neureka 1.0.0 API)
+CPURandomization (neureka 1.0.1 API)

Class CPURandomization

java.lang.Object
  neureka.backend.main.implementations.elementwise.CPURandomization

public class CPURandomization
extends java.lang.Object
implements ImplementationFor<CPU>

Constructor Detail

CPURandomization
public CPURandomization()

Method Detail

run
public Tensor<?> run(ExecutionCall<CPU> call)
Description copied from interface: ImplementationFor (the entrypoint description quoted above).
Specified by:
run in interface ImplementationFor<CPU>
Parameters:
call - The call which ought to be executed on this implementation.

fillRandomly
public static <T> T fillRandomly(T data, Arg.Seed seed)

fillRandomly
public static <T> T fillRandomly(T data, java.lang.String seed)

fillRandomly
public static <T> T fillRandomly(T data, long seed)

initialScramble
public static long initialScramble(long seed)

gaussianFrom
public static void gaussianFrom(long seed, double[] out)
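The static helpers above suggest that CPURandomization can also be used on its own to fill a data array deterministically from a seed. A hedged usage sketch follows; passing a primitive double[] for the generic parameter T and the size of the out array handed to gaussianFrom are assumptions, since the diff only shows the signatures.

    import neureka.backend.main.implementations.elementwise.CPURandomization;

    public class RandomFillSketch {
        public static void main(String[] args) {
            // fillRandomly(T data, long seed) is documented above; using a plain
            // double[] as T is an assumption made for this illustration.
            double[] data = new double[16];
            CPURandomization.fillRandomly(data, 42L);

            // initialScramble(long) and gaussianFrom(long, double[]) are also
            // documented above; a two-element out array is assumed here.
            long scrambled = CPURandomization.initialScramble(42L);
            double[] pair = new double[2];
            CPURandomization.gaussianFrom(scrambled, pair);
            System.out.println(pair[0] + ", " + pair[1]);
        }
    }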
diff --git a/docs/jdocs/neureka/backend/main/implementations/elementwise/package-frame.html b/docs/jdocs/neureka/backend/main/implementations/elementwise/package-frame.html
new file mode 100644
index 000000000..c7449f10c
+neureka.backend.main.implementations.elementwise (neureka 1.0.1 API)
New frame page for the package neureka.backend.main.implementations.elementwise.
diff --git a/docs/jdocs/neureka/backend/main/implementations/elementwise/package-summary.html b/docs/jdocs/neureka/backend/main/implementations/elementwise/package-summary.html
index c1167e476..a2a116634 100644
-neureka.backend.main.implementations.elementwise (neureka 1.0.0 API)
+neureka.backend.main.implementations.elementwise (neureka 1.0.1 API)
Package neureka.backend.main.implementations.elementwise (package summary page).
diff --git a/docs/jdocs/neureka/backend/main/implementations/elementwise/package-tree.html b/docs/jdocs/neureka/backend/main/implementations/elementwise/package-tree.html
index 50ff2054e..fc0b10d99 100644
-neureka.backend.main.implementations.elementwise Class Hierarchy (neureka 1.0.0 API)
+neureka.backend.main.implementations.elementwise Class Hierarchy (neureka 1.0.1 API)
Hierarchy For Package neureka.backend.main.implementations.elementwise (Package Hierarchies, Class Hierarchy).
diff --git a/docs/jdocs/neureka/backend/main/implementations/fun/ScalarAbsolute.html b/docs/jdocs/neureka/backend/main/implementations/fun/ScalarAbsolute.html
index fccb6c49c..e26e73db3 100644
-ScalarAbsolute (neureka 1.0.0 API)
+ScalarAbsolute (neureka 1.0.1 API)

Class ScalarAbsolute

java.lang.Object
  neureka.backend.main.implementations.fun.ScalarAbsolute

public final class ScalarAbsolute
extends java.lang.Object
implements ScalarFun

Constructor Detail

ScalarAbsolute
public ScalarAbsolute()

Method Detail

activationCode
public java.lang.String activationCode()
Specified by: activationCode in interface ScalarFun

derivationCode
public java.lang.String derivationCode()
Specified by: derivationCode in interface ScalarFun

getActivation
public CPUFun getActivation()
Specified by: getActivation in interface ScalarFun

getDerivative
public CPUFun getDerivative()
Specified by: getDerivative in interface ScalarFun
diff --git a/docs/jdocs/neureka/backend/main/implementations/fun/ScalarCbrt.html b/docs/jdocs/neureka/backend/main/implementations/fun/ScalarCbrt.html
index bd1cd407e..55468c7db 100644
-ScalarCbrt (neureka 1.0.0 API)
+ScalarCbrt (neureka 1.0.1 API)
Class ScalarCbrt (neureka.backend.main.implementations.fun): public class ScalarCbrt extends java.lang.Object implements ScalarFun.
Same member layout as ScalarAbsolute above: constructor ScalarCbrt(); activationCode(), derivationCode(), getActivation() and getDerivative(), each specified by interface ScalarFun.
diff --git a/docs/jdocs/neureka/backend/main/implementations/fun/ScalarCosinus.html b/docs/jdocs/neureka/backend/main/implementations/fun/ScalarCosinus.html
index dd0d215f1..7cf02deba 100644
-ScalarCosinus (neureka 1.0.0 API)
+ScalarCosinus (neureka 1.0.1 API)
Class ScalarCosinus (neureka.backend.main.implementations.fun): public final class ScalarCosinus extends java.lang.Object implements ScalarFun.
Same member layout as ScalarAbsolute above: constructor ScalarCosinus(); activationCode(), derivationCode(), getActivation() and getDerivative(), each specified by interface ScalarFun.
diff --git a/docs/jdocs/neureka/backend/main/implementations/fun/ScalarExp.html b/docs/jdocs/neureka/backend/main/implementations/fun/ScalarExp.html
index b042ebc90..5e25c2a09 100644
-ScalarExp (neureka 1.0.0 API)
+ScalarExp (neureka 1.0.1 API)
Class ScalarExp (neureka.backend.main.implementations.fun): public class ScalarExp extends java.lang.Object implements ScalarFun.
Same member layout as ScalarAbsolute above: constructor ScalarExp(); activationCode(), derivationCode(), getActivation() and getDerivative(), each specified by interface ScalarFun.
diff --git a/docs/jdocs/neureka/backend/main/implementations/fun/ScalarGaSU.html b/docs/jdocs/neureka/backend/main/implementations/fun/ScalarGaSU.html
index 154072885..dd6b7cfb5 100644
-ScalarGaSU (neureka 1.0.0 API)
+ScalarGaSU (neureka 1.0.1 API)
Class ScalarGaSU (neureka.backend.main.implementations.fun), implements ScalarFun.
Same member layout as ScalarAbsolute above: constructor ScalarGaSU(); activationCode(), derivationCode(), getActivation() and getDerivative(), each specified by interface ScalarFun.
diff --git a/docs/jdocs/neureka/backend/main/implementations/fun/ScalarGaTU.html b/docs/jdocs/neureka/backend/main/implementations/fun/ScalarGaTU.html
index 1c17df784..4fcdb64b1 100644
-ScalarGaTU (neureka 1.0.0 API)
+ScalarGaTU (neureka 1.0.1 API)
Class ScalarGaTU (neureka.backend.main.implementations.fun), implements ScalarFun.
Same member layout as ScalarAbsolute above: constructor ScalarGaTU(); activationCode(), derivationCode(), getActivation() and getDerivative(), each specified by interface ScalarFun.
diff --git a/docs/jdocs/neureka/backend/main/implementations/fun/ScalarGaussian.html b/docs/jdocs/neureka/backend/main/implementations/fun/ScalarGaussian.html
index 69859aadf..34e08ccb8 100644
-ScalarGaussian (neureka 1.0.0 API)
+ScalarGaussian (neureka 1.0.1 API)
Class ScalarGaussian (neureka.backend.main.implementations.fun): public final class ScalarGaussian extends java.lang.Object implements ScalarFun.
Same member layout as ScalarAbsolute above: constructor ScalarGaussian(); activationCode(), derivationCode(), getActivation() and getDerivative(), each specified by interface ScalarFun.
diff --git a/docs/jdocs/neureka/backend/main/implementations/fun/ScalarGaussianFast.html b/docs/jdocs/neureka/backend/main/implementations/fun/ScalarGaussianFast.html
index f514228e5..9cfa30dca 100644
-ScalarGaussianFast (neureka 1.0.0 API)
+ScalarGaussianFast (neureka 1.0.1 API)
Class ScalarGaussianFast (neureka.backend.main.implementations.fun): public class ScalarGaussianFast extends java.lang.Object implements ScalarFun.
Same member layout as ScalarAbsolute above: constructor ScalarGaussianFast(); activationCode(), derivationCode(), getActivation() and getDerivative(), each specified by interface ScalarFun.
diff --git a/docs/jdocs/neureka/backend/main/implementations/fun/ScalarGeLU.html b/docs/jdocs/neureka/backend/main/implementations/fun/ScalarGeLU.html
index 3d3c748e4..613e2230f 100644
-ScalarGeLU (neureka 1.0.0 API)
+ScalarGeLU (neureka 1.0.1 API)

Class ScalarGeLU

java.lang.Object
  neureka.backend.main.implementations.fun.ScalarGeLU

All Implemented Interfaces:
ScalarFun

public class ScalarGeLU
extends java.lang.Object
implements ScalarFun

The GELU activation function is based on the standard Gaussian cumulative distribution function and is defined as x Φ(x), implemented as x * sigmoid(x * 1.702). The GELU non-linearity weighs inputs by their percentile rather than gating inputs by their sign as ReLUs do. Consequently, the GELU can be thought of as a smoother ReLU.

Methods inherited from class java.lang.Object:
clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait

Methods inherited from interface neureka.backend.main.implementations.fun.api.ScalarFun:
calculate

Constructor Detail

ScalarGeLU
public ScalarGeLU()

Method Detail

activationCode
public java.lang.String activationCode()
Specified by: activationCode in interface ScalarFun

derivationCode
public java.lang.String derivationCode()
Specified by: derivationCode in interface ScalarFun

getActivation
public CPUFun getActivation()
Specified by: getActivation in interface ScalarFun

getDerivative
public CPUFun getDerivative()
Specified by: getDerivative in interface ScalarFun

gelu
public static double gelu(double x)
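The definition quoted above, x Φ(x) approximated as x * sigmoid(x * 1.702), translates directly into scalar code. The following is a standalone sketch of that textbook approximation, not a copy of the library's own gelu(double) helper.

    // Standalone sketch of the GELU approximation described above.
    public final class GeluSketch {

        static double sigmoid(double x) {
            return 1.0 / (1.0 + Math.exp(-x));
        }

        static double gelu(double x) {
            return x * sigmoid(1.702 * x);
        }

        public static void main(String[] args) {
            // GELU behaves like a smoother ReLU: negative inputs are damped, not cut off.
            System.out.println(gelu(-2.0)); // small negative value, close to 0
            System.out.println(gelu( 2.0)); // close to 2
        }
    }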
diff --git a/docs/jdocs/neureka/backend/main/implementations/fun/ScalarIdentity.html b/docs/jdocs/neureka/backend/main/implementations/fun/ScalarIdentity.html
index 1a99a5f3a..0c8a3d03f 100644
-ScalarIdentity (neureka 1.0.0 API)
+ScalarIdentity (neureka 1.0.1 API)
Class ScalarIdentity (neureka.backend.main.implementations.fun): public final class ScalarIdentity extends java.lang.Object implements ScalarFun.
Same member layout as ScalarAbsolute above: constructor ScalarIdentity(); activationCode(), derivationCode(), getActivation() and getDerivative(), each specified by interface ScalarFun.
diff --git a/docs/jdocs/neureka/backend/main/implementations/fun/ScalarLog10.html b/docs/jdocs/neureka/backend/main/implementations/fun/ScalarLog10.html
index f13024f48..5d71e98e2 100644
-ScalarLog10 (neureka 1.0.0 API)
+ScalarLog10 (neureka 1.0.1 API)
Class ScalarLog10 (neureka.backend.main.implementations.fun): public class ScalarLog10 extends java.lang.Object implements ScalarFun.
Same member layout as ScalarAbsolute above: constructor ScalarLog10(); activationCode(), derivationCode(), getActivation() and getDerivative(), each specified by interface ScalarFun.
diff --git a/docs/jdocs/neureka/backend/main/implementations/fun/ScalarLogarithm.html b/docs/jdocs/neureka/backend/main/implementations/fun/ScalarLogarithm.html
index 4af770de3..1dfee1de4 100644
-ScalarLogarithm (neureka 1.0.0 API)
+ScalarLogarithm (neureka 1.0.1 API)
Class ScalarLogarithm (neureka.backend.main.implementations.fun): public final class ScalarLogarithm extends java.lang.Object implements ScalarFun.
Same member layout as ScalarAbsolute above: constructor ScalarLogarithm(); activationCode(), derivationCode(), getActivation() and getDerivative(), each specified by interface ScalarFun.
diff --git a/docs/jdocs/neureka/backend/main/implementations/fun/ScalarQuadratic.html b/docs/jdocs/neureka/backend/main/implementations/fun/ScalarQuadratic.html
index 44c14155a..6708e25f7 100644
-ScalarQuadratic (neureka 1.0.0 API)
+ScalarQuadratic (neureka 1.0.1 API)
Class ScalarQuadratic (neureka.backend.main.implementations.fun): public final class ScalarQuadratic extends java.lang.Object implements ScalarFun.
Same member layout as ScalarAbsolute above: constructor ScalarQuadratic(); activationCode(), derivationCode(), getActivation() and getDerivative(), each specified by interface ScalarFun.
diff --git a/docs/jdocs/neureka/backend/main/implementations/fun/ScalarReLU.html b/docs/jdocs/neureka/backend/main/implementations/fun/ScalarReLU.html
index 691009e61..cd946c9ff 100644
-ScalarReLU (neureka 1.0.0 API)
+ScalarReLU (neureka 1.0.1 API)
Class ScalarReLU (neureka.backend.main.implementations.fun): public final class ScalarReLU extends java.lang.Object implements ScalarFun.
Same member layout as ScalarAbsolute above: constructor ScalarReLU(); activationCode(), derivationCode(), getActivation() and getDerivative(), each specified by interface ScalarFun.
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/implementations/fun/ScalarSeLU.html b/docs/jdocs/neureka/backend/main/implementations/fun/ScalarSeLU.html index 9ddef55bd..8e1103886 100644 --- a/docs/jdocs/neureka/backend/main/implementations/fun/ScalarSeLU.html +++ b/docs/jdocs/neureka/backend/main/implementations/fun/ScalarSeLU.html @@ -1,249 +1,388 @@ - + + - -ScalarSeLU (neureka 1.0.0 API) - - - - + +ScalarSeLU (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class ScalarSeLU

    -
    -
    java.lang.Object -
    neureka.backend.main.implementations.fun.ScalarSeLU
    +
    neureka.backend.main.implementations.fun
    +

    Class ScalarSeLU

    -
    -
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.backend.main.implementations.fun.ScalarSeLU
      • +
      +
    • +
    +
    +
      +
    • +
      All Implemented Interfaces:
      -
      ScalarFun
      +
      ScalarFun

      -
      public class ScalarSeLU -extends Object -implements ScalarFun
      +
      +
      public class ScalarSeLU
      +extends java.lang.Object
      +implements ScalarFun
      The Scaled Exponential Linear Unit, or SELU, is an activation function that induces self-normalizing properties. The SELU activation function is implemented as: - - if ( x > 0 ) return SCALE * x; + if ( x > 0 ) return SCALE * x; else if ( x <= 0 ) return SCALE * ALPHA * (Math.exp(x) - 1); else return Float.NaN;
      ...where ALPHA == 1.6733 and SCALE == 1.0507.
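As a quick sanity check of the formula quoted above, here is a self-contained Java sketch that mirrors the documented branches one-to-one; the ALPHA and SCALE constants are local to the example and simply reuse the values 1.6733 and 1.0507 stated in the javadoc, so this illustrates the formula only and is not the actual ScalarSeLU source.

// Standalone sketch mirroring the SELU formula quoted in the javadoc above.
// ALPHA and SCALE are example-local constants using the documented values;
// they are not necessarily the exact fields declared inside ScalarSeLU.
public class SeluSketch {
    static final double ALPHA = 1.6733;
    static final double SCALE = 1.0507;

    static double selu(double x) {
        if ( x > 0 ) return SCALE * x;                                // linear branch
        else if ( x <= 0 ) return SCALE * ALPHA * (Math.exp(x) - 1); // saturating branch
        else return Double.NaN;                                       // only reached for NaN input
    }

    public static void main(String[] args) {
        System.out.println(selu( 1.0)); // ~  1.0507
        System.out.println(selu(-1.0)); // ~ -1.1113
    }
}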
      -
    -
    -
      + +
    +
    +
    +
    -
    -

    Methods inherited from class java.lang.Object

    -clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
    -
    -

    Methods inherited from interface neureka.backend.main.implementations.fun.api.ScalarFun

    -calculate
    - - - - -
    -
      +
      +
        +
      • -
      • -
        -

        Constructor Details

        -
          -
        • -
          -

          ScalarSeLU

          -
          public ScalarSeLU()
          -
          +
            +
          • + + +

            Constructor Detail

            + + + +
              +
            • +

              ScalarSeLU

              +
              public ScalarSeLU()
            -
      • +
      -
    • -
      -

      Method Details

      - + + + +
        +
      • +

        activationCode

        +
        public java.lang.String activationCode()
        +
        +
        Specified by:
        +
        activationCode in interface ScalarFun
        -
    • -
    • -
      -

      derivationCode

      -
      public String derivationCode()
      -
      -
      Specified by:
      -
      derivationCode in interface ScalarFun
      +
    + + + +
      +
    • +

      derivationCode

      +
      public java.lang.String derivationCode()
      +
      +
      Specified by:
      +
      derivationCode in interface ScalarFun
      -
    -
  • -
    -

    getActivation

    -
    public CPUFun getActivation()
    -
    -
    Specified by:
    -
    getActivation in interface ScalarFun
    + + + + +
  • -
  • -
    -

    getDerivative

    -
    public CPUFun getDerivative()
    -
    -
    Specified by:
    -
    getDerivative in interface ScalarFun
    + + + + +
  • -
  • -
    -

    selu

    -
    public static double selu(double x)
    -
    + + + + +
      +
    • +

      selu

      +
      public static double selu(double x)
      +
    • +
  • - - +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/implementations/fun/ScalarSiLU.html b/docs/jdocs/neureka/backend/main/implementations/fun/ScalarSiLU.html index 496f4e94e..a49e7c8b5 100644 --- a/docs/jdocs/neureka/backend/main/implementations/fun/ScalarSiLU.html +++ b/docs/jdocs/neureka/backend/main/implementations/fun/ScalarSiLU.html @@ -1,244 +1,384 @@ - + + - -ScalarSiLU (neureka 1.0.0 API) - - - - + +ScalarSiLU (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class ScalarSiLU

    -
    -
    java.lang.Object -
    neureka.backend.main.implementations.fun.ScalarSiLU
    +
    neureka.backend.main.implementations.fun
    +

    Class ScalarSiLU

    -
    -
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.backend.main.implementations.fun.ScalarSiLU
      • +
      +
    • +
    +
    +
      +
    • +
      All Implemented Interfaces:
      -
      ScalarFun
      +
      ScalarFun

      -
      public class ScalarSiLU -extends Object -implements ScalarFun
      +
      +
      public class ScalarSiLU
      +extends java.lang.Object
      +implements ScalarFun
The SiLU activation function, also known as the swish function, is defined as x * sigmoid(x). It is a smooth, non-monotonic function that consistently matches or outperforms ReLU on deep networks; it is unbounded above and bounded below.
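The definition above is simple enough to restate as a runnable sketch; the sigmoid helper below is an example-local implementation and not the code ScalarSiLU actually delegates to.

// Example-local sketch of the documented definition silu(x) = x * sigmoid(x).
public class SiluSketch {
    static double sigmoid(double x) { return 1.0 / (1.0 + Math.exp(-x)); }
    static double silu(double x)    { return x * sigmoid(x); }

    public static void main(String[] args) {
        System.out.println(silu( 0.0)); //  0.0
        System.out.println(silu( 2.0)); // ~  1.7616 (close to ReLU for large positive inputs)
        System.out.println(silu(-2.0)); // ~ -0.2384 (bounded below, unlike ReLU)
    }
}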
      -
    -
    -
      + +
    +
    +
    +
    -
    -

    Methods inherited from class java.lang.Object

    -clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
    -
    -

    Methods inherited from interface neureka.backend.main.implementations.fun.api.ScalarFun

    -calculate
    - - - - -
    -
      +
      +
        +
      • -
      • -
        -

        Constructor Details

        -
          -
        • -
          -

          ScalarSiLU

          -
          public ScalarSiLU()
          -
          +
            +
          • + + +

            Constructor Detail

            + + + +
              +
            • +

              ScalarSiLU

              +
              public ScalarSiLU()
            -
      • +
      -
    • -
      -

      Method Details

      - + + + +
        +
      • +

        activationCode

        +
        public java.lang.String activationCode()
        +
        +
        Specified by:
        +
        activationCode in interface ScalarFun
        -
    • -
    • -
      -

      derivationCode

      -
      public String derivationCode()
      -
      -
      Specified by:
      -
      derivationCode in interface ScalarFun
      +
    + + + +
      +
    • +

      derivationCode

      +
      public java.lang.String derivationCode()
      +
      +
      Specified by:
      +
      derivationCode in interface ScalarFun
      -
    -
  • -
    -

    getActivation

    -
    public CPUFun getActivation()
    -
    -
    Specified by:
    -
    getActivation in interface ScalarFun
    + + + + +
  • -
  • -
    -

    getDerivative

    -
    public CPUFun getDerivative()
    -
    -
    Specified by:
    -
    getDerivative in interface ScalarFun
    + + + + +
  • -
  • -
    -

    silu

    -
    public static double silu(double x)
    -
    + + + + +
      +
    • +

      silu

      +
      public static double silu(double x)
      +
    • +
  • - - +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/implementations/fun/ScalarSigmoid.html b/docs/jdocs/neureka/backend/main/implementations/fun/ScalarSigmoid.html index 0e860c203..781fc1670 100644 --- a/docs/jdocs/neureka/backend/main/implementations/fun/ScalarSigmoid.html +++ b/docs/jdocs/neureka/backend/main/implementations/fun/ScalarSigmoid.html @@ -1,240 +1,380 @@ - + + - -ScalarSigmoid (neureka 1.0.0 API) - - - - + +ScalarSigmoid (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class ScalarSigmoid

    -
    -
    java.lang.Object -
    neureka.backend.main.implementations.fun.ScalarSigmoid
    +
    neureka.backend.main.implementations.fun
    +

    Class ScalarSigmoid

    -
    -
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.backend.main.implementations.fun.ScalarSigmoid
      • +
      +
    • +
    +
    +
    -
    -
      +
      +
      public final class ScalarSigmoid
      +extends java.lang.Object
      +implements ScalarFun
      + +
    +
    +
    +
    -
    -

    Methods inherited from class java.lang.Object

    -clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
    -
    -

    Methods inherited from interface neureka.backend.main.implementations.fun.api.ScalarFun

    -calculate
    - - - - -
    -
      +
      +
        +
      • -
      • -
        -

        Constructor Details

        -
          -
        • -
          -

          ScalarSigmoid

          -
          public ScalarSigmoid()
          -
          +
            +
          • + + +

            Constructor Detail

            + + + +
              +
            • +

              ScalarSigmoid

              +
              public ScalarSigmoid()
            -
      • +
      -
    • -
      -

      Method Details

      - + + + +
        +
      • +

        activationCode

        +
        public java.lang.String activationCode()
        +
        +
        Specified by:
        +
        activationCode in interface ScalarFun
        -
    • -
    • -
      -

      derivationCode

      -
      public String derivationCode()
      -
      -
      Specified by:
      -
      derivationCode in interface ScalarFun
      +
    + + + +
      +
    • +

      derivationCode

      +
      public java.lang.String derivationCode()
      +
      +
      Specified by:
      +
      derivationCode in interface ScalarFun
      -
    -
  • -
    -

    getActivation

    -
    public CPUFun getActivation()
    -
    -
    Specified by:
    -
    getActivation in interface ScalarFun
    + + + + +
  • -
  • -
    -

    getDerivative

    -
    public CPUFun getDerivative()
    -
    -
    Specified by:
    -
    getDerivative in interface ScalarFun
    + + + + +
  • -
  • -
    -

    sig

    -
    public static double sig(double x)
    -
    + + + + +
      +
    • +

      sig

      +
      public static double sig(double x)
      +
    • +
  • - - +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/implementations/fun/ScalarSinus.html b/docs/jdocs/neureka/backend/main/implementations/fun/ScalarSinus.html index 74a89a0bf..b15d9a8b8 100644 --- a/docs/jdocs/neureka/backend/main/implementations/fun/ScalarSinus.html +++ b/docs/jdocs/neureka/backend/main/implementations/fun/ScalarSinus.html @@ -1,231 +1,367 @@ - + + - -ScalarSinus (neureka 1.0.0 API) - - - - + +ScalarSinus (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class ScalarSinus

    -
    -
    java.lang.Object -
    neureka.backend.main.implementations.fun.ScalarSinus
    +
    neureka.backend.main.implementations.fun
    +

    Class ScalarSinus

    -
    -
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.backend.main.implementations.fun.ScalarSinus
      • +
      +
    • +
    +
    +
    -
    -
      +
      +
      public final class ScalarSinus
      +extends java.lang.Object
      +implements ScalarFun
      + +
    +
    +
    + - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        ScalarSinus

        -
        public ScalarSinus()
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            ScalarSinus

            +
            public ScalarSinus()
          -
    • +
    -
  • -
    -

    Method Details

    - + + + +
      +
    • +

      activationCode

      +
      public java.lang.String activationCode()
      +
      +
      Specified by:
      +
      activationCode in interface ScalarFun
      -
  • -
  • -
    -

    derivationCode

    -
    public String derivationCode()
    -
    -
    Specified by:
    -
    derivationCode in interface ScalarFun
    + + + + +
      +
    • +

      derivationCode

      +
      public java.lang.String derivationCode()
      +
      +
      Specified by:
      +
      derivationCode in interface ScalarFun
      -
  • -
  • -
    -

    getActivation

    -
    public CPUFun getActivation()
    -
    -
    Specified by:
    -
    getActivation in interface ScalarFun
    + + + + +
  • -
  • -
    -

    getDerivative

    -
    public CPUFun getDerivative()
    -
    -
    Specified by:
    -
    getDerivative in interface ScalarFun
    + + + + +
  • - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/implementations/fun/ScalarSoftplus.html b/docs/jdocs/neureka/backend/main/implementations/fun/ScalarSoftplus.html index 0154b12c6..832709431 100644 --- a/docs/jdocs/neureka/backend/main/implementations/fun/ScalarSoftplus.html +++ b/docs/jdocs/neureka/backend/main/implementations/fun/ScalarSoftplus.html @@ -1,233 +1,369 @@ - + + - -ScalarSoftplus (neureka 1.0.0 API) - - - - + +ScalarSoftplus (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class ScalarSoftplus

    -
    -
    java.lang.Object -
    neureka.backend.main.implementations.fun.ScalarSoftplus
    +
    neureka.backend.main.implementations.fun
    +

    Class ScalarSoftplus

    -
    -
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.backend.main.implementations.fun.ScalarSoftplus
      • +
      +
    • +
    +
    +
      +
    • +
      All Implemented Interfaces:
      -
      ScalarFun
      +
      ScalarFun

      -
      public final class ScalarSoftplus -extends Object -implements ScalarFun
      +
      +
      public final class ScalarSoftplus
      +extends java.lang.Object
      +implements ScalarFun
      SoftPlus is a smooth approximation to the ReLU function and can be used to constrain the output of a machine to always be positive.
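The hunk above only describes softplus qualitatively; the sketch below uses the textbook definition softplus(x) = ln(1 + e^x), which is assumed here and may differ in numerical details from what ScalarSoftplus actually computes.

// Sketch of the textbook softplus definition ln(1 + e^x) (an assumption, see note above).
// The output is strictly positive and approaches ReLU for inputs of large magnitude.
public class SoftplusSketch {
    static double softplus(double x) { return Math.log(1.0 + Math.exp(x)); }

    public static void main(String[] args) {
        System.out.println(softplus(-4.0)); // ~ 0.0181 (small, but still > 0)
        System.out.println(softplus( 0.0)); // ~ 0.6931 (ln 2)
        System.out.println(softplus( 4.0)); // ~ 4.0181 (roughly x for large x)
    }
}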
      -
    -
    -
      + +
    +
    +
    + - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        ScalarSoftplus

        -
        public ScalarSoftplus()
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            ScalarSoftplus

            +
            public ScalarSoftplus()
          -
    • +
    -
  • -
    -

    Method Details

    - + + + +
      +
    • +

      activationCode

      +
      public java.lang.String activationCode()
      +
      +
      Specified by:
      +
      activationCode in interface ScalarFun
      -
  • -
  • -
    -

    derivationCode

    -
    public String derivationCode()
    -
    -
    Specified by:
    -
    derivationCode in interface ScalarFun
    + + + + +
      +
    • +

      derivationCode

      +
      public java.lang.String derivationCode()
      +
      +
      Specified by:
      +
      derivationCode in interface ScalarFun
      -
  • -
  • -
    -

    getActivation

    -
    public CPUFun getActivation()
    -
    -
    Specified by:
    -
    getActivation in interface ScalarFun
    + + + + +
  • -
  • -
    -

    getDerivative

    -
    public CPUFun getDerivative()
    -
    -
    Specified by:
    -
    getDerivative in interface ScalarFun
    + + + + +
  • - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/implementations/fun/ScalarSoftsign.html b/docs/jdocs/neureka/backend/main/implementations/fun/ScalarSoftsign.html index 9ebf83e1e..a5c19319a 100644 --- a/docs/jdocs/neureka/backend/main/implementations/fun/ScalarSoftsign.html +++ b/docs/jdocs/neureka/backend/main/implementations/fun/ScalarSoftsign.html @@ -1,255 +1,399 @@ - + + - -ScalarSoftsign (neureka 1.0.0 API) - - - - + +ScalarSoftsign (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class ScalarSoftsign

    -
    -
    java.lang.Object -
    neureka.backend.main.implementations.fun.ScalarSoftsign
    +
    neureka.backend.main.implementations.fun
    +

    Class ScalarSoftsign

    -
    -
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.backend.main.implementations.fun.ScalarSoftsign
      • +
      +
    • +
    +
    +
      +
    • +
      All Implemented Interfaces:
      -
      ScalarFun
      +
      ScalarFun

      -
      public class ScalarSoftsign -extends Object -implements ScalarFun
      +
      +
      public class ScalarSoftsign
      +extends java.lang.Object
      +implements ScalarFun
The softsign function, defined as x / ( 1 + Math.abs( x ) ), is a computationally cheap, zero-centered activation function - which rescales the inputs between -1 and 1, very much like the ScalarTanh function. + which rescales the inputs between -1 and 1, very much like the ScalarTanh function. The softsign function converges polynomially and is computationally cheaper than the tanh function, which converges exponentially. - This makes this function a computationally cheap non-exponential quasi ScalarTanh!
      -
    -
    -
      + This makes this function a computationally cheap non-exponential quasi ScalarTanh!
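Since the softsign formula is spelled out above, a tiny standalone sketch makes the bounded, slowly saturating behaviour easy to verify; it only restates the documented expression and is not the ScalarSoftsign source.

// Example-local sketch of the documented softsign formula x / ( 1 + Math.abs( x ) ).
public class SoftsignSketch {
    static double softsign(double x) { return x / (1.0 + Math.abs(x)); }

    public static void main(String[] args) {
        System.out.println(softsign(  0.5)); // ~ 0.3333
        System.out.println(softsign( -3.0)); // -0.75
        System.out.println(softsign(100.0)); // ~ 0.9901 (creeps toward 1 only polynomially)
    }
}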
    + + +
    +
    +
    -
    -

    Methods inherited from class java.lang.Object

    -clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
    -
    -

    Methods inherited from interface neureka.backend.main.implementations.fun.api.ScalarFun

    -calculate
    - - - - -
    -
      +
      +
        +
      • -
      • -
        -

        Constructor Details

        -
          -
        • -
          -

          ScalarSoftsign

          -
          public ScalarSoftsign()
          -
          +
            +
          • + + +

            Constructor Detail

            + + + +
              +
            • +

              ScalarSoftsign

              +
              public ScalarSoftsign()
            -
      • +
      -
    • -
      -

      Method Details

      - + + + +
        +
      • +

        activationCode

        +
        public java.lang.String activationCode()
        +
        +
        Specified by:
        +
        activationCode in interface ScalarFun
        -
    • -
    • -
      -

      derivationCode

      -
      public String derivationCode()
      -
      -
      Specified by:
      -
      derivationCode in interface ScalarFun
      +
    + + + +
      +
    • +

      derivationCode

      +
      public java.lang.String derivationCode()
      +
      +
      Specified by:
      +
      derivationCode in interface ScalarFun
      -
    -
  • -
    -

    getActivation

    -
    public CPUFun getActivation()
    -
    -
    Specified by:
    -
    getActivation in interface ScalarFun
    + + + + +
  • -
  • -
    -

    getDerivative

    -
    public CPUFun getDerivative()
    -
    -
    Specified by:
    -
    getDerivative in interface ScalarFun
    + + + + +
  • -
  • -
    -

    softsign

    -
    public static double softsign(double x)
    -
    + + + + +
      +
    • +

      softsign

      +
      public static double softsign(double x)
    • -
    • -
      -

      softsign

      -
      public static float softsign(float x)
      -
      +
    + + + +
      +
    • +

      softsign

      +
      public static float softsign(float x)
    -
  • - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/implementations/fun/ScalarSqrt.html b/docs/jdocs/neureka/backend/main/implementations/fun/ScalarSqrt.html index 5ba0b0224..69e9f5f7f 100644 --- a/docs/jdocs/neureka/backend/main/implementations/fun/ScalarSqrt.html +++ b/docs/jdocs/neureka/backend/main/implementations/fun/ScalarSqrt.html @@ -1,231 +1,367 @@ - + + - -ScalarSqrt (neureka 1.0.0 API) - - - - + +ScalarSqrt (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class ScalarSqrt

    -
    -
    java.lang.Object -
    neureka.backend.main.implementations.fun.ScalarSqrt
    +
    neureka.backend.main.implementations.fun
    +

    Class ScalarSqrt

    -
    -
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.backend.main.implementations.fun.ScalarSqrt
      • +
      +
    • +
    +
    +
    -
    -
      +
      +
      public class ScalarSqrt
      +extends java.lang.Object
      +implements ScalarFun
      + +
    +
    +
    + - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        ScalarSqrt

        -
        public ScalarSqrt()
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            ScalarSqrt

            +
            public ScalarSqrt()
          -
    • +
    -
  • -
    -

    Method Details

    - + + + +
      +
    • +

      activationCode

      +
      public java.lang.String activationCode()
      +
      +
      Specified by:
      +
      activationCode in interface ScalarFun
      -
  • -
  • -
    -

    derivationCode

    -
    public String derivationCode()
    -
    -
    Specified by:
    -
    derivationCode in interface ScalarFun
    + + + + +
      +
    • +

      derivationCode

      +
      public java.lang.String derivationCode()
      +
      +
      Specified by:
      +
      derivationCode in interface ScalarFun
      -
  • -
  • -
    -

    getActivation

    -
    public CPUFun getActivation()
    -
    -
    Specified by:
    -
    getActivation in interface ScalarFun
    + + + + +
  • -
  • -
    -

    getDerivative

    -
    public CPUFun getDerivative()
    -
    -
    Specified by:
    -
    getDerivative in interface ScalarFun
    + + + + +
  • - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/implementations/fun/ScalarTanh.html b/docs/jdocs/neureka/backend/main/implementations/fun/ScalarTanh.html index 3f960b48d..5eb411dd0 100644 --- a/docs/jdocs/neureka/backend/main/implementations/fun/ScalarTanh.html +++ b/docs/jdocs/neureka/backend/main/implementations/fun/ScalarTanh.html @@ -1,249 +1,393 @@ - + + - -ScalarTanh (neureka 1.0.0 API) - - - - + +ScalarTanh (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class ScalarTanh

    -
    -
    java.lang.Object -
    neureka.backend.main.implementations.fun.ScalarTanh
    +
    neureka.backend.main.implementations.fun
    +

    Class ScalarTanh

    -
    -
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.backend.main.implementations.fun.ScalarTanh
      • +
      +
    • +
    +
    +
    -
    -
      +
      +
      public final class ScalarTanh
      +extends java.lang.Object
      +implements ScalarFun
      + +
    +
    +
    +
    -
    -

    Methods inherited from class java.lang.Object

    -clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
    -
    -

    Methods inherited from interface neureka.backend.main.implementations.fun.api.ScalarFun

    -calculate
    - - - - -
    -
      +
      +
        +
      • -
      • -
        -

        Constructor Details

        -
          -
        • -
          -

          ScalarTanh

          -
          public ScalarTanh()
          -
          +
            +
          • + + +

            Constructor Detail

            + + + +
              +
            • +

              ScalarTanh

              +
              public ScalarTanh()
            -
      • +
      -
    • -
      -

      Method Details

      - + + + +
        +
      • +

        activationCode

        +
        public java.lang.String activationCode()
        +
        +
        Specified by:
        +
        activationCode in interface ScalarFun
        -
    • -
    • -
      -

      derivationCode

      -
      public String derivationCode()
      -
      -
      Specified by:
      -
      derivationCode in interface ScalarFun
      +
    + + + +
      +
    • +

      derivationCode

      +
      public java.lang.String derivationCode()
      +
      +
      Specified by:
      +
      derivationCode in interface ScalarFun
      -
    -
  • -
    -

    getActivation

    -
    public CPUFun getActivation()
    -
    -
    Specified by:
    -
    getActivation in interface ScalarFun
    + + + + +
  • -
  • -
    -

    getDerivative

    -
    public CPUFun getDerivative()
    -
    -
    Specified by:
    -
    getDerivative in interface ScalarFun
    + + + + +
  • -
  • -
    -

    tanh

    -
    public static double tanh(double x)
    -
    + + + + +
      +
    • +

      tanh

      +
      public static double tanh(double x)
    • -
    • -
      -

      tanh

      -
      public static float tanh(float x)
      -
      +
    + + + +
      +
    • +

      tanh

      +
      public static float tanh(float x)
    -
  • - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/implementations/fun/ScalarTanhFast.html b/docs/jdocs/neureka/backend/main/implementations/fun/ScalarTanhFast.html index c973110c9..92bfafeeb 100644 --- a/docs/jdocs/neureka/backend/main/implementations/fun/ScalarTanhFast.html +++ b/docs/jdocs/neureka/backend/main/implementations/fun/ScalarTanhFast.html @@ -1,231 +1,367 @@ - + + - -ScalarTanhFast (neureka 1.0.0 API) - - - - + +ScalarTanhFast (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class ScalarTanhFast

    -
    -
    java.lang.Object -
    neureka.backend.main.implementations.fun.ScalarTanhFast
    +
    neureka.backend.main.implementations.fun
    +

    Class ScalarTanhFast

    -
    -
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.backend.main.implementations.fun.ScalarTanhFast
      • +
      +
    • +
    +
    +
    -
    -
      +
      +
      public class ScalarTanhFast
      +extends java.lang.Object
      +implements ScalarFun
      + +
    +
    +
    + - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        ScalarTanhFast

        -
        public ScalarTanhFast()
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            ScalarTanhFast

            +
            public ScalarTanhFast()
          -
    • +
    -
  • -
    -

    Method Details

    - + + + +
      +
    • +

      activationCode

      +
      public java.lang.String activationCode()
      +
      +
      Specified by:
      +
      activationCode in interface ScalarFun
      -
  • -
  • -
    -

    derivationCode

    -
    public String derivationCode()
    -
    -
    Specified by:
    -
    derivationCode in interface ScalarFun
    + + + + +
      +
    • +

      derivationCode

      +
      public java.lang.String derivationCode()
      +
      +
      Specified by:
      +
      derivationCode in interface ScalarFun
      -
  • -
  • -
    -

    getActivation

    -
    public CPUFun getActivation()
    -
    -
    Specified by:
    -
    getActivation in interface ScalarFun
    + + + + +
  • -
  • -
    -

    getDerivative

    -
    public CPUFun getDerivative()
    -
    -
    Specified by:
    -
    getDerivative in interface ScalarFun
    + + + + +
  • - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/implementations/fun/api/CPUBiFun.html b/docs/jdocs/neureka/backend/main/implementations/fun/api/CPUBiFun.html index 60d4254ee..09f09cf30 100644 --- a/docs/jdocs/neureka/backend/main/implementations/fun/api/CPUBiFun.html +++ b/docs/jdocs/neureka/backend/main/implementations/fun/api/CPUBiFun.html @@ -1,214 +1,340 @@ - + + - -CPUBiFun (neureka 1.0.0 API) - - - - + +CPUBiFun (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Interface CPUBiFun

    +
    neureka.backend.main.implementations.fun.api
    +

    Interface CPUBiFun

    -
    +
    +
    +
      +

    • -
      public interface CPUBiFun
      -
    -
    -
      - -
    • -
      -

      Method Summary

      -
      -
      -
      -
      -
      Modifier and Type
      -
      Method
      -
      Description
      -
      default boolean
      -
      invoke(boolean a, - boolean b)
      -
       
      -
      default byte
      -
      invoke(byte a, - byte b)
      -
       
      -
      default char
      -
      invoke(char a, - char b)
      -
       
      -
      double
      -
      invoke(double a, - double b)
      -
       
      -
      default float
      -
      invoke(float a, - float b)
      -
       
      -
      default int
      -
      invoke(int a, - int b)
      -
       
      -
      default long
      -
      invoke(long a, - long b)
      -
       
      -
      default short
      -
      invoke(short a, - short b)
      -
       
      -
      default Object
      -
      invoke(Object a, - Object b)
      -
       
      -
      -
      +
      +
      public interface CPUBiFun
      +
    • +
    - +
    +
      +
    • + +
        +
      • + + +

        Method Summary

        + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        All Methods Instance Methods Abstract Methods Default Methods 
        Modifier and TypeMethod and Description
        default booleaninvoke(boolean a, + boolean b) 
        default byteinvoke(byte a, + byte b) 
        default charinvoke(char a, + char b) 
        doubleinvoke(double a, + double b) 
        default floatinvoke(float a, + float b) 
        default intinvoke(int a, + int b) 
        default longinvoke(long a, + long b) 
        default java.lang.Objectinvoke(java.lang.Object a, + java.lang.Object b) 
        default shortinvoke(short a, + short b) 
        +
      • +
    - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Method Details

      -
        -
      • -
        -

        invoke

        -
        double invoke(double a, - double b)
        -
        +
          +
        • + + +

          Method Detail

          + + + +
            +
          • +

            invoke

            +
            double invoke(double a,
            +              double b)
            +
          • +
          + + + +
            +
          • +

            invoke

            +
            default float invoke(float a,
            +                     float b)
          • -
          • -
            -

            invoke

            -
            default float invoke(float a, - float b)
            -
            +
          + + + +
            +
          • +

            invoke

            +
            default int invoke(int a,
            +                   int b)
          • -
          • -
            -

            invoke

            -
            default int invoke(int a, - int b)
            -
            +
          + + + +
            +
          • +

            invoke

            +
            default long invoke(long a,
            +                    long b)
          • -
          • -
            -

            invoke

            -
            default long invoke(long a, - long b)
            -
            +
          + + + +
            +
          • +

            invoke

            +
            default byte invoke(byte a,
            +                    byte b)
          • -
          • -
            -

            invoke

            -
            default byte invoke(byte a, - byte b)
            -
            +
          + + + +
            +
          • +

            invoke

            +
            default short invoke(short a,
            +                     short b)
          • -
          • -
            -

            invoke

            -
            default short invoke(short a, - short b)
            -
            +
          + + + +
            +
          • +

            invoke

            +
            default boolean invoke(boolean a,
            +                       boolean b)
          • -
          • -
            -

            invoke

            -
            default boolean invoke(boolean a, - boolean b)
            -
            +
          + + + +
            +
          • +

            invoke

            +
            default char invoke(char a,
            +                    char b)
          • -
          • -
            -

            invoke

            -
            default char invoke(char a, - char b)
            -
            +
          + + + +
            +
          • +

            invoke

            +
            default java.lang.Object invoke(java.lang.Object a,
            +                                java.lang.Object b)
          • -
          • -
            -

            invoke

            -
            default Object invoke(Object a, - Object b)
            -
            +
        -
    - +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/implementations/fun/api/CPUFun.html b/docs/jdocs/neureka/backend/main/implementations/fun/api/CPUFun.html index b0b43ff25..6bc6e41cd 100644 --- a/docs/jdocs/neureka/backend/main/implementations/fun/api/CPUFun.html +++ b/docs/jdocs/neureka/backend/main/implementations/fun/api/CPUFun.html @@ -1,196 +1,322 @@ - + + - -CPUFun (neureka 1.0.0 API) - - - - + +CPUFun (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Interface CPUFun

    +
    neureka.backend.main.implementations.fun.api
    +

    Interface CPUFun

    -
    +
    +
    +
      +

    • -
      public interface CPUFun
      -
    -
    -
      - -
    • -
      -

      Method Summary

      -
      -
      -
      -
      -
      Modifier and Type
      -
      Method
      -
      Description
      -
      default boolean
      -
      invoke(boolean x)
      -
       
      -
      default byte
      -
      invoke(byte x)
      -
       
      -
      default char
      -
      invoke(char x)
      -
       
      -
      double
      -
      invoke(double x)
      -
       
      -
      default float
      -
      invoke(float x)
      -
       
      -
      default int
      -
      invoke(int x)
      -
       
      -
      default long
      -
      invoke(long x)
      -
       
      -
      default short
      -
      invoke(short x)
      -
       
      -
      default Object
      - -
       
      -
      -
      +
      +
      public interface CPUFun
      +
    • +
    - +
    + - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Method Details

      -
        -
      • -
        -

        invoke

        -
        double invoke(double x)
        -
        +
          +
        • + + +

          Method Detail

          + + + +
            +
          • +

            invoke

            +
            double invoke(double x)
            +
          • +
          + + + +
            +
          • +

            invoke

            +
            default float invoke(float x)
          • -
          • -
            -

            invoke

            -
            default float invoke(float x)
            -
            +
          + + + +
            +
          • +

            invoke

            +
            default int invoke(int x)
          • -
          • -
            -

            invoke

            -
            default int invoke(int x)
            -
            +
          + + + +
            +
          • +

            invoke

            +
            default long invoke(long x)
          • -
          • -
            -

            invoke

            -
            default long invoke(long x)
            -
            +
          + + + +
            +
          • +

            invoke

            +
            default byte invoke(byte x)
          • -
          • -
            -

            invoke

            -
            default byte invoke(byte x)
            -
            +
          + + + +
            +
          • +

            invoke

            +
            default short invoke(short x)
          • -
          • -
            -

            invoke

            -
            default short invoke(short x)
            -
            +
          + + + +
            +
          • +

            invoke

            +
            default boolean invoke(boolean x)
          • -
          • -
            -

            invoke

            -
            default boolean invoke(boolean x)
            -
            +
          + + + +
            +
          • +

            invoke

            +
            default char invoke(char x)
          • -
          • -
            -

            invoke

            -
            default char invoke(char x)
            -
            +
          + + + +
            +
          • +

            invoke

            +
            default java.lang.Object invoke(java.lang.Object x)
          • -
          • -
            -

            invoke

            -
            default Object invoke(Object x)
            -
            +
        -
    - +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/implementations/fun/api/ScalarFun.html b/docs/jdocs/neureka/backend/main/implementations/fun/api/ScalarFun.html index 13d87e76a..e275cb768 100644 --- a/docs/jdocs/neureka/backend/main/implementations/fun/api/ScalarFun.html +++ b/docs/jdocs/neureka/backend/main/implementations/fun/api/ScalarFun.html @@ -1,402 +1,611 @@ - + + - -ScalarFun (neureka 1.0.0 API) - - - - + +ScalarFun (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Interface ScalarFun

    +
    neureka.backend.main.implementations.fun.api
    +

    Interface ScalarFun

    -
    -
    +
    -
    -
    - +
    + - -
    -
      +
    +
    + - +
      +
    • + + +

      Method Detail

      + + + +
        +
      • +

        id

        +
        java.lang.String id()
        +
      • +
      + + + +
        +
      • +

        activationCode

        +
        java.lang.String activationCode()
        +
      • +
      + + + +
        +
      • +

        derivationCode

        +
        java.lang.String derivationCode()
        +
      • +
      + + + +
        +
      • +

        calculate

        +
        default double calculate(double input,
        +                         boolean derive)
        +
      • +
      + + + +
        +
      • +

        getActivation

        +
        CPUFun getActivation()
        +
      • +
      + + + +
        +
      • +

        getDerivative

        +
        CPUFun getDerivative()
        +
      • +
      +
    • +
    + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/implementations/fun/api/package-frame.html b/docs/jdocs/neureka/backend/main/implementations/fun/api/package-frame.html new file mode 100644 index 000000000..0c7151c56 --- /dev/null +++ b/docs/jdocs/neureka/backend/main/implementations/fun/api/package-frame.html @@ -0,0 +1,21 @@ + + + + + +neureka.backend.main.implementations.fun.api (neureka 1.0.1 API) + + + + +

    neureka.backend.main.implementations.fun.api

    +
    +

    Interfaces

    + +
    + + diff --git a/docs/jdocs/neureka/backend/main/implementations/fun/api/package-summary.html b/docs/jdocs/neureka/backend/main/implementations/fun/api/package-summary.html index 62d7158f3..350480b16 100644 --- a/docs/jdocs/neureka/backend/main/implementations/fun/api/package-summary.html +++ b/docs/jdocs/neureka/backend/main/implementations/fun/api/package-summary.html @@ -1,97 +1,147 @@ - + + - -neureka.backend.main.implementations.fun.api (neureka 1.0.0 API) - - - - + +neureka.backend.main.implementations.fun.api (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    -

    Package neureka.backend.main.implementations.fun.api

    -
    -
    -
    package neureka.backend.main.implementations.fun.api
    -
    -
    -
    + + diff --git a/docs/jdocs/neureka/backend/main/implementations/fun/api/package-tree.html b/docs/jdocs/neureka/backend/main/implementations/fun/api/package-tree.html index 114054eea..28bf8671d 100644 --- a/docs/jdocs/neureka/backend/main/implementations/fun/api/package-tree.html +++ b/docs/jdocs/neureka/backend/main/implementations/fun/api/package-tree.html @@ -1,69 +1,132 @@ - + + - -neureka.backend.main.implementations.fun.api Class Hierarchy (neureka 1.0.0 API) - - - - + +neureka.backend.main.implementations.fun.api Class Hierarchy (neureka 1.0.1 API) - - - - - - + + -
    - -
    -

    Hierarchy For Package neureka.backend.main.implementations.fun.api

    -Package Hierarchies: +Package Hierarchies:
    -
    +

    Interface Hierarchy

      -
    • neureka.backend.main.implementations.fun.api.CPUBiFun
    • -
    • neureka.backend.main.implementations.fun.api.CPUFun
    • -
    • neureka.backend.main.implementations.fun.api.ScalarFun
    • +
    • neureka.backend.main.implementations.fun.api.CPUBiFun
    • +
    • neureka.backend.main.implementations.fun.api.CPUFun
    • +
    • neureka.backend.main.implementations.fun.api.ScalarFun
    -
    -
    + + + + diff --git a/docs/jdocs/neureka/backend/main/implementations/fun/package-frame.html b/docs/jdocs/neureka/backend/main/implementations/fun/package-frame.html new file mode 100644 index 000000000..20d6df4d1 --- /dev/null +++ b/docs/jdocs/neureka/backend/main/implementations/fun/package-frame.html @@ -0,0 +1,41 @@ + + + + + +neureka.backend.main.implementations.fun (neureka 1.0.1 API) + + + + +

    neureka.backend.main.implementations.fun

    + + + diff --git a/docs/jdocs/neureka/backend/main/implementations/fun/package-summary.html b/docs/jdocs/neureka/backend/main/implementations/fun/package-summary.html index e406ff35a..06962817c 100644 --- a/docs/jdocs/neureka/backend/main/implementations/fun/package-summary.html +++ b/docs/jdocs/neureka/backend/main/implementations/fun/package-summary.html @@ -1,168 +1,252 @@ - + + - -neureka.backend.main.implementations.fun (neureka 1.0.0 API) - - - - + +neureka.backend.main.implementations.fun (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    -

    Package neureka.backend.main.implementations.fun

    -
    -
    -
    package neureka.backend.main.implementations.fun
    -
    -
    -
    + + + + diff --git a/docs/jdocs/neureka/backend/main/implementations/fun/package-tree.html b/docs/jdocs/neureka/backend/main/implementations/fun/package-tree.html index f57e211ea..a5789d48b 100644 --- a/docs/jdocs/neureka/backend/main/implementations/fun/package-tree.html +++ b/docs/jdocs/neureka/backend/main/implementations/fun/package-tree.html @@ -1,93 +1,156 @@ - + + - -neureka.backend.main.implementations.fun Class Hierarchy (neureka 1.0.0 API) - - - - + +neureka.backend.main.implementations.fun Class Hierarchy (neureka 1.0.1 API) - - - - - - + + -
    - -
    -

    Hierarchy For Package neureka.backend.main.implementations.fun

    -Package Hierarchies: +Package Hierarchies:
    -
    +

    Class Hierarchy

      -
    • java.lang.Object +
    • java.lang.Object
        -
      • neureka.backend.main.implementations.fun.ScalarAbsolute (implements neureka.backend.main.implementations.fun.api.ScalarFun)
      • -
      • neureka.backend.main.implementations.fun.ScalarCbrt (implements neureka.backend.main.implementations.fun.api.ScalarFun)
      • -
      • neureka.backend.main.implementations.fun.ScalarCosinus (implements neureka.backend.main.implementations.fun.api.ScalarFun)
      • -
      • neureka.backend.main.implementations.fun.ScalarExp (implements neureka.backend.main.implementations.fun.api.ScalarFun)
      • -
      • neureka.backend.main.implementations.fun.ScalarGaSU (implements neureka.backend.main.implementations.fun.api.ScalarFun)
      • -
      • neureka.backend.main.implementations.fun.ScalarGaTU (implements neureka.backend.main.implementations.fun.api.ScalarFun)
      • -
      • neureka.backend.main.implementations.fun.ScalarGaussian (implements neureka.backend.main.implementations.fun.api.ScalarFun)
      • -
      • neureka.backend.main.implementations.fun.ScalarGaussianFast (implements neureka.backend.main.implementations.fun.api.ScalarFun)
      • -
      • neureka.backend.main.implementations.fun.ScalarGeLU (implements neureka.backend.main.implementations.fun.api.ScalarFun)
      • -
      • neureka.backend.main.implementations.fun.ScalarIdentity (implements neureka.backend.main.implementations.fun.api.ScalarFun)
      • -
      • neureka.backend.main.implementations.fun.ScalarLog10 (implements neureka.backend.main.implementations.fun.api.ScalarFun)
      • -
      • neureka.backend.main.implementations.fun.ScalarLogarithm (implements neureka.backend.main.implementations.fun.api.ScalarFun)
      • -
      • neureka.backend.main.implementations.fun.ScalarQuadratic (implements neureka.backend.main.implementations.fun.api.ScalarFun)
      • -
      • neureka.backend.main.implementations.fun.ScalarReLU (implements neureka.backend.main.implementations.fun.api.ScalarFun)
      • -
      • neureka.backend.main.implementations.fun.ScalarSeLU (implements neureka.backend.main.implementations.fun.api.ScalarFun)
      • -
      • neureka.backend.main.implementations.fun.ScalarSigmoid (implements neureka.backend.main.implementations.fun.api.ScalarFun)
      • -
      • neureka.backend.main.implementations.fun.ScalarSiLU (implements neureka.backend.main.implementations.fun.api.ScalarFun)
      • -
      • neureka.backend.main.implementations.fun.ScalarSinus (implements neureka.backend.main.implementations.fun.api.ScalarFun)
      • -
      • neureka.backend.main.implementations.fun.ScalarSoftplus (implements neureka.backend.main.implementations.fun.api.ScalarFun)
      • -
      • neureka.backend.main.implementations.fun.ScalarSoftsign (implements neureka.backend.main.implementations.fun.api.ScalarFun)
      • -
      • neureka.backend.main.implementations.fun.ScalarSqrt (implements neureka.backend.main.implementations.fun.api.ScalarFun)
      • -
      • neureka.backend.main.implementations.fun.ScalarTanh (implements neureka.backend.main.implementations.fun.api.ScalarFun)
      • -
      • neureka.backend.main.implementations.fun.ScalarTanhFast (implements neureka.backend.main.implementations.fun.api.ScalarFun)
      • +
      • neureka.backend.main.implementations.fun.ScalarAbsolute (implements neureka.backend.main.implementations.fun.api.ScalarFun)
      • +
      • neureka.backend.main.implementations.fun.ScalarCbrt (implements neureka.backend.main.implementations.fun.api.ScalarFun)
      • +
      • neureka.backend.main.implementations.fun.ScalarCosinus (implements neureka.backend.main.implementations.fun.api.ScalarFun)
      • +
      • neureka.backend.main.implementations.fun.ScalarExp (implements neureka.backend.main.implementations.fun.api.ScalarFun)
      • +
      • neureka.backend.main.implementations.fun.ScalarGaSU (implements neureka.backend.main.implementations.fun.api.ScalarFun)
      • +
      • neureka.backend.main.implementations.fun.ScalarGaTU (implements neureka.backend.main.implementations.fun.api.ScalarFun)
      • +
      • neureka.backend.main.implementations.fun.ScalarGaussian (implements neureka.backend.main.implementations.fun.api.ScalarFun)
      • +
      • neureka.backend.main.implementations.fun.ScalarGaussianFast (implements neureka.backend.main.implementations.fun.api.ScalarFun)
      • +
      • neureka.backend.main.implementations.fun.ScalarGeLU (implements neureka.backend.main.implementations.fun.api.ScalarFun)
      • +
      • neureka.backend.main.implementations.fun.ScalarIdentity (implements neureka.backend.main.implementations.fun.api.ScalarFun)
      • +
      • neureka.backend.main.implementations.fun.ScalarLog10 (implements neureka.backend.main.implementations.fun.api.ScalarFun)
      • +
      • neureka.backend.main.implementations.fun.ScalarLogarithm (implements neureka.backend.main.implementations.fun.api.ScalarFun)
      • +
      • neureka.backend.main.implementations.fun.ScalarQuadratic (implements neureka.backend.main.implementations.fun.api.ScalarFun)
      • +
      • neureka.backend.main.implementations.fun.ScalarReLU (implements neureka.backend.main.implementations.fun.api.ScalarFun)
      • +
      • neureka.backend.main.implementations.fun.ScalarSeLU (implements neureka.backend.main.implementations.fun.api.ScalarFun)
      • +
      • neureka.backend.main.implementations.fun.ScalarSigmoid (implements neureka.backend.main.implementations.fun.api.ScalarFun)
      • +
      • neureka.backend.main.implementations.fun.ScalarSiLU (implements neureka.backend.main.implementations.fun.api.ScalarFun)
      • +
      • neureka.backend.main.implementations.fun.ScalarSinus (implements neureka.backend.main.implementations.fun.api.ScalarFun)
      • +
      • neureka.backend.main.implementations.fun.ScalarSoftplus (implements neureka.backend.main.implementations.fun.api.ScalarFun)
      • +
      • neureka.backend.main.implementations.fun.ScalarSoftsign (implements neureka.backend.main.implementations.fun.api.ScalarFun)
      • +
      • neureka.backend.main.implementations.fun.ScalarSqrt (implements neureka.backend.main.implementations.fun.api.ScalarFun)
      • +
      • neureka.backend.main.implementations.fun.ScalarTanh (implements neureka.backend.main.implementations.fun.api.ScalarFun)
      • +
      • neureka.backend.main.implementations.fun.ScalarTanhFast (implements neureka.backend.main.implementations.fun.api.ScalarFun)
    -
    -
    + + + + diff --git a/docs/jdocs/neureka/backend/main/implementations/linear/CLDot.html b/docs/jdocs/neureka/backend/main/implementations/linear/CLDot.html index 0b01b5a42..176a516e0 100644 --- a/docs/jdocs/neureka/backend/main/implementations/linear/CLDot.html +++ b/docs/jdocs/neureka/backend/main/implementations/linear/CLDot.html @@ -1,181 +1,291 @@ - + + - -CLDot (neureka 1.0.0 API) - - - - + +CLDot (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class CLDot

    +
    neureka.backend.main.implementations.linear
    +

    Class CLDot

    -
    java.lang.Object -
    neureka.backend.main.implementations.linear.CLDot
    -
    -
    -
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.backend.main.implementations.linear.CLDot
      • +
      +
    • +
    +
    +
    -
    -
      - -
    • -
      -

      Constructor Summary

      -
      Constructors
      -
      -
      Constructor
      -
      Description
      - -
       
      +
    • +
    - +
    +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        CLDot

        -
        public CLDot()
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            CLDot

            +
            public CLDot()
          -
    • +
    -
  • -
    -

    Method Details

    -
      -
    • -
      -

      run

      -
      public Tensor<?> run(ExecutionCall<OpenCLDevice> call)
      -
      Description copied from interface: ImplementationFor
      +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          run

          +
          public Tensor<?> run(ExecutionCall<OpenCLDevice> call)
          +
          Description copied from interface: ImplementationFor
          This method is the entrypoint for a concrete implementation of the algorithm to which instances of this interface belong and the device on which this is implemented. One has to keep in mind that the implementation details with respect to the target device are specific to the methods of the "TargetDevice" type on which this call should run...
          -
          -
          Specified by:
          -
          run in interface ImplementationFor<OpenCLDevice>
          -
          Parameters:
          +
          +
          Specified by:
          +
          run in interface ImplementationFor<OpenCLDevice>
          +
          Parameters:
          call - The call which ought to be executed on this implementation.
          -
    -
  • - + + +
    +
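The CLDot hunk above documents the device entrypoint rather than the math; elsewhere in this diff the class is summarised as performing a dot product of two vectors via OpenCL. The plain-Java sketch below only illustrates that reduction and makes no claims about the actual OpenCL kernel or the ExecutionCall API.

// Plain-Java illustration of the reduction CLDot offloads to OpenCL:
// dot(a, b) = sum over i of a[i] * b[i] for two vectors of equal length.
public class DotSketch {
    static double dot(double[] a, double[] b) {
        if (a.length != b.length)
            throw new IllegalArgumentException("vector lengths differ");
        double sum = 0.0;
        for (int i = 0; i < a.length; i++)
            sum += a[i] * b[i]; // accumulate the element-wise products
        return sum;
    }

    public static void main(String[] args) {
        System.out.println(dot(new double[]{1, 2, 3}, new double[]{4, 5, 6})); // 32.0
    }
}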
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/implementations/linear/CPUDot.html b/docs/jdocs/neureka/backend/main/implementations/linear/CPUDot.html index a84fdeaa7..03ced7a97 100644 --- a/docs/jdocs/neureka/backend/main/implementations/linear/CPUDot.html +++ b/docs/jdocs/neureka/backend/main/implementations/linear/CPUDot.html @@ -1,180 +1,290 @@ - + + - -CPUDot (neureka 1.0.0 API) - - - - + +CPUDot (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class CPUDot

    +
    neureka.backend.main.implementations.linear
    +

    Class CPUDot

    -
    java.lang.Object -
    neureka.backend.main.implementations.linear.CPUDot
    -
    -
    -
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.backend.main.implementations.linear.CPUDot
      • +
      +
    • +
    +
    +
    -
    -
      - -
    • -
      -

      Constructor Summary

      -
      Constructors
      -
      -
      Constructor
      -
      Description
      - -
       
      +
      +
      public class CPUDot
      +extends java.lang.Object
      +implements ImplementationFor<CPU>
      +
    • +
    - +
    +
      +
    • + +
        +
      • + + +

        Constructor Summary

        + + + + + + + + +
        Constructors 
        Constructor and Description
        CPUDot() 
      • +
      -
    • -
      -

      Method Summary

      -
      -
      -
      -
      -
      Modifier and Type
      -
      Method
      -
      Description
      - - -
      + -
      -
      -
        + +
      +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        CPUDot

        -
        public CPUDot()
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            CPUDot

            +
            public CPUDot()
          -
    • +
    -
  • -
    -

    Method Details

    -
      -
    • -
      -

      run

      -
      public Tensor<?> run(ExecutionCall<CPU> call)
      -
      Description copied from interface: ImplementationFor
      +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          run

          +
          public Tensor<?> run(ExecutionCall<CPU> call)
          +
          Description copied from interface: ImplementationFor
          This method is the entrypoint for a concrete implementation of the algorithm to which instances of this interface belong and the device on which this is implemented. One has to keep in mind that the implementation details with respect to the target device are specific to the methods of the "TargetDevice" type on which this call should run...
          -
          -
          Specified by:
          -
          run in interface ImplementationFor<CPU>
          -
          Parameters:
          +
          +
          Specified by:
          +
          run in interface ImplementationFor<CPU>
          +
          Parameters:
          call - The call which ought to be executed on this implementation.
          -
    -
  • - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/implementations/linear/package-frame.html b/docs/jdocs/neureka/backend/main/implementations/linear/package-frame.html new file mode 100644 index 000000000..5ac09b912 --- /dev/null +++ b/docs/jdocs/neureka/backend/main/implementations/linear/package-frame.html @@ -0,0 +1,20 @@ + + + + + +neureka.backend.main.implementations.linear (neureka 1.0.1 API) + + + + +

    neureka.backend.main.implementations.linear

    +
    +

    Classes

    + +
    + + diff --git a/docs/jdocs/neureka/backend/main/implementations/linear/package-summary.html b/docs/jdocs/neureka/backend/main/implementations/linear/package-summary.html index c2cf00ad2..ee818146e 100644 --- a/docs/jdocs/neureka/backend/main/implementations/linear/package-summary.html +++ b/docs/jdocs/neureka/backend/main/implementations/linear/package-summary.html @@ -1,101 +1,145 @@ - + + - -neureka.backend.main.implementations.linear (neureka 1.0.0 API) - - - - + +neureka.backend.main.implementations.linear (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    -

    Package neureka.backend.main.implementations.linear

    +

    Package neureka.backend.main.implementations.linear

    -
    -
    package neureka.backend.main.implementations.linear
    -
    -
      -
    • - -
    • -
    • -
      -
      Classes
      -
      -
      Class
      -
      Description
      - -
      +
      +
        +
      • + + + + + + + + + + + + + + + + +
        Class Summary 
        Class    Description
        CLDot
        Performs a dot product on two vectors using OpenCL.
        - - -
         
        - - +
        CPUDot 
      -
    -
    + + + + diff --git a/docs/jdocs/neureka/backend/main/implementations/linear/package-tree.html b/docs/jdocs/neureka/backend/main/implementations/linear/package-tree.html index 2934fe07f..bf47bdfd8 100644 --- a/docs/jdocs/neureka/backend/main/implementations/linear/package-tree.html +++ b/docs/jdocs/neureka/backend/main/implementations/linear/package-tree.html @@ -1,72 +1,135 @@ - + + - -neureka.backend.main.implementations.linear Class Hierarchy (neureka 1.0.0 API) - - - - + +neureka.backend.main.implementations.linear Class Hierarchy (neureka 1.0.1 API) - - - - - - + + -
    - -
    -

    Hierarchy For Package neureka.backend.main.implementations.linear

    Package Hierarchies:
    -
    +

    Class Hierarchy

    -
    -
    + + + + diff --git a/docs/jdocs/neureka/backend/main/implementations/matmul/CLMatMul.html b/docs/jdocs/neureka/backend/main/implementations/matmul/CLMatMul.html index 446c910e0..ac7a4113b 100644 --- a/docs/jdocs/neureka/backend/main/implementations/matmul/CLMatMul.html +++ b/docs/jdocs/neureka/backend/main/implementations/matmul/CLMatMul.html @@ -1,143 +1,267 @@ - + + - -CLMatMul (neureka 1.0.0 API) - - - - + +CLMatMul (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class CLMatMul

    -
    - -
    -
    +
    -
    -
    - +
    + - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        CLMatMul

        -
        public CLMatMul()
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            CLMatMul

            +
            public CLMatMul()
          -
    - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/implementations/matmul/CPUMatMul.html b/docs/jdocs/neureka/backend/main/implementations/matmul/CPUMatMul.html index fec11c2d6..cf9663ad6 100644 --- a/docs/jdocs/neureka/backend/main/implementations/matmul/CPUMatMul.html +++ b/docs/jdocs/neureka/backend/main/implementations/matmul/CPUMatMul.html @@ -1,265 +1,391 @@ - + + - -CPUMatMul (neureka 1.0.0 API) - - - - + +CPUMatMul (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class CPUMatMul

    -
    -
    java.lang.Object -
    neureka.backend.main.implementations.matmul.CPUMatMul
    +
    neureka.backend.main.implementations.matmul
    +

    Class CPUMatMul

    -
    -
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.backend.main.implementations.matmul.CPUMatMul
      • +
      +
    • +
    +
    +
    -
    -
      - -
    • -
      -

      Constructor Summary

      -
      Constructors
      -
      -
      Constructor
      -
      Description
      - -
       
      +
    • +
    - +
    +
      +
    • + +
        +
      • + + +

        Constructor Summary

        + + + + + + + + +
        Constructors 
        Constructor and Description
        CPUMatMul() 
      • +
      -
    • -
      -

      Method Summary

      -
      -
      -
      -
      -
      Modifier and Type
      -
      Method
      -
      Description
      -
      static void
      -
      execute(boolean rowMajor, double[] A, double[] B, double[] C, int aRows, int aCols, int bCols)
      -
       
      -
      static void
      -
      execute(boolean rowMajor, float[] A, float[] B, float[] C, int aRows, int aCols, int bCols)
      -
       
      -
      static void
      -
      execute(boolean rowMajor, int[] A, int[] B, int[] C, int aRows, int aCols, int bCols)
      -
       
      -
      static void
      -
      execute(boolean rowMajor, long[] A, long[] B, long[] C, int aRows, int aCols, int bCols)
      -
       
      - - -
      +
        +
      • + + +

        Method Summary

        + + + + + + + + + + + + + + + + + + + + + + + + + + +
        All Methods Static Methods Instance Methods Concrete Methods 
        Modifier and Type    Method and Description
        static void    execute(boolean rowMajor, double[] A, double[] B, double[] C, int aRows, int aCols, int bCols)
        static void    execute(boolean rowMajor, float[] A, float[] B, float[] C, int aRows, int aCols, int bCols)
        static void    execute(boolean rowMajor, int[] A, int[] B, int[] C, int aRows, int aCols, int bCols)
        static void    execute(boolean rowMajor, long[] A, long[] B, long[] C, int aRows, int aCols, int bCols)
        Tensor<?>    run(ExecutionCall<CPU> call)
        This method is the entrypoint for a concrete implementation of the algorithm to which instances of this interface belong and the device on which this is implemented.
        - - - - -
        -

        Methods inherited from class java.lang.Object

        -clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
        - +
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
        • +
      -
      -
      -
        + +
      +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        CPUMatMul

        -
        public CPUMatMul()
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            CPUMatMul

            +
            public CPUMatMul()
          -
    • +
    -
  • -
    -

    Method Details

    -
      -
    • -
      -

      run

      -
      public Tensor<?> run(ExecutionCall<CPU> call)
      -
      Description copied from interface: ImplementationFor
      +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          run

          +
          public Tensor<?> run(ExecutionCall<CPU> call)
          +
          Description copied from interface: ImplementationFor
          This method is the entrypoint for a concrete implementation of the algorithm to which instances of this interface belong and the device on which this is implemented. One has to keep in mind that the implementation details with respect to the target device are specific to the methods of the "TargetDevice" type on which this call should run...
          -
          -
          Specified by:
          -
          run in interface ImplementationFor<CPU>
          -
          Parameters:
          +
          +
          Specified by:
          +
          run in interface ImplementationFor<CPU>
          +
          Parameters:
          call - The call which ought to be executed on this implementation.
          -
    • -
    • -
      -

      execute

      -
      public static void execute(boolean rowMajor, double[] A, double[] B, double[] C, int aRows, int aCols, int bCols)
      -
      +
    + + + +
      +
    • +

      execute

      +
      public static void execute(boolean rowMajor,
      +                           double[] A,
      +                           double[] B,
      +                           double[] C,
      +                           int aRows,
      +                           int aCols,
      +                           int bCols)
    • -
    • -
      -

      execute

      -
      public static void execute(boolean rowMajor, float[] A, float[] B, float[] C, int aRows, int aCols, int bCols)
      -
      +
    + + + +
      +
    • +

      execute

      +
      public static void execute(boolean rowMajor,
      +                           float[] A,
      +                           float[] B,
      +                           float[] C,
      +                           int aRows,
      +                           int aCols,
      +                           int bCols)
    • -
    • -
      -

      execute

      -
      public static void execute(boolean rowMajor, long[] A, long[] B, long[] C, int aRows, int aCols, int bCols)
      -
      +
    + + + +
      +
    • +

      execute

      +
      public static void execute(boolean rowMajor,
      +                           long[] A,
      +                           long[] B,
      +                           long[] C,
      +                           int aRows,
      +                           int aCols,
      +                           int bCols)
    • -
    • -
      -

      execute

      -
      public static void execute(boolean rowMajor, int[] A, int[] B, int[] C, int aRows, int aCols, int bCols)
      -
      +
    + + + +
      +
    • +

      execute

      +
      public static void execute(boolean rowMajor,
      +                           int[] A,
      +                           int[] B,
      +                           int[] C,
      +                           int aRows,
      +                           int aCols,
      +                           int bCols)
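      As a concrete illustration of the static execute helpers above, the following hedged sketch multiplies a 2x3 matrix by a 3x2 matrix into a 2x2 result buffer. Reading aRows/aCols/bCols as the dimensions of A and B (row-major when rowMajor is true) is inferred from the parameter names and is an assumption; note also that the package documentation marks CPUMatMul as library internal, so this only illustrates the signature.

          import neureka.backend.main.implementations.matmul.CPUMatMul;

          public class MatMulSketch {
              public static void main(String[] args) {
                  float[] A = { 1, 2, 3,
                                4, 5, 6 };          // assumed 2 x 3, row-major
                  float[] B = { 7,  8,
                                9, 10,
                               11, 12 };            // assumed 3 x 2, row-major
                  float[] C = new float[2 * 2];     // result buffer for the 2 x 2 product
                  CPUMatMul.execute(true, A, B, C, 2, 3, 2);
                  // C should now hold A * B under the row-major interpretation assumed above.
              }
          }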
      +
    • +
  • - - +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/implementations/matmul/package-frame.html b/docs/jdocs/neureka/backend/main/implementations/matmul/package-frame.html new file mode 100644 index 000000000..ec50456c7 --- /dev/null +++ b/docs/jdocs/neureka/backend/main/implementations/matmul/package-frame.html @@ -0,0 +1,20 @@ + + + + + +neureka.backend.main.implementations.matmul (neureka 1.0.1 API) + + + + +

    neureka.backend.main.implementations.matmul

    +
    +

    Classes

    + +
    + + diff --git a/docs/jdocs/neureka/backend/main/implementations/matmul/package-summary.html b/docs/jdocs/neureka/backend/main/implementations/matmul/package-summary.html index ea5b26ff2..e3344fd91 100644 --- a/docs/jdocs/neureka/backend/main/implementations/matmul/package-summary.html +++ b/docs/jdocs/neureka/backend/main/implementations/matmul/package-summary.html @@ -1,101 +1,145 @@ - + + - -neureka.backend.main.implementations.matmul (neureka 1.0.0 API) - - - - + +neureka.backend.main.implementations.matmul (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    -

    Package neureka.backend.main.implementations.matmul

    +

    Package neureka.backend.main.implementations.matmul

    -
    -
    package neureka.backend.main.implementations.matmul
    -
    -
      -
    • - -
    • -
    • -
      -
      Classes
      -
      -
      Class
      -
      Description
      - -
       
      - -
      +
      +
        +
      • + + + + + + + + + + + + + + + + +
        Class Summary 
        Class    Description
        CLMatMul 
        CPUMatMul
        This is a library internal class, do not depend on this.
        - - - +
      -
    -
    + + + + diff --git a/docs/jdocs/neureka/backend/main/implementations/matmul/package-tree.html b/docs/jdocs/neureka/backend/main/implementations/matmul/package-tree.html index 46ad6aecc..489a34f29 100644 --- a/docs/jdocs/neureka/backend/main/implementations/matmul/package-tree.html +++ b/docs/jdocs/neureka/backend/main/implementations/matmul/package-tree.html @@ -1,84 +1,147 @@ - + + - -neureka.backend.main.implementations.matmul Class Hierarchy (neureka 1.0.0 API) - - - - + +neureka.backend.main.implementations.matmul Class Hierarchy (neureka 1.0.1 API) - - - - - - + + -
    - -
    -

    Hierarchy For Package neureka.backend.main.implementations.matmul

    Package Hierarchies:
    -
    +

    Class Hierarchy

    -
    -
    + + + + diff --git a/docs/jdocs/neureka/backend/main/implementations/package-frame.html b/docs/jdocs/neureka/backend/main/implementations/package-frame.html new file mode 100644 index 000000000..4f4916f55 --- /dev/null +++ b/docs/jdocs/neureka/backend/main/implementations/package-frame.html @@ -0,0 +1,26 @@ + + + + + +neureka.backend.main.implementations (neureka 1.0.1 API) + + + + +

    neureka.backend.main.implementations

    + + + diff --git a/docs/jdocs/neureka/backend/main/implementations/package-summary.html b/docs/jdocs/neureka/backend/main/implementations/package-summary.html index b11f033c5..360021b67 100644 --- a/docs/jdocs/neureka/backend/main/implementations/package-summary.html +++ b/docs/jdocs/neureka/backend/main/implementations/package-summary.html @@ -1,135 +1,190 @@ - + + - -neureka.backend.main.implementations (neureka 1.0.0 API) - - - - + +neureka.backend.main.implementations (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    -

    Package neureka.backend.main.implementations

    -
    -
    -
    package neureka.backend.main.implementations
    -
    +

    Package neureka.backend.main.implementations

    +
    Everything in this package should be considered library-private! DO NOT DEPEND ON CLASSES INSIDE THIS PACKAGE! Code inside this package or any sub-packages might change frequently...
    -
    -
    -
    -
    + + + +

    Package neureka.backend.main.implementations Description

    +
    Everything in this package should be considered library-private! + DO NOT DEPEND ON CLASSES INSIDE THIS PACKAGE! + Code inside this package or any sub-packages might change frequently...
    + + + + diff --git a/docs/jdocs/neureka/backend/main/implementations/package-tree.html b/docs/jdocs/neureka/backend/main/implementations/package-tree.html index 1feba952b..a7121f0b3 100644 --- a/docs/jdocs/neureka/backend/main/implementations/package-tree.html +++ b/docs/jdocs/neureka/backend/main/implementations/package-tree.html @@ -1,87 +1,148 @@ - + + - -neureka.backend.main.implementations Class Hierarchy (neureka 1.0.0 API) - - - - + +neureka.backend.main.implementations Class Hierarchy (neureka 1.0.1 API) - - - - - - + + -
    - -
    -

    Hierarchy For Package neureka.backend.main.implementations

    Package Hierarchies:
    -
    +

    Class Hierarchy

    -
    -

    Interface Hierarchy

    -
    -
    + + + + diff --git a/docs/jdocs/neureka/backend/main/implementations/scalar/CLScalarFunction.html b/docs/jdocs/neureka/backend/main/implementations/scalar/CLScalarFunction.html index 2f4676afe..863d2bfec 100644 --- a/docs/jdocs/neureka/backend/main/implementations/scalar/CLScalarFunction.html +++ b/docs/jdocs/neureka/backend/main/implementations/scalar/CLScalarFunction.html @@ -1,180 +1,290 @@ - + + - -CLScalarFunction (neureka 1.0.0 API) - - - - + +CLScalarFunction (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class CLScalarFunction

    +
    neureka.backend.main.implementations.scalar
    +

    Class CLScalarFunction

    -
    java.lang.Object -
    neureka.backend.main.implementations.scalar.CLScalarFunction
    -
    -
    -
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.backend.main.implementations.scalar.CLScalarFunction
      • +
      +
    • +
    +
    +
    -
    -
    - +
    +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        CLScalarFunction

        -
        public CLScalarFunction(ScalarFun fun)
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            CLScalarFunction

            +
            public CLScalarFunction(ScalarFun fun)
          -
    • +
    -
  • -
    -

    Method Details

    -
      -
    • -
      -

      run

      -
      public Tensor<?> run(ExecutionCall<OpenCLDevice> call)
      -
      Description copied from interface: ImplementationFor
      +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          run

          +
          public Tensor<?> run(ExecutionCall<OpenCLDevice> call)
          +
          Description copied from interface: ImplementationFor
          This method is the entrypoint for a concrete implementation of the algorithm to which instances of this interface belong and the device on which this is implemented. One has to keep in mind that the implementation details with respect to the target device are specific to the methods of the "TargetDevice" type on which this call should run...
          -
          -
          Specified by:
          -
          run in interface ImplementationFor<OpenCLDevice>
          -
          Parameters:
          +
          +
          Specified by:
          +
          run in interface ImplementationFor<OpenCLDevice>
          +
          Parameters:
          call - The call which ought to be executed on this implementation.
          -
    -
  • - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/implementations/scalar/CPUScalarBroadcastFunction.html b/docs/jdocs/neureka/backend/main/implementations/scalar/CPUScalarBroadcastFunction.html index edea5c17c..f663eba28 100644 --- a/docs/jdocs/neureka/backend/main/implementations/scalar/CPUScalarBroadcastFunction.html +++ b/docs/jdocs/neureka/backend/main/implementations/scalar/CPUScalarBroadcastFunction.html @@ -1,180 +1,290 @@ - + + - -CPUScalarBroadcastFunction (neureka 1.0.0 API) - - - - + +CPUScalarBroadcastFunction (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class CPUScalarBroadcastFunction

    +
    neureka.backend.main.implementations.scalar
    +

    Class CPUScalarBroadcastFunction

    -
    java.lang.Object -
    neureka.backend.main.implementations.scalar.CPUScalarBroadcastFunction
    -
    -
    -
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.backend.main.implementations.scalar.CPUScalarBroadcastFunction
      • +
      +
    • +
    +
    +
    -
    -
    - +
    +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        CPUScalarBroadcastFunction

        -
        public CPUScalarBroadcastFunction(ScalarFun fun)
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            CPUScalarBroadcastFunction

            +
            public CPUScalarBroadcastFunction(ScalarFun fun)
          -
    • +
    -
  • -
    -

    Method Details

    -
      -
    • -
      -

      run

      -
      public Tensor<?> run(ExecutionCall<CPU> call)
      -
      Description copied from interface: ImplementationFor
      +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          run

          +
          public Tensor<?> run(ExecutionCall<CPU> call)
          +
          Description copied from interface: ImplementationFor
          This method is the entrypoint for a concrete implementation of the algorithm to which instances of this interface belong and the device on which this is implemented. One has to keep in mind that the implementation details with respect to the target device are specific to the methods of the "TargetDevice" type on which this call should run...
          -
          -
          Specified by:
          -
          run in interface ImplementationFor<CPU>
          -
          Parameters:
          +
          +
          Specified by:
          +
          run in interface ImplementationFor<CPU>
          +
          Parameters:
          call - The call which ought to be executed on this implementation.
          -
    -
  • - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/implementations/scalar/CPUScalarFunction.html b/docs/jdocs/neureka/backend/main/implementations/scalar/CPUScalarFunction.html index a22cd99b9..5c5546b18 100644 --- a/docs/jdocs/neureka/backend/main/implementations/scalar/CPUScalarFunction.html +++ b/docs/jdocs/neureka/backend/main/implementations/scalar/CPUScalarFunction.html @@ -1,180 +1,290 @@ - + + - -CPUScalarFunction (neureka 1.0.0 API) - - - - + +CPUScalarFunction (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class CPUScalarFunction

    +
    neureka.backend.main.implementations.scalar
    +

    Class CPUScalarFunction

    -
    java.lang.Object -
    neureka.backend.main.implementations.scalar.CPUScalarFunction
    -
    -
    -
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.backend.main.implementations.scalar.CPUScalarFunction
      • +
      +
    • +
    +
    +
    -
    -
    - +
    +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        CPUScalarFunction

        -
        public CPUScalarFunction(ScalarFun fun)
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            CPUScalarFunction

            +
            public CPUScalarFunction(ScalarFun fun)
          -
    • +
    -
  • -
    -

    Method Details

    -
      -
    • -
      -

      run

      -
      public Tensor<?> run(ExecutionCall<CPU> call)
      -
      Description copied from interface: ImplementationFor
      +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          run

          +
          public Tensor<?> run(ExecutionCall<CPU> call)
          +
          Description copied from interface: ImplementationFor
          This method is the entrypoint for a concrete implementation of the algorithm to which instances of this interface belong and the device on which this is implemented. One has to keep in mind that the implementation details with respect to the target device are specific to the methods of the "TargetDevice" type on which this call should run...
          -
          -
          Specified by:
          -
          run in interface ImplementationFor<CPU>
          -
          Parameters:
          +
          +
          Specified by:
          +
          run in interface ImplementationFor<CPU>
          +
          Parameters:
          call - The call which ought to be executed on this implementation.
          -
    -
  • - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/implementations/scalar/package-frame.html b/docs/jdocs/neureka/backend/main/implementations/scalar/package-frame.html new file mode 100644 index 000000000..0627b58df --- /dev/null +++ b/docs/jdocs/neureka/backend/main/implementations/scalar/package-frame.html @@ -0,0 +1,21 @@ + + + + + +neureka.backend.main.implementations.scalar (neureka 1.0.1 API) + + + + +

    neureka.backend.main.implementations.scalar

    + + + diff --git a/docs/jdocs/neureka/backend/main/implementations/scalar/package-summary.html b/docs/jdocs/neureka/backend/main/implementations/scalar/package-summary.html index 5320dd33d..e008f3e21 100644 --- a/docs/jdocs/neureka/backend/main/implementations/scalar/package-summary.html +++ b/docs/jdocs/neureka/backend/main/implementations/scalar/package-summary.html @@ -1,101 +1,147 @@ - + + - -neureka.backend.main.implementations.scalar (neureka 1.0.0 API) - - - - + +neureka.backend.main.implementations.scalar (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    -

    Package neureka.backend.main.implementations.scalar

    -
    -
    -
    package neureka.backend.main.implementations.scalar
    -
    -
    -
    + + diff --git a/docs/jdocs/neureka/backend/main/implementations/scalar/package-tree.html b/docs/jdocs/neureka/backend/main/implementations/scalar/package-tree.html index f6716c8b9..db265510c 100644 --- a/docs/jdocs/neureka/backend/main/implementations/scalar/package-tree.html +++ b/docs/jdocs/neureka/backend/main/implementations/scalar/package-tree.html @@ -1,73 +1,136 @@ - + + - -neureka.backend.main.implementations.scalar Class Hierarchy (neureka 1.0.0 API) - - - - + +neureka.backend.main.implementations.scalar Class Hierarchy (neureka 1.0.1 API) - - - - - - + + -
    - -
    -

    Hierarchy For Package neureka.backend.main.implementations.scalar

    Package Hierarchies:
    -
    +

    Class Hierarchy

    -
    -
    + + + + diff --git a/docs/jdocs/neureka/backend/main/internal/FinalExecutor.html b/docs/jdocs/neureka/backend/main/internal/FinalExecutor.html index 5a11c46b3..4e5db67c8 100644 --- a/docs/jdocs/neureka/backend/main/internal/FinalExecutor.html +++ b/docs/jdocs/neureka/backend/main/internal/FinalExecutor.html @@ -1,124 +1,218 @@ - + + - -FinalExecutor (neureka 1.0.0 API) - - - - + +FinalExecutor (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Interface FinalExecutor

    +
    neureka.backend.main.internal
    +

    Interface FinalExecutor

    -
    +
    +
    +
      +

    • -
      public interface FinalExecutor
      -
    -
    -
      - -
    • -
      -

      Method Summary

      -
      -
      -
      -
      -
      Modifier and Type
      -
      Method
      -
      Description
      - -
      execute(ExecutionCall<? extends Device<?>> call)
      -
       
      -
      -
      +
      +
      public interface FinalExecutor
      +
    • +
    - +
    + - -
    -
      +
    +
    + - +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/internal/package-frame.html b/docs/jdocs/neureka/backend/main/internal/package-frame.html new file mode 100644 index 000000000..04f52222e --- /dev/null +++ b/docs/jdocs/neureka/backend/main/internal/package-frame.html @@ -0,0 +1,19 @@ + + + + + +neureka.backend.main.internal (neureka 1.0.1 API) + + + + +

    neureka.backend.main.internal

    +
    +

    Interfaces

    + +
    + + diff --git a/docs/jdocs/neureka/backend/main/internal/package-summary.html b/docs/jdocs/neureka/backend/main/internal/package-summary.html index a2a1d3b48..57f3846fe 100644 --- a/docs/jdocs/neureka/backend/main/internal/package-summary.html +++ b/docs/jdocs/neureka/backend/main/internal/package-summary.html @@ -1,87 +1,152 @@ - + + - -neureka.backend.main.internal (neureka 1.0.0 API) - - - - + +neureka.backend.main.internal (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    -

    Package neureka.backend.main.internal

    -
    -
    -
    package neureka.backend.main.internal
    -
    +

    Package neureka.backend.main.internal

    +
    Everything in this package should be considered library-private! DO NOT DEPEND ON CLASSES INSIDE THIS PACKAGE! Code inside this package or any sub-packages might change frequently...
    -
    -
    -
    -
    + + + +

    Package neureka.backend.main.internal Description

    +
    Everything in this package should be considered library-private! + DO NOT DEPEND ON CLASSES INSIDE THIS PACKAGE! + Code inside this package or any sub-packages might change frequently...
    +
    + + + + diff --git a/docs/jdocs/neureka/backend/main/internal/package-tree.html b/docs/jdocs/neureka/backend/main/internal/package-tree.html index 9c01df1a3..828e388ff 100644 --- a/docs/jdocs/neureka/backend/main/internal/package-tree.html +++ b/docs/jdocs/neureka/backend/main/internal/package-tree.html @@ -1,67 +1,130 @@ - + + - -neureka.backend.main.internal Class Hierarchy (neureka 1.0.0 API) - - - - + +neureka.backend.main.internal Class Hierarchy (neureka 1.0.1 API) - - - - - - + + -
    - -
    -

    Hierarchy For Package neureka.backend.main.internal

    Package Hierarchies:
    -
    +

    Interface Hierarchy

    -
    -
    + + + + diff --git a/docs/jdocs/neureka/backend/main/memory/MemUtil.html b/docs/jdocs/neureka/backend/main/memory/MemUtil.html index deef64bbf..55abbbe79 100644 --- a/docs/jdocs/neureka/backend/main/memory/MemUtil.html +++ b/docs/jdocs/neureka/backend/main/memory/MemUtil.html @@ -1,217 +1,328 @@ - + + - -MemUtil (neureka 1.0.0 API) - - - - + +MemUtil (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class MemUtil

    +
    neureka.backend.main.memory
    +

    Class MemUtil

    -
    java.lang.Object -
    neureka.backend.main.memory.MemUtil
    -
    -
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.backend.main.memory.MemUtil
      • +
      +
    • +
    +
    +
      +

    • -
      public class MemUtil extends Object
      +
      +
      public class MemUtil
      +extends java.lang.Object
      Utility methods for deleting tensors or preventing their deletion. In essence, it exposes convenience methods for setting and resetting the Tensor.isIntermediate() flag of supplied tensors... This is an internal library class which should not be used anywhere but in Neureka's backend. Do not use this anywhere else!
      -
    -
    -
      + +
    +
    +
    +
      +
    • -
    • -
      -

      Method Summary

      -
      -
      -
      -
      -
      Modifier and Type
      -
      Method
      -
      Description
      -
      static void
      -
      autoDelete(Tensor<?>... tensors)
      -
      +
        +
      • + + +

        Method Summary

        + + + + + + + + + + + + + + + + + + +
        All Methods Static Methods Concrete Methods 
        Modifier and Type    Method and Description
        static void    autoDelete(Tensor<?>... tensors)
        This method will try to delete the provided array of tensors if the tensors are not important computation graph components (like derivatives for example).
        - -
        static <T> T
        -
        keep(Tensor<?>[] tensors, Supplier<T> during)
        -
        +
        static <T> T    keep(Tensor<?>[] tensors, java.util.function.Supplier<T> during)
        This method makes sure that the provided tensors do not get deleted by setting the Tensor.isIntermediate() flag to off during the execution of the provided Supplier lambda! In said lambda the supplied thing will ultimately be returned by this method...
        - -
        static <T> T
        -
        keep(Tensor<?> a, Tensor<?> b, Supplier<T> during)
        -
        +
        static <T> T    keep(Tensor<?> a, Tensor<?> b, java.util.function.Supplier<T> during)
        This method makes sure that the provided tensors do not get deleted by setting the Tensor.isIntermediate() flag to off during the execution of the provided Supplier lambda! In said lambda the supplied thing will ultimately be returned by this method...
        - - - - -
        -

        Methods inherited from class java.lang.Object

        -clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
        - +
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
        • +
        +
      • +
    - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Method Details

      -
        -
      • -
        -

        autoDelete

        -
        public static void autoDelete(Tensor<?>... tensors)
        +
          +
        • + + +

          Method Detail

          + + + +
            +
          • +

            autoDelete

            +
            public static void autoDelete(Tensor<?>... tensors)
            This method will try to delete the provided array of tensors if the tensors are not important computation graph components (like derivatives for example).
            -
            -
            Parameters:
            +
            +
            Parameters:
            tensors - The tensors which should be deleted if possible.
            -
      • -
      • -
        -

        keep

        -
        public static <T> T keep(Tensor<?>[] tensors, Supplier<T> during)
        +
      + + + +
        +
      • +

        keep

        +
        public static <T> T keep(Tensor<?>[] tensors,
        +                         java.util.function.Supplier<T> during)
        This method makes sure that the provided tensors do not get deleted by setting the Tensor.isIntermediate() flag to off during the execution of the provided Supplier lambda! In said lambda the supplied thing will ultimately be returned by this method... All provided tensors will have the Tensor.isIntermediate() flag set to their original state after execution.
        -
        -
        Type Parameters:
        +
        +
        Type Parameters:
        T - The type of the result produced by the provided lambda.
        -
        Parameters:
        +
        Parameters:
        tensors - An array of tensors which should not be deleted during the execution of the supplied lambda.
        during - A lambda producing a result during which the provided tensors should not be deleted.
        -
        Returns:
        +
        Returns:
        The result produced by the provided lambda.
        -
    • -
    • -
      -

      keep

      -
      public static <T> T keep(Tensor<?> a, Tensor<?> b, Supplier<T> during)
      +
    + + + +
      +
    • +

      keep

      +
      public static <T> T keep(Tensor<?> a,
      +                         Tensor<?> b,
      +                         java.util.function.Supplier<T> during)
      This method makes sure that the provided tensors do not get deleted by setting the Tensor.isIntermediate() flag to off during the execution of the provided Supplier lambda! In said lambda the supplied thing will ultimately be returned by this method... Both of the provided tensors will have the Tensor.isIntermediate() flag set to their original state after execution.
      -
      -
      Type Parameters:
      +
      +
      Type Parameters:
      T - The type of the result produced by the provided lambda.
      -
      Parameters:
      +
      Parameters:
      a - The first tensor which should not be deleted during the execution of the provided lambda.
      b - The second tensor which should not be deleted during the execution of the provided lambda.
      during - A lambda producing a result during whose execution the first two arguments should not be deleted.
      -
      Returns:
      +
      Returns:
      The result produced by the provided lambda.
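      A short, hedged sketch of how the keep overloads and autoDelete documented above could be combined in backend code; someBackendStep is a placeholder, and this only illustrates the flag-preserving contract of this internal utility, not a recommended external use.

          import neureka.Tensor;
          import neureka.backend.main.memory.MemUtil;

          public class KeepSketch {
              static Tensor<?> guardedCompute(Tensor<?> a, Tensor<?> b) {
                  // The lambda result is returned; a and b regain their original
                  // Tensor.isIntermediate() state once keep(..) returns.
                  Tensor<?> result = MemUtil.keep(a, b, () -> someBackendStep(a, b));
                  MemUtil.autoDelete(a, b); // deletes them only if the graph no longer needs them
                  return result;
              }

              // Placeholder for whatever backend step produces the result tensor.
              static Tensor<?> someBackendStep(Tensor<?> a, Tensor<?> b) { return a; }
          }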
      -
    - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/memory/MemValidator.html b/docs/jdocs/neureka/backend/main/memory/MemValidator.html index 8d3b814de..3e9ceaff0 100644 --- a/docs/jdocs/neureka/backend/main/memory/MemValidator.html +++ b/docs/jdocs/neureka/backend/main/memory/MemValidator.html @@ -1,187 +1,302 @@ - + + - -MemValidator (neureka 1.0.0 API) - - - - + +MemValidator (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class MemValidator

    +
    neureka.backend.main.memory
    +

    Class MemValidator

    -
    java.lang.Object -
    neureka.backend.main.memory.MemValidator
    -
    -
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.backend.main.memory.MemValidator
      • +
      +
    • +
    +
    +
      +

    • -
      public class MemValidator extends Object
      +
      +
      public class MemValidator
      +extends java.lang.Object
      This class validates the states of tensors with respect to memory management before and after a lambda executes a function or some kind of algorithm on said tensors. This validity refers to the Tensor.isIntermediate() flag, whose state should adhere to strict rules in order to allow for safe deletion of tensors. The lambda wrapped by this may be a Function call or a lower level procedure defined in an Algorithm implementation.

      Warning! This is an internal class. Do not depend on it.
      -
    -
    -
    -
    -

    Methods inherited from class java.lang.Object

    -clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
    - +
    + - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Method Details

      -
        -
      • -
        -

        forInputs

        -
        public static MemValidator forInputs(Tensor<?>[] inputs, Supplier<Result> resultProvider)
        -
        -
        Parameters:
        -
        inputs - The inputs used by the Supplier implementation to provide a result.
        +
          +
        • + + +

          Method Detail

          + + + +
            +
          • +

            forInputs

            +
            public static MemValidator forInputs(Tensor<?>[] inputs,
            +                                     java.util.function.Supplier<Result> resultProvider)
            +
            +
            Parameters:
            +
            inputs - The inputs used by the Supplier implementation to provide a result.
            resultProvider - The callback providing the result which ought to be validated.
            -
            Returns:
            -
            The MemValidator which ought to validate the provided result.
            +
            Returns:
            +
            The MemValidator which ought to validate the provided result.
            -
      • -
      • -
        -

        isWronglyIntermediate

        -
        public boolean isWronglyIntermediate()
        -
        -
        Returns:
        -
        Is true if the result tensor is wrongfully flagged as intermediate (see Tensor.isIntermediate()).
        +
      + + + +
        +
      • +

        isWronglyIntermediate

        +
        public boolean isWronglyIntermediate()
        +
        +
        Returns:
        +
        Is true if the result tensor is wrongfully flagged as intermediate (see Tensor.isIntermediate()).
        -
    • -
    • -
      -

      isWronglyNonIntermediate

      -
      public boolean isWronglyNonIntermediate()
      -
      -
      Returns:
      -
      Is true if the result tensor is wrongfully flagged as non-intermediate (see Tensor.isIntermediate()).
      +
    + + + +
      +
    • +

      isWronglyNonIntermediate

      +
      public boolean isWronglyNonIntermediate()
      +
      +
      Returns:
      +
      Is true if the result tensor is wrongfully flagged as non-intermediate (see Tensor.isIntermediate()).
      -
    • -
    • -
      -

      getResult

      -
      public Result getResult()
      -
      -
      Returns:
      -
      The result tensor returned by the Supplier lambda passed to this MemValidator.
      +
    + + + +
      +
    • +

      getResult

      +
      public Result getResult()
      +
      +
      Returns:
      +
      The result tensor returned by the Supplier lambda passed to this MemValidator.
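      Putting the methods above together, a hedged sketch of the validation flow might look as follows; the import location of Result is an assumption and the thrown exception is only illustrative.

          import neureka.Tensor;
          import neureka.backend.api.Result;
          import neureka.backend.main.memory.MemValidator;

          public class ValidationSketch {
              static Result validated(Tensor<?>[] inputs, java.util.function.Supplier<Result> backendStep) {
                  MemValidator validator = MemValidator.forInputs(inputs, backendStep);
                  if (validator.isWronglyIntermediate() || validator.isWronglyNonIntermediate())
                      throw new IllegalStateException("Result has an inconsistent isIntermediate flag!");
                  return validator.getResult();
              }
          }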
      -
    - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/memory/package-frame.html b/docs/jdocs/neureka/backend/main/memory/package-frame.html new file mode 100644 index 000000000..7bbefeeef --- /dev/null +++ b/docs/jdocs/neureka/backend/main/memory/package-frame.html @@ -0,0 +1,20 @@ + + + + + +neureka.backend.main.memory (neureka 1.0.1 API) + + + + +

    neureka.backend.main.memory

    +
    +

    Classes

    + +
    + + diff --git a/docs/jdocs/neureka/backend/main/memory/package-summary.html b/docs/jdocs/neureka/backend/main/memory/package-summary.html index ce251d41b..87081fb72 100644 --- a/docs/jdocs/neureka/backend/main/memory/package-summary.html +++ b/docs/jdocs/neureka/backend/main/memory/package-summary.html @@ -1,94 +1,161 @@ - + + - -neureka.backend.main.memory (neureka 1.0.0 API) - - - - + +neureka.backend.main.memory (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    -

    Package neureka.backend.main.memory

    -
    -
    -
    package neureka.backend.main.memory
    -
    +

    Package neureka.backend.main.memory

    +
    Everything in this package should be considered library-private! DO NOT DEPEND ON CLASSES INSIDE THIS PACKAGE! Code inside this package or any sub-packages might change frequently...
    -
    -
    -
      -
    • -
      -
      Classes
      -
      -
      Class
      -
      Description
      - -
      -
      Utility methods for deleting tensors or preventing thereof.
      - -
      +

      See: Description

      +
      +
      +
        +
      • + + + + + + + + + + + + + + + + +
        Class Summary 
        Class    Description
        MemUtil +
        Utility methods for deleting tensors or preventing thereof.
        +
        MemValidator
        This class validates the states of tensors with respect to memory management before and after a lambda executes a function or some kind of algorithm on said tensors.
        - - - +
      -
    -
    + + + +

    Package neureka.backend.main.memory Description

    +
    Everything in this package should be considered library-private! + DO NOT DEPEND ON CLASSES INSIDE THIS PACKAGE! + Code inside this package or any sub-packages might change frequently...
    + + + + diff --git a/docs/jdocs/neureka/backend/main/memory/package-tree.html b/docs/jdocs/neureka/backend/main/memory/package-tree.html index d234f84b7..8719ec868 100644 --- a/docs/jdocs/neureka/backend/main/memory/package-tree.html +++ b/docs/jdocs/neureka/backend/main/memory/package-tree.html @@ -1,72 +1,135 @@ - + + - -neureka.backend.main.memory Class Hierarchy (neureka 1.0.0 API) - - - - + +neureka.backend.main.memory Class Hierarchy (neureka 1.0.1 API) - - - - - - + + -
    - -
    -

    Hierarchy For Package neureka.backend.main.memory

    Package Hierarchies:
    -
    +

    Class Hierarchy

    -
    -
    + + + + diff --git a/docs/jdocs/neureka/backend/main/operations/ConvUtil.html b/docs/jdocs/neureka/backend/main/operations/ConvUtil.html index 53527927d..d762935ab 100644 --- a/docs/jdocs/neureka/backend/main/operations/ConvUtil.html +++ b/docs/jdocs/neureka/backend/main/operations/ConvUtil.html @@ -1,180 +1,298 @@ - + + - -ConvUtil (neureka 1.0.0 API) - - - - + +ConvUtil (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class ConvUtil

    -
    -
    java.lang.Object -
    neureka.backend.main.operations.ConvUtil
    +
    neureka.backend.main.operations
    +

    Class ConvUtil

    -
    -
    -
    public class ConvUtil extends Object
    -
    -
    -
      - +
      +
        +
      • java.lang.Object
      • -
        -

        Constructor Summary

        -
        Constructors
        -
        -
        Constructor
        -
        Description
        - -
         
        +
          +
        • neureka.backend.main.operations.ConvUtil
        • +
        +
      • +
      +
      +
        +
      • +
        +
        +
        public class ConvUtil
        +extends java.lang.Object
        +
      • +
      -
    +
    + - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        ConvUtil

        -
        public ConvUtil()
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            ConvUtil

            +
            public ConvUtil()
          -
    • +
    -
  • -
    -

    Method Details

    -
      -
    • -
      -

      createDeconvolutionFor

      -
      public static NDConvolution createDeconvolutionFor(String op)
      -
      +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          createDeconvolutionFor

          +
          public static NDConvolution createDeconvolutionFor(java.lang.String op)
        • -
        • -
          -

          shapeOfCon

          -
          public static Shape shapeOfCon(int[] shape1, int[] shape2)
          -
          +
        + + + +
          +
        • +

          shapeOfCon

          +
          public static Shape shapeOfCon(int[] shape1,
          +                               int[] shape2)
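          A small, hedged sketch calling shapeOfCon; the assumption that it returns the result shape of a convolution between the two operand shapes is inferred from the method name, the import location of Shape is assumed, and ConvUtil is flagged as internal by the surrounding package docs.

              import neureka.Shape;
              import neureka.backend.main.operations.ConvUtil;

              public class ConvShapeSketch {
                  public static void main(String[] args) {
                      // Assumed: the shape resulting from convolving an 8x8 operand with a 3x3 operand.
                      Shape result = ConvUtil.shapeOfCon(new int[]{ 8, 8 }, new int[]{ 3, 3 });
                      System.out.println(result);
                  }
              }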
        • -
        • -
          -

          executeRecursively

          -
          public static Tensor<?> executeRecursively(String op, ExecutionCall<? extends Device<?>> call)
          -
          +
        + + + +
          +
        • +

          executeRecursively

          +
          public static Tensor<?> executeRecursively(java.lang.String op,
          +                                           ExecutionCall<? extends Device<?>> call)
          +
        • +
      -
  • - +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/operations/ElemWiseUtil.html b/docs/jdocs/neureka/backend/main/operations/ElemWiseUtil.html index 82981c3b3..0290c505f 100644 --- a/docs/jdocs/neureka/backend/main/operations/ElemWiseUtil.html +++ b/docs/jdocs/neureka/backend/main/operations/ElemWiseUtil.html @@ -1,179 +1,293 @@ - + + - -ElemWiseUtil (neureka 1.0.0 API) - - - - + +ElemWiseUtil (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class ElemWiseUtil

    +
    neureka.backend.main.operations
    +

    Class ElemWiseUtil

    -
    java.lang.Object -
    neureka.backend.main.operations.ElemWiseUtil
    -
    -
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.backend.main.operations.ElemWiseUtil
      • +
      +
    • +
    +
    +
      +

    • -
      public class ElemWiseUtil -extends Object
      -
      Methods inside this utility class execute only some ExecutionCall arguments +
      +
      public class ElemWiseUtil
      +extends java.lang.Object
      +
      Methods inside this utility class execute only some ExecutionCall arguments in groups if their total number exceeds the arity of an operation.
      -
    -
    -
      - -
    • -
      -

      Constructor Summary

      -
      Constructors
      -
      -
      Constructor
      -
      Description
      - -
       
      +
    • +
    - +
    + - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        ElemWiseUtil

        -
        public ElemWiseUtil()
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            ElemWiseUtil

            +
            public ElemWiseUtil()
          -
    • +
    -
  • -
    -

    Method Details

    -
      -
    • -
      -

      newTensorLike

      -
      public static <V> Tensor<V> newTensorLike(Tensor<V> template, double value)
      -
      +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          newTensorLike

          +
          public static <V> Tensor<V> newTensorLike(Tensor<V> template,
          +                                          double value)
        • -
        • -
          -

          newTensorLike

          -
          public static <V> Tensor<V> newTensorLike(Class<V> type, Shape shape, boolean isOutsourced, Device<Object> device, double value)
          -
          +
        + + + +
          +
        • +

          newTensorLike

          +
          public static <V> Tensor<V> newTensorLike(java.lang.Class<V> type,
          +                                          Shape shape,
          +                                          boolean isOutsourced,
          +                                          Device<java.lang.Object> device,
          +                                          double value)
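          A hedged sketch of the template based overload of newTensorLike shown above; the exact semantics (a tensor mirroring the template's shape and placement, filled with the given value) are inferred from the name and parameters and are an assumption.

              import neureka.Tensor;
              import neureka.backend.main.operations.ElemWiseUtil;

              public class LikeSketch {
                  static Tensor<Double> zerosLike(Tensor<Double> template) {
                      // Assumed to produce a tensor shaped like the template, filled with 0.0.
                      return ElemWiseUtil.newTensorLike(template, 0.0);
                  }
              }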
          +
        • +
      -
  • - +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/operations/functions/Absolute.html b/docs/jdocs/neureka/backend/main/operations/functions/Absolute.html index 1d5172c40..d4513d5d5 100644 --- a/docs/jdocs/neureka/backend/main/operations/functions/Absolute.html +++ b/docs/jdocs/neureka/backend/main/operations/functions/Absolute.html @@ -1,210 +1,286 @@ - + + - -Absolute (neureka 1.0.0 API) - - - - + +Absolute (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class Absolute

    -
    -
    java.lang.Object -
    neureka.backend.api.template.operations.AbstractOperation -
    neureka.backend.main.operations.functions.Absolute
    -
    +
    neureka.backend.main.operations.functions
    +

    Class Absolute

    -
    -
    +
    + +
    +
    -
    - +
    +
    +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        Absolute

        -
        public Absolute()
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            Absolute

            +
            public Absolute()
          -
    • +
    -
  • -
    -

    Method Details

    -
  • -
  • -
    -

    calculate

    -
    public final double calculate(double[] inputs, int j, int d, Function[] src)
    -
    Description copied from interface: Operation
    + + + + +
      +
    • +

      calculate

      +
      public final double calculate(double[] inputs,
      +                              int j,
      +                              int d,
      +                              Function[] src)
      +
      Description copied from interface: Operation
      This method mainly ought to serve as a reference- and fallback- implementation for tensor backends and also as the backend for handling the calculation of scalar inputs passed to a given abstract syntax tree of - Function instances...
      + Function instances...
      ( (almost) every Function instance contains an OperationType reference to which it passes scalar executions... )

      This is also the reason why the last parameter of this method is a list of Function objects : @@ -220,25 +296,84 @@

      calculate

      Depending on this variable and also the nature of the operation, the execution calls to the child nodes of this node change considerably!

      -
      -
      Parameters:
      +
      +
      Parameters:
      inputs - An array of scalar input variables.
      j - The index variable for indexed execution on the input array. (-1 if no indexing should occur)
      d - The index of the variable of which a derivative ought to be calculated.
      src - The child nodes of the Function node to which this very OperationType belongs.
      -
      Returns:
      +
      Returns:
      The result of the calculation.
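      To illustrate the scalar fallback contract described above without depending on neureka internals, here is a purely hypothetical, self-contained class; it is not neureka's Absolute implementation and uses its own ScalarFun interface instead of neureka's Function type, but it mirrors the documented parameter roles and applies the chain rule for the derivative case.

          public class AbsLikeOperation {
              // Stand-in for a child node of the abstract syntax tree described above.
              interface ScalarFun { double call(double[] inputs, int j, int d); }

              // 'inputs' are the scalar variables, 'j' selects an indexed execution,
              // 'd' selects the variable to differentiate by (-1 for plain evaluation),
              // and 'src' are the child nodes of this operation.
              public double calculate(double[] inputs, int j, int d, ScalarFun[] src) {
                  double x = src[0].call(inputs, j, -1);        // evaluate the single child node
                  if (d < 0) return Math.abs(x);                // plain evaluation: |x|
                  double inner = src[0].call(inputs, j, d);     // derivative of the child (chain rule)
                  return Math.signum(x) * inner;                // d/dx |x| = sign(x), times inner derivative
              }
          }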
      -
  • - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/operations/functions/Cbrt.html b/docs/jdocs/neureka/backend/main/operations/functions/Cbrt.html index 47981cf71..14df6a6c2 100644 --- a/docs/jdocs/neureka/backend/main/operations/functions/Cbrt.html +++ b/docs/jdocs/neureka/backend/main/operations/functions/Cbrt.html @@ -1,210 +1,286 @@ - + + - -Cbrt (neureka 1.0.0 API) - - - - + +Cbrt (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    -
    java.lang.Object -
    neureka.backend.api.template.operations.AbstractOperation -
    neureka.backend.main.operations.functions.Cbrt
    -
    +
    neureka.backend.main.operations.functions
    +

    Class Cbrt

    -
    -
    +
    + +
    +
    -
    - +
    +
    +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        Cbrt

        -
        public Cbrt()
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            Cbrt

            +
            public Cbrt()
          -
    • +
    -
  • -
    -

    Method Details

    -
  • -
  • -
    -

    calculate

    -
    public final double calculate(double[] inputs, int j, int d, Function[] src)
    -
    Description copied from interface: Operation
    + + + + +
      +
    • +

      calculate

      +
      public final double calculate(double[] inputs,
      +                              int j,
      +                              int d,
      +                              Function[] src)
      +
      Description copied from interface: Operation
      This method mainly ought to serve as a reference- and fallback- implementation for tensor backends and also as the backend for handling the calculation of scalar inputs passed to a given abstract syntax tree of - Function instances...
      + Function instances...
      ( (almost) every Function instance contains an OperationType reference to which it passes scalar executions... )

      This is also the reason why the last parameter of this method is a list of Function objects : @@ -220,25 +296,84 @@

      calculate

      Depending on this variable and also the nature of the operation, the execution calls to the child nodes of this node change considerably!

      -
      -
      Parameters:
      +
      +
      Parameters:
      inputs - An array of scalar input variables.
      j - The index variable for indexed execution on the input array. (-1 if no indexing should occur)
      d - The index of the variable of which a derivative ought to be calculated.
      src - The child nodes of the Function node to which this very OperationType belongs.
      -
      Returns:
      +
      Returns:
      The result of the calculation.
      -
  • - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/operations/functions/Cosinus.html b/docs/jdocs/neureka/backend/main/operations/functions/Cosinus.html index c1d7e773a..b6516ccd3 100644 --- a/docs/jdocs/neureka/backend/main/operations/functions/Cosinus.html +++ b/docs/jdocs/neureka/backend/main/operations/functions/Cosinus.html @@ -1,210 +1,286 @@ - + + - -Cosinus (neureka 1.0.0 API) - - - - + +Cosinus (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class Cosinus

    -
    -
    java.lang.Object -
    neureka.backend.api.template.operations.AbstractOperation -
    neureka.backend.main.operations.functions.Cosinus
    -
    +
    neureka.backend.main.operations.functions
    +

    Class Cosinus

    -
    -
    +
    + +
    +
    -
    - +
    +
    +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        Cosinus

        -
        public Cosinus()
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            Cosinus

            +
            public Cosinus()
          -
    • +
    -
  • -
    -

    Method Details

    -
  • -
  • -
    -

    calculate

    -
    public final double calculate(double[] inputs, int j, int d, Function[] src)
    -
    Description copied from interface: Operation
    + + + + +
      +
    • +

      calculate

      +
      public final double calculate(double[] inputs,
      +                              int j,
      +                              int d,
      +                              Function[] src)
      +
      Description copied from interface: Operation
      This method mainly ought to serve as a reference- and fallback- implementation for tensor backends and also as the backend for handling the calculation of scalar inputs passed to a given abstract syntax tree of - Function instances...
      + Function instances...
      ( (almost) every Function instance contains an OperationType reference to which it passes scalar executions... )

      This is also the reason why the last parameter of this method is a list of Function objects : @@ -220,25 +296,84 @@

      calculate

      Depending on this variable and also the nature of the operation, the execution calls to the child nodes of this node change considerably!

      -
      -
      Parameters:
      +
      +
      Parameters:
      inputs - An array of scalar input variables.
      j - The index variable for indexed execution on the input array. (-1 if no indexing should occur)
      d - The index of the variable of which a derivative ought to be calculated.
      src - The child nodes of the Function node to which this very OperationType belongs.
      -
      Returns:
      +
      Returns:
      The result of the calculation.
      -
  • - - + + +
    +
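The calculate contract above is easier to see with a small standalone example. The following sketch is purely illustrative: it mirrors the documented signature for a cosine-like scalar operation, but the class name and the assumption that a negative d means "no derivative requested" are assumptions made for this example, and real implementations receive their child nodes as Function[] rather than plain doubles.

    // Hypothetical sketch of the calculate(...) contract documented above; not the neureka source.
    public class ScalarCosineSketch {

        // j selects an input (-1 meaning "no indexing", so the first input is used);
        // d names the input to differentiate with respect to (assumed: d < 0 means plain evaluation).
        static double calculate(double[] inputs, int j, int d) {
            double x = inputs[ j < 0 ? 0 : j ];
            if ( d < 0 ) return Math.cos( x );          // forward evaluation
            return ( d == 0 ? -Math.sin( x ) : 0.0 );   // partial derivative w.r.t. input d
        }

        public static void main(String[] args) {
            System.out.println( calculate(new double[]{ 0.5 }, -1, -1) ); // ~ 0.8776
            System.out.println( calculate(new double[]{ 0.5 }, -1,  0) ); // ~ -0.4794
        }
    }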
diff --git a/docs/jdocs/neureka/backend/main/operations/functions/Exp.html b/docs/jdocs/neureka/backend/main/operations/functions/Exp.html
index 463f0aa6e..334ad4449 100644
-Exp (neureka 1.0.0 API)
+Exp (neureka 1.0.1 API)
[Regenerated javadoc page for neureka.backend.main.operations.functions.Exp: class hierarchy (java.lang.Object > AbstractOperation > Exp), constructor public Exp(), and the calculate(double[], int, int, Function[]) documentation copied from Operation, all identical in substance to the Cosinus page above; only the layout and version string changed.]
diff --git a/docs/jdocs/neureka/backend/main/operations/functions/GaSU.html b/docs/jdocs/neureka/backend/main/operations/functions/GaSU.html
index 4c3682a68..ab5133440 100644
-GaSU (neureka 1.0.0 API)
+GaSU (neureka 1.0.1 API)

Class GaSU
All Implemented Interfaces: Operation

public class GaSU extends AbstractOperation

The Self Gated Softsign Unit is based on the Softsign function (a computationally cheap non-exponential quasi Tanh), making it a polynomially based version of the GaTU function, which is itself based on the Tanh function. Similar to the Softsign and Tanh functions, GaSU is 0-centered and capped by -1 and +1.

Constructor Detail
    public GaSU()

Method Detail
    calculate - signature and documentation identical to the Cosinus page above.

[Only the page layout and version string changed.]
diff --git a/docs/jdocs/neureka/backend/main/operations/functions/GaTU.html b/docs/jdocs/neureka/backend/main/operations/functions/GaTU.html
index 66d02f56c..e7a8e3fb0 100644
-GaTU (neureka 1.0.0 API)
+GaTU (neureka 1.0.1 API)

Class GaTU
All Implemented Interfaces: Operation

public class GaTU extends AbstractOperation

The Self Gated Tanh Unit is based on the Tanh function, making it an exponentiation-based version of the GaSU function, which is itself based on the Softsign function (a computationally cheap non-exponential quasi Tanh). Similar to the Softsign and Tanh functions, GaTU is 0-centered and capped by -1 and +1.

Constructor Detail
    public GaTU()

Method Detail
    calculate - signature and documentation identical to the Cosinus page above.

[Only the page layout and version string changed.]
diff --git a/docs/jdocs/neureka/backend/main/operations/functions/Gaussian.html b/docs/jdocs/neureka/backend/main/operations/functions/Gaussian.html
index 953e721a4..8403e5c17 100644
-Gaussian (neureka 1.0.0 API)
+Gaussian (neureka 1.0.1 API)
[Regenerated javadoc page for neureka.backend.main.operations.functions.Gaussian: class hierarchy, constructor public Gaussian(), and the calculate(...) documentation identical in substance to the Cosinus page above; only the layout and version string changed.]
diff --git a/docs/jdocs/neureka/backend/main/operations/functions/GaussianFast.html b/docs/jdocs/neureka/backend/main/operations/functions/GaussianFast.html
index f0a495eb9..13ce49477 100644
-GaussianFast (neureka 1.0.0 API)
+GaussianFast (neureka 1.0.1 API)
[Regenerated javadoc page for neureka.backend.main.operations.functions.GaussianFast: class hierarchy, constructor public GaussianFast(), and the calculate(...) documentation identical in substance to the Cosinus page above; only the layout and version string changed.]
diff --git a/docs/jdocs/neureka/backend/main/operations/functions/GeLU.html b/docs/jdocs/neureka/backend/main/operations/functions/GeLU.html
index 4cb7d160e..541cb0da5 100644
-GeLU (neureka 1.0.0 API)
+GeLU (neureka 1.0.1 API)

Class GeLU
All Implemented Interfaces: Operation

public class GeLU extends AbstractOperation

The GELU activation function is based on the standard Gaussian cumulative distribution function and is defined as x Φ( x ), implemented as x * sigmoid(x * 1.702). The GELU non-linearity weighs inputs by their percentile rather than gating them by their sign as ReLU does. Consequently, the GELU can be thought of as a smoother ReLU.

Constructor Detail
    public GeLU()

Method Detail
    calculate - signature and documentation identical to the Cosinus page above.

[Only the page layout and version string changed.]
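For reference, here is a tiny self-contained transcription of the approximation quoted above (x * sigmoid(x * 1.702)); it is an illustrative sketch with an invented class name, not the neureka implementation.

    // Illustrative sketch of the GELU approximation described above; not the neureka source.
    public class GeluSketch {
        static double sigmoid(double x) { return 1.0 / (1.0 + Math.exp(-x)); }
        static double gelu(double x)    { return x * sigmoid(1.702 * x); }   // x * sigmoid(x * 1.702)

        public static void main(String[] args) {
            for (double x : new double[]{ -2.0, -1.0, 0.0, 1.0, 2.0 })
                System.out.printf("gelu(%5.1f) = %8.4f%n", x, gelu(x));
        }
    }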
diff --git a/docs/jdocs/neureka/backend/main/operations/functions/Identity.html b/docs/jdocs/neureka/backend/main/operations/functions/Identity.html
index 2061250f1..c523e6861 100644
-Identity (neureka 1.0.0 API)
+Identity (neureka 1.0.1 API)
[Regenerated javadoc page for neureka.backend.main.operations.functions.Identity: class hierarchy, constructor public Identity(), and the calculate(...) documentation identical in substance to the Cosinus page above; only the layout and version string changed.]
diff --git a/docs/jdocs/neureka/backend/main/operations/functions/Log10.html b/docs/jdocs/neureka/backend/main/operations/functions/Log10.html
index 942108b1f..4d099297f 100644
-Log10 (neureka 1.0.0 API)
+Log10 (neureka 1.0.1 API)
[Regenerated javadoc page for neureka.backend.main.operations.functions.Log10: class hierarchy, constructor public Log10(), and the calculate(...) documentation identical in substance to the Cosinus page above; only the layout and version string changed.]
diff --git a/docs/jdocs/neureka/backend/main/operations/functions/Logarithm.html b/docs/jdocs/neureka/backend/main/operations/functions/Logarithm.html
index ebbade591..02f194fc1 100644
-Logarithm (neureka 1.0.0 API)
+Logarithm (neureka 1.0.1 API)
[Regenerated javadoc page for neureka.backend.main.operations.functions.Logarithm: class hierarchy, constructor public Logarithm(), and the calculate(...) documentation identical in substance to the Cosinus page above; only the layout and version string changed.]
diff --git a/docs/jdocs/neureka/backend/main/operations/functions/Quadratic.html b/docs/jdocs/neureka/backend/main/operations/functions/Quadratic.html
index 745636cd1..e4c7968cd 100644
-Quadratic (neureka 1.0.0 API)
+Quadratic (neureka 1.0.1 API)
[Regenerated javadoc page for neureka.backend.main.operations.functions.Quadratic: class hierarchy, constructor public Quadratic(), and the calculate(...) documentation identical in substance to the Cosinus page above; only the layout and version string changed.]
diff --git a/docs/jdocs/neureka/backend/main/operations/functions/ReLU.html b/docs/jdocs/neureka/backend/main/operations/functions/ReLU.html
index f626d306b..c95c63114 100644
-ReLU (neureka 1.0.0 API)
+ReLU (neureka 1.0.1 API)
[Regenerated javadoc page for neureka.backend.main.operations.functions.ReLU: class hierarchy, constructor public ReLU(), and the calculate(...) documentation identical in substance to the Cosinus page above; only the layout and version string changed.]
diff --git a/docs/jdocs/neureka/backend/main/operations/functions/SeLU.html b/docs/jdocs/neureka/backend/main/operations/functions/SeLU.html
index 2977f23c5..2e3eded09 100644
-SeLU (neureka 1.0.0 API)
+SeLU (neureka 1.0.1 API)

Class SeLU
All Implemented Interfaces: Operation

public class SeLU extends AbstractOperation

The Scaled Exponential Linear Unit, or SELU, is an activation function that induces self-normalizing properties. The SELU activation function is implemented as:

    if      ( x >  0 ) return SCALE * x;
    else if ( x <= 0 ) return SCALE * ALPHA * (Math.exp(x) - 1);
    else               return Float.NaN;

...where ALPHA == 1.6733 and SCALE == 1.0507.

Constructor Detail
    public SeLU()

Method Detail
    calculate - signature and documentation identical to the Cosinus page above.

[Only the page layout and version string changed.]
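A runnable transcription of the three-branch definition quoted above, using the documented constants; the class name is invented for the example and this is not the neureka source.

    // Sketch of the SELU definition quoted above (ALPHA == 1.6733, SCALE == 1.0507); not the neureka source.
    public class SeluSketch {
        static final double ALPHA = 1.6733;
        static final double SCALE = 1.0507;

        static double selu(double x) {
            if      ( x >  0 ) return SCALE * x;
            else if ( x <= 0 ) return SCALE * ALPHA * (Math.exp(x) - 1);
            else               return Double.NaN; // reached only when x is NaN
        }

        public static void main(String[] args) {
            System.out.println(selu( 1.0)); //  1.0507
            System.out.println(selu(-1.0)); // ~ -1.1113
        }
    }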
diff --git a/docs/jdocs/neureka/backend/main/operations/functions/SiLU.html b/docs/jdocs/neureka/backend/main/operations/functions/SiLU.html
index 99d0ec079..8245ab3d9 100644
-SiLU (neureka 1.0.0 API)
+SiLU (neureka 1.0.1 API)

Class SiLU
All Implemented Interfaces: Operation

public class SiLU extends AbstractOperation

The SiLU activation function, also known as the swish function, is defined as x * sigmoid(x). It is a smooth, non-monotonic function that consistently matches or outperforms ReLU on deep networks; it is unbounded above and bounded below.

Constructor Detail
    public SiLU()

Method Detail
    calculate - signature and documentation identical to the Cosinus page above.

[Only the page layout and version string changed.]
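Since the definition above is just x * sigmoid(x), a minimal sketch (illustrative only, not the neureka source) looks like this:

    // Sketch of the SiLU / swish definition given above; not the neureka source.
    public class SiluSketch {
        static double sigmoid(double x) { return 1.0 / (1.0 + Math.exp(-x)); }
        static double silu(double x)    { return x * sigmoid(x); }

        public static void main(String[] args) {
            System.out.println(silu( 1.0)); // ~  0.7311
            System.out.println(silu(-1.0)); // ~ -0.2689 (bounded below, unbounded above)
        }
    }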
diff --git a/docs/jdocs/neureka/backend/main/operations/functions/Sigmoid.html b/docs/jdocs/neureka/backend/main/operations/functions/Sigmoid.html
index 76bf808df..861fb54dc 100644
-Sigmoid (neureka 1.0.0 API)
+Sigmoid (neureka 1.0.1 API)
[Regenerated javadoc page for neureka.backend.main.operations.functions.Sigmoid: class hierarchy, constructor public Sigmoid(), and the calculate(...) documentation identical in substance to the Cosinus page above; only the layout and version string changed.]
diff --git a/docs/jdocs/neureka/backend/main/operations/functions/Sinus.html b/docs/jdocs/neureka/backend/main/operations/functions/Sinus.html
index 5cc90162b..3982c21d6 100644
-Sinus (neureka 1.0.0 API)
+Sinus (neureka 1.0.1 API)
[Regenerated javadoc page for neureka.backend.main.operations.functions.Sinus: class hierarchy, constructor public Sinus(), and the calculate(...) documentation identical in substance to the Cosinus page above; only the layout and version string changed.]
diff --git a/docs/jdocs/neureka/backend/main/operations/functions/Softplus.html b/docs/jdocs/neureka/backend/main/operations/functions/Softplus.html
index 8e8bfc007..9d5a8a7f3 100644
-Softplus (neureka 1.0.0 API)
+Softplus (neureka 1.0.1 API)

Class Softplus
All Implemented Interfaces: Operation

public final class Softplus extends AbstractOperation

SoftPlus is a smooth approximation to the ReLU function and can be used to constrain the output of a machine to always be positive.

Constructor Detail
    public Softplus()

Method Detail
    calculate - signature and documentation identical to the Cosinus page above.

[Only the page layout and version string changed.]
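The page above does not spell out the formula; the standard softplus definition ln(1 + e^x) is assumed in this illustrative sketch (not the neureka source), which also shows the "always positive" property the description mentions.

    // Sketch of the standard softplus definition ln(1 + e^x); assumed here, not taken from the neureka source.
    public class SoftplusSketch {
        static double softplus(double x) { return Math.log(1.0 + Math.exp(x)); }

        public static void main(String[] args) {
            System.out.println(softplus(-5.0)); // ~ 0.0067 (small but positive)
            System.out.println(softplus( 0.0)); // ~ 0.6931
            System.out.println(softplus( 5.0)); // ~ 5.0067 (approaches x, like ReLU)
        }
    }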
diff --git a/docs/jdocs/neureka/backend/main/operations/functions/Softsign.html b/docs/jdocs/neureka/backend/main/operations/functions/Softsign.html
index aded5fb0f..501756ae9 100644
-Softsign (neureka 1.0.0 API)
+Softsign (neureka 1.0.1 API)

Class Softsign
All Implemented Interfaces: Operation

public class Softsign extends AbstractOperation

The softsign function, defined as x / ( 1 + Math.abs( x ) ), is a computationally cheap 0 centered activation function which rescales the inputs between -1 and 1, very much like the Tanh function. The softsign function converges polynomially and is computationally cheaper than the tanh function which converges exponentially. This makes this function a computationally cheap non-exponential quasi Tanh!

Constructor Detail
    public Softsign()

Method Detail
    calculate - signature and documentation identical to the Cosinus page above.

[Only the page layout and version string changed.]
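The formula quoted above, x / ( 1 + Math.abs( x ) ), translates directly into a small sketch (illustrative only, not the neureka source):

    // Sketch of the softsign definition given above; not the neureka source.
    public class SoftsignSketch {
        static double softsign(double x) { return x / (1.0 + Math.abs(x)); }

        public static void main(String[] args) {
            System.out.println(softsign(  1.0)); //  0.5
            System.out.println(softsign( -1.0)); // -0.5
            System.out.println(softsign(100.0)); // ~ 0.9901, approaching the +1 cap
        }
    }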
diff --git a/docs/jdocs/neureka/backend/main/operations/functions/Sqrt.html b/docs/jdocs/neureka/backend/main/operations/functions/Sqrt.html
index 564f0d3d0..b6ec57e87 100644
-Sqrt (neureka 1.0.0 API)
+Sqrt (neureka 1.0.1 API)
[Regenerated javadoc page for neureka.backend.main.operations.functions.Sqrt: class hierarchy, constructor public Sqrt(), and the calculate(...) documentation identical in substance to the Cosinus page above; only the layout and version string changed.]
diff --git a/docs/jdocs/neureka/backend/main/operations/functions/Tanh.html b/docs/jdocs/neureka/backend/main/operations/functions/Tanh.html
index 280689e7b..7451503e0 100644
-Tanh (neureka 1.0.0 API)
+Tanh (neureka 1.0.1 API)
[Regenerated javadoc page for neureka.backend.main.operations.functions.Tanh: class hierarchy, constructor public Tanh(), and the calculate(...) documentation identical in substance to the Cosinus page above; only the layout and version string changed.]
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/operations/functions/TanhFast.html b/docs/jdocs/neureka/backend/main/operations/functions/TanhFast.html index c2ac40173..4eac2a57f 100644 --- a/docs/jdocs/neureka/backend/main/operations/functions/TanhFast.html +++ b/docs/jdocs/neureka/backend/main/operations/functions/TanhFast.html @@ -1,210 +1,286 @@ - + + - -TanhFast (neureka 1.0.0 API) - - - - + +TanhFast (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class TanhFast

    -
    -
    java.lang.Object -
    neureka.backend.api.template.operations.AbstractOperation -
    neureka.backend.main.operations.functions.TanhFast
    -
    +
    neureka.backend.main.operations.functions
    +

    Class TanhFast

    -
    -
    +
    + +
    +
    -
    - +
    +
    +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        TanhFast

        -
        public TanhFast()
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            TanhFast

            +
            public TanhFast()
          -
    • +
    -
  • -
    -

    Method Details

    -
  • -
  • -
    -

    calculate

    -
    public final double calculate(double[] inputs, - int j, - int d, - Function[] src)
    -
    Description copied from interface: Operation
    + + + + +
      +
    • +

      calculate

      +
      public final double calculate(double[] inputs,
      +                              int j,
      +                              int d,
      +                              Function[] src)
      +
      Description copied from interface: Operation
      This method mainly ought to serve as a reference- and fallback- implementation for tensor backends and also as the backend for handling the calculation of scalar inputs passed to a given abstract syntax tree of - Function instances...
      + Function instances...
      ( (almost) every Function instance contains an OperationType reference to which it passes scalar executions... )

      This is also the reason why the last parameter of this method is a list of Function objects : @@ -220,25 +296,84 @@

      calculate

      Depending on this variable and also the nature of the operation, the execution calls to the child nodes of this node change considerably!

      -
      -
      Parameters:
      +
      +
      Parameters:
      inputs - An array of scalar input variables.
      j - The index variable for indexed execution on the input array. (-1 if no indexing should occur)
      d - The index of the variable of which a derivative ought to be calculated.
      src - The child nodes of the Function node to which this very OperationType belongs.
      -
      Returns:
      +
      Returns:
      The result of the calculation.
      -
  • - - + + +
    +
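As a side note to the calculate contract summarized above (scalar fallback, indexed execution via j, derivative selection via d), the following self-contained sketch shows how such a scalar node could be evaluated and differentiated for a square-root operation. The ScalarNode interface, the chain-rule handling, and the convention that a negative d means plain evaluation are all assumptions made for this example; they are not neureka's Function or Operation types.

    // Hypothetical stand-in for a scalar AST node; not neureka's Function type.
    interface ScalarNode {
        double call(double[] inputs, int j);          // forward value
        double derive(double[] inputs, int j, int d); // partial derivative w.r.t. input d
    }

    final class SqrtNodeSketch {
        // Mirrors the shape of the documented contract: j = -1 means "no indexed
        // execution"; d = -1 is assumed to mean plain evaluation, otherwise the
        // derivative of sqrt(src(x)) w.r.t. input d is returned.
        static double calculate(double[] inputs, int j, int d, ScalarNode src) {
            double inner = src.call(inputs, j);
            if (d < 0) return Math.sqrt(inner);
            // chain rule: (sqrt(u))' = u' / (2 * sqrt(u))
            return src.derive(inputs, j, d) / (2.0 * Math.sqrt(inner));
        }

        public static void main(String[] args) {
            ScalarNode firstInput = new ScalarNode() {
                public double call(double[] in, int j)          { return in[j < 0 ? 0 : j]; }
                public double derive(double[] in, int j, int d) { return d == (j < 0 ? 0 : j) ? 1.0 : 0.0; }
            };
            System.out.println(calculate(new double[]{4.0}, -1, -1, firstInput)); // 2.0
            System.out.println(calculate(new double[]{4.0}, -1,  0, firstInput)); // 0.25
        }
    }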
diff --git a/docs/jdocs/neureka/backend/main/operations/functions/package-frame.html b/docs/jdocs/neureka/backend/main/operations/functions/package-frame.html
new file mode 100644
index 000000000..5aa92296c
New frame page listing the classes of neureka.backend.main.operations.functions (neureka 1.0.1 API).
diff --git a/docs/jdocs/neureka/backend/main/operations/functions/package-summary.html b/docs/jdocs/neureka/backend/main/operations/functions/package-summary.html
index faa67ee1c..e27ce9336 100644
Regenerated package summary (title bumped from "neureka 1.0.0 API" to "neureka 1.0.1 API"). Package description: everything in this package should be considered library-private, do not depend on classes inside this package; code inside this package or any sub-packages might change frequently. Class summary:
    Absolute, Cbrt, Cosinus, Exp, Gaussian, GaussianFast, Identity, Log10, Logarithm, Quadratic, ReLU, Sigmoid, Sinus, Sqrt, Tanh, TanhFast (no description)
    GaSU - The Self Gated Softsign Unit is based on the Softsign function (a computationally cheap non-exponential quasi Tanh), making it a polynomially based version of the GaTU function, which is itself based on the Tanh function.
    GaTU - The Self Gated Tanh Unit is based on the Tanh, making it an exponentiation based version of the GaSU function, which is itself based on the Softsign function (a computationally cheap non-exponential quasi Tanh).
    GeLU - The GELU activation function is based on the standard Gaussian cumulative distribution function; it is defined as x Φ(x) and implemented as x * sigmoid(x * 1.702).
    SeLU - The Scaled Exponential Linear Unit, or SELU, is an activation function that induces self-normalizing properties.
    SiLU - The SiLU activation function, also known as the swish function, is defined as x * sigmoid(x).
    Softplus - SoftPlus is a smooth approximation to the ReLU function and can be used to constrain the output of a machine to always be positive.
    Softsign - The softsign function, defined as x / (1 + Math.abs(x)), is a computationally cheap, zero-centered activation function which rescales the inputs between -1 and 1, much like the Tanh function.
diff --git a/docs/jdocs/neureka/backend/main/operations/functions/package-tree.html b/docs/jdocs/neureka/backend/main/operations/functions/package-tree.html
index a94c09ad4..1f4e607a5 100644
Regenerated class-hierarchy page: java.lang.Object > neureka.backend.api.template.operations.AbstractOperation (implements neureka.backend.api.Operation) > Absolute, Cbrt, Cosinus, Exp, GaSU, GaTU, Gaussian, GaussianFast, GeLU, Identity, Log10, Logarithm, Quadratic, ReLU, SeLU, Sigmoid, SiLU, Sinus, Softplus, Softsign, Sqrt, Tanh, TanhFast.
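Several of the activation definitions quoted above are one-liners that are easy to transcribe incorrectly, so here they are as plain static methods. This is only an illustration of the documented formulas, not neureka's implementation classes, and the closed form used for softplus, ln(1 + e^x), is an assumption based on its description as a smooth approximation to ReLU.

    // Standalone sketch of the activation formulas quoted in the package summary.
    // Not neureka's implementation; just the math as documented.
    final class ActivationSketch {

        static double sigmoid(double x)  { return 1.0 / (1.0 + Math.exp(-x)); }

        static double gelu(double x)     { return x * sigmoid(1.702 * x); }      // x * sigmoid(x * 1.702)
        static double silu(double x)     { return x * sigmoid(x); }              // swish
        static double softsign(double x) { return x / (1.0 + Math.abs(x)); }     // rescales into (-1, 1)
        static double softplus(double x) { return Math.log(1.0 + Math.exp(x)); } // assumed smooth-ReLU form

        public static void main(String[] args) {
            System.out.printf("gelu(1)=%.4f silu(1)=%.4f softsign(1)=%.4f softplus(0)=%.4f%n",
                    gelu(1), silu(1), softsign(1), softplus(0));
        }
    }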
    + + + + diff --git a/docs/jdocs/neureka/backend/main/operations/indexer/Product.html b/docs/jdocs/neureka/backend/main/operations/indexer/Product.html index 63cf19313..535a8621d 100644 --- a/docs/jdocs/neureka/backend/main/operations/indexer/Product.html +++ b/docs/jdocs/neureka/backend/main/operations/indexer/Product.html @@ -1,205 +1,278 @@ - + + - -Product (neureka 1.0.0 API) - - - - + +Product (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class Product

    -
    -
    java.lang.Object -
    neureka.backend.api.template.operations.AbstractOperation -
    neureka.backend.main.operations.indexer.Product
    -
    +
    neureka.backend.main.operations.indexer
    +

    Class Product

    -
    -
    +
    + +
    +
      +
    • +
      All Implemented Interfaces:
      -
      Operation
      +
      Operation

      -
      public final class Product -extends AbstractOperation
      +
      +
      public final class Product
      +extends AbstractOperation
      This type of operation belongs to the same species as the - Summation operation. + Summation operation. It executes incoming calls so that the calling function will be executed with all input indices passed to it. The resulting array of tensors will then multiplied with each other - to produce the result of this operation, hence the name Product.
      -
    -
    -
      + to produce the result of this operation, hence the name Product.
    + + +
    +
    +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        Product

        -
        public Product()
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            Product

            +
            public Product()
          -
    • +
    -
  • -
    -

    Method Details

    -
      -
    • -
      -

      execute

      -
      public Result execute(Function caller, - ExecutionCall<?> call)
      -
      +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          execute

          +
          public Result execute(Function caller,
          +                      ExecutionCall<?> call)
        • -
        • -
          -

          calculate

          -
          public double calculate(double[] inputs, - int j, - int d, - Function[] src)
          -
          Description copied from interface: Operation
          +
        + + + +
          +
        • +

          calculate

          +
          public double calculate(double[] inputs,
          +                        int j,
          +                        int d,
          +                        Function[] src)
          +
          Description copied from interface: Operation
          This method mainly ought to serve as a reference- and fallback- implementation for tensor backends and also as the backend for handling the calculation of scalar inputs passed to a given abstract syntax tree of - Function instances...
          + Function instances...
          ( (almost) every Function instance contains an OperationType reference to which it passes scalar executions... )

          This is also the reason why the last parameter of this method is a list of Function objects : @@ -215,33 +288,95 @@

          calculate

          Depending on this variable and also the nature of the operation, the execution calls to the child nodes of this node change considerably!

          -
          -
          Parameters:
          +
          +
          Parameters:
          inputs - An array of scalar input variables.
          j - The index variable for indexed execution on the input array. (-1 if no indexing should occur)
          d - The index of the variable of which a derivative ought to be calculated.
          src - The child nodes of the Function node to which this very OperationType belongs.
          -
          Returns:
          +
          Returns:
          The result of the calculation.
          -
  • -
  • -
    -

    calculate

    -
    public static double calculate(double[] inputs, - int d, - Function[] src)
    -
    + + + + +
      +
    • +

      calculate

      +
      public static double calculate(double[] inputs,
      +                               int d,
      +                               Function[] src)
      +
    • +
  • - - +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/operations/indexer/Summation.html b/docs/jdocs/neureka/backend/main/operations/indexer/Summation.html index 91853ca1a..c0731b0be 100644 --- a/docs/jdocs/neureka/backend/main/operations/indexer/Summation.html +++ b/docs/jdocs/neureka/backend/main/operations/indexer/Summation.html @@ -1,205 +1,278 @@ - + + - -Summation (neureka 1.0.0 API) - - - - + +Summation (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class Summation

    -
    -
    java.lang.Object -
    neureka.backend.api.template.operations.AbstractOperation -
    neureka.backend.main.operations.indexer.Summation
    -
    +
    neureka.backend.main.operations.indexer
    +

    Class Summation

    -
    -
    +
    + +
    +
      +
    • +
      All Implemented Interfaces:
      -
      Operation
      +
      Operation

      -
      public final class Summation -extends AbstractOperation
      +
      +
      public final class Summation
      +extends AbstractOperation
      This type of operation belongs to the same species as the - Product operation. + Product operation. It executes incoming calls so that the calling function will be executed with all input indices passed to it. The resulting array of tensors will then be summed - to produce the result of this operation, hence the name Summation.
      -
    -
    -
      + to produce the result of this operation, hence the name Summation.
    + + +
    +
    +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        Summation

        -
        public Summation()
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            Summation

            +
            public Summation()
          -
    • +
    -
  • -
    -

    Method Details

    -
      -
    • -
      -

      execute

      -
      public Result execute(Function caller, - ExecutionCall<?> call)
      -
      +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          execute

          +
          public Result execute(Function caller,
          +                      ExecutionCall<?> call)
        • -
        • -
          -

          calculate

          -
          public double calculate(double[] inputs, - int j, - int d, - Function[] src)
          -
          Description copied from interface: Operation
          +
        + + + +
          +
        • +

          calculate

          +
          public double calculate(double[] inputs,
          +                        int j,
          +                        int d,
          +                        Function[] src)
          +
          Description copied from interface: Operation
          This method mainly ought to serve as a reference- and fallback- implementation for tensor backends and also as the backend for handling the calculation of scalar inputs passed to a given abstract syntax tree of - Function instances...
          + Function instances...
          ( (almost) every Function instance contains an OperationType reference to which it passes scalar executions... )

          This is also the reason why the last parameter of this method is a list of Function objects : @@ -215,33 +288,95 @@

          calculate

          Depending on this variable and also the nature of the operation, the execution calls to the child nodes of this node change considerably!

          -
          -
          Parameters:
          +
          +
          Parameters:
          inputs - An array of scalar input variables.
          j - The index variable for indexed execution on the input array. (-1 if no indexing should occur)
          d - The index of the variable of which a derivative ought to be calculated.
          src - The child nodes of the Function node to which this very OperationType belongs.
          -
          Returns:
          +
          Returns:
          The result of the calculation.
          -
  • -
  • -
    -

    calculate

    -
    public static double calculate(double[] inputs, - int d, - Function[] src)
    -
    + + + + +
      +
    • +

      calculate

      +
      public static double calculate(double[] inputs,
      +                               int d,
      +                               Function[] src)
      +
    • +
  • - - +
    +
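The Product and Summation descriptions above boil down to: evaluate the calling function once for every input index, then reduce the results by multiplication or by addition. The scalar sketch below illustrates that reduction; the ToDoubleBiFunction stands in for a neureka Function node, and the whole thing is an illustration, not the library's tensor-level implementation.

    import java.util.function.ToDoubleBiFunction;

    // Scalar illustration of the Product/Summation idea: apply the calling
    // function at each input index, then multiply or sum the results.
    final class IndexerSketch {

        static double product(double[] inputs, ToDoubleBiFunction<double[], Integer> fn) {
            double result = 1.0;
            for (int i = 0; i < inputs.length; i++) result *= fn.applyAsDouble(inputs, i);
            return result;
        }

        static double summation(double[] inputs, ToDoubleBiFunction<double[], Integer> fn) {
            double result = 0.0;
            for (int i = 0; i < inputs.length; i++) result += fn.applyAsDouble(inputs, i);
            return result;
        }

        public static void main(String[] args) {
            double[] in = {1.0, 2.0, 3.0};
            // the "calling function" here is simply I[i] * 2
            ToDoubleBiFunction<double[], Integer> twice = (arr, i) -> 2.0 * arr[i];
            System.out.println(product(in, twice));   // 2 * 4 * 6 = 48.0
            System.out.println(summation(in, twice)); // 2 + 4 + 6 = 12.0
        }
    }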
diff --git a/docs/jdocs/neureka/backend/main/operations/indexer/package-frame.html b/docs/jdocs/neureka/backend/main/operations/indexer/package-frame.html
new file mode 100644
index 000000000..ff76af8d1
New frame page listing the classes of neureka.backend.main.operations.indexer (neureka 1.0.1 API): Product, Summation.
diff --git a/docs/jdocs/neureka/backend/main/operations/indexer/package-summary.html b/docs/jdocs/neureka/backend/main/operations/indexer/package-summary.html
index 37348720c..278b89a5b 100644
Regenerated package summary (title bumped from 1.0.0 to 1.0.1). Package description: everything in this package should be considered library-private, do not depend on classes inside this package; code inside this package or any sub-packages might change frequently. Class summary:
    Product - This type of operation belongs to the same species as the Summation operation.
    Summation - This type of operation belongs to the same species as the Product operation.
diff --git a/docs/jdocs/neureka/backend/main/operations/indexer/package-tree.html b/docs/jdocs/neureka/backend/main/operations/indexer/package-tree.html
index 3c787f2e8..68a31477f 100644
Regenerated class-hierarchy page for the package (same layout changes as the other package-tree pages).
    + + + + diff --git a/docs/jdocs/neureka/backend/main/operations/linear/Convolution.html b/docs/jdocs/neureka/backend/main/operations/linear/Convolution.html index 8de491c32..631c91203 100644 --- a/docs/jdocs/neureka/backend/main/operations/linear/Convolution.html +++ b/docs/jdocs/neureka/backend/main/operations/linear/Convolution.html @@ -1,194 +1,266 @@ - + + - -Convolution (neureka 1.0.0 API) - - - - + +Convolution (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class Convolution

    -
    -
    java.lang.Object -
    neureka.backend.api.template.operations.AbstractOperation -
    neureka.backend.main.operations.linear.Convolution
    +
    neureka.backend.main.operations.linear
    +

    Class Convolution

    -
    -
    -
    +
    + +
    +
    -
    - +
    +
    + - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        Convolution

        -
        public Convolution()
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            Convolution

            +
            public Convolution()
          -
    • +
    -
  • -
    -

    Method Details

    -
      -
    • -
      -

      execute

      -
      public Result execute(Function caller, - ExecutionCall<?> call)
      -
      +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          execute

          +
          public Result execute(Function caller,
          +                      ExecutionCall<?> call)
        • -
        • -
          -

          calculate

          -
          public double calculate(double[] inputs, - int j, - int d, - Function[] src)
          -
          Description copied from interface: Operation
          +
        + + + +
          +
        • +

          calculate

          +
          public double calculate(double[] inputs,
          +                        int j,
          +                        int d,
          +                        Function[] src)
          +
          Description copied from interface: Operation
          This method mainly ought to serve as a reference- and fallback- implementation for tensor backends and also as the backend for handling the calculation of scalar inputs passed to a given abstract syntax tree of - Function instances...
          + Function instances...
          ( (almost) every Function instance contains an OperationType reference to which it passes scalar executions... )

          This is also the reason why the last parameter of this method is a list of Function objects : @@ -204,25 +276,84 @@

          calculate

          Depending on this variable and also the nature of the operation, the execution calls to the child nodes of this node change considerably!

          -
          -
          Parameters:
          +
          +
          Parameters:
          inputs - An array of scalar input variables.
          j - The index variable for indexed execution on the input array. (-1 if no indexing should occur)
          d - The index of the variable of which a derivative ought to be calculated.
          src - The child nodes of the Function node to which this very OperationType belongs.
          -
          Returns:
          +
          Returns:
          The result of the calculation.
          -
  • - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/operations/linear/DotProduct.html b/docs/jdocs/neureka/backend/main/operations/linear/DotProduct.html index c1788ec22..0b3d387fd 100644 --- a/docs/jdocs/neureka/backend/main/operations/linear/DotProduct.html +++ b/docs/jdocs/neureka/backend/main/operations/linear/DotProduct.html @@ -1,183 +1,251 @@ - + + - -DotProduct (neureka 1.0.0 API) - - - - + +DotProduct (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class DotProduct

    -
    -
    java.lang.Object -
    neureka.backend.api.template.operations.AbstractOperation -
    neureka.backend.main.operations.linear.DotProduct
    +
    neureka.backend.main.operations.linear
    +

    Class DotProduct

    -
    -
    -
    +
    + +
    +
    -
    - +
    +
    + - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        DotProduct

        -
        public DotProduct()
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            DotProduct

            +
            public DotProduct()
          -
    • +
    -
  • -
    -

    Method Details

    -
      -
    • -
      -

      calculate

      -
      public double calculate(double[] inputs, - int j, - int d, - Function[] src)
      -
      Description copied from interface: Operation
      +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          calculate

          +
          public double calculate(double[] inputs,
          +                        int j,
          +                        int d,
          +                        Function[] src)
          +
          Description copied from interface: Operation
          This method mainly ought to serve as a reference- and fallback- implementation for tensor backends and also as the backend for handling the calculation of scalar inputs passed to a given abstract syntax tree of - Function instances...
          + Function instances...
          ( (almost) every Function instance contains an OperationType reference to which it passes scalar executions... )

          This is also the reason why the last parameter of this method is a list of Function objects : @@ -193,25 +261,84 @@

          calculate

          Depending on this variable and also the nature of the operation, the execution calls to the child nodes of this node change considerably!

          -
          -
          Parameters:
          +
          +
          Parameters:
          inputs - An array of scalar input variables.
          j - The index variable for indexed execution on the input array. (-1 if no indexing should occur)
          d - The index of the variable of which a derivative ought to be calculated.
          src - The child nodes of the Function node to which this very OperationType belongs.
          -
          Returns:
          +
          Returns:
          The result of the calculation.
          -
    -
  • - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/operations/linear/MatMul.html b/docs/jdocs/neureka/backend/main/operations/linear/MatMul.html index 7aaf9612e..1d77c60d3 100644 --- a/docs/jdocs/neureka/backend/main/operations/linear/MatMul.html +++ b/docs/jdocs/neureka/backend/main/operations/linear/MatMul.html @@ -1,183 +1,266 @@ - + + - -MatMul (neureka 1.0.0 API) - - - - + +MatMul (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class MatMul

    -
    -
    java.lang.Object -
    neureka.backend.api.template.operations.AbstractOperation -
    neureka.backend.main.operations.linear.MatMul
    +
    neureka.backend.main.operations.linear
    +

    Class MatMul

    -
    -
    -
    +
    + +
    +
    -
    - +
    +
    + - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        MatMul

        -
        public MatMul()
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            MatMul

            +
            public MatMul()
          -
    • +
    -
  • -
    -

    Method Details

    -
      -
    • -
      -

      calculate

      -
      public double calculate(double[] inputs, - int j, - int d, - Function[] src)
      -
      Description copied from interface: Operation
      +
        +
      • + + +

        Method Detail

        + + + + + + + +
          +
        • +

          calculate

          +
          public double calculate(double[] inputs,
          +                        int j,
          +                        int d,
          +                        Function[] src)
          +
          Description copied from interface: Operation
          This method mainly ought to serve as a reference- and fallback- implementation for tensor backends and also as the backend for handling the calculation of scalar inputs passed to a given abstract syntax tree of - Function instances...
          + Function instances...
          ( (almost) every Function instance contains an OperationType reference to which it passes scalar executions... )

          This is also the reason why the last parameter of this method is a list of Function objects : @@ -193,25 +276,84 @@

          calculate

          Depending on this variable and also the nature of the operation, the execution calls to the child nodes of this node change considerably!

          -
          -
          Parameters:
          +
          +
          Parameters:
          inputs - An array of scalar input variables.
          j - The index variable for indexed execution on the input array. (-1 if no indexing should occur)
          d - The index of the variable of which a derivative ought to be calculated.
          src - The child nodes of the Function node to which this very OperationType belongs.
          -
          Returns:
          +
          Returns:
          The result of the calculation.
          -
    -
  • - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/operations/linear/XConvLeft.html b/docs/jdocs/neureka/backend/main/operations/linear/XConvLeft.html index 41ad1a0d9..79ad62e8d 100644 --- a/docs/jdocs/neureka/backend/main/operations/linear/XConvLeft.html +++ b/docs/jdocs/neureka/backend/main/operations/linear/XConvLeft.html @@ -1,199 +1,271 @@ - + + - -XConvLeft (neureka 1.0.0 API) - - - - + +XConvLeft (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class XConvLeft

    -
    -
    java.lang.Object -
    neureka.backend.api.template.operations.AbstractOperation -
    neureka.backend.main.operations.linear.XConvLeft
    +
    neureka.backend.main.operations.linear
    +

    Class XConvLeft

    -
    -
    -
    +
    + +
    +
    -
    - +
    +
    + - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        XConvLeft

        -
        public XConvLeft()
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            XConvLeft

            +
            public XConvLeft()
          -
    • +
    -
  • -
    -

    Method Details

    - + + + +
      +
    • +

      calculate

      +
      public double calculate(double[] inputs,
      +                        int j,
      +                        int d,
      +                        Function[] src)
      +
      Description copied from interface: Operation
      This method mainly ought to serve as a reference- and fallback- implementation for tensor backends and also as the backend for handling the calculation of scalar inputs passed to a given abstract syntax tree of - Function instances...
      + Function instances...
      ( (almost) every Function instance contains an OperationType reference to which it passes scalar executions... )

      This is also the reason why the last parameter of this method is a list of Function objects : @@ -209,25 +281,84 @@

      calculate

      Depending on this variable and also the nature of the operation, the execution calls to the child nodes of this node change considerably!

      -
      -
      Parameters:
      +
      +
      Parameters:
      inputs - An array of scalar input variables.
      j - The index variable for indexed execution on the input array. (-1 if no indexing should occur)
      d - The index of the variable of which a derivative ought to be calculated.
      src - The child nodes of the Function node to which this very OperationType belongs.
      -
      Returns:
      +
      Returns:
      The result of the calculation.
      -
  • - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/operations/linear/XConvRight.html b/docs/jdocs/neureka/backend/main/operations/linear/XConvRight.html index 4126d1964..d02b65a5f 100644 --- a/docs/jdocs/neureka/backend/main/operations/linear/XConvRight.html +++ b/docs/jdocs/neureka/backend/main/operations/linear/XConvRight.html @@ -1,199 +1,271 @@ - + + - -XConvRight (neureka 1.0.0 API) - - - - + +XConvRight (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class XConvRight

    -
    -
    java.lang.Object -
    neureka.backend.api.template.operations.AbstractOperation -
    neureka.backend.main.operations.linear.XConvRight
    +
    neureka.backend.main.operations.linear
    +

    Class XConvRight

    -
    -
    -
    +
    + +
    +
    -
    - +
    +
    + - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        XConvRight

        -
        public XConvRight()
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            XConvRight

            +
            public XConvRight()
          -
    • +
    -
  • -
    -

    Method Details

    - + + + +
      +
    • +

      calculate

      +
      public double calculate(double[] inputs,
      +                        int j,
      +                        int d,
      +                        Function[] src)
      +
      Description copied from interface: Operation
      This method mainly ought to serve as a reference- and fallback- implementation for tensor backends and also as the backend for handling the calculation of scalar inputs passed to a given abstract syntax tree of - Function instances...
      + Function instances...
      ( (almost) every Function instance contains an OperationType reference to which it passes scalar executions... )

      This is also the reason why the last parameter of this method is a list of Function objects : @@ -209,25 +281,84 @@

      calculate

      Depending on this variable and also the nature of the operation, the execution calls to the child nodes of this node change considerably!

      -
      -
      Parameters:
      +
      +
      Parameters:
      inputs - An array of scalar input variables.
      j - The index variable for indexed execution on the input array. (-1 if no indexing should occur)
      d - The index of the variable of which a derivative ought to be calculated.
      src - The child nodes of the Function node to which this very OperationType belongs.
      -
      Returns:
      +
      Returns:
      The result of the calculation.
      -
  • - - + + +
    +
diff --git a/docs/jdocs/neureka/backend/main/operations/linear/internal/blas/AXPY.html b/docs/jdocs/neureka/backend/main/operations/linear/internal/blas/AXPY.html
index 0ccc031df..4f73f8897 100644
Regenerated Javadoc page for neureka.backend.main.operations.linear.internal.blas.AXPY (title bumped from 1.0.0 to 1.0.1, same re-rendering as above). public final class AXPY extends java.lang.Object. Class description: the ?axpy routines perform a vector-vector operation defined as y := a*x + y, where a is a scalar and x and y are vectors, each with a number of elements that equals n; in array form, y[] += a * x[]. Documented members:
    public AXPY()
    public static void invoke(double[] y, int yOffset, double multiplier, double[] x, int xOffset, int start, int limit)
    public static void invoke(float[] y, int basey, float a, float[] x, int basex, int first, int limit)
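A minimal plain-Java sketch of the update the AXPY page describes, y[i] += a * x[i] over an index range with separate offsets into y and x, mirroring the shape of the documented invoke signature; it is an illustration only, not neureka's internal kernel.

    // Sketch of the ?axpy update: y[i] += a * x[i] for i in [start, limit).
    final class AxpySketch {

        static void axpy(double[] y, int yOffset, double a, double[] x, int xOffset, int start, int limit) {
            for (int i = start; i < limit; i++) {
                y[yOffset + i] += a * x[xOffset + i];
            }
        }

        public static void main(String[] args) {
            double[] y = {1, 1, 1, 1};
            double[] x = {1, 2, 3, 4};
            axpy(y, 0, 0.5, x, 0, 0, y.length);
            System.out.println(java.util.Arrays.toString(y)); // [1.5, 2.0, 2.5, 3.0]
        }
    }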
diff --git a/docs/jdocs/neureka/backend/main/operations/linear/internal/blas/COPY.html b/docs/jdocs/neureka/backend/main/operations/linear/internal/blas/COPY.html
index 2098211d3..7097f7e73 100644
Regenerated Javadoc page for neureka.backend.main.operations.linear.internal.blas.COPY (same version bump and re-rendering). public final class COPY extends java.lang.Object. Class description: the ?copy routines perform a vector-vector operation defined as y = x, where x and y are vectors. Documented members:
    public COPY()
    public static <T> T[] copyOf(T[] original)
diff --git a/docs/jdocs/neureka/backend/main/operations/linear/internal/blas/DOT.html b/docs/jdocs/neureka/backend/main/operations/linear/internal/blas/DOT.html
index 27cf9d613..9b9828cd4 100644
Regenerated Javadoc page for neureka.backend.main.operations.linear.internal.blas.DOT (same version bump and re-rendering). public final class DOT extends java.lang.Object. Class description: the ?dot routines perform a vector-vector reduction operation, the usual dot product (the sum over i of x_i * y_i), where x_i and y_i are elements of the vectors x and y. Documented members:
    public DOT()
    public static double invoke(double[] array1, int offset1, double[] array2, int offset2, int first, int limit)
    public static float invoke(float[] array1, int offset1, float[] array2, int offset2, int first, int limit)
    public static int invoke(int[] array1, int offset1, int[] array2, int offset2, int first, int limit)
    public static long invoke(long[] array1, int offset1, long[] array2, int offset2, int first, int limit)
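And a matching sketch of the ?dot reduction described above, the sum of array1[i] * array2[i] over an index range with separate offsets, following the shape of the documented invoke overloads; again an illustration, not the library's kernel.

    // Sketch of the ?dot reduction: sum of array1[i] * array2[i] for i in [first, limit).
    final class DotSketch {

        static double dot(double[] array1, int offset1, double[] array2, int offset2, int first, int limit) {
            double sum = 0.0;
            for (int i = first; i < limit; i++) {
                sum += array1[offset1 + i] * array2[offset2 + i];
            }
            return sum;
        }

        public static void main(String[] args) {
            double[] a = {1, 2, 3};
            double[] b = {4, 5, 6};
            System.out.println(dot(a, 0, b, 0, 0, 3)); // 1*4 + 2*5 + 3*6 = 32.0
        }
    }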
diff --git a/docs/jdocs/neureka/backend/main/operations/linear/internal/blas/GEMM.VectorOperationF32.html b/docs/jdocs/neureka/backend/main/operations/linear/internal/blas/GEMM.VectorOperationF32.html
index 1231f5392..ac5adc3da 100644
diff --git a/docs/jdocs/neureka/backend/main/operations/linear/internal/blas/GEMM.VectorOperationF64.html b/docs/jdocs/neureka/backend/main/operations/linear/internal/blas/GEMM.VectorOperationF64.html
index 655c69139..cad96c09b 100644
Regenerated Javadoc pages for the nested interfaces GEMM.VectorOperationF32 and GEMM.VectorOperationF64 (enclosing class GEMM; same version bump and re-rendering). Both are annotated @FunctionalInterface and can therefore be used as the assignment target for a lambda expression or method reference; each declares a single abstract method:
    void invoke(float[] product, float[] left, int complexity, float[] right)     (VectorOperationF32)
    void invoke(double[] product, double[] left, int complexity, double[] right)  (VectorOperationF64)
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/operations/linear/internal/blas/GEMM.html b/docs/jdocs/neureka/backend/main/operations/linear/internal/blas/GEMM.html index c048efd47..25dfb8b8d 100644 --- a/docs/jdocs/neureka/backend/main/operations/linear/internal/blas/GEMM.html +++ b/docs/jdocs/neureka/backend/main/operations/linear/internal/blas/GEMM.html @@ -1,196 +1,315 @@ - + + - -GEMM (neureka 1.0.0 API) - - - - + +GEMM (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    -
    java.lang.Object -
    neureka.backend.main.operations.linear.internal.blas.GEMM
    +
    neureka.backend.main.operations.linear.internal.blas
    +

    Class GEMM

    -
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.backend.main.operations.linear.internal.blas.GEMM
      • +
      +
    • +
    +
    +
      +

    • -
      public class GEMM -extends Object
      +
      +
      public class GEMM
      +extends java.lang.Object
      A collection of primitive sub-routines for matrix multiplication performed on contiguous arrays, designed so that they can be vectorized by the JVM's JIT compiler (AVX instructions).
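      As a hedged usage sketch of the signatures documented below: obtain the float kernel from operationForF32 and invoke it on small row-major buffers. The array names, shapes and the reading of "complexity" as the shared dimension are assumptions, and the package is documented as library-private, so this is purely illustrative.

          // Hypothetical example: C(2x2) = A(2x3) * B(3x2), all row-major float buffers.
          float[] left    = { 1, 2, 3,   4, 5, 6 };          // A
          float[] right   = { 7, 8,   9, 10,   11, 12 };     // B
          float[] product = new float[2 * 2];                // C, written in place
          GEMM.VectorOperationF32 op = GEMM.operationForF32(true, 2, 2);  // rowMajor, rows, columns
          op.invoke(product, left, 3, right);                // 3 = shared dimension (assumed meaning of "complexity")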
      -
    -
    -
    - +
    +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        GEMM

        -
        public GEMM()
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            GEMM

            +
            public GEMM()
          -
    • +
    -
  • -
    -

    Method Details

    -
      -
    • -
      -

      operationForF32

      -
      public static GEMM.VectorOperationF32 operationForF32(boolean rowMajor, - long rows, - long columns)
      -
      +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          operationForF32

          +
          public static GEMM.VectorOperationF32 operationForF32(boolean rowMajor,
          +                                                      long rows,
          +                                                      long columns)
        • -
        • -
          -

          operationForF64

          -
          public static GEMM.VectorOperationF64 operationForF64(boolean rowMajor, - long rows, - long columns)
          -
          +
        + + + +
          +
        • +

          operationForF64

          +
          public static GEMM.VectorOperationF64 operationForF64(boolean rowMajor,
          +                                                      long rows,
          +                                                      long columns)
        -
  • - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/operations/linear/internal/blas/IAXPY.html b/docs/jdocs/neureka/backend/main/operations/linear/internal/blas/IAXPY.html index f9e110de2..9044e56d3 100644 --- a/docs/jdocs/neureka/backend/main/operations/linear/internal/blas/IAXPY.html +++ b/docs/jdocs/neureka/backend/main/operations/linear/internal/blas/IAXPY.html @@ -1,193 +1,307 @@ - + + - -IAXPY (neureka 1.0.0 API) - - - - + +IAXPY (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class IAXPY

    +
    neureka.backend.main.operations.linear.internal.blas
    +

    Class IAXPY

    -
    java.lang.Object -
    neureka.backend.main.operations.linear.internal.blas.IAXPY
    -
    -
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.backend.main.operations.linear.internal.blas.IAXPY
      • +
      +
    • +
    +
    +
      +

    • -
      public final class IAXPY -extends Object
      +
      +
      public final class IAXPY
      +extends java.lang.Object
      The ?axpy routines perform a vector-vector operation defined as y := a*x + y, where a is a scalar and x and y are vectors, each with n elements; in short: y[] += a * x[]
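      That update rule can be read as the following reference loop. This is a sketch of the documented semantics, not the library's implementation; treating first/limit as the logical element range and basey/basex as base offsets is an assumption drawn from the signatures below.

          // Reference sketch: y[i] += a * x[i] for i in [first, limit), honoring the base offsets.
          static void axpyReference(int[] y, int basey, int a, int[] x, int basex, int first, int limit) {
              for (int i = first; i < limit; i++)
                  y[basey + i] += a * x[basex + i];
          }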
      -
    -
    -
      - -
    • -
      -

      Constructor Summary

      -
      Constructors
      -
      -
      Constructor
      -
      Description
      - -
       
      +
    • +
    - +
    +
      +
    • + +
        +
      • + + +

        Constructor Summary

        + + + + + + + + +
        Constructors 
        Constructor and Description
        IAXPY() 
      • +
      -
    • -
      -

      Method Summary

      -
      -
      -
      -
      -
      Modifier and Type
      -
      Method
      -
      Description
      -
      static void
      -
      invoke(int[] y, - int basey, - int a, - int[] x, - int basex, - int first, - int limit)
      -
       
      -
      static void
      -
      invoke(long[] y, - int yOffset, - long multiplier, - long[] x, - int xOffset, - int start, - int limit)
      -
       
      -
      -
      -
      -
      -

      Methods inherited from class java.lang.Object

      -clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
      -
      +
        +
      • + + +

        Method Summary

        + + + + + + + + + + + + + + +
        All Methods Static Methods Concrete Methods 
        Modifier and TypeMethod and Description
        static voidinvoke(int[] y, + int basey, + int a, + int[] x, + int basex, + int first, + int limit) 
        static voidinvoke(long[] y, + int yOffset, + long multiplier, + long[] x, + int xOffset, + int start, + int limit) 
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
        • +
        +
      • +
    - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        IAXPY

        -
        public IAXPY()
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            IAXPY

            +
            public IAXPY()
          -
    • +
    -
  • -
    -

    Method Details

    -
      -
    • -
      -

      invoke

      -
      public static void invoke(long[] y, - int yOffset, - long multiplier, - long[] x, - int xOffset, - int start, - int limit)
      -
      +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          invoke

          +
          public static void invoke(long[] y,
          +                          int yOffset,
          +                          long multiplier,
          +                          long[] x,
          +                          int xOffset,
          +                          int start,
          +                          int limit)
        • -
        • -
          -

          invoke

          -
          public static void invoke(int[] y, - int basey, - int a, - int[] x, - int basex, - int first, - int limit)
          -
          +
        + + + +
          +
        • +

          invoke

          +
          public static void invoke(int[] y,
          +                          int basey,
          +                          int a,
          +                          int[] x,
          +                          int basex,
          +                          int first,
          +                          int limit)
          +
        • +
      -
  • - +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/operations/linear/internal/blas/IDOT.html b/docs/jdocs/neureka/backend/main/operations/linear/internal/blas/IDOT.html index 328393573..09f423542 100644 --- a/docs/jdocs/neureka/backend/main/operations/linear/internal/blas/IDOT.html +++ b/docs/jdocs/neureka/backend/main/operations/linear/internal/blas/IDOT.html @@ -1,189 +1,303 @@ - + + - -IDOT (neureka 1.0.0 API) - - - - + +IDOT (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class IDOT

    +
    neureka.backend.main.operations.linear.internal.blas
    +

    Class IDOT

    -
    java.lang.Object -
    neureka.backend.main.operations.linear.internal.blas.IDOT
    -
    -
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.backend.main.operations.linear.internal.blas.IDOT
      • +
      +
    • +
    +
    +
      +

    • -
      public final class IDOT -extends Object
      +
      +
      public final class IDOT
      +extends java.lang.Object
      The ?dot routines perform a vector-vector reduction operation defined as result = Σi xi·yi, where xi and yi are elements of vectors x and y.
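      As a sketch of that reduction (not the library source; the role of first/limit as the logical index range and of the two offsets as base offsets is an assumption drawn from the signatures below):

          // Reference sketch: sum of array1[i] * array2[i] for i in [first, limit), honoring both offsets.
          static long dotReference(long[] array1, int offset1, long[] array2, int offset2, int first, int limit) {
              long sum = 0L;
              for (int i = first; i < limit; i++)
                  sum += array1[offset1 + i] * array2[offset2 + i];
              return sum;
          }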
      -
    -
    -
      - -
    • -
      -

      Constructor Summary

      -
      Constructors
      -
      -
      Constructor
      -
      Description
      - -
       
      +
    • +
    - +
    +
      +
    • + +
        +
      • + + +

        Constructor Summary

        + + + + + + + + +
        Constructors 
        Constructor and Description
        IDOT() 
      • +
      -
    • -
      -

      Method Summary

      -
      -
      -
      -
      -
      Modifier and Type
      -
      Method
      -
      Description
      -
      static int
      -
      invoke(int[] array1, - int offset1, - int[] array2, - int offset2, - int first, - int limit)
      -
       
      -
      static long
      -
      invoke(long[] array1, - int offset1, - long[] array2, - int offset2, - int first, - int limit)
      -
       
      -
      -
      -
      -
      -

      Methods inherited from class java.lang.Object

      -clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
      -
      +
        +
      • + + +

        Method Summary

        + + + + + + + + + + + + + + +
        All Methods Static Methods Concrete Methods 
        Modifier and TypeMethod and Description
        static intinvoke(int[] array1, + int offset1, + int[] array2, + int offset2, + int first, + int limit) 
        static longinvoke(long[] array1, + int offset1, + long[] array2, + int offset2, + int first, + int limit) 
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
        • +
        +
      • +
    - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        IDOT

        -
        public IDOT()
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            IDOT

            +
            public IDOT()
          -
    • +
    -
  • -
    -

    Method Details

    -
      -
    • -
      -

      invoke

      -
      public static long invoke(long[] array1, - int offset1, - long[] array2, - int offset2, - int first, - int limit)
      -
      +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          invoke

          +
          public static long invoke(long[] array1,
          +                          int offset1,
          +                          long[] array2,
          +                          int offset2,
          +                          int first,
          +                          int limit)
        • -
        • -
          -

          invoke

          -
          public static int invoke(int[] array1, - int offset1, - int[] array2, - int offset2, - int first, - int limit)
          -
          +
        + + + +
          +
        • +

          invoke

          +
          public static int invoke(int[] array1,
          +                         int offset1,
          +                         int[] array2,
          +                         int offset2,
          +                         int first,
          +                         int limit)
          +
        • +
      -
  • - +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/operations/linear/internal/blas/IGEMM.VectorOperationI32.html b/docs/jdocs/neureka/backend/main/operations/linear/internal/blas/IGEMM.VectorOperationI32.html index 7e43056fb..6a33c4e07 100644 --- a/docs/jdocs/neureka/backend/main/operations/linear/internal/blas/IGEMM.VectorOperationI32.html +++ b/docs/jdocs/neureka/backend/main/operations/linear/internal/blas/IGEMM.VectorOperationI32.html @@ -1,139 +1,233 @@ - + + - -IGEMM.VectorOperationI32 (neureka 1.0.0 API) - - - - + +IGEMM.VectorOperationI32 (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Interface IGEMM.VectorOperationI32

    +
    neureka.backend.main.operations.linear.internal.blas
    +

    Interface IGEMM.VectorOperationI32

    -
    -
    +
    +
    +
      +
    • +
      Enclosing class:
      -
      IGEMM
      +
      IGEMM
      -
      +
      Functional Interface:
      This is a functional interface and can therefore be used as the assignment target for a lambda expression or method reference.

      -
      @FunctionalInterface -public static interface IGEMM.VectorOperationI32
      -
    -
    -
      - -
    • -
      -

      Method Summary

      -
      -
      -
      -
      -
      Modifier and Type
      -
      Method
      -
      Description
      -
      void
      -
      invoke(int[] product, - int[] left, - int complexity, - int[] right)
      -
       
      -
      -
      +
      +
      @FunctionalInterface
      +public static interface IGEMM.VectorOperationI32
      +
    • +
    - +
    +
      +
    • + +
        +
      • + + +

        Method Summary

        + + + + + + + + + + +
        All Methods Instance Methods Abstract Methods 
        Modifier and TypeMethod and Description
        voidinvoke(int[] product, + int[] left, + int complexity, + int[] right) 
        +
      • +
    - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Method Details

      -
        -
      • -
        -

        invoke

        -
        void invoke(int[] product, - int[] left, - int complexity, - int[] right)
        -
        +
          +
        • + + +

          Method Detail

          + + + +
            +
          • +

            invoke

            +
            void invoke(int[] product,
            +            int[] left,
            +            int complexity,
            +            int[] right)
            +
          • +
        -
    - +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/operations/linear/internal/blas/IGEMM.VectorOperationI64.html b/docs/jdocs/neureka/backend/main/operations/linear/internal/blas/IGEMM.VectorOperationI64.html index 3a8cb7257..e7657adc4 100644 --- a/docs/jdocs/neureka/backend/main/operations/linear/internal/blas/IGEMM.VectorOperationI64.html +++ b/docs/jdocs/neureka/backend/main/operations/linear/internal/blas/IGEMM.VectorOperationI64.html @@ -1,139 +1,233 @@ - + + - -IGEMM.VectorOperationI64 (neureka 1.0.0 API) - - - - + +IGEMM.VectorOperationI64 (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Interface IGEMM.VectorOperationI64

    +
    neureka.backend.main.operations.linear.internal.blas
    +

    Interface IGEMM.VectorOperationI64

    -
    -
    +
    +
    +
      +
    • +
      Enclosing class:
      -
      IGEMM
      +
      IGEMM
      -
      +
      Functional Interface:
      This is a functional interface and can therefore be used as the assignment target for a lambda expression or method reference.

      -
      @FunctionalInterface -public static interface IGEMM.VectorOperationI64
      -
    -
    -
      - -
    • -
      -

      Method Summary

      -
      -
      -
      -
      -
      Modifier and Type
      -
      Method
      -
      Description
      -
      void
      -
      invoke(long[] product, - long[] left, - int complexity, - long[] right)
      -
       
      -
      -
      +
      +
      @FunctionalInterface
      +public static interface IGEMM.VectorOperationI64
      +
    • +
    - +
    +
      +
    • + +
        +
      • + + +

        Method Summary

        + + + + + + + + + + +
        All Methods Instance Methods Abstract Methods 
        Modifier and TypeMethod and Description
        voidinvoke(long[] product, + long[] left, + int complexity, + long[] right) 
        +
      • +
    - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Method Details

      -
        -
      • -
        -

        invoke

        -
        void invoke(long[] product, - long[] left, - int complexity, - long[] right)
        -
        +
          +
        • + + +

          Method Detail

          + + + +
            +
          • +

            invoke

            +
            void invoke(long[] product,
            +            long[] left,
            +            int complexity,
            +            long[] right)
            +
          • +
        -
    - +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/operations/linear/internal/blas/IGEMM.html b/docs/jdocs/neureka/backend/main/operations/linear/internal/blas/IGEMM.html index 00976f41b..631e1ccaa 100644 --- a/docs/jdocs/neureka/backend/main/operations/linear/internal/blas/IGEMM.html +++ b/docs/jdocs/neureka/backend/main/operations/linear/internal/blas/IGEMM.html @@ -1,196 +1,315 @@ - + + - -IGEMM (neureka 1.0.0 API) - - - - + +IGEMM (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    -
    java.lang.Object -
    neureka.backend.main.operations.linear.internal.blas.IGEMM
    +
    neureka.backend.main.operations.linear.internal.blas
    +

    Class IGEMM

    -
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.backend.main.operations.linear.internal.blas.IGEMM
      • +
      +
    • +
    +
    +
      +

    • -
      public class IGEMM -extends Object
      +
      +
      public class IGEMM
      +extends java.lang.Object
      A collection of primitive sub-routines for matrix multiplication performed on contiguous arrays, designed so that they can be vectorized by the JVM's JIT compiler (AVX instructions).
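      Usage mirrors the GEMM sketch earlier, only with int buffers; the shapes and the reading of "complexity" as the shared dimension are again assumptions, and the package remains library-private.

          // Hypothetical example: C(2x2) = A(2x3) * B(3x2) on row-major int buffers.
          int[] left    = { 1, 2, 3,   4, 5, 6 };
          int[] right   = { 7, 8,   9, 10,   11, 12 };
          int[] product = new int[2 * 2];
          IGEMM.operationForI32(true, 2, 2).invoke(product, left, 3, right);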
      -
    -
    -
    - +
    +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        IGEMM

        -
        public IGEMM()
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            IGEMM

            +
            public IGEMM()
          -
    • +
    -
  • -
    -

    Method Details

    -
      -
    • -
      -

      operationForI32

      -
      public static IGEMM.VectorOperationI32 operationForI32(boolean rowMajor, - long rows, - long columns)
      -
      +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          operationForI32

          +
          public static IGEMM.VectorOperationI32 operationForI32(boolean rowMajor,
          +                                                       long rows,
          +                                                       long columns)
        • -
        • -
          -

          operationForI64

          -
          public static IGEMM.VectorOperationI64 operationForI64(boolean rowMajor, - long rows, - long columns)
          -
          +
        + + + +
          +
        • +

          operationForI64

          +
          public static IGEMM.VectorOperationI64 operationForI64(boolean rowMajor,
          +                                                       long rows,
          +                                                       long columns)
        -
  • - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/operations/linear/internal/blas/package-frame.html b/docs/jdocs/neureka/backend/main/operations/linear/internal/blas/package-frame.html new file mode 100644 index 000000000..920a67c5b --- /dev/null +++ b/docs/jdocs/neureka/backend/main/operations/linear/internal/blas/package-frame.html @@ -0,0 +1,32 @@ + + + + + +neureka.backend.main.operations.linear.internal.blas (neureka 1.0.1 API) + + + + +

    neureka.backend.main.operations.linear.internal.blas

    + + + diff --git a/docs/jdocs/neureka/backend/main/operations/linear/internal/blas/package-summary.html b/docs/jdocs/neureka/backend/main/operations/linear/internal/blas/package-summary.html index 5f563bacd..9d1cc39ec 100644 --- a/docs/jdocs/neureka/backend/main/operations/linear/internal/blas/package-summary.html +++ b/docs/jdocs/neureka/backend/main/operations/linear/internal/blas/package-summary.html @@ -1,134 +1,223 @@ - + + - -neureka.backend.main.operations.linear.internal.blas (neureka 1.0.0 API) - - - - + +neureka.backend.main.operations.linear.internal.blas (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    -

    Package neureka.backend.main.operations.linear.internal.blas

    -
    -
    -
    package neureka.backend.main.operations.linear.internal.blas
    -
    +

    Package neureka.backend.main.operations.linear.internal.blas

    +
    Everything in this package should be considered library-private! DO NOT USE CLASSES INSIDE THIS PACKAGE!
    -
    -
    -
      -
    • -
      -
      -
      -
      -
      Class
      -
      Description
      - -
      +
      +

      See: Description

      +
      +
      +
        +
      • + + + + + + + + + + + + + + + + + + + + + + + + +
        Interface Summary 
        InterfaceDescription
        GEMM.VectorOperationF32 
        GEMM.VectorOperationF64 
        IGEMM.VectorOperationI32 
        IGEMM.VectorOperationI64 
        +
      • +
      • + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        Class Summary 
        ClassDescription
        AXPY
        The ?axpy routines perform a vector-vector operation defined as y := a*x + y, where a is a scalar and x and y are vectors, each with n elements.
        - - -
        +
        COPY
        The ?copy routines perform a vector-vector operation defined as y = x, where x and y are vectors.
        - - -
        +
        DOT
        The ?dot routines perform a vector-vector reduction operation defined as result = Σi xi·yi, where xi and yi are elements of vectors x and y.
        - - -
        +
        GEMM
        A collection of primitive sub-routines for matrix multiplication performed on contiguous arrays, designed so that they can be vectorized by the JVM's JIT compiler (AVX instructions).
        - - -
         
        - -
         
        - -
        +
        IAXPY
        The ?axpy routines perform a vector-vector operation defined as y := a*x + y, where a is a scalar and x and y are vectors, each with n elements.
        - - -
        +
        IDOT
        The ?dot routines perform a vector-vector reduction operation defined as result = Σi xi·yi, where xi and yi are elements of vectors x and y.
        - - -
        +
        IGEMM
        A collection of primitive sub-routines for matrix multiplication performed on contiguous arrays, designed so that they can be vectorized by the JVM's JIT compiler (AVX instructions).
        - - -
         
        - -
         
        - - - +
      -
    -
    + + + +

    Package neureka.backend.main.operations.linear.internal.blas Description

    +
    Everything in this package should be considered library-private! + DO NOT USE CLASSES INSIDE THIS PACKAGE!
    + + + + diff --git a/docs/jdocs/neureka/backend/main/operations/linear/internal/blas/package-tree.html b/docs/jdocs/neureka/backend/main/operations/linear/internal/blas/package-tree.html index 9f6c1e231..d813dbce9 100644 --- a/docs/jdocs/neureka/backend/main/operations/linear/internal/blas/package-tree.html +++ b/docs/jdocs/neureka/backend/main/operations/linear/internal/blas/package-tree.html @@ -1,86 +1,147 @@ - + + - -neureka.backend.main.operations.linear.internal.blas Class Hierarchy (neureka 1.0.0 API) - - - - + +neureka.backend.main.operations.linear.internal.blas Class Hierarchy (neureka 1.0.1 API) - - - - - - + + -
    - -
    -

    Hierarchy For Package neureka.backend.main.operations.linear.internal.blas

    -Package Hierarchies: +Package Hierarchies:
    -
    +

    Class Hierarchy

      -
    • java.lang.Object +
    • java.lang.Object
        -
      • neureka.backend.main.operations.linear.internal.blas.AXPY
      • -
      • neureka.backend.main.operations.linear.internal.blas.COPY
      • -
      • neureka.backend.main.operations.linear.internal.blas.DOT
      • -
      • neureka.backend.main.operations.linear.internal.blas.GEMM
      • -
      • neureka.backend.main.operations.linear.internal.blas.IAXPY
      • -
      • neureka.backend.main.operations.linear.internal.blas.IDOT
      • -
      • neureka.backend.main.operations.linear.internal.blas.IGEMM
      • +
      • neureka.backend.main.operations.linear.internal.blas.AXPY
      • +
      • neureka.backend.main.operations.linear.internal.blas.COPY
      • +
      • neureka.backend.main.operations.linear.internal.blas.DOT
      • +
      • neureka.backend.main.operations.linear.internal.blas.GEMM
      • +
      • neureka.backend.main.operations.linear.internal.blas.IAXPY
      • +
      • neureka.backend.main.operations.linear.internal.blas.IDOT
      • +
      • neureka.backend.main.operations.linear.internal.blas.IGEMM
    -
    -

    Interface Hierarchy

    -
    -
    + + + + diff --git a/docs/jdocs/neureka/backend/main/operations/linear/internal/opencl/CLGEMM.html b/docs/jdocs/neureka/backend/main/operations/linear/internal/opencl/CLGEMM.html index b04d5895c..5adc93c3b 100644 --- a/docs/jdocs/neureka/backend/main/operations/linear/internal/opencl/CLGEMM.html +++ b/docs/jdocs/neureka/backend/main/operations/linear/internal/opencl/CLGEMM.html @@ -1,180 +1,290 @@ - + + - -CLGEMM (neureka 1.0.0 API) - - - - + +CLGEMM (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class CLGEMM

    +
    neureka.backend.main.operations.linear.internal.opencl
    +

    Class CLGEMM

    -
    java.lang.Object -
    neureka.backend.main.operations.linear.internal.opencl.CLGEMM
    -
    -
    -
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.backend.main.operations.linear.internal.opencl.CLGEMM
      • +
      +
    • +
    +
    +
    -
    -
      - -
    • -
      -

      Constructor Summary

      -
      Constructors
      -
      -
      Constructor
      -
      Description
      - -
       
      +
      +
      public class CLGEMM
      +extends java.lang.Object
      +implements ImplementationFor<OpenCLDevice>
      +
    • +
    - +
    +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        CLGEMM

        -
        public CLGEMM()
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            CLGEMM

            +
            public CLGEMM()
          -
    • +
    -
  • -
    -

    Method Details

    -
      -
    • -
      -

      run

      -
      public Tensor<?> run(ExecutionCall<OpenCLDevice> call)
      -
      Description copied from interface: ImplementationFor
      +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          run

          +
          public Tensor<?> run(ExecutionCall<OpenCLDevice> call)
          +
          Description copied from interface: ImplementationFor
          This method is the entrypoint for a concrete implementation of the algorithm to which instances of this interface belong and the device on which this is implemented. One has to keep in mind that the implementation details with respect to the target device are specific to the methods of the "TargetDevice" type on which this call should run...
          -
          -
          Specified by:
          -
          run in interface ImplementationFor<OpenCLDevice>
          -
          Parameters:
          +
          +
          Specified by:
          +
          run in interface ImplementationFor<OpenCLDevice>
          +
          Parameters:
          call - The call which ought to be executed on this implementation.
          -
    -
  • - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/operations/linear/internal/opencl/CLReduce.Type.html b/docs/jdocs/neureka/backend/main/operations/linear/internal/opencl/CLReduce.Type.html index 897da5586..8300335b4 100644 --- a/docs/jdocs/neureka/backend/main/operations/linear/internal/opencl/CLReduce.Type.html +++ b/docs/jdocs/neureka/backend/main/operations/linear/internal/opencl/CLReduce.Type.html @@ -1,221 +1,342 @@ - + + - -CLReduce.Type (neureka 1.0.0 API) - - - - + +CLReduce.Type (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Enum Class CLReduce.Type

    -
    -
    java.lang.Object -
    java.lang.Enum<CLReduce.Type> -
    neureka.backend.main.operations.linear.internal.opencl.CLReduce.Type
    +
    neureka.backend.main.operations.linear.internal.opencl
    +

    Enum CLReduce.Type

    -
    -
    -
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • java.lang.Enum<CLReduce.Type>
      • +
      • +
          +
        • neureka.backend.main.operations.linear.internal.opencl.CLReduce.Type
        • +
        +
      • +
      +
    • +
    +
    +
    -
    -
      - -
    • -
      -

      Nested Class Summary

      -
      -

      Nested classes/interfaces inherited from class java.lang.Enum

      -Enum.EnumDesc<E extends Enum<E>>
      -
      +
      +
      public static enum CLReduce.Type
      +extends java.lang.Enum<CLReduce.Type>
    • - -
    • -
      -

      Enum Constant Summary

      -
      Enum Constants
      -
      -
      Enum Constant
      -
      Description
      - -
       
      - -
       
      +
    - +
    + - -
    -
      +
    +
    + -
  • -
    -

    Method Details

    -
      -
    • -
      -

      values

      -
      public static CLReduce.Type[] values()
      -
      Returns an array containing the constants of this enum class, in -the order they are declared.
      -
      -
      Returns:
      -
      an array containing the constants of this enum class, in the order they are declared
      +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          values

          +
          public static CLReduce.Type[] values()
          +
          Returns an array containing the constants of this enum type, in +the order they are declared. This method may be used to iterate +over the constants as follows: +
          +for (CLReduce.Type c : CLReduce.Type.values())
          +    System.out.println(c);
          +
          +
          +
          Returns:
          +
          an array containing the constants of this enum type, in the order they are declared
          -
    • -
    • -
      -

      valueOf

      -
      public static CLReduce.Type valueOf(String name)
      -
      Returns the enum constant of this class with the specified name. +
    + + + +
      +
    • +

      valueOf

      +
      public static CLReduce.Type valueOf(java.lang.String name)
      +
      Returns the enum constant of this type with the specified name. The string must match exactly an identifier used to declare an -enum constant in this class. (Extraneous whitespace characters are +enum constant in this type. (Extraneous whitespace characters are not permitted.)
      -
      -
      Parameters:
      +
      +
      Parameters:
      name - the name of the enum constant to be returned.
      -
      Returns:
      +
      Returns:
      the enum constant with the specified name
      -
      Throws:
      -
      IllegalArgumentException - if this enum class has no constant with the specified name
      -
      NullPointerException - if the argument is null
      +
      Throws:
      +
      java.lang.IllegalArgumentException - if this enum type has no constant with the specified name
      +
      java.lang.NullPointerException - if the argument is null
      -
  • - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/operations/linear/internal/opencl/CLReduce.html b/docs/jdocs/neureka/backend/main/operations/linear/internal/opencl/CLReduce.html index 18b7469ad..9f1152a30 100644 --- a/docs/jdocs/neureka/backend/main/operations/linear/internal/opencl/CLReduce.html +++ b/docs/jdocs/neureka/backend/main/operations/linear/internal/opencl/CLReduce.html @@ -1,224 +1,345 @@ - + + - -CLReduce (neureka 1.0.0 API) - - - - + +CLReduce (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    -
    java.lang.Object -
    neureka.backend.main.operations.linear.internal.opencl.CLReduce
    +
    neureka.backend.main.operations.linear.internal.opencl
    +

    Class CLReduce

    -
    -
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.backend.main.operations.linear.internal.opencl.CLReduce
      • +
      +
    • +
    +
    +
    -
    -
      - -
    • -
      -

      Nested Class Summary

      -
      Nested Classes
      -
      -
      Modifier and Type
      -
      Class
      -
      Description
      -
      static enum 
      - -
       
      +
      +
      public class CLReduce
      +extends java.lang.Object
      +implements ImplementationFor<OpenCLDevice>
      +
    • +
    - +
    +
    +
    +
      +
    • -
    • -
      -

      Field Details

      -
        -
      • -
        -

        INDICES_MAPPER_ID

        -
        public static String INDICES_MAPPER_ID
        -
        +
          +
        • + + +

          Field Detail

          + + + +
            +
          • +

            INDICES_MAPPER_ID

            +
            public static java.lang.String INDICES_MAPPER_ID
          -
    • +
    -
  • -
    -

    Constructor Details

    -
  • + -
  • -
    -

    Method Details

    -
      -
    • -
      -

      run

      - -
      Description copied from interface: ImplementationFor
      +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          run

          +
          public Tensor<java.lang.Integer> run(ExecutionCall<OpenCLDevice> call)
          +
          Description copied from interface: ImplementationFor
          This method is the entrypoint for a concrete implementation of the algorithm to which instances of this interface belong and the device on which this is implemented. One has to keep in mind that the implementation details with respect to the target device are specific to the methods of the "TargetDevice" type on which this call should run...
          -
          -
          Specified by:
          -
          run in interface ImplementationFor<OpenCLDevice>
          -
          Parameters:
          +
          +
          Specified by:
          +
          run in interface ImplementationFor<OpenCLDevice>
          +
          Parameters:
          call - The call which ought to be executed on this implementation.
          -
    -
  • - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/operations/linear/internal/opencl/CLSum.html b/docs/jdocs/neureka/backend/main/operations/linear/internal/opencl/CLSum.html index 98593314e..6a3f90e2f 100644 --- a/docs/jdocs/neureka/backend/main/operations/linear/internal/opencl/CLSum.html +++ b/docs/jdocs/neureka/backend/main/operations/linear/internal/opencl/CLSum.html @@ -1,196 +1,310 @@ - + + - -CLSum (neureka 1.0.0 API) - - - - + +CLSum (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    -
    java.lang.Object -
    neureka.backend.main.operations.linear.internal.opencl.CLSum
    +
    neureka.backend.main.operations.linear.internal.opencl
    +

    Class CLSum

    -
    -
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.backend.main.operations.linear.internal.opencl.CLSum
      • +
      +
    • +
    +
    +
    -
    -
      - -
    • -
      -

      Constructor Summary

      -
      Constructors
      -
      -
      Constructor
      -
      Description
      - -
       
      +
      +
      public class CLSum
      +extends java.lang.Object
      +implements ImplementationFor<OpenCLDevice>
      +
    • +
    - +
    +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        CLSum

        -
        public CLSum()
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            CLSum

            +
            public CLSum()
          -
    • +
    -
  • -
    -

    Method Details

    - + + + +
      +
    • +

      run

      +
      public static Tensor<java.lang.Float> run(Tensor<java.lang.Float> in,
      +                                          OpenCLDevice device)
      This method compiles and executes the kernel that will return the sum of the elements in the in tensor.
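      A hedged call-site sketch of the static signature above; how the Float tensor and the OpenCLDevice are obtained is outside this diff, so both `in` and `device` are assumed to exist already, with `in` stored on `device`.

          // Hypothetical call site (assumes `in` is a Tensor<Float> already stored on `device`):
          Tensor<Float> total = CLSum.run(in, device);   // compiles and runs the reduction kernel, returns the sum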
      -
  • - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/operations/linear/internal/opencl/package-frame.html b/docs/jdocs/neureka/backend/main/operations/linear/internal/opencl/package-frame.html new file mode 100644 index 000000000..7e73c2fde --- /dev/null +++ b/docs/jdocs/neureka/backend/main/operations/linear/internal/opencl/package-frame.html @@ -0,0 +1,25 @@ + + + + + +neureka.backend.main.operations.linear.internal.opencl (neureka 1.0.1 API) + + + + +

    neureka.backend.main.operations.linear.internal.opencl

    +
    +

    Classes

    + +

    Enums

    + +
    + + diff --git a/docs/jdocs/neureka/backend/main/operations/linear/internal/opencl/package-summary.html b/docs/jdocs/neureka/backend/main/operations/linear/internal/opencl/package-summary.html index 4153479e3..6e7042196 100644 --- a/docs/jdocs/neureka/backend/main/operations/linear/internal/opencl/package-summary.html +++ b/docs/jdocs/neureka/backend/main/operations/linear/internal/opencl/package-summary.html @@ -1,94 +1,162 @@ - + + - -neureka.backend.main.operations.linear.internal.opencl (neureka 1.0.0 API) - - - - + +neureka.backend.main.operations.linear.internal.opencl (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    -

    Package neureka.backend.main.operations.linear.internal.opencl

    -
    -
    -
    package neureka.backend.main.operations.linear.internal.opencl
    -
    -
      -
    • -
      -
      -
      -
      -
      Class
      -
      Description
      - -
       
      - -
       
      - -
       
      - -
       
      -
      -
      +

      Package neureka.backend.main.operations.linear.internal.opencl

      +
      +
        +
      • + + + + + + + + + + + + + + + + + + + + +
        Class Summary 
        ClassDescription
        CLGEMM 
        CLReduce 
        CLSum 
        +
      • +
      • + + + + + + + + + + + + +
        Enum Summary 
        EnumDescription
        CLReduce.Type 
      -
    -
    + + + + diff --git a/docs/jdocs/neureka/backend/main/operations/linear/internal/opencl/package-tree.html b/docs/jdocs/neureka/backend/main/operations/linear/internal/opencl/package-tree.html index 387ea471d..4c1c633ca 100644 --- a/docs/jdocs/neureka/backend/main/operations/linear/internal/opencl/package-tree.html +++ b/docs/jdocs/neureka/backend/main/operations/linear/internal/opencl/package-tree.html @@ -1,87 +1,148 @@ - + + - -neureka.backend.main.operations.linear.internal.opencl Class Hierarchy (neureka 1.0.0 API) - - - - + +neureka.backend.main.operations.linear.internal.opencl Class Hierarchy (neureka 1.0.1 API) - - - - - - + + -
    - -
    -

    Hierarchy For Package neureka.backend.main.operations.linear.internal.opencl

    -Package Hierarchies: +Package Hierarchies:
    -
    +

    Class Hierarchy

      -
    • java.lang.Object +
    • java.lang.Object
        -
      • neureka.backend.main.operations.linear.internal.opencl.CLGEMM (implements neureka.backend.api.ImplementationFor<D>)
      • -
      • neureka.backend.main.operations.linear.internal.opencl.CLReduce (implements neureka.backend.api.ImplementationFor<D>)
      • -
      • neureka.backend.main.operations.linear.internal.opencl.CLSum (implements neureka.backend.api.ImplementationFor<D>)
      • +
      • neureka.backend.main.operations.linear.internal.opencl.CLGEMM (implements neureka.backend.api.ImplementationFor<D>)
      • +
      • neureka.backend.main.operations.linear.internal.opencl.CLReduce (implements neureka.backend.api.ImplementationFor<D>)
      • +
      • neureka.backend.main.operations.linear.internal.opencl.CLSum (implements neureka.backend.api.ImplementationFor<D>)
    -
    -
    -

    Enum Class Hierarchy

    +

    Enum Hierarchy

      -
    • java.lang.Object +
    • java.lang.Object
        -
      • java.lang.Enum<E> (implements java.lang.Comparable<T>, java.lang.constant.Constable, java.io.Serializable) +
      • java.lang.Enum<E> (implements java.lang.Comparable<T>, java.io.Serializable)
          -
        • neureka.backend.main.operations.linear.internal.opencl.CLReduce.Type
        • +
        • neureka.backend.main.operations.linear.internal.opencl.CLReduce.Type
    -
    -
    + + + + diff --git a/docs/jdocs/neureka/backend/main/operations/linear/package-frame.html b/docs/jdocs/neureka/backend/main/operations/linear/package-frame.html new file mode 100644 index 000000000..1cc319403 --- /dev/null +++ b/docs/jdocs/neureka/backend/main/operations/linear/package-frame.html @@ -0,0 +1,23 @@ + + + + + +neureka.backend.main.operations.linear (neureka 1.0.1 API) + + + + +

    neureka.backend.main.operations.linear

    + + + diff --git a/docs/jdocs/neureka/backend/main/operations/linear/package-summary.html b/docs/jdocs/neureka/backend/main/operations/linear/package-summary.html index e9217220f..1ea1d4d98 100644 --- a/docs/jdocs/neureka/backend/main/operations/linear/package-summary.html +++ b/docs/jdocs/neureka/backend/main/operations/linear/package-summary.html @@ -1,134 +1,168 @@ - + + - -neureka.backend.main.operations.linear (neureka 1.0.0 API) - - - - + +neureka.backend.main.operations.linear (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    -

    Package neureka.backend.main.operations.linear

    -
    -
    -
    package neureka.backend.main.operations.linear
    -
    -
    Everything in this package should be considered library-private! - DO NOT DEPEND ON CLASSES INSIDE THIS PACKAGE! - Code inside this package or any sub-packages might change frequently...
    -
    -
    -
    -
    -
    + + diff --git a/docs/jdocs/neureka/backend/main/operations/linear/package-tree.html b/docs/jdocs/neureka/backend/main/operations/linear/package-tree.html index eae54d362..9bc763ad2 100644 --- a/docs/jdocs/neureka/backend/main/operations/linear/package-tree.html +++ b/docs/jdocs/neureka/backend/main/operations/linear/package-tree.html @@ -1,79 +1,142 @@ - + + - -neureka.backend.main.operations.linear Class Hierarchy (neureka 1.0.0 API) - - - - + +neureka.backend.main.operations.linear Class Hierarchy (neureka 1.0.1 API) - - - - - - + + -
    - -
    -

    Hierarchy For Package neureka.backend.main.operations.linear

    -Package Hierarchies: +Package Hierarchies:
    -
    +

    Class Hierarchy

    -
    -
    + + + + diff --git a/docs/jdocs/neureka/backend/main/operations/operator/Addition.html b/docs/jdocs/neureka/backend/main/operations/operator/Addition.html index ffe28c85a..9cd7abd41 100644 --- a/docs/jdocs/neureka/backend/main/operations/operator/Addition.html +++ b/docs/jdocs/neureka/backend/main/operations/operator/Addition.html @@ -1,233 +1,310 @@ - + + - -Addition (neureka 1.0.0 API) - - - - + +Addition (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class Addition

    -
    -
    java.lang.Object -
    neureka.backend.api.template.operations.AbstractOperation -
    neureka.backend.main.operations.operator.Addition
    -
    +
    neureka.backend.main.operations.operator
    +

    Class Addition

    -
    -
    +
    + +
    +
    -
    - +
    +
    +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        Addition

        -
        public Addition()
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            Addition

            +
            public Addition()
          -
    • +
    -
  • -
    -

    Method Details

    -
  • -
  • -
    -

    calculate

    -
    public double calculate(double[] inputs, - int j, - int d, - Function[] src)
    -
    Description copied from interface: Operation
    + + + + +
      +
    • +

      calculate

      +
      public double calculate(double[] inputs,
      +                        int j,
      +                        int d,
      +                        Function[] src)
      +
      Description copied from interface: Operation
      This method mainly ought to serve as a reference- and fallback- implementation for tensor backends and also as the backend for handling the calculation of scalar inputs passed to a given abstract syntax tree of - Function instances...
      + Function instances...
      ( (almost) every Function instance contains an OperationType reference to which it passes scalar executions... )

      This is also the reason why the last parameter of this method is a list of Function objects : @@ -243,33 +320,95 @@

      calculate

      Depending on this variable and also the nature of the operation, the execution calls to the child nodes of this node change considerably!

      -
      -
      Parameters:
      +
      +
      Parameters:
      inputs - An array of scalar input variables.
      j - The index variable for indexed execution on the input array. (-1 if no indexing should occur)
      d - The index of the variable of which a derivative ought to be calculated.
      src - The child nodes of the Function node to which this very OperationType belongs.
      -
      Returns:
      +
      Returns:
      The result of the calculation.
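      For Addition specifically, the scalar fallback described above boils down to summing the child results, and since the derivative of a sum is the sum of the derivatives, one loop covers both cases. The sketch below is not the library's code; ScalarFn is a stand-in for neureka's Function AST nodes.

          // Illustrative sketch only; d < 0 means "no derivative requested".
          interface ScalarFn { double call(double[] inputs, int d); }
          static double additionFallback(double[] inputs, int d, ScalarFn[] src) {
              double result = 0;
              for (ScalarFn child : src)
                  result += child.call(inputs, d);   // value if d < 0, partial derivative otherwise
              return result;
          }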
      -
  • -
  • -
    -

    calculate

    -
    public static double calculate(double[] inputs, - int d, - Function[] src)
    -
    + + + + +
      +
    • +

      calculate

      +
      public static double calculate(double[] inputs,
      +                               int d,
      +                               Function[] src)
    -
  • - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/operations/operator/Division.html b/docs/jdocs/neureka/backend/main/operations/operator/Division.html index b48761867..c4aa16162 100644 --- a/docs/jdocs/neureka/backend/main/operations/operator/Division.html +++ b/docs/jdocs/neureka/backend/main/operations/operator/Division.html @@ -1,233 +1,310 @@ - + + - -Division (neureka 1.0.0 API) - - - - + +Division (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class Division

    -
    -
    java.lang.Object -
    neureka.backend.api.template.operations.AbstractOperation -
    neureka.backend.main.operations.operator.Division
    -
    +
    neureka.backend.main.operations.operator
    +

    Class Division

    -
    -
    +
    + +
    +
    -
    - +
    +
    +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        Division

        -
        public Division()
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            Division

            +
            public Division()
          -
    • +
    -
  • -
    -

    Method Details

    -
  • -
  • -
    -

    calculate

    -
    public double calculate(double[] inputs, - int j, - int d, - Function[] src)
    -
    Description copied from interface: Operation
    + + + + +
      +
    • +

      calculate

      +
      public double calculate(double[] inputs,
      +                        int j,
      +                        int d,
      +                        Function[] src)
      +
      Description copied from interface: Operation
      This method mainly ought to serve as a reference- and fallback- implementation for tensor backends and also as the backend for handling the calculation of scalar inputs passed to a given abstract syntax tree of - Function instances...
      + Function instances...
      ( (almost) every Function instance contains an OperationType reference to which it passes scalar executions... )

      This is also the reason why the last parameter of this method is a list of Function objects : @@ -243,33 +320,95 @@

      calculate

      Depending on this variable and also the nature of the operation, the execution calls to the child nodes of this node change considerably!

      -
      -
      Parameters:
      +
      +
      Parameters:
      inputs - An array of scalar input variables.
      j - The index variable for indexed execution on the input array. (-1 if no indexing should occur)
      d - The index of the variable of which a derivative ought to be calculated.
      src - The child nodes of the Function node to which this very OperationType belongs.
      -
      Returns:
      +
      Returns:
      The result of the calculation.
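      For Division the same fallback idea applies, except the derivative follows the quotient rule. Again a sketch only (two operands assumed, ScalarFn as in the Addition sketch), not the library's code.

          // Illustrative sketch only, for two operands a/b.
          static double divisionFallback(double[] inputs, int d, ScalarFn a, ScalarFn b) {
              double av = a.call(inputs, -1), bv = b.call(inputs, -1);
              if (d < 0) return av / bv;                      // plain evaluation
              double da = a.call(inputs, d), db = b.call(inputs, d);
              return (da * bv - av * db) / (bv * bv);         // quotient rule
          }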
      -
  • -
  • -
    -

    calculate

    -
    public static double calculate(double[] inputs, - int d, - Function[] src)
    -
    + + + + +
      +
    • +

      calculate

      +
      public static double calculate(double[] inputs,
      +                               int d,
      +                               Function[] src)
    -
  • - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/operations/operator/Modulo.html b/docs/jdocs/neureka/backend/main/operations/operator/Modulo.html index 61013321a..37f8458ff 100644 --- a/docs/jdocs/neureka/backend/main/operations/operator/Modulo.html +++ b/docs/jdocs/neureka/backend/main/operations/operator/Modulo.html @@ -1,241 +1,321 @@ - + + - -Modulo (neureka 1.0.0 API) - - - - + +Modulo (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class Modulo

    -
    -
    java.lang.Object -
    neureka.backend.api.template.operations.AbstractOperation -
    neureka.backend.main.operations.operator.Modulo
    -
    +
    neureka.backend.main.operations.operator
    +

    Class Modulo

    -
    -
    +
    + +
    +
    -
    - +
    +
    +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        Modulo

        -
        public Modulo()
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            Modulo

            +
            public Modulo()
          -
    • +
    -
  • -
    -

    Method Details

    -
  • -
  • -
    -

    calculate

    -
    public double calculate(double[] inputs, - int j, - int d, - Function[] src)
    -
    Description copied from interface: Operation
    + + + + +
      +
    • +

      calculate

      +
      public double calculate(double[] inputs,
      +                        int j,
      +                        int d,
      +                        Function[] src)
      +
      Description copied from interface: Operation
      This method mainly ought to serve as a reference- and fallback- implementation for tensor backends and also as the backend for handling the calculation of scalar inputs passed to a given abstract syntax tree of - Function instances...
      + Function instances...
      ( (almost) every Function instance contains an OperationType reference to which it passes scalar executions... )

      This is also the reason why the last parameter of this method is a list of Function objects : @@ -251,25 +331,84 @@

      calculate

      Depending on this variable and also the nature of the operation, the execution calls to the child nodes of this node change considerably!

      -
      -
      Parameters:
      +
      +
      Parameters:
      inputs - An array of scalar input variables.
      j - The index variable for indexed execution on the input array. (-1 if no indexing should occur)
      d - The index of the variable of which a derivative ought to be calculated.
      src - The child nodes of the Function node to which this very OperationType belongs.
      -
      Returns:
      +
      Returns:
      The result of the calculation.
      -
  • - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/operations/operator/Multiplication.html b/docs/jdocs/neureka/backend/main/operations/operator/Multiplication.html index 969253bf7..68b9e39ec 100644 --- a/docs/jdocs/neureka/backend/main/operations/operator/Multiplication.html +++ b/docs/jdocs/neureka/backend/main/operations/operator/Multiplication.html @@ -1,246 +1,327 @@ - + + - -Multiplication (neureka 1.0.0 API) - - - - + +Multiplication (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class Multiplication

    -
    -
    java.lang.Object -
    neureka.backend.api.template.operations.AbstractOperation -
    neureka.backend.main.operations.operator.Multiplication
    -
    +
    neureka.backend.main.operations.operator
    +

    Class Multiplication

    -
    -
    +
    + +
    +
    -
    - +
    +
    + - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        Multiplication

        -
        public Multiplication()
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            Multiplication

            +
            public Multiplication()
          -
    • +
    -
  • -
    -

    Method Details

    -
  • -
  • -
    -

    calculate

    -
    public double calculate(double[] inputs, - int j, - int d, - Function[] src)
    -
    Description copied from interface: Operation
    + + + + +
      +
    • +

      calculate

      +
      public double calculate(double[] inputs,
      +                        int j,
      +                        int d,
      +                        Function[] src)
      +
      Description copied from interface: Operation
This method mainly ought to serve as a reference- and fallback- implementation for tensor backends and also as the backend for handling the calculation of scalar inputs passed to a given abstract syntax tree of Function instances...
( (almost) every Function instance contains an OperationType reference to which it passes scalar executions... )

This is also the reason why the last parameter of this method is a list of Function objects :

      calculate

      Depending on this variable and also the nature of the operation, the execution calls to the child nodes of this node change considerably!

      -
      -
      Parameters:
      +
      +
      Parameters:
      inputs - An array of scalar input variables.
      j - The index variable for indexed execution on the input array. (-1 if no indexing should occur)
      d - The index of the variable of which a derivative ought to be calculated.
      src - The child nodes of the Function node to which this very OperationType belongs.
      -
      Returns:
      +
      Returns:
      The result of the calculation.
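To make the meaning of the parameters d and src more concrete for a product node, the following self-contained sketch evaluates the product of all child nodes and, when d >= 0, applies the product rule to obtain the derivative with respect to input d. The nested Node interface is a stand-in for neureka's Function type, not the library's code.

final class MultiplicationSketch {
    interface Node {
        double eval(double[] inputs);          // forward value of a child node
        double derive(double[] inputs, int d); // derivative of a child node w.r.t. input d
    }

    static double calculate(double[] inputs, int d, Node[] src) {
        if (d < 0) { // plain forward evaluation
            double product = 1.0;
            for (Node n : src) product *= n.eval(inputs);
            return product;
        }
        // product rule: sum over i of src[i]' times the product of all other children
        double sum = 0.0;
        for (int i = 0; i < src.length; i++) {
            double term = src[i].derive(inputs, d);
            for (int k = 0; k < src.length; k++)
                if (k != i) term *= src[k].eval(inputs);
            sum += term;
        }
        return sum;
    }
}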
      -
  • -
  • -
    -

    calculate

    -
    public static double calculate(double[] inputs, - int d, - Function[] src)
    -
    + + + + +
      +
    • +

      calculate

      +
      public static double calculate(double[] inputs,
      +                               int d,
      +                               Function[] src)
    -
  • - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/operations/operator/Power.html b/docs/jdocs/neureka/backend/main/operations/operator/Power.html index e00d26df4..3e2b6bf05 100644 --- a/docs/jdocs/neureka/backend/main/operations/operator/Power.html +++ b/docs/jdocs/neureka/backend/main/operations/operator/Power.html @@ -1,233 +1,310 @@ - + + - -Power (neureka 1.0.0 API) - - - - + +Power (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    -
    java.lang.Object -
    neureka.backend.api.template.operations.AbstractOperation -
    neureka.backend.main.operations.operator.Power
    -
    +
    neureka.backend.main.operations.operator
    +

    Class Power

    -
    -
    +
    + +
    +
    -
    - +
    +
    +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        Power

        -
        public Power()
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            Power

            +
            public Power()
          -
    • +
    -
  • -
    -

    Method Details

    -
  • -
  • -
    -

    calculate

    -
    public double calculate(double[] inputs, - int j, - int d, - Function[] src)
    -
    Description copied from interface: Operation
    + + + + +
      +
    • +

      calculate

      +
      public double calculate(double[] inputs,
      +                        int j,
      +                        int d,
      +                        Function[] src)
      +
      Description copied from interface: Operation
This method mainly ought to serve as a reference- and fallback- implementation for tensor backends and also as the backend for handling the calculation of scalar inputs passed to a given abstract syntax tree of Function instances...
( (almost) every Function instance contains an OperationType reference to which it passes scalar executions... )

This is also the reason why the last parameter of this method is a list of Function objects :

      calculate

      Depending on this variable and also the nature of the operation, the execution calls to the child nodes of this node change considerably!

      -
      -
      Parameters:
      +
      +
      Parameters:
      inputs - An array of scalar input variables.
      j - The index variable for indexed execution on the input array. (-1 if no indexing should occur)
      d - The index of the variable of which a derivative ought to be calculated.
      src - The child nodes of the Function node to which this very OperationType belongs.
      -
      Returns:
      +
      Returns:
      The result of the calculation.
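The same contract for a power node a ^ b can be sketched with the generalized power rule, d/dx a^b = a^b * ( b' * ln(a) + b * a' / a ), which is valid for a > 0. Again this is only an illustration under a stand-in Node interface, not the library's code.

final class PowerSketch {
    interface Node {
        double eval(double[] in);
        double derive(double[] in, int d);
    }

    static double calculate(double[] in, int d, Node[] src) {
        double a = src[0].eval(in);
        double b = src[1].eval(in);
        double value = Math.pow(a, b);
        if (d < 0) return value; // forward pass
        double da = src[0].derive(in, d);
        double db = src[1].derive(in, d);
        // generalized power rule, assumes a > 0 so that ln(a) is defined
        return value * (db * Math.log(a) + b * da / a);
    }
}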
      -
  • -
  • -
    -

    calculate

    -
    public static double calculate(double[] inputs, - int d, - Function[] src)
    -
    + + + + +
      +
    • +

      calculate

      +
      public static double calculate(double[] inputs,
      +                               int d,
      +                               Function[] src)
    -
  • - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/operations/operator/Subtraction.html b/docs/jdocs/neureka/backend/main/operations/operator/Subtraction.html index 2dcf80c97..8c8c31fac 100644 --- a/docs/jdocs/neureka/backend/main/operations/operator/Subtraction.html +++ b/docs/jdocs/neureka/backend/main/operations/operator/Subtraction.html @@ -1,233 +1,310 @@ - + + - -Subtraction (neureka 1.0.0 API) - - - - + +Subtraction (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class Subtraction

    -
    -
    java.lang.Object -
    neureka.backend.api.template.operations.AbstractOperation -
    neureka.backend.main.operations.operator.Subtraction
    -
    +
    neureka.backend.main.operations.operator
    +

    Class Subtraction

    -
    -
    +
    + +
    +
    -
    - +
    +
    +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        Subtraction

        -
        public Subtraction()
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            Subtraction

            +
            public Subtraction()
          -
    • +
    -
  • -
    -

    Method Details

    -
  • -
  • -
    -

    calculate

    -
    public double calculate(double[] inputs, - int j, - int d, - Function[] src)
    -
    Description copied from interface: Operation
    + + + + +
      +
    • +

      calculate

      +
      public double calculate(double[] inputs,
      +                        int j,
      +                        int d,
      +                        Function[] src)
      +
      Description copied from interface: Operation
This method mainly ought to serve as a reference- and fallback- implementation for tensor backends and also as the backend for handling the calculation of scalar inputs passed to a given abstract syntax tree of Function instances...
( (almost) every Function instance contains an OperationType reference to which it passes scalar executions... )

This is also the reason why the last parameter of this method is a list of Function objects :

      calculate

      Depending on this variable and also the nature of the operation, the execution calls to the child nodes of this node change considerably!

      -
      -
      Parameters:
      +
      +
      Parameters:
      inputs - An array of scalar input variables.
      j - The index variable for indexed execution on the input array. (-1 if no indexing should occur)
      d - The index of the variable of which a derivative ought to be calculated.
      src - The child nodes of the Function node to which this very OperationType belongs.
      -
      Returns:
      +
      Returns:
      The result of the calculation.
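For subtraction the scalar fallback is almost trivial: the derivative of a left-folded difference f0 - f1 - ... - fn is just f0' - f1' - ... - fn'. A minimal sketch under the same kind of stand-in interface (not the library's implementation):

final class SubtractionSketch {
    interface Node {
        double eval(double[] in);
        double derive(double[] in, int d);
    }

    static double calculate(double[] in, int d, Node[] src) {
        double result = (d < 0) ? src[0].eval(in) : src[0].derive(in, d);
        for (int i = 1; i < src.length; i++)
            result -= (d < 0) ? src[i].eval(in) : src[i].derive(in, d);
        return result;
    }
}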
      -
  • -
  • -
    -

    calculate

    -
    public static double calculate(double[] inputs, - int d, - Function[] src)
    -
    + + + + +
      +
    • +

      calculate

      +
      public static double calculate(double[] inputs,
      +                               int d,
      +                               Function[] src)
    -
  • - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/operations/operator/package-frame.html b/docs/jdocs/neureka/backend/main/operations/operator/package-frame.html new file mode 100644 index 000000000..279856bb2 --- /dev/null +++ b/docs/jdocs/neureka/backend/main/operations/operator/package-frame.html @@ -0,0 +1,24 @@ + + + + + +neureka.backend.main.operations.operator (neureka 1.0.1 API) + + + + +

    neureka.backend.main.operations.operator

    + + + diff --git a/docs/jdocs/neureka/backend/main/operations/operator/package-summary.html b/docs/jdocs/neureka/backend/main/operations/operator/package-summary.html index e0d6b5b47..fe2921adb 100644 --- a/docs/jdocs/neureka/backend/main/operations/operator/package-summary.html +++ b/docs/jdocs/neureka/backend/main/operations/operator/package-summary.html @@ -1,136 +1,172 @@ - + + - -neureka.backend.main.operations.operator (neureka 1.0.0 API) - - - - + +neureka.backend.main.operations.operator (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    -

    Package neureka.backend.main.operations.operator

    -
    -
    -
    package neureka.backend.main.operations.operator
    -
    -
Everything in this package should be considered library-private!
DO NOT DEPEND ON CLASSES INSIDE THIS PACKAGE!
Code inside this package or any sub-packages might change frequently...
    -
    -
    -
    -
    -
    + + diff --git a/docs/jdocs/neureka/backend/main/operations/operator/package-tree.html b/docs/jdocs/neureka/backend/main/operations/operator/package-tree.html index 3c8b5b69a..1961f2eba 100644 --- a/docs/jdocs/neureka/backend/main/operations/operator/package-tree.html +++ b/docs/jdocs/neureka/backend/main/operations/operator/package-tree.html @@ -1,80 +1,143 @@ - + + - -neureka.backend.main.operations.operator Class Hierarchy (neureka 1.0.0 API) - - - - + +neureka.backend.main.operations.operator Class Hierarchy (neureka 1.0.1 API) - - - - - - + + -
    - -
    -

    Hierarchy For Package neureka.backend.main.operations.operator

Package Hierarchies:
    -
    +

    Class Hierarchy

      -
• java.lang.Object
  • neureka.backend.api.template.operations.AbstractOperation (implements neureka.backend.api.Operation)
    • neureka.backend.main.operations.operator.Addition
    • neureka.backend.main.operations.operator.Division
    • neureka.backend.main.operations.operator.Modulo
    • neureka.backend.main.operations.operator.Multiplication
    • neureka.backend.main.operations.operator.Power
    • neureka.backend.main.operations.operator.Subtraction
    -
    -
    + + + + diff --git a/docs/jdocs/neureka/backend/main/operations/other/AssignLeft.html b/docs/jdocs/neureka/backend/main/operations/other/AssignLeft.html index 8f28477c8..2664cc8ab 100644 --- a/docs/jdocs/neureka/backend/main/operations/other/AssignLeft.html +++ b/docs/jdocs/neureka/backend/main/operations/other/AssignLeft.html @@ -1,210 +1,286 @@ - + + - -AssignLeft (neureka 1.0.0 API) - - - - + +AssignLeft (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class AssignLeft

    -
    -
    java.lang.Object -
    neureka.backend.api.template.operations.AbstractOperation -
    neureka.backend.main.operations.other.AssignLeft
    -
    +
    neureka.backend.main.operations.other
    +

    Class AssignLeft

    -
    -
    +
    + +
    +
    -
    - +
    +
    +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        AssignLeft

        -
        public AssignLeft()
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            AssignLeft

            +
            public AssignLeft()
          -
    • +
    -
  • -
    -

    Method Details

    -
  • -
  • -
    -

    calculate

    -
    public double calculate(double[] inputs, - int j, - int d, - Function[] src)
    -
    Description copied from interface: Operation
    + + + + +
      +
    • +

      calculate

      +
      public double calculate(double[] inputs,
      +                        int j,
      +                        int d,
      +                        Function[] src)
      +
      Description copied from interface: Operation
This method mainly ought to serve as a reference- and fallback- implementation for tensor backends and also as the backend for handling the calculation of scalar inputs passed to a given abstract syntax tree of Function instances...
( (almost) every Function instance contains an OperationType reference to which it passes scalar executions... )

This is also the reason why the last parameter of this method is a list of Function objects :

      calculate

      Depending on this variable and also the nature of the operation, the execution calls to the child nodes of this node change considerably!

      -
      -
      Parameters:
      +
      +
      Parameters:
      inputs - An array of scalar input variables.
      j - The index variable for indexed execution on the input array. (-1 if no indexing should occur)
      d - The index of the variable of which a derivative ought to be calculated.
      src - The child nodes of the Function node to which this very OperationType belongs.
      -
      Returns:
      +
      Returns:
      The result of the calculation.
      -
  • - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/operations/other/Cat.html b/docs/jdocs/neureka/backend/main/operations/other/Cat.html index 2a6c703a4..550550249 100644 --- a/docs/jdocs/neureka/backend/main/operations/other/Cat.html +++ b/docs/jdocs/neureka/backend/main/operations/other/Cat.html @@ -1,194 +1,266 @@ - + + - -Cat (neureka 1.0.0 API) - - - - + +Cat (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    -
    java.lang.Object -
    neureka.backend.api.template.operations.AbstractOperation -
    neureka.backend.main.operations.other.Cat
    +
    neureka.backend.main.operations.other
    +

    Class Cat

    -
    -
    -
    +
    + +
    +
    -
    - +
    +
    + - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        Cat

        -
        public Cat()
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            Cat

            +
            public Cat()
          -
    • +
    -
  • -
    -

    Method Details

    -
      -
    • -
      -

      execute

      -
      public Result execute(Function caller, - ExecutionCall<?> call)
      -
      +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          execute

          +
          public Result execute(Function caller,
          +                      ExecutionCall<?> call)
        • -
        • -
          -

          calculate

          -
          public double calculate(double[] inputs, - int j, - int d, - Function[] src)
          -
          Description copied from interface: Operation
          +
        + + + +
          +
        • +

          calculate

          +
          public double calculate(double[] inputs,
          +                        int j,
          +                        int d,
          +                        Function[] src)
          +
          Description copied from interface: Operation
This method mainly ought to serve as a reference- and fallback- implementation for tensor backends and also as the backend for handling the calculation of scalar inputs passed to a given abstract syntax tree of Function instances...
( (almost) every Function instance contains an OperationType reference to which it passes scalar executions... )

This is also the reason why the last parameter of this method is a list of Function objects :

          calculate

          Depending on this variable and also the nature of the operation, the execution calls to the child nodes of this node change considerably!

          -
          -
          Parameters:
          +
          +
          Parameters:
          inputs - An array of scalar input variables.
          j - The index variable for indexed execution on the input array. (-1 if no indexing should occur)
          d - The index of the variable of which a derivative ought to be calculated.
          src - The child nodes of the Function node to which this very OperationType belongs.
          -
          Returns:
          +
          Returns:
          The result of the calculation.
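Cat presumably concatenates its inputs along a chosen axis; the only shape arithmetic involved is that the concatenation axis grows while every other axis must agree. A small illustrative helper (hypothetical, not part of the library):

final class ConcatShapeSketch {
    // Example: concatShape(new int[]{2, 3}, new int[]{2, 5}, 1) yields [2, 8]
    static int[] concatShape(int[] a, int[] b, int axis) {
        if (a.length != b.length)
            throw new IllegalArgumentException("Ranks differ: " + a.length + " vs " + b.length);
        int[] out = a.clone();
        for (int i = 0; i < a.length; i++) {
            if (i == axis) out[i] = a[i] + b[i];
            else if (a[i] != b[i])
                throw new IllegalArgumentException("Dimension " + i + " does not match.");
        }
        return out;
    }
}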
          -
  • - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/operations/other/DimFit.html b/docs/jdocs/neureka/backend/main/operations/other/DimFit.html index d724b8018..5b6dcbdc1 100644 --- a/docs/jdocs/neureka/backend/main/operations/other/DimFit.html +++ b/docs/jdocs/neureka/backend/main/operations/other/DimFit.html @@ -1,183 +1,251 @@ - + + - -DimFit (neureka 1.0.0 API) - - - - + +DimFit (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class DimFit

    -
    -
    java.lang.Object -
    neureka.backend.api.template.operations.AbstractOperation -
    neureka.backend.main.operations.other.DimFit
    +
    neureka.backend.main.operations.other
    +

    Class DimFit

    -
    -
    -
    +
    + +
    +
    -
    - +
    +
    + - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        DimFit

        -
        public DimFit()
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            DimFit

            +
            public DimFit()
          -
    • +
    -
  • -
    -

    Method Details

    -
      -
    • -
      -

      calculate

      -
      public double calculate(double[] inputs, - int j, - int d, - Function[] src)
      -
      Description copied from interface: Operation
      +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          calculate

          +
          public double calculate(double[] inputs,
          +                        int j,
          +                        int d,
          +                        Function[] src)
          +
          Description copied from interface: Operation
This method mainly ought to serve as a reference- and fallback- implementation for tensor backends and also as the backend for handling the calculation of scalar inputs passed to a given abstract syntax tree of Function instances...
( (almost) every Function instance contains an OperationType reference to which it passes scalar executions... )

This is also the reason why the last parameter of this method is a list of Function objects :

          calculate

          Depending on this variable and also the nature of the operation, the execution calls to the child nodes of this node change considerably!

          -
          -
          Parameters:
          +
          +
          Parameters:
          inputs - An array of scalar input variables.
          j - The index variable for indexed execution on the input array. (-1 if no indexing should occur)
          d - The index of the variable of which a derivative ought to be calculated.
          src - The child nodes of the Function node to which this very OperationType belongs.
          -
          Returns:
          +
          Returns:
          The result of the calculation.
          -
    -
  • - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/operations/other/DimTrim.html b/docs/jdocs/neureka/backend/main/operations/other/DimTrim.html index e14ca0c59..b92968053 100644 --- a/docs/jdocs/neureka/backend/main/operations/other/DimTrim.html +++ b/docs/jdocs/neureka/backend/main/operations/other/DimTrim.html @@ -1,192 +1,264 @@ - + + - -DimTrim (neureka 1.0.0 API) - - - - + +DimTrim (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class DimTrim

    -
    -
    java.lang.Object -
    neureka.backend.api.template.operations.AbstractOperation -
    neureka.backend.main.operations.other.DimTrim
    +
    neureka.backend.main.operations.other
    +

    Class DimTrim

    -
    -
    -
    +
    + +
    +
    -
    - +
    +
    + - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        DimTrim

        -
        public DimTrim()
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            DimTrim

            +
            public DimTrim()
          -
    • +
    -
  • -
    -

    Method Details

    -
      -
    • -
      -

      endsFrom

      -
      public static int[] endsFrom(int[] shape)
      -
      +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          endsFrom

          +
          public static int[] endsFrom(int[] shape)
        • -
        • -
          -

          calculate

          -
          public double calculate(double[] inputs, - int j, - int d, - Function[] src)
          -
          Description copied from interface: Operation
          +
        + + + +
          +
        • +

          calculate

          +
          public double calculate(double[] inputs,
          +                        int j,
          +                        int d,
          +                        Function[] src)
          +
          Description copied from interface: Operation
This method mainly ought to serve as a reference- and fallback- implementation for tensor backends and also as the backend for handling the calculation of scalar inputs passed to a given abstract syntax tree of Function instances...
( (almost) every Function instance contains an OperationType reference to which it passes scalar executions... )

This is also the reason why the last parameter of this method is a list of Function objects :

          calculate

          Depending on this variable and also the nature of the operation, the execution calls to the child nodes of this node change considerably!

          -
          -
          Parameters:
          +
          +
          Parameters:
          inputs - An array of scalar input variables.
          j - The index variable for indexed execution on the input array. (-1 if no indexing should occur)
          d - The index of the variable of which a derivative ought to be calculated.
          src - The child nodes of the Function node to which this very OperationType belongs.
          -
          Returns:
          +
          Returns:
          The result of the calculation.
          -
  • - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/operations/other/Max.html b/docs/jdocs/neureka/backend/main/operations/other/Max.html index b45de084b..4518f794f 100644 --- a/docs/jdocs/neureka/backend/main/operations/other/Max.html +++ b/docs/jdocs/neureka/backend/main/operations/other/Max.html @@ -1,183 +1,251 @@ - + + - -Max (neureka 1.0.0 API) - - - - + +Max (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    -
    java.lang.Object -
    neureka.backend.api.template.operations.AbstractOperation -
    neureka.backend.main.operations.other.Max
    +
    neureka.backend.main.operations.other
    +

    Class Max

    -
    -
    -
    +
    + +
    +
    -
    - +
    +
    + - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        Max

        -
        public Max()
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            Max

            +
            public Max()
          -
    • +
    -
  • -
    -

    Method Details

    -
      -
    • -
      -

      calculate

      -
      public double calculate(double[] inputs, - int j, - int d, - Function[] src)
      -
      Description copied from interface: Operation
      +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          calculate

          +
          public double calculate(double[] inputs,
          +                        int j,
          +                        int d,
          +                        Function[] src)
          +
          Description copied from interface: Operation
This method mainly ought to serve as a reference- and fallback- implementation for tensor backends and also as the backend for handling the calculation of scalar inputs passed to a given abstract syntax tree of Function instances...
( (almost) every Function instance contains an OperationType reference to which it passes scalar executions... )

This is also the reason why the last parameter of this method is a list of Function objects :

          calculate

          Depending on this variable and also the nature of the operation, the execution calls to the child nodes of this node change considerably!

          -
          -
          Parameters:
          +
          +
          Parameters:
          inputs - An array of scalar input variables.
          j - The index variable for indexed execution on the input array. (-1 if no indexing should occur)
          d - The index of the variable of which a derivative ought to be calculated.
          src - The child nodes of the Function node to which this very OperationType belongs.
          -
          Returns:
          +
          Returns:
          The result of the calculation.
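Max (and Min, whose page reads the same) reduces its input to a single extreme value, and for backpropagation only the winning element receives a gradient. A compact sketch of that idea (illustrative only, not the library's kernel):

final class MaxSketch {
    // Forward: the largest input. Backward: gradient 1 for the arg-max, 0 elsewhere.
    static double forward(double[] inputs) {
        double max = inputs[0];
        for (double v : inputs) max = Math.max(max, v);
        return max;
    }

    static double derivative(double[] inputs, int d) {
        int argMax = 0;
        for (int i = 1; i < inputs.length; i++)
            if (inputs[i] > inputs[argMax]) argMax = i;
        return (d == argMax) ? 1.0 : 0.0;
    }
}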
          -
    -
  • - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/operations/other/Min.html b/docs/jdocs/neureka/backend/main/operations/other/Min.html index 2a50477e3..6a54fc9db 100644 --- a/docs/jdocs/neureka/backend/main/operations/other/Min.html +++ b/docs/jdocs/neureka/backend/main/operations/other/Min.html @@ -1,183 +1,251 @@ - + + - -Min (neureka 1.0.0 API) - - - - + +Min (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    -
    java.lang.Object -
    neureka.backend.api.template.operations.AbstractOperation -
    neureka.backend.main.operations.other.Min
    +
    neureka.backend.main.operations.other
    +

    Class Min

    -
    -
    -
    +
    + +
    +
    -
    - +
    +
    + - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        Min

        -
        public Min()
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            Min

            +
            public Min()
          -
    • +
    -
  • -
    -

    Method Details

    -
      -
    • -
      -

      calculate

      -
      public double calculate(double[] inputs, - int j, - int d, - Function[] src)
      -
      Description copied from interface: Operation
      +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          calculate

          +
          public double calculate(double[] inputs,
          +                        int j,
          +                        int d,
          +                        Function[] src)
          +
          Description copied from interface: Operation
This method mainly ought to serve as a reference- and fallback- implementation for tensor backends and also as the backend for handling the calculation of scalar inputs passed to a given abstract syntax tree of Function instances...
( (almost) every Function instance contains an OperationType reference to which it passes scalar executions... )

This is also the reason why the last parameter of this method is a list of Function objects :

          calculate

          Depending on this variable and also the nature of the operation, the execution calls to the child nodes of this node change considerably!

          -
          -
          Parameters:
          +
          +
          Parameters:
          inputs - An array of scalar input variables.
          j - The index variable for indexed execution on the input array. (-1 if no indexing should occur)
          d - The index of the variable of which a derivative ought to be calculated.
          src - The child nodes of the Function node to which this very OperationType belongs.
          -
          Returns:
          +
          Returns:
          The result of the calculation.
          -
    -
  • - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/operations/other/Permute.html b/docs/jdocs/neureka/backend/main/operations/other/Permute.html index fd70b205c..6cae79c73 100644 --- a/docs/jdocs/neureka/backend/main/operations/other/Permute.html +++ b/docs/jdocs/neureka/backend/main/operations/other/Permute.html @@ -1,219 +1,299 @@ - + + - -Permute (neureka 1.0.0 API) - - - - + +Permute (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class Permute

    -
    -
    java.lang.Object -
    neureka.backend.api.template.operations.AbstractOperation -
    neureka.backend.main.operations.other.Permute
    -
    +
    neureka.backend.main.operations.other
    +

    Class Permute

    -
    -
    +
    + +
    +
    -
    - +
    +
    + - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        Permute

        -
        public Permute()
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            Permute

            +
            public Permute()
          -
    • +
    -
  • -
    -

    Method Details

    -
  • -
  • -
    -

    calculate

    -
    public double calculate(double[] inputs, - int j, - int d, - Function[] src)
    -
    Description copied from interface: Operation
    + + + + +
      +
    • +

      calculate

      +
      public double calculate(double[] inputs,
      +                        int j,
      +                        int d,
      +                        Function[] src)
      +
      Description copied from interface: Operation
This method mainly ought to serve as a reference- and fallback- implementation for tensor backends and also as the backend for handling the calculation of scalar inputs passed to a given abstract syntax tree of Function instances...
( (almost) every Function instance contains an OperationType reference to which it passes scalar executions... )

This is also the reason why the last parameter of this method is a list of Function objects :

      calculate

      Depending on this variable and also the nature of the operation, the execution calls to the child nodes of this node change considerably!

      -
      -
      Parameters:
      +
      +
      Parameters:
      inputs - An array of scalar input variables.
      j - The index variable for indexed execution on the input array. (-1 if no indexing should occur)
      d - The index of the variable of which a derivative ought to be calculated.
      src - The child nodes of the Function node to which this very OperationType belongs.
      -
      Returns:
      +
      Returns:
      The result of the calculation.
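On the meta level, permuting a tensor boils down to reordering its shape (and stride) arrays by a permutation of the axis indices. A minimal illustration (not the library's code):

final class PermuteShapeSketch {
    // newShape[i] = shape[order[i]], e.g. shape [2, 3, 4] with order [2, 0, 1] gives [4, 2, 3]
    static int[] permute(int[] shape, int[] order) {
        int[] out = new int[shape.length];
        for (int i = 0; i < order.length; i++)
            out[i] = shape[order[i]];
        return out;
    }
}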
      -
  • - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/operations/other/Randomization.html b/docs/jdocs/neureka/backend/main/operations/other/Randomization.html index fa1fc0031..0323d2b39 100644 --- a/docs/jdocs/neureka/backend/main/operations/other/Randomization.html +++ b/docs/jdocs/neureka/backend/main/operations/other/Randomization.html @@ -1,190 +1,258 @@ - + + - -Randomization (neureka 1.0.0 API) - - - - + +Randomization (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class Randomization

    -
    -
    java.lang.Object -
    neureka.backend.api.template.operations.AbstractOperation -
    neureka.backend.main.operations.other.Randomization
    +
    neureka.backend.main.operations.other
    +

    Class Randomization

    -
    -
    -
    +
    + +
    +
      +
    • +
      All Implemented Interfaces:
      -
      Operation
      +
      Operation

      -
public class Randomization
extends AbstractOperation

This Operation takes an optional user seed, the shape of its input tensor, and the indices of individual elements within said tensor to generate floats or doubles with a gaussian distribution where the mean is 0 and the standard deviation is 1. This operation is very fast because it generates numbers in parallel, unlike the JDK's random number generator class Random.
    + + +
    +
    + - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        Randomization

        -
        public Randomization()
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            Randomization

            +
            public Randomization()
          -
    • +
    -
  • -
    -

    Method Details

    -
      -
    • -
      -

      calculate

      -
      public double calculate(double[] inputs, - int j, - int d, - Function[] src)
      -
      Description copied from interface: Operation
      +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          calculate

          +
          public double calculate(double[] inputs,
          +                        int j,
          +                        int d,
          +                        Function[] src)
          +
          Description copied from interface: Operation
This method mainly ought to serve as a reference- and fallback- implementation for tensor backends and also as the backend for handling the calculation of scalar inputs passed to a given abstract syntax tree of Function instances...
( (almost) every Function instance contains an OperationType reference to which it passes scalar executions... )

This is also the reason why the last parameter of this method is a list of Function objects :

          calculate

          Depending on this variable and also the nature of the operation, the execution calls to the child nodes of this node change considerably!

          -
          -
          Parameters:
          +
          +
          Parameters:
          inputs - An array of scalar input variables.
          j - The index variable for indexed execution on the input array. (-1 if no indexing should occur)
          d - The index of the variable of which a derivative ought to be calculated.
          src - The child nodes of the Function node to which this very OperationType belongs.
          -
          Returns:
          +
          Returns:
          The result of the calculation.
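The class description above explains why this operation parallelizes well: each element's value depends only on the seed and the element's index, so no shared generator state is required. The sketch below shows one common way to realize that (a counter-based hash feeding a Box-Muller transform). It is an assumption about the general technique, not the library's exact algorithm.

final class ParallelGaussianSketch {
    // Derive two pseudo-random uniforms from (seed, index) alone, then apply the
    // Box-Muller transform to obtain a standard-normal sample (mean 0, std 1).
    static double gaussianAt(long seed, long index) {
        long h1 = mix(seed ^ (index * 0x9E3779B97F4A7C15L));
        long h2 = mix(h1 ^ 0xBF58476D1CE4E5B9L);
        double u1 = ((h1 >>> 11) + 1.0) / 9007199254740993.0; // uniform in (0, 1)
        double u2 = (h2 >>> 11) / 9007199254740992.0;         // uniform in [0, 1)
        return Math.sqrt(-2.0 * Math.log(u1)) * Math.cos(2.0 * Math.PI * u2);
    }

    private static long mix(long z) { // SplitMix64-style bit mixer
        z = (z ^ (z >>> 30)) * 0xBF58476D1CE4E5B9L;
        z = (z ^ (z >>> 27)) * 0x94D049BB133111EBL;
        return z ^ (z >>> 31);
    }
}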
          -
    -
  • - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/operations/other/ReLayout.html b/docs/jdocs/neureka/backend/main/operations/other/ReLayout.html index 25be98a3f..b8637a293 100644 --- a/docs/jdocs/neureka/backend/main/operations/other/ReLayout.html +++ b/docs/jdocs/neureka/backend/main/operations/other/ReLayout.html @@ -1,187 +1,256 @@ - + + - -ReLayout (neureka 1.0.0 API) - - - - + +ReLayout (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class ReLayout

    -
    -
    java.lang.Object -
    neureka.backend.api.template.operations.AbstractOperation -
    neureka.backend.main.operations.other.ReLayout
    +
    neureka.backend.main.operations.other
    +

    Class ReLayout

    -
    -
    -
    +
    + +
    +
    -
    - +
    +
    + - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        ReLayout

        -
        public ReLayout()
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            ReLayout

            +
            public ReLayout()
          -
    • +
    -
  • -
    -

    Method Details

    -
      -
    • -
      -

      calculate

      -
      public double calculate(double[] inputs, - int j, - int d, - Function[] src)
      -
      Description copied from interface: Operation
      +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          calculate

          +
          public double calculate(double[] inputs,
          +                        int j,
          +                        int d,
          +                        Function[] src)
          +
          Description copied from interface: Operation
This method mainly ought to serve as a reference- and fallback- implementation for tensor backends and also as the backend for handling the calculation of scalar inputs passed to a given abstract syntax tree of Function instances...
( (almost) every Function instance contains an OperationType reference to which it passes scalar executions... )

This is also the reason why the last parameter of this method is a list of Function objects :

          calculate

          Depending on this variable and also the nature of the operation, the execution calls to the child nodes of this node change considerably!

          -
          -
          Parameters:
          +
          +
          Parameters:
          inputs - An array of scalar input variables.
          j - The index variable for indexed execution on the input array. (-1 if no indexing should occur)
          d - The index of the variable of which a derivative ought to be calculated.
          src - The child nodes of the Function node to which this very OperationType belongs.
          -
          Returns:
          +
          Returns:
          The result of the calculation.
          -
    • -
    • -
      -

      toLayout

      -
public static Tensor<?> toLayout(Tensor<?> t,
                                 NDConfiguration.Layout target)
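A possible way to use this helper, assuming the import paths shown elsewhere in these docs and assuming ROW_MAJOR is one of the NDConfiguration.Layout constants in this version (verify against the enum before relying on it):

import neureka.Tensor;
import neureka.backend.main.operations.other.ReLayout;
import neureka.ndim.config.NDConfiguration;

final class LayoutSketch {
    static Tensor<?> asRowMajor(Tensor<?> t) {
        // Constant name assumed; pick whichever Layout constant you actually need.
        return ReLayout.toLayout(t, NDConfiguration.Layout.ROW_MAJOR);
    }
}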
      -
      +
    + + + +
  • - - +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/operations/other/Reshape.html b/docs/jdocs/neureka/backend/main/operations/other/Reshape.html index 56d02a95d..759472ca3 100644 --- a/docs/jdocs/neureka/backend/main/operations/other/Reshape.html +++ b/docs/jdocs/neureka/backend/main/operations/other/Reshape.html @@ -1,183 +1,251 @@ - + + - -Reshape (neureka 1.0.0 API) - - - - + +Reshape (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class Reshape

    -
    -
    java.lang.Object -
    neureka.backend.api.template.operations.AbstractOperation -
    neureka.backend.main.operations.other.Reshape
    +
    neureka.backend.main.operations.other
    +

    Class Reshape

    -
    -
    -
    +
    + +
    +
    -
    - +
    +
    + - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        Reshape

        -
        public Reshape()
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            Reshape

            +
            public Reshape()
          -
    • +
    -
  • -
    -

    Method Details

    -
      -
    • -
      -

      calculate

      -
      public double calculate(double[] inputs, - int j, - int d, - Function[] src)
      -
      Description copied from interface: Operation
      +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          calculate

          +
          public double calculate(double[] inputs,
          +                        int j,
          +                        int d,
          +                        Function[] src)
          +
          Description copied from interface: Operation
This method mainly ought to serve as a reference- and fallback- implementation for tensor backends and also as the backend for handling the calculation of scalar inputs passed to a given abstract syntax tree of Function instances...
( (almost) every Function instance contains an OperationType reference to which it passes scalar executions... )

This is also the reason why the last parameter of this method is a list of Function objects :

          calculate

          Depending on this variable and also the nature of the operation, the execution calls to the child nodes of this node change considerably!

          -
          -
          Parameters:
          +
          +
          Parameters:
          inputs - An array of scalar input variables.
          j - The index variable for indexed execution on the input array. (-1 if no indexing should occur)
          d - The index of the variable of which a derivative ought to be calculated.
          src - The child nodes of the Function node to which this very OperationType belongs.
          -
          Returns:
          +
          Returns:
          The result of the calculation.
          -
    -
  • - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/operations/other/Slice.html b/docs/jdocs/neureka/backend/main/operations/other/Slice.html index 471f1cf41..e0d22153b 100644 --- a/docs/jdocs/neureka/backend/main/operations/other/Slice.html +++ b/docs/jdocs/neureka/backend/main/operations/other/Slice.html @@ -1,183 +1,251 @@ - + + - -Slice (neureka 1.0.0 API) - - - - + +Slice (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class Slice

    -
    -
    java.lang.Object -
    neureka.backend.api.template.operations.AbstractOperation -
    neureka.backend.main.operations.other.Slice
    +
    neureka.backend.main.operations.other
    +

    Class Slice

    -
    -
    -
    +
    + +
    +
    -
    - +
    +
    + - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        Slice

        -
        public Slice()
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            Slice

            +
            public Slice()
          -
    • +
    -
  • -
    -

    Method Details

    -
      -
    • -
      -

      calculate

      -
      public double calculate(double[] inputs, - int j, - int d, - Function[] src)
      -
      Description copied from interface: Operation
      +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          calculate

          +
          public double calculate(double[] inputs,
          +                        int j,
          +                        int d,
          +                        Function[] src)
          +
          Description copied from interface: Operation
This method mainly ought to serve as a reference- and fallback- implementation for tensor backends and also as the backend for handling the calculation of scalar inputs passed to a given abstract syntax tree of Function instances...
( (almost) every Function instance contains an OperationType reference to which it passes scalar executions... )

This is also the reason why the last parameter of this method is a list of Function objects :

          calculate

          Depending on this variable and also the nature of the operation, the execution calls to the child nodes of this node change considerably!

          -
          -
          Parameters:
          +
          +
          Parameters:
          inputs - An array of scalar input variables.
          j - The index variable for indexed execution on the input array. (-1 if no indexing should occur)
          d - The index of the variable of which a derivative ought to be calculated.
          src - The child nodes of the Function node to which this very OperationType belongs.
          -
          Returns:
          +
          Returns:
          The result of the calculation.
          -
    -
  • - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/operations/other/Sum.html b/docs/jdocs/neureka/backend/main/operations/other/Sum.html index 06d17d1f0..bd5c913e8 100644 --- a/docs/jdocs/neureka/backend/main/operations/other/Sum.html +++ b/docs/jdocs/neureka/backend/main/operations/other/Sum.html @@ -1,183 +1,251 @@ - + + - -Sum (neureka 1.0.0 API) - - - - + +Sum (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    -
    java.lang.Object -
    neureka.backend.api.template.operations.AbstractOperation -
    neureka.backend.main.operations.other.Sum
    +
    neureka.backend.main.operations.other
    +

    Class Sum

    -
    -
    -
    +
    + +
    +
    -
    - +
    +
    + - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        Sum

        -
        public Sum()
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            Sum

            +
            public Sum()
          -
    • +
    -
  • -
    -

    Method Details

    -
      -
    • -
      -

      calculate

      -
      public double calculate(double[] inputs, - int j, - int d, - Function[] src)
      -
      Description copied from interface: Operation
      +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          calculate

          +
          public double calculate(double[] inputs,
          +                        int j,
          +                        int d,
          +                        Function[] src)
          +
          Description copied from interface: Operation
This method mainly ought to serve as a reference- and fallback- implementation for tensor backends and also as the backend for handling the calculation of scalar inputs passed to a given abstract syntax tree of Function instances...
( (almost) every Function instance contains an OperationType reference to which it passes scalar executions... )

This is also the reason why the last parameter of this method is a list of Function objects :

          calculate

          Depending on this variable and also the nature of the operation, the execution calls to the child nodes of this node change considerably!

          -
          -
          Parameters:
          +
          +
          Parameters:
          inputs - An array of scalar input variables.
          j - The index variable for indexed execution on the input array. (-1 if no indexing should occur)
          d - The index of the variable of which a derivative ought to be calculated.
          src - The child nodes of the Function node to which this very OperationType belongs.
          -
          Returns:
          +
          Returns:
          The result of the calculation.
          -
    -
  • - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/operations/other/internal/CPUReduce.Type.html b/docs/jdocs/neureka/backend/main/operations/other/internal/CPUReduce.Type.html index dc3bd50bb..0d23fd114 100644 --- a/docs/jdocs/neureka/backend/main/operations/other/internal/CPUReduce.Type.html +++ b/docs/jdocs/neureka/backend/main/operations/other/internal/CPUReduce.Type.html @@ -1,221 +1,342 @@ - + + - -CPUReduce.Type (neureka 1.0.0 API) - - - - + +CPUReduce.Type (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Enum Class CPUReduce.Type

    -
    -
    java.lang.Object -
    java.lang.Enum<CPUReduce.Type> -
    neureka.backend.main.operations.other.internal.CPUReduce.Type
    +
    neureka.backend.main.operations.other.internal
    +

    Enum CPUReduce.Type

    -
    -
    -
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • java.lang.Enum<CPUReduce.Type>
      • +
      • +
          +
        • neureka.backend.main.operations.other.internal.CPUReduce.Type
        • +
        +
      • +
      +
    • +
    +
    +
    -
    -
      - -
    • -
      -

      Nested Class Summary

      -
      -

      Nested classes/interfaces inherited from class java.lang.Enum

      -Enum.EnumDesc<E extends Enum<E>>
      -
      +
      +
      public static enum CPUReduce.Type
      +extends java.lang.Enum<CPUReduce.Type>
    • - -
    • -
      -

      Enum Constant Summary

      -
      Enum Constants
      -
      -
      Enum Constant
      -
      Description
      - -
       
      - -
       
      +
    - +
    + - -
    -
      +
    +
    + -
  • -
    -

    Method Details

    -
      -
    • -
      -

      values

      -
      public static CPUReduce.Type[] values()
      -
      Returns an array containing the constants of this enum class, in -the order they are declared.
      -
      -
      Returns:
      -
      an array containing the constants of this enum class, in the order they are declared
      +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          values

          +
          public static CPUReduce.Type[] values()
          +
Returns an array containing the constants of this enum type, in the order they are declared. This method may be used to iterate over the constants as follows:

for (CPUReduce.Type c : CPUReduce.Type.values())
    System.out.println(c);
          +
          +
          +
          Returns:
          +
          an array containing the constants of this enum type, in the order they are declared
          -
    • -
    • -
      -

      valueOf

      -
      public static CPUReduce.Type valueOf(String name)
      -
      Returns the enum constant of this class with the specified name. +
    + + + +
      +
    • +

      valueOf

      +
      public static CPUReduce.Type valueOf(java.lang.String name)
      +
Returns the enum constant of this type with the specified name. The string must match exactly an identifier used to declare an enum constant in this type. (Extraneous whitespace characters are not permitted.)
      -
      -
      Parameters:
      +
      +
      Parameters:
      name - the name of the enum constant to be returned.
      -
      Returns:
      +
      Returns:
      the enum constant with the specified name
      -
      Throws:
      -
      IllegalArgumentException - if this enum class has no constant with the specified name
      -
      NullPointerException - if the argument is null
      +
      Throws:
      +
      java.lang.IllegalArgumentException - if this enum type has no constant with the specified name
      +
      java.lang.NullPointerException - if the argument is null
      -
  • - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/operations/other/internal/CPUReduce.html b/docs/jdocs/neureka/backend/main/operations/other/internal/CPUReduce.html index 61276f677..d301fd2ef 100644 --- a/docs/jdocs/neureka/backend/main/operations/other/internal/CPUReduce.html +++ b/docs/jdocs/neureka/backend/main/operations/other/internal/CPUReduce.html @@ -1,201 +1,315 @@ - + + - -CPUReduce (neureka 1.0.0 API) - - - - + +CPUReduce (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class CPUReduce

    +
    neureka.backend.main.operations.other.internal
    +

    Class CPUReduce

    -
    java.lang.Object -
    neureka.backend.main.operations.other.internal.CPUReduce
    -
    -
    -
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.backend.main.operations.other.internal.CPUReduce
      • +
      +
    • +
    +
    +
      +
    • +
      All Implemented Interfaces:
      -
      ImplementationFor<CPU>
      +
      ImplementationFor<CPU>

      -
public class CPUReduce
extends java.lang.Object
implements ImplementationFor<CPU>

An implementation of the min and max algorithm running on the CPU. This algorithm splits the provided input tensor into chunks which are then reduced to local min and max values. This happens iteratively until only a single value is left. Each workload also returns the index of the found min/max value, which is important for backpropagation...
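The following single-threaded sketch illustrates the chunked reduction idea described above, including the index bookkeeping needed for backpropagation. It is illustrative only; the actual CPUReduce distributes the chunks across the CPU's worker threads and repeats the step until one value remains.

final class ChunkedMaxReduceSketch {
    /** Returns { indexOfMax, maxValue } for a non-empty input. */
    static double[] reduceMax(double[] data, int chunkSize) {
        if (data.length == 0 || chunkSize < 1)
            throw new IllegalArgumentException("Need a non-empty input and a positive chunk size.");
        int chunks = (data.length + chunkSize - 1) / chunkSize;
        int[] localIndex = new int[chunks];
        double[] localMax = new double[chunks];
        for (int c = 0; c < chunks; c++) {            // each chunk could be its own workload
            int start = c * chunkSize;
            int end = Math.min(start + chunkSize, data.length);
            int best = start;
            for (int i = start + 1; i < end; i++)
                if (data[i] > data[best]) best = i;
            localIndex[c] = best;
            localMax[c] = data[best];
        }
        int best = 0;                                 // merge the local results into one
        for (int c = 1; c < chunks; c++)
            if (localMax[c] > localMax[best]) best = c;
        return new double[]{ localIndex[best], localMax[best] };
    }
}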
      -
    -
    -
      - -
    • -
      -

      Nested Class Summary

      -
      Nested Classes
      -
      -
      Modifier and Type
      -
      Class
      -
      Description
      -
      static enum 
      - -
       
      +
    • +
    - +
    + - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
    • +
    -
  • -
    -

    Method Details

    -
      -
    • -
      -

      run

      -
      public Tensor<Integer> run(ExecutionCall<CPU> call)
      -
      Description copied from interface: ImplementationFor
      +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          run

          +
          public Tensor<java.lang.Integer> run(ExecutionCall<CPU> call)
          +
          Description copied from interface: ImplementationFor
          This method is the entrypoint for a concrete implementation of the algorithm to which instances of this interface belong and the device on which this is implemented. One has to keep in mind that the implementation details with respect to the target device are specific to the methods of the "TargetDevice" type on which this call should run...
          -
          -
          Specified by:
          -
          run in interface ImplementationFor<CPU>
          -
          Parameters:
          +
          +
          Specified by:
          +
          run in interface ImplementationFor<CPU>
          +
          Parameters:
          call - The call which ought to be executed on this implementation.
          -
    -
  • - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/operations/other/internal/CPUSum.html b/docs/jdocs/neureka/backend/main/operations/other/internal/CPUSum.html index 6b7ce3d41..85ec3b7ee 100644 --- a/docs/jdocs/neureka/backend/main/operations/other/internal/CPUSum.html +++ b/docs/jdocs/neureka/backend/main/operations/other/internal/CPUSum.html @@ -1,184 +1,294 @@ - + + - -CPUSum (neureka 1.0.0 API) - - - - + +CPUSum (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class CPUSum

    +
    neureka.backend.main.operations.other.internal
    +

    Class CPUSum

    -
    java.lang.Object -
    neureka.backend.main.operations.other.internal.CPUSum
    -
    -
    -
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.backend.main.operations.other.internal.CPUSum
      • +
      +
    • +
    +
    +
      +
    • +
      All Implemented Interfaces:
      -
      ImplementationFor<CPU>
      +
      ImplementationFor<CPU>

      -
      public class CPUSum -extends Object -implements ImplementationFor<CPU>
      +
      +
      public class CPUSum
      +extends java.lang.Object
      +implements ImplementationFor<CPU>
      An implementation of the sum algorithm running on the CPU. This algorithm splits the provided input tensor into chunks which are then reduced to local sum values. This happens iteratively until only a single value is left.
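The same divide-and-conquer idea, sketched for summation. Again this is plain Java written for illustration, not the actual CPUSum implementation; the chunk size is arbitrary and the real backend parallelizes the per-chunk work.

    // Illustrative only: collapses fixed-size chunks into partial sums and repeats
    // until a single value is left.
    public final class ChunkedSum {

        static final int CHUNK = 4; // hypothetical chunk size

        public static double sum(double[] data) {
            double[] partial = data.clone();
            while (partial.length > 1) {
                double[] next = new double[(partial.length + CHUNK - 1) / CHUNK];
                for (int i = 0; i < partial.length; i++)
                    next[i / CHUNK] += partial[i];   // each chunk collapses to one local sum
                partial = next;                      // iterate until only one value is left
            }
            return partial.length == 0 ? 0 : partial[0];
        }

        public static void main(String[] args) {
            System.out.println(sum(new double[]{ 1, 2, 3, 4, 5 })); // prints 15.0
        }
    }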
      -
    -
    -
      - -
    • -
      -

      Constructor Summary

      -
      Constructors
      -
      -
      Constructor
      -
      Description
      - -
       
      +
    • +
    - +
    +
      +
    • + +
        +
      • + + +

        Constructor Summary

        + + + + + + + + +
        Constructors 
        Constructor and Description
        CPUSum() 
      • +
      -
    • -
      -

      Method Summary

      -
      -
      -
      -
      -
      Modifier and Type
      -
      Method
      -
      Description
      - - -
      + -
      -
      -
        + +
      +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        CPUSum

        -
        public CPUSum()
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            CPUSum

            +
            public CPUSum()
          -
    • +
    -
  • -
    -

    Method Details

    -
      -
    • -
      -

      run

      -
      public Tensor<?> run(ExecutionCall<CPU> call)
      -
      Description copied from interface: ImplementationFor
      +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          run

          +
          public Tensor<?> run(ExecutionCall<CPU> call)
          +
          Description copied from interface: ImplementationFor
          This method is the entrypoint for a concrete implementation of the algorithm to which instances of this interface belong and the device on which this is implemented. One has to keep in mind that the implementation details with respect to the target device are specific to the methods of the "TargetDevice" type on which this call should run...
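For orientation, a bare-bones skeleton of such an implementation might look as follows. This is only a sketch: the class name is invented, the import paths are inferred from the package names visible in these docs, and the method body is a placeholder rather than real device logic.

    import neureka.Tensor;
    import neureka.backend.api.ExecutionCall;
    import neureka.backend.api.ImplementationFor;
    import neureka.devices.host.CPU;

    // Hypothetical example class, not part of Neureka itself.
    public final class MyCpuKernel implements ImplementationFor<CPU> {

        @Override
        public Tensor<?> run(ExecutionCall<CPU> call) {
            // A real implementation would unpack the tensors referenced by the call,
            // perform the element-wise work on the CPU device and return the result.
            throw new UnsupportedOperationException("sketch only");
        }
    }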
          -
          -
          Specified by:
          -
          run in interface ImplementationFor<CPU>
          -
          Parameters:
          +
          +
          Specified by:
          +
          run in interface ImplementationFor<CPU>
          +
          Parameters:
          call - The call which ought to be executed on this implementation.
          -
    -
  • - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/main/operations/other/internal/package-frame.html b/docs/jdocs/neureka/backend/main/operations/other/internal/package-frame.html new file mode 100644 index 000000000..e66228c7f --- /dev/null +++ b/docs/jdocs/neureka/backend/main/operations/other/internal/package-frame.html @@ -0,0 +1,24 @@ + + + + + +neureka.backend.main.operations.other.internal (neureka 1.0.1 API) + + + + +

    neureka.backend.main.operations.other.internal

    +
    +

    Classes

    + +

    Enums

    + +
    + + diff --git a/docs/jdocs/neureka/backend/main/operations/other/internal/package-summary.html b/docs/jdocs/neureka/backend/main/operations/other/internal/package-summary.html index 5ed9e5720..187c12bd6 100644 --- a/docs/jdocs/neureka/backend/main/operations/other/internal/package-summary.html +++ b/docs/jdocs/neureka/backend/main/operations/other/internal/package-summary.html @@ -1,111 +1,162 @@ - + + - -neureka.backend.main.operations.other.internal (neureka 1.0.0 API) - - - - + +neureka.backend.main.operations.other.internal (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    -

    Package neureka.backend.main.operations.other.internal

    -
    -
    -
    package neureka.backend.main.operations.other.internal
    -
    -
      -
    • - -
    • -
    • -
      -
      -
      -
      -
      Class
      -
      Description
      - -
      +
      +
        +
      • + + + + + + + + + + + + + + + + +
        Class Summary 
        ClassDescription
        CPUReduce
        An implementation of the min and max algorithm running on the CPU.
        - - -
         
        - -
        +
        CPUSum
        An implementation of the sum algorithm running on the CPU.
        - - - - +
        +
      • +
      • + + + + + + + + + + + + +
        Enum Summary 
        EnumDescription
        CPUReduce.Type 
      -
    -
    + + + + diff --git a/docs/jdocs/neureka/backend/main/operations/other/internal/package-tree.html b/docs/jdocs/neureka/backend/main/operations/other/internal/package-tree.html index 45c6ced0e..c8cd13171 100644 --- a/docs/jdocs/neureka/backend/main/operations/other/internal/package-tree.html +++ b/docs/jdocs/neureka/backend/main/operations/other/internal/package-tree.html @@ -1,86 +1,147 @@ - + + - -neureka.backend.main.operations.other.internal Class Hierarchy (neureka 1.0.0 API) - - - - + +neureka.backend.main.operations.other.internal Class Hierarchy (neureka 1.0.1 API) - - - - - - + + -
    - -
    -

    Hierarchy For Package neureka.backend.main.operations.other.internal

    -Package Hierarchies: +Package Hierarchies:
    -
    +

    Class Hierarchy

    -
    -
    -

    Enum Class Hierarchy

    +

    Enum Hierarchy

      -
    • java.lang.Object +
    • java.lang.Object
    -
    -
    + + + + diff --git a/docs/jdocs/neureka/backend/main/operations/other/package-frame.html b/docs/jdocs/neureka/backend/main/operations/other/package-frame.html new file mode 100644 index 000000000..20fc3e72d --- /dev/null +++ b/docs/jdocs/neureka/backend/main/operations/other/package-frame.html @@ -0,0 +1,30 @@ + + + + + +neureka.backend.main.operations.other (neureka 1.0.1 API) + + + + +

    neureka.backend.main.operations.other

    + + + diff --git a/docs/jdocs/neureka/backend/main/operations/other/package-summary.html b/docs/jdocs/neureka/backend/main/operations/other/package-summary.html index f79101d0f..623077a77 100644 --- a/docs/jdocs/neureka/backend/main/operations/other/package-summary.html +++ b/docs/jdocs/neureka/backend/main/operations/other/package-summary.html @@ -1,156 +1,202 @@ - + + - -neureka.backend.main.operations.other (neureka 1.0.0 API) - - - - + +neureka.backend.main.operations.other (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    -

    Package neureka.backend.main.operations.other

    -
    -
    -
    package neureka.backend.main.operations.other
    -
    -
    Everything in this package should be considered library-private! - DO NOT DEPEND ON CLASSES INSIDE THIS PACKAGE! - Code inside this package or any sub-packages might change frequently...
    -
    -
    -
    -
    + + + +

    Package neureka.backend.main.operations.other Description

    +
    Everything in this package should be considered library-private! + DO NOT DEPEND ON CLASSES INSIDE THIS PACKAGE! + Code inside this package or any sub-packages might change frequently...
    + + + + diff --git a/docs/jdocs/neureka/backend/main/operations/other/package-tree.html b/docs/jdocs/neureka/backend/main/operations/other/package-tree.html index 88a21d127..24ebb14be 100644 --- a/docs/jdocs/neureka/backend/main/operations/other/package-tree.html +++ b/docs/jdocs/neureka/backend/main/operations/other/package-tree.html @@ -1,86 +1,149 @@ - + + - -neureka.backend.main.operations.other Class Hierarchy (neureka 1.0.0 API) - - - - + +neureka.backend.main.operations.other Class Hierarchy (neureka 1.0.1 API) - - - - - - + + -
    - -
    -

    Hierarchy For Package neureka.backend.main.operations.other

    -Package Hierarchies: +Package Hierarchies:
    -
    +

    Class Hierarchy

      -
    • java.lang.Object +
    • java.lang.Object
        -
      • neureka.backend.api.template.operations.AbstractOperation (implements neureka.backend.api.Operation) +
      • neureka.backend.api.template.operations.AbstractOperation (implements neureka.backend.api.Operation)
          -
        • neureka.backend.main.operations.other.AssignLeft
        • -
        • neureka.backend.main.operations.other.Cat
        • -
        • neureka.backend.main.operations.other.DimFit
        • -
        • neureka.backend.main.operations.other.DimTrim
        • -
        • neureka.backend.main.operations.other.Max
        • -
        • neureka.backend.main.operations.other.Min
        • -
        • neureka.backend.main.operations.other.Permute
        • -
        • neureka.backend.main.operations.other.Randomization
        • -
        • neureka.backend.main.operations.other.ReLayout
        • -
        • neureka.backend.main.operations.other.Reshape
        • -
        • neureka.backend.main.operations.other.Slice
        • -
        • neureka.backend.main.operations.other.Sum
        • +
        • neureka.backend.main.operations.other.AssignLeft
        • +
        • neureka.backend.main.operations.other.Cat
        • +
        • neureka.backend.main.operations.other.DimFit
        • +
        • neureka.backend.main.operations.other.DimTrim
        • +
        • neureka.backend.main.operations.other.Max
        • +
        • neureka.backend.main.operations.other.Min
        • +
        • neureka.backend.main.operations.other.Permute
        • +
        • neureka.backend.main.operations.other.Randomization
        • +
        • neureka.backend.main.operations.other.ReLayout
        • +
        • neureka.backend.main.operations.other.Reshape
        • +
        • neureka.backend.main.operations.other.Slice
        • +
        • neureka.backend.main.operations.other.Sum
    -
    -
    + + + + diff --git a/docs/jdocs/neureka/backend/main/operations/package-frame.html b/docs/jdocs/neureka/backend/main/operations/package-frame.html new file mode 100644 index 000000000..f670b7e1d --- /dev/null +++ b/docs/jdocs/neureka/backend/main/operations/package-frame.html @@ -0,0 +1,20 @@ + + + + + +neureka.backend.main.operations (neureka 1.0.1 API) + + + + +

    neureka.backend.main.operations

    +
    +

    Classes

    + +
    + + diff --git a/docs/jdocs/neureka/backend/main/operations/package-summary.html b/docs/jdocs/neureka/backend/main/operations/package-summary.html index 81e4b9d9a..2a2dc3259 100644 --- a/docs/jdocs/neureka/backend/main/operations/package-summary.html +++ b/docs/jdocs/neureka/backend/main/operations/package-summary.html @@ -1,131 +1,159 @@ - + + - -neureka.backend.main.operations (neureka 1.0.0 API) - - - - + +neureka.backend.main.operations (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    -

    Package neureka.backend.main.operations

    -
    -
    -
    package neureka.backend.main.operations
    -
    -
    Everything in this package should be considered library-private! - DO NOT DEPEND ON CLASSES INSIDE THIS PACKAGE! - Code inside this package or any sub-packages might change frequently...
    -
    -
    -
      -
    • -
    • -
    • -
      -
      Classes
      -
      -
      Class
      -
      Description
      - -
       
      - -
      -
      Methods inside this utility class execute only some ExecutionCall arguments - in groups if their total number exceeds the arity of an operation.
      -
      -
      -
      -
    • + +
    -
    -
    + + diff --git a/docs/jdocs/neureka/backend/main/operations/package-tree.html b/docs/jdocs/neureka/backend/main/operations/package-tree.html index 3cda5cdd8..213c9117e 100644 --- a/docs/jdocs/neureka/backend/main/operations/package-tree.html +++ b/docs/jdocs/neureka/backend/main/operations/package-tree.html @@ -1,72 +1,135 @@ - + + - -neureka.backend.main.operations Class Hierarchy (neureka 1.0.0 API) - - - - + +neureka.backend.main.operations Class Hierarchy (neureka 1.0.1 API) - - - - - - + + -
    - -
    -

    Hierarchy For Package neureka.backend.main.operations

    -Package Hierarchies: +Package Hierarchies:
    -
    +

    Class Hierarchy

    -
    -
    + + + + diff --git a/docs/jdocs/neureka/backend/ocl/CLBackend.html b/docs/jdocs/neureka/backend/ocl/CLBackend.html index 0de02bab9..e148536b4 100644 --- a/docs/jdocs/neureka/backend/ocl/CLBackend.html +++ b/docs/jdocs/neureka/backend/ocl/CLBackend.html @@ -1,337 +1,489 @@ - + + - -CLBackend (neureka 1.0.0 API) - - - - + +CLBackend (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class CLBackend

    -
    -
    java.lang.Object -
    neureka.backend.ocl.CLBackend
    +
    neureka.backend.ocl
    +

    Class CLBackend

    -
    -
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.backend.ocl.CLBackend
      • +
      +
    • +
    +
    +
      +
    • +
      All Implemented Interfaces:
      -
      BackendExtension, Component<Extensions>
      +
      BackendExtension, Component<Extensions>

      -
      public final class CLBackend -extends Object -implements BackendExtension
      -
      This is an OpenCL context component for any given BackendContext which +
      +
      public final class CLBackend
      +extends java.lang.Object
      +implements BackendExtension
      +
      This is an OpenCL context component for any given BackendContext which extends a given backend context instance for additional functionality, which in this case is the OpenCL backend storing platform and device information. - BackendContexts are thread local states - used for managing Operation, Function - as well as Component implementation instances like this one. + BackendContexts are thread local states + used for managing Operation, Function + as well as Component implementation instances like this one. A given state might not be compatible with the concepts introduced in other contexts which is why it makes sense to have separate "worlds" with potentially different operations... - The component system of the BackendContext exists so that a given context + The component system of the BackendContext exists so that a given context can be extended for more functionality - and also to attach relevant states like for example in this case the CLBackend + and also to attach relevant states like for example in this case the CLBackend instance will directly or indirectly reference kernels, memory objects and other concepts exposed by OpenCL...
      -
    -
    -
      + +
    +
    +
    + - + -
  • -
    -

    Method Details

    - + + + +
  • -
  • -
    -

    getSettings

    -
    public CLSettings getSettings()
    -
    -
    Returns:
    + + + + +
      +
    • +

      getSettings

      +
      public CLSettings getSettings()
      +
      +
      Returns:
      A container for OpenCL specific settings.
      -
  • -
  • -
    -

    update

    -
    public boolean update(Component.OwnerChangeRequest<Extensions> changeRequest)
    -
    Updating the CLContext will cause the list of existing OpenCLPlatform instances to be - cleared and refilled with completely new OpenCLPlatform instances. - This will in effect also cause the recreation of any OpenCLDevice instances - as part of these OpenCLPlatforms. + + + + +
      +
    • +

      update

      +
      public boolean update(Component.OwnerChangeRequest<Extensions> changeRequest)
      +
      Updating the CLContext will cause the list of existing OpenCLPlatform instances to be + cleared and refilled with completely new OpenCLPlatform instances. + This will in effect also cause the recreation of any OpenCLDevice instances + as part of these OpenCLPlatforms. This will subsequently cause the recompilation of many OpenCL kernels.
      -
      -
      Specified by:
      -
      update in interface Component<Extensions>
      -
      Parameters:
      -
      changeRequest - An Component.OwnerChangeRequest implementation instance used to communicate the type of change, context information and the ability to execute the change directly.
      -
      Returns:
      +
      +
      Specified by:
      +
      update in interface Component<Extensions>
      +
      Parameters:
      +
      changeRequest - An Component.OwnerChangeRequest implementation instance used to communicate the type of change, context information and the ability to execute the change directly.
      +
      Returns:
      The truth value determining if the state change should be aborted or not.
      -
  • -
  • -
    -

    toString

    -
    public String toString()
    -
    -
    Overrides:
    -
    toString in class Object
    + + + + +
      +
    • +

      toString

      +
      public java.lang.String toString()
      +
      +
      Overrides:
      +
      toString in class java.lang.Object
      -
  • -
  • -
    -

    find

    -
    public BackendExtension.DeviceOption find(String searchKey)
    -
    Description copied from interface: BackendExtension
    -
    The BackendContext does not handle Device instances directly. - Instead, the task of instantiating and exposing Device implementations - should be carried by BackendExtension implementations. + + + + +
      +
    • +

      find

      +
      public BackendExtension.DeviceOption find(java.lang.String searchKey)
      +
      Description copied from interface: BackendExtension
      +
      The BackendContext does not handle Device instances directly. + Instead, the task of instantiating and exposing Device implementations + should be carried by BackendExtension implementations. One extension might be implementing CUDA operations, - therefore, the extension should also deal with some sort of CUDADevice implementation.
      -
      -
      Specified by:
      -
      find in interface BackendExtension
      -
      Parameters:
      -
      searchKey - The search key used to find a suitable Device implementation in this extension.
      -
      Returns:
      -
      A suitable BackendExtension.DeviceOption or null if nothing was found.
      + therefore, the extension should also deal with some sort of CUDADevice implementation.
    +
    +
    Specified by:
    +
    find in interface BackendExtension
    +
    Parameters:
    +
    searchKey - The search key used to find a suitable Device implementation in this extension.
    +
    Returns:
    +
    A suitable BackendExtension.DeviceOption or null if nothing was found.
    -
  • -
  • -
    -

    reset

    -
    public void reset()
    -
    Description copied from interface: BackendExtension
    -
    This will indirectly be called through the Neureka.reset() method, + + + + +
  • -
  • -
    -

    dispose

    -
    public void dispose()
    + + + + +
      +
    • +

      dispose

      +
      public void dispose()
      This method will free all the resources occupied by this context, meaning that all platforms and their devices will be disposed. Their kernels will be removed and their tensors restored.
      -
      -
      Specified by:
      -
      dispose in interface BackendExtension
      +
      +
      Specified by:
      +
      dispose in interface BackendExtension
      -
  • -
  • -
    -

    getLoader

    -
    public BackendLoader getLoader()
    -
    -
    Specified by:
    -
    getLoader in interface BackendExtension
    + + + + +
  • - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/ocl/CLSettings.html b/docs/jdocs/neureka/backend/ocl/CLSettings.html index 70fb7503f..eecd736c8 100644 --- a/docs/jdocs/neureka/backend/ocl/CLSettings.html +++ b/docs/jdocs/neureka/backend/ocl/CLSettings.html @@ -1,177 +1,295 @@ - + + - -CLSettings (neureka 1.0.0 API) - - - - + +CLSettings (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class CLSettings

    -
    -
    java.lang.Object -
    neureka.backend.ocl.CLSettings
    +
    neureka.backend.ocl
    +

    Class CLSettings

    -
    -
    -
    public class CLSettings -extends Object
    -
    OpenCL related settings for the CLBackend extension.
    -
    -
    -
      - +
      +
        +
      • java.lang.Object
      • -
        -

        Constructor Summary

        -
        Constructors
        -
        -
        Constructor
        -
        Description
        - -
         
        +
          +
        • neureka.backend.ocl.CLSettings
        • +
        +
      • +
      +
      +
        +
      • +
        +
        +
        public class CLSettings
        +extends java.lang.Object
        +
        OpenCL related settings for the CLBackend extension.
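A small usage sketch based only on the constructor and methods listed on this page (in practice the settings object would usually be obtained from a live CLBackend via getSettings(); a standalone instance simply keeps the example self-contained):

    import neureka.backend.ocl.CLSettings;

    public final class CLSettingsDemo {
        public static void main(String[] args) {
            CLSettings settings = new CLSettings()
                                      .setAutoConvertToFloat(true); // fluent setter, returns the settings object
            System.out.println(settings.isAutoConvertToFloat());    // true
            settings.reset();                                        // back to the default configuration
            System.out.println(settings.isAutoConvertToFloat());
        }
    }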
        +
      • +
      -
    +
    + - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        CLSettings

        -
        public CLSettings()
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            CLSettings

            +
            public CLSettings()
          -
    • +
    -
  • -
    -

    Method Details

    -
      -
    • -
      -

      isAutoConvertToFloat

      -
      public boolean isAutoConvertToFloat()
      -
      +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          isAutoConvertToFloat

          +
          public boolean isAutoConvertToFloat()
        • -
        • -
          -

          setAutoConvertToFloat

          -
          public CLSettings setAutoConvertToFloat(boolean autoConvertToFloat)
          -
          +
        + + + +
          +
        • +

          setAutoConvertToFloat

          +
          public CLSettings setAutoConvertToFloat(boolean autoConvertToFloat)
        • -
        • -
          -

          reset

          -
          public void reset()
          -
          +
        + + + +
          +
        • +

          reset

          +
          public void reset()
          +
        • +
      -
  • - +
    +
    - + + + + diff --git a/docs/jdocs/neureka/backend/ocl/package-frame.html b/docs/jdocs/neureka/backend/ocl/package-frame.html new file mode 100644 index 000000000..e913a749f --- /dev/null +++ b/docs/jdocs/neureka/backend/ocl/package-frame.html @@ -0,0 +1,20 @@ + + + + + +neureka.backend.ocl (neureka 1.0.1 API) + + + + +

    neureka.backend.ocl

    +
    +

    Classes

    + +
    + + diff --git a/docs/jdocs/neureka/backend/ocl/package-summary.html b/docs/jdocs/neureka/backend/ocl/package-summary.html index 4001ddecc..5271ab73a 100644 --- a/docs/jdocs/neureka/backend/ocl/package-summary.html +++ b/docs/jdocs/neureka/backend/ocl/package-summary.html @@ -1,90 +1,149 @@ - + + - -neureka.backend.ocl (neureka 1.0.0 API) - - - - + +neureka.backend.ocl (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    -

    Package neureka.backend.ocl

    +

    Package neureka.backend.ocl

    -
    -
    package neureka.backend.ocl
    -
    -
      -
    • -
      -
      Classes
      -
      -
      Class
      -
      Description
      - -
      -
      This is an OpenCL context component for any given BackendContext which +
      +
        +
      • + + + + + + + + + + + + + + + + +
        Class Summary 
        ClassDescription
        CLBackend +
        This is an OpenCL context component for any given BackendContext which extends a given backend context instance for additional functionality, which in this case is the OpenCL backend storing platform and device information.
        - - -
        -
        OpenCL related settings for the CLBackend extension.
        -
        - - +
        CLSettings +
        OpenCL related settings for the CLBackend extension.
        +
      -
    -
    + + + + diff --git a/docs/jdocs/neureka/backend/ocl/package-tree.html b/docs/jdocs/neureka/backend/ocl/package-tree.html index f70a83c94..bb31a90f9 100644 --- a/docs/jdocs/neureka/backend/ocl/package-tree.html +++ b/docs/jdocs/neureka/backend/ocl/package-tree.html @@ -1,72 +1,135 @@ - + + - -neureka.backend.ocl Class Hierarchy (neureka 1.0.0 API) - - - - + +neureka.backend.ocl Class Hierarchy (neureka 1.0.1 API) - - - - - - + + -
    - -
    -

    Hierarchy For Package neureka.backend.ocl

    -Package Hierarchies: +Package Hierarchies:
    -
    +

    Class Hierarchy

    -
    -
    + + + + diff --git a/docs/jdocs/neureka/common/composition/AbstractComponentOwner.html b/docs/jdocs/neureka/common/composition/AbstractComponentOwner.html index 758882496..2d469b310 100644 --- a/docs/jdocs/neureka/common/composition/AbstractComponentOwner.html +++ b/docs/jdocs/neureka/common/composition/AbstractComponentOwner.html @@ -1,380 +1,473 @@ - + + - -AbstractComponentOwner (neureka 1.0.0 API) - - - - + +AbstractComponentOwner (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class AbstractComponentOwner<C>

    -
    -
    java.lang.Object -
    neureka.common.composition.AbstractComponentOwner<C>
    +
    neureka.common.composition
    +

    Class AbstractComponentOwner<C>

    -
    -
    -
    Type Parameters:
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.common.composition.AbstractComponentOwner<C>
      • +
      +
    • +
    +
    +
    -
    -
    - +
    +
      +
    • + + -
    • -
      -

      Method Summary

      -
      -
      -
      -
      -
      Modifier and Type
      -
      Method
      -
      Description
      -
      protected void
      - -
      +
        +
      • + + +

        Method Summary

        + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        All Methods Instance Methods Abstract Methods Concrete Methods 
        Modifier and TypeMethod and Description
        protected void_deleteComponents()
        This method deletes the array of components of this component owner by nulling the array variable field.
        - -
        protected abstract <T extends Component<C>>
        T
        -
        _removeOrReject(T newComponent)
        -
        +
        protected abstract <T extends Component<C>>
        T
        _removeOrReject(T newComponent)
        An implementation of this method checks if the passed component should be removed from the component collection of this class or its removal should be "rejected".
        - -
        protected <T> void
        -
        _set(Component<T> anyComponent)
        -
         
        -
        protected abstract <T extends Component<C>>
        T
        -
        _setOrReject(T newComponent)
        -
        +
        protected <T> void_set(Component<T> anyComponent) 
        protected abstract <T extends Component<C>>
        T
        _setOrReject(T newComponent)
        This abstract method ought to be implemented further down the inheritance hierarchy where its responsibility makes more sense, namely: An implementation of this method checks if the passed component should be added or "rejected" to the component collection of this class.
        - -
        protected C
        - -
         
        -
        protected void
        - -
        +
        protected C_this() 
        protected void_transferFrom(AbstractComponentOwner<C> other)
        A component owner might need to exchange components.
        - -
        <T extends Component>
        Optional<T>
        -
        find(Class<T> componentClass)
        -
        +
        <T extends Component>
        java.util.Optional<T>
        find(java.lang.Class<T> componentClass)
        This method finds a component of the given type/class - and returns it as an Optional which may or may not be empty.
        - -
        <T extends Component<?>>
        T
        -
        get(Class<T> componentClass)
        -
        + and returns it as an Optional which may or may not be empty.
        +
        <T extends Component<?>>
        T
        get(java.lang.Class<T> componentClass)
        This method tries to find a component inside the internal component array whose class matches the one provided.
        - -
        <T extends Component<?>>
        List<T>
        -
        getAll(Class<T> componentClass)
        -
        +
        <T extends Component<?>>
        java.util.List<T>
        getAll(java.lang.Class<T> componentClass)
        This method tries to find all components inside the internal component array whose classes are sub types of the one provided.
        - -
        <T extends Component<C>>
        boolean
        -
        has(Class<T> componentClass)
        -
        -
        This method checks if a component identified by the passed Class +
        <T extends Component<C>>
        boolean
        has(java.lang.Class<T> componentClass) +
        This method checks if a component identified by the passed Class instance is present inside the stored component collection.
        - -
        <T extends Component<C>>
        C
        -
        remove(Class<T> componentClass)
        -
        +
        <T extends Component<C>>
        C
        remove(java.lang.Class<T> componentClass)
        This method removes a component identified by the passed Class instance if found in the stored component collection.
        - -
        <T extends Component<C>>
        C
        -
        set(T newComponent)
        -
        +
        <T extends Component<C>>
        C
        set(T newComponent)
        This method stores the passed component inside the component collection of this class...
        - - - - -
        -

        Methods inherited from class java.lang.Object

        -clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
        - +
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
        • +
      -
      -
      -
        + +
      +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        AbstractComponentOwner

        -
        public AbstractComponentOwner()
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            AbstractComponentOwner

            +
            public AbstractComponentOwner()
          -
    • +
    -
  • -
    -

    Method Details

    -
      -
    • -
      -

      _this

      -
      protected C _this()
      -
      +
        +
      • + + +

        Method Detail

        + + + + + + + +
          +
        • +

          _transferFrom

          +
          protected void _transferFrom(AbstractComponentOwner<C> other)
          A component owner might need to exchange components.
          Meaning that the components of another owner will be transferred and adopted by the current one. During this process the transferred components will be notified of their new owner. This is important because some components might reference their owners...

          - This change happens for example in the Tensor class when tensors are being instantiated by + This change happens for example in the Tensor class when tensors are being instantiated by certain constructors which require the injection of the contents of another tensor into a new one.
          -
          -
          Parameters:
          +
          +
          Parameters:
          other - The other owner which will be stripped of its components which are then incorporated into this owner.
          -
  • -
  • -
    -

    _deleteComponents

    -
    protected void _deleteComponents()
    + + + + +
      +
    • +

      _deleteComponents

      +
      protected void _deleteComponents()
      This method deletes the array of components of this component owner by nulling the array variable field.
      -
  • -
  • -
    -

    get

    -
    public <T extends Component<?>> T get(Class<T> componentClass)
    + + + + +
      +
    • +

      get

      +
      public <T extends Component<?>> T get(java.lang.Class<T> componentClass)
      This method tries to find a component inside the internal component array whose class matches the one provided. If no such component could be found then the return value will simply be null.
      -
      -
      Specified by:
      -
      get in interface ComponentOwner<C>
      -
      Type Parameters:
      +
      +
      Specified by:
      +
      get in interface ComponentOwner<C>
      +
      Type Parameters:
      T - The type parameter defining the component class.
      -
      Parameters:
      +
      Parameters:
      componentClass - The type/class of the component which shall be found and returned.
      -
      Returns:
      +
      Returns:
      The correct component or null if nothing has been found.
      -
  • -
  • -
    -

    getAll

    -
    public <T extends Component<?>> List<T> getAll(Class<T> componentClass)
    + + + + +
      +
    • +

      getAll

      +
      public <T extends Component<?>> java.util.List<T> getAll(java.lang.Class<T> componentClass)
      This method tries to find all components inside the internal component array whose classes are sub types of the one provided. If no such components could be found then the return value will simply be an empty list.
      -
      -
      Specified by:
      -
      getAll in interface ComponentOwner<C>
      -
      Type Parameters:
      +
      +
      Specified by:
      +
      getAll in interface ComponentOwner<C>
      +
      Type Parameters:
      T - The type parameter defining the component class.
      -
      Parameters:
      +
      Parameters:
      componentClass - The type/class of the components which shall be found and returned as list.
      -
      Returns:
      +
      Returns:
      The correct component or null if nothing has been found.
      -
  • -
  • -
    -

    remove

    -
    public <T extends Component<C>> C remove(Class<T> componentClass)
    + + + + +
      +
    • +

      remove

      +
      public <T extends Component<C>> C remove(java.lang.Class<T> componentClass)
      This method removes a component identified by the passed Class instance if found in the stored component collection.
      -
      -
      Specified by:
      -
      remove in interface ComponentOwner<C>
      -
      Type Parameters:
      +
      +
      Specified by:
      +
      remove in interface ComponentOwner<C>
      +
      Type Parameters:
      T - The type parameter of the component which will be removed by this method.
      -
      Parameters:
      +
      Parameters:
      componentClass - The type/class of a component which will be removed by this method.
      -
      Returns:
      +
      Returns:
      This very class.
      -
  • -
  • -
    -

    has

    -
    public <T extends Component<C>> boolean has(Class<T> componentClass)
    -
    This method checks if a component identified by the passed Class + + + + +
      +
    • +

      has

      +
      public <T extends Component<C>> boolean has(java.lang.Class<T> componentClass)
      +
      This method checks if a component identified by the passed Class instance is present inside the stored component collection.
      -
      -
      Specified by:
      -
      has in interface ComponentOwner<C>
      -
      Type Parameters:
      +
      +
      Specified by:
      +
      has in interface ComponentOwner<C>
      +
      Type Parameters:
      T - The type of the component to be checked.
      -
      Parameters:
      +
      Parameters:
      componentClass - The class/type of component that might exist in components.
      -
      Returns:
      +
      Returns:
      True if the component of the given type/class has been found.
      -
  • -
  • -
    -

    set

    -
    public <T extends Component<C>> C set(T newComponent)
    + + + + + + +
      +
    • +

      set

      +
      public <T extends Component<C>> C set(T newComponent)
      This method stores the passed component inside the component collection of this class... However, it only adds the provided component if it is not "rejected" by an abstract method, namely: "_addOrReject"! Rejection means that this method simply returns null.
      -
      -
      Specified by:
      -
      set in interface ComponentOwner<C>
      -
      Type Parameters:
      +
      +
      Specified by:
      +
      set in interface ComponentOwner<C>
      +
      Type Parameters:
      T - The type of the component to be set.
      -
      Parameters:
      +
      Parameters:
      newComponent - The new component which should be added to the components list.
      -
      Returns:
      +
      Returns:
      This very class.
      -
  • -
  • -
    -

    _set

    -
    protected <T> void _set(Component<T> anyComponent)
    -
    + + + + +
      +
    • +

      _set

      +
      protected <T> void _set(Component<T> anyComponent)
    • -
    • -
      -

      _setOrReject

      -
      protected abstract <T extends Component<C>> T _setOrReject(T newComponent)
      +
    + + + + + +
      +
    • +

      _setOrReject

      +
      protected abstract <T extends Component<C>> T _setOrReject(T newComponent)
      This abstract method ought to be implemented further down the inheritance hierarchy where its responsibility makes more sense, namely: @@ -383,59 +476,126 @@

      _setOrReject

      of this class. Rejection in this case simply means that it returns null instead of the passed component.
      -
      -
      Parameters:
      +
      +
      Parameters:
      newComponent - The component which should be added to the components list.
      -
      Returns:
      +
      Returns:
      The same component or null if it has been rejected.
      -
    • -
    • -
      -

      _removeOrReject

      -
      protected abstract <T extends Component<C>> T _removeOrReject(T newComponent)
      +
    + + + + + +
      +
    • +

      _removeOrReject

      +
      protected abstract <T extends Component<C>> T _removeOrReject(T newComponent)
      An implementation of this method checks if the passed component should be removed from the component collection of this class or its removal should be "rejected". Rejection in this case simply means that it returns null instead of the passed component.
      -
      -
      Parameters:
      +
      +
      Parameters:
      newComponent - The component which should be removed from the components list.
      -
      Returns:
      +
      Returns:
      The same component or null if its removal has been rejected.
      -
    • -
    • -
      -

      find

      -
      public <T extends Component> Optional<T> find(Class<T> componentClass)
      +
    + + + +
      +
    • +

      find

      +
      public <T extends Component> java.util.Optional<T> find(java.lang.Class<T> componentClass)
      This method finds a component of the given type/class - and returns it as an Optional which may or may not be empty.
      -
      -
      Specified by:
      -
      find in interface ComponentOwner<C>
      -
      Type Parameters:
      + and returns it as an Optional which may or may not be empty.
  • +
    +
    Specified by:
    +
    find in interface ComponentOwner<C>
    +
    Type Parameters:
    T - The type parameter of the component which should be found. - This type must be a subtype of Component.
    -
    Parameters:
    + This type must be a subtype of Component. +
    Parameters:
    componentClass - The type/class of the component which should be found. - This class must be a subtype of Component.
    -
    Returns:
    -
    An Optional which may or may not be empty.
    + This class must be a subtype of Component. +
    Returns:
    +
    An Optional which may or may not be empty.
    - - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/common/composition/Component.IsBeing.html b/docs/jdocs/neureka/common/composition/Component.IsBeing.html index 09153d715..4f8752b31 100644 --- a/docs/jdocs/neureka/common/composition/Component.IsBeing.html +++ b/docs/jdocs/neureka/common/composition/Component.IsBeing.html @@ -1,239 +1,368 @@ - + + - -Component.IsBeing (neureka 1.0.0 API) - - - - + +Component.IsBeing (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Enum Class Component.IsBeing

    -
    -
    java.lang.Object -
    java.lang.Enum<Component.IsBeing> -
    neureka.common.composition.Component.IsBeing
    -
    +
    neureka.common.composition
    +

    Enum Component.IsBeing

    -
    -
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • java.lang.Enum<Component.IsBeing>
      • +
      • +
          +
        • neureka.common.composition.Component.IsBeing
        • +
        +
      • +
      +
    • +
    +
    +
    -
    -
      - -
    • -
      -

      Nested Class Summary

      -
      -

      Nested classes/interfaces inherited from class java.lang.Enum

      -Enum.EnumDesc<E extends Enum<E>>
      -
      + of the owner of a given Component instance.
    - -
  • -
    -

    Enum Constant Summary

    -
    Enum Constants
    -
    -
    Enum Constant
    -
    Description
    - -
     
    - -
     
    - -
     
    - -
     
    +
    -
    +
    +
    +
    + -
  • -
    -

    Method Details

    -
      -
    • -
      -

      values

      -
      public static Component.IsBeing[] values()
      -
      Returns an array containing the constants of this enum class, in -the order they are declared.
      -
      -
      Returns:
      -
      an array containing the constants of this enum class, in the order they are declared
      +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          values

          +
          public static Component.IsBeing[] values()
          +
          Returns an array containing the constants of this enum type, in +the order they are declared. This method may be used to iterate +over the constants as follows: +
          +for (Component.IsBeing c : Component.IsBeing.values())
          +    System.out.println(c);
          +
          +
          +
          Returns:
          +
          an array containing the constants of this enum type, in the order they are declared
          -
    • -
    • -
      -

      valueOf

      -
      public static Component.IsBeing valueOf(String name)
      -
      Returns the enum constant of this class with the specified name. +
    + + + +
      +
    • +

      valueOf

      +
      public static Component.IsBeing valueOf(java.lang.String name)
      +
      Returns the enum constant of this type with the specified name. The string must match exactly an identifier used to declare an -enum constant in this class. (Extraneous whitespace characters are +enum constant in this type. (Extraneous whitespace characters are not permitted.)
      -
      -
      Parameters:
      +
      +
      Parameters:
      name - the name of the enum constant to be returned.
      -
      Returns:
      +
      Returns:
      the enum constant with the specified name
      -
      Throws:
      -
      IllegalArgumentException - if this enum class has no constant with the specified name
      -
      NullPointerException - if the argument is null
      +
      Throws:
      +
      java.lang.IllegalArgumentException - if this enum type has no constant with the specified name
      +
      java.lang.NullPointerException - if the argument is null
      -
  • - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/common/composition/Component.OwnerChangeRequest.html b/docs/jdocs/neureka/common/composition/Component.OwnerChangeRequest.html index 8ce645770..63602ed19 100644 --- a/docs/jdocs/neureka/common/composition/Component.OwnerChangeRequest.html +++ b/docs/jdocs/neureka/common/composition/Component.OwnerChangeRequest.html @@ -1,196 +1,302 @@ - + + - -Component.OwnerChangeRequest (neureka 1.0.0 API) - - - - + +Component.OwnerChangeRequest (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Interface Component.OwnerChangeRequest<O>

    +
    neureka.common.composition
    +

    Interface Component.OwnerChangeRequest<O>

    -
    -
    -
    Type Parameters:
    +
    +
    +
      +
    • +
      +
      Type Parameters:
      O - The type parameter representing the concrete type of the component owner.
      -
      +
      Enclosing interface:
      -
      Component<O>
      +
      Component<O>

      -
      public static interface Component.OwnerChangeRequest<O>
      -
      Component.OwnerChangeRequest implementation instances will be passed to - the Component.update(OwnerChangeRequest) method which inform a +
      +
      public static interface Component.OwnerChangeRequest<O>
      +
      Component.OwnerChangeRequest implementation instances will be passed to + the Component.update(OwnerChangeRequest) method which inform a given component about a state change related to said component. They are used by component owners to communicate and - negotiate update events to their components using the Component.IsBeing enum and + negotiate update events to their components using the Component.IsBeing enum and some useful methods providing both a context for a component and the ability for the component to trigger the state change itself.
      -
    -
    -
      - -
    • -
      -

      Method Summary

      -
      -
      -
      -
      -
      Modifier and Type
      -
      Method
      -
      Description
      -
      boolean
      - -
      -
      This method will trigger the actual state change identified by the Component.IsBeing - constant returned by the type() method.
      +
    • +
    - - -
     
    - - -
     
    - - -
    +
    +
    +
    +
      +
    • -
    • -
      -

      Method Details

      -
        -
      • -
        -

        getOldOwner

        -
        O getOldOwner()
        -
        -
        Returns:
        +
          +
        • + + +

          Method Detail

          + + + +
            +
          • +

            getOldOwner

            +
            O getOldOwner()
            +
            +
            Returns:
            The previous owner type instance or null if the component is being added to the owner.
            -
      • -
      • -
        -

        getNewOwner

        -
        O getNewOwner()
        -
        -
        Returns:
        +
      + + + +
        +
      • +

        getNewOwner

        +
        O getNewOwner()
        +
        +
        Returns:
        The new owner type instance.
        -
    • -
    • -
      -

      executeChange

      -
      boolean executeChange()
      -
      This method will trigger the actual state change identified by the Component.IsBeing - constant returned by the type() method. +
    + + + +
      +
    • +

      executeChange

      +
      boolean executeChange()
      +
      This method will trigger the actual state change identified by the Component.IsBeing + constant returned by the type() method. It exists so that a component can decide when the change should occur. - If the change type is set to Component.IsBeing.ADDED for example then this would + If the change type is set to Component.IsBeing.ADDED for example then this would mean that after calling this method, the current component will now be a component of the current component owner.
      -
      -
      Returns:
      +
      +
      Returns:
      The truth value determining if the state change was successfully executed.
      -
    • -
    • -
      -

      type

      -
      default Component.IsBeing type()
      +
    + + + +
    +
    +
    Returns:
    The type of change that is about to happen to the component receiving this.
    - - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/common/composition/Component.html b/docs/jdocs/neureka/common/composition/Component.html index 2e1029b65..a84599ee9 100644 --- a/docs/jdocs/neureka/common/composition/Component.html +++ b/docs/jdocs/neureka/common/composition/Component.html @@ -1,158 +1,198 @@ - + + - -Component (neureka 1.0.0 API) - - - - + +Component (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Interface Component<O>

    +
    neureka.common.composition
    +

    Interface Component<O>

    -
    -
    -
    Type Parameters:
    +
    +
    +
    -
    -
      +
      +
      public interface Component<O>
      +
      This interface alongside the AbstractComponentOwner class defines a simple component system. + The component type defined by this interface is used to create components for the Tensor class + as well as the BackendContext class which both directly or indirectly + extend the AbstractComponentOwner class. + The type parameter of this interface represents the "owner" of the Component.
      + +
    +
    +
    + - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Method Details

      -
        -
      • -
        -

        update

        -
        default boolean update(Component.OwnerChangeRequest<O> changeRequest)
        +
          +
        • + + +

          Method Detail

          + + + +
            +
          • +

            update

            +
            default boolean update(Component.OwnerChangeRequest<O> changeRequest)
            Components are not the slaves of their owners. If the owner registers any state changes related to a given component, then said component will be informed by the owner about the change as well as receive @@ -162,26 +202,85 @@

            update

            is being added to, or removed from, its current owner. If components hold references to their owners then this method gives them the ability to update said reference when a new owner takes over the components of an old one. - The Component.OwnerChangeRequest implementation instance passed to this method - informs this component about the current state change and its type (Component.OwnerChangeRequest.type()). + The Component.OwnerChangeRequest implementation instance passed to this method + informs this component about the current state change and its type (Component.OwnerChangeRequest.type()). If this method returns false then this means that this component rejects the proposed update. The component owner will then abort the proposed change.
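A minimal sketch of a custom component honoring this contract is shown below. The names TrainingTag and ModelState are invented for the illustration and are not part of Neureka; only the Component methods documented here are assumed, and the owner type parameter is treated as unconstrained, as the signatures above suggest.

    import neureka.common.composition.Component;

    // Hypothetical component that keeps a reference to its owner in sync.
    public final class TrainingTag implements Component<TrainingTag.ModelState> {

        /** Stand-in owner type, purely for the example. */
        public static final class ModelState { }

        private ModelState owner;

        @Override
        public boolean update(Component.OwnerChangeRequest<ModelState> changeRequest) {
            if (changeRequest.type() == Component.IsBeing.ADDED)
                this.owner = changeRequest.getNewOwner(); // adopt the new owner reference
            // Returning false instead would reject the change and make the owner abort it.
            return true;
        }
    }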
            -
            -
            Parameters:
            -
            changeRequest - An Component.OwnerChangeRequest implementation instance used to communicate the type of change, context information and the ability to execute the change directly.
            -
            Returns:
            +
            +
            Parameters:
            +
            changeRequest - An Component.OwnerChangeRequest implementation instance used to communicate the type of change, context information and the ability to execute the change directly.
            +
            Returns:
            The truth value determining if the state change should be aborted or not.
            -
      -
    - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/common/composition/ComponentOwner.html b/docs/jdocs/neureka/common/composition/ComponentOwner.html index efedca0d6..d4d5a87ce 100644 --- a/docs/jdocs/neureka/common/composition/ComponentOwner.html +++ b/docs/jdocs/neureka/common/composition/ComponentOwner.html @@ -1,253 +1,369 @@ - + + - -ComponentOwner (neureka 1.0.0 API) - - - - + +ComponentOwner (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Interface ComponentOwner<C>

    +
    neureka.common.composition
    +

    Interface ComponentOwner<C>

    -
    -
    -
    Type Parameters:
    +
    +
    +
      +
    • +
      +
      Type Parameters:
      C - The concrete type of the component implementing this interface.
      -
      +
      All Known Subinterfaces:
      -
      Tensor<V>
      +
      Tensor<V>
      -
      +
      All Known Implementing Classes:
      -
      AbstractComponentOwner, Args, Extensions
      +
      AbstractComponentOwner, Args, Extensions

      -
      public interface ComponentOwner<C>
      +
      +
      public interface ComponentOwner<C>
      A component owner is a thing holding components which can be accessed by their type class. This interface is used to create extensible APIs through flexible composition.
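To make the composition idea concrete, here is a tiny, generic type-keyed registry in the same spirit. It is a simplified stand-in written for this page, not Neureka's AbstractComponentOwner, and it mirrors only the set and find methods listed below.

    import java.util.ArrayList;
    import java.util.List;
    import java.util.Optional;

    // Simplified illustration of "components accessed by their type class".
    public final class TinyOwner {

        private final List<Object> components = new ArrayList<>();

        public TinyOwner set(Object newComponent) {             // loosely like ComponentOwner.set(..)
            components.add(newComponent);
            return this;
        }

        public <T> Optional<T> find(Class<T> componentClass) {  // loosely like ComponentOwner.find(..)
            return components.stream()
                             .filter(componentClass::isInstance)
                             .map(componentClass::cast)
                             .findFirst();
        }

        public static void main(String[] args) {
            TinyOwner owner = new TinyOwner().set("a label").set(42);
            System.out.println(owner.find(Integer.class).orElse(-1)); // prints 42
        }
    }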
      -
    -
    -
      + +
    +
    +
    +
      +
    • -
    • -
      -

      Method Summary

      -
      -
      -
      -
      -
      Modifier and Type
      -
      Method
      -
      Description
      -
      <T extends Component>
      Optional<T>
      -
      find(Class<T> componentClass)
      -
      +
        +
      • + + +

        Method Summary

        + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        All Methods Instance Methods Abstract Methods 
        Modifier and TypeMethod and Description
        <T extends Component>
        java.util.Optional<T>
        find(java.lang.Class<T> componentClass)
        This method finds a component of the given type/class - and returns it as an Optional which may or may not be empty.
        - -
        <T extends Component<?>>
        T
        -
        get(Class<T> componentClass)
        -
        + and returns it as an Optional which may or may not be empty.
        +
        <T extends Component<?>>
        T
        get(java.lang.Class<T> componentClass)
        Use this to get the component of the specified component type class.
        - -
        <T extends Component<?>>
        List<T>
        -
        getAll(Class<T> componentClass)
        -
        +
        <T extends Component<?>>
        java.util.List<T>
        getAll(java.lang.Class<T> componentClass)
        Use this to get all components of the specified component type class.
        - -
        <T extends Component<C>>
        boolean
        -
        has(Class<T> componentClass)
        -
        +
        <T extends Component<C>>
        boolean
        has(java.lang.Class<T> componentClass)
        Use this to check if a component of the specified component type class is present.
        - -
        <T extends Component<C>>
        C
        -
        remove(Class<T> componentClass)
        -
        +
        <T extends Component<C>>
        C
        remove(java.lang.Class<T> componentClass)
        Use this to remove a component of the specified component type class.
        - -
        <T extends Component<C>>
        C
        -
        set(T newComponent)
        -
        +
        <T extends Component<C>>
        C
        set(T newComponent)
        Use this to set a component.
        - - - - - +
      -
      -
      -
        + +
      +
    +
    +
      +
    • -
    • -
      -

      Method Details

      -
        -
      • -
        -

        get

        -
        <T extends Component<?>> T get(Class<T> componentClass)
        +
          +
        • + + +

          Method Detail

          + + + +
            +
          • +

            get

            +
            <T extends Component<?>> T get(java.lang.Class<T> componentClass)
            Use this to get the component of the specified component type class.
            -
            -
            Type Parameters:
            +
            +
            Type Parameters:
            T - The type of the component to be retrieved.
            -
            Parameters:
            +
            Parameters:
            componentClass - The class of the component to be retrieved.
            -
            Returns:
            +
            Returns:
            The component of the specified type class.
            -
      • -
      • -
        -

        getAll

        -
        <T extends Component<?>> List<T> getAll(Class<T> componentClass)
        +
      + + + +
        +
      • +

        getAll

        +
        <T extends Component<?>> java.util.List<T> getAll(java.lang.Class<T> componentClass)
        Use this to get all components of the specified component type class.
        -
        -
        Type Parameters:
        +
        +
        Type Parameters:
        T - The type of the components to be retrieved.
        -
        Parameters:
        +
        Parameters:
        componentClass - The class of the components to be retrieved.
        -
        Returns:
        +
        Returns:
        A list of all components of the specified type class.
        -
    • -
    • -
      -

      remove

      -
      <T extends Component<C>> C remove(Class<T> componentClass)
      +
    + + + +
      +
    • +

      remove

      +
      <T extends Component<C>> C remove(java.lang.Class<T> componentClass)
      Use this to remove a component of the specified component type class.
      -
      -
      Type Parameters:
      +
      +
      Type Parameters:
      T - The type of the component to be removed.
      -
      Parameters:
      +
      Parameters:
      componentClass - The class of the component to be removed.
      -
      Returns:
      +
      Returns:
      This component owner instance (to allow for method chaining if so desired).
      -
    • -
    • -
      -

      has

      -
      <T extends Component<C>> boolean has(Class<T> componentClass)
      +
    + + + +
      +
    • +

      has

      +
      <T extends Component<C>> boolean has(java.lang.Class<T> componentClass)
      Use this to check if a component of the specified component type class is present.
      -
      -
      Type Parameters:
      +
      +
      Type Parameters:
      T - The type of the component to be checked.
      -
      Parameters:
      +
      Parameters:
      componentClass - The class of the component to be checked.
      -
      Returns:
      +
      Returns:
      True if a component of the specified type class is present, false otherwise.
      -
    • -
    • -
      -

      set

      -
      <T extends Component<C>> C set(T newComponent)
      +
    + + + + + +
      +
    • +

      set

      +
      <T extends Component<C>> C set(T newComponent)
      Use this to set a component.
      -
      -
      Type Parameters:
      +
      +
      Type Parameters:
      T - The type of the component to be set.
      -
      Parameters:
      +
      Parameters:
      newComponent - The new component to be set.
      -
      Returns:
      +
      Returns:
      This component owner instance (to allow for method chaining if so desired).
      -
    • -
    • -
      -

      find

      -
      <T extends Component> Optional<T> find(Class<T> componentClass)
      +
    + + + +
      +
    • +

      find

      +
      <T extends Component> java.util.Optional<T> find(java.lang.Class<T> componentClass)
This method finds a component of the given type/class and returns it as an Optional which may or may not be empty.
      -
      -
      Type Parameters:
      + and returns it as an Optional which may or may not be empty.
    +
    +
    Type Parameters:
T - The type parameter of the component which should be found. This type must be a subtype of Component.
    -
    Parameters:
    + This type must be a subtype of Component. +
    Parameters:
componentClass - The type/class of the component which should be found. This class must be a subtype of Component.
    -
    Returns:
    -
    An Optional which may or may not be empty.
    + This class must be a subtype of Component. +
    Returns:
    +
    An Optional which may or may not be empty.
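Taken together, the methods above form a small keyed component store. The lines below are a minimal usage sketch in Java; the owner variable and the MyComponent type are illustrative assumptions and are not taken from this API documentation.

    // 'owner' is assumed to be some component owner instance and 'MyComponent'
    // some implementation of Component - both names are purely illustrative.
    owner.set(new MyComponent());                          // attach a component; returns the owner for chaining
    boolean present = owner.has(MyComponent.class);        // true after the call above
    MyComponent direct = owner.get(MyComponent.class);     // direct lookup of the attached component
    java.util.Optional<MyComponent> safe = owner.find(MyComponent.class); // null-safe lookup
    owner.remove(MyComponent.class);                       // detach it again; also returns the owner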
    - - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/common/composition/package-frame.html b/docs/jdocs/neureka/common/composition/package-frame.html new file mode 100644 index 000000000..f9fdc6858 --- /dev/null +++ b/docs/jdocs/neureka/common/composition/package-frame.html @@ -0,0 +1,29 @@ + + + + + +neureka.common.composition (neureka 1.0.1 API) + + + + +

    neureka.common.composition

    + + + diff --git a/docs/jdocs/neureka/common/composition/package-summary.html b/docs/jdocs/neureka/common/composition/package-summary.html index b7c91b4eb..21f97c17b 100644 --- a/docs/jdocs/neureka/common/composition/package-summary.html +++ b/docs/jdocs/neureka/common/composition/package-summary.html @@ -1,111 +1,192 @@ - + + - -neureka.common.composition (neureka 1.0.0 API) - - - - + +neureka.common.composition (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    -

    Package neureka.common.composition

    -
    -
    -
    package neureka.common.composition
    -
    -
    -
    + + + + diff --git a/docs/jdocs/neureka/common/composition/package-tree.html b/docs/jdocs/neureka/common/composition/package-tree.html index 98e157347..94d436efd 100644 --- a/docs/jdocs/neureka/common/composition/package-tree.html +++ b/docs/jdocs/neureka/common/composition/package-tree.html @@ -1,93 +1,152 @@ - + + - -neureka.common.composition Class Hierarchy (neureka 1.0.0 API) - - - - + +neureka.common.composition Class Hierarchy (neureka 1.0.1 API) - - - - - - + + -
    - -
    -

    Hierarchy For Package neureka.common.composition

    -Package Hierarchies: +Package Hierarchies:
    -
    +

    Class Hierarchy

    -
    -

    Interface Hierarchy

    -
    -
    -

    Enum Class Hierarchy

    +

    Enum Hierarchy

    -
    -
    + + + + diff --git a/docs/jdocs/neureka/common/utility/Cache.LazyEntry.html b/docs/jdocs/neureka/common/utility/Cache.LazyEntry.html index db0fb400b..46363ab25 100644 --- a/docs/jdocs/neureka/common/utility/Cache.LazyEntry.html +++ b/docs/jdocs/neureka/common/utility/Cache.LazyEntry.html @@ -1,197 +1,317 @@ - + + - -Cache.LazyEntry (neureka 1.0.0 API) - - - - + +Cache.LazyEntry (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class Cache.LazyEntry<K,V>

    -
    -
    java.lang.Object -
    neureka.common.utility.Cache.LazyEntry<K,V>
    +
    neureka.common.utility
    +

    Class Cache.LazyEntry<K,V>

    -
    -
    -
    Type Parameters:
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.common.utility.Cache.LazyEntry<K,V>
      • +
      +
    • +
    +
    +
      +
    • +
      +
      Type Parameters:
      K - The key type parameter.
      V - The value type parameter.
      -
      +
      Enclosing class:
      -
      Cache<O>
      +
      Cache<O>

      -
      public static class Cache.LazyEntry<K,V> -extends Object
      +
      +
      public static class Cache.LazyEntry<K,V>
      +extends java.lang.Object
      Lazy cache entries are entries whose values will be calculated only when the entry is being stored in the cache.
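As a rough illustration of that laziness (a sketch only; the exact moment the supplier runs is an assumption based on the description above):

    // The int[] value is not computed when the entry object is created...
    Cache.LazyEntry<String, int[]> entry =
            new Cache.LazyEntry<>("shape:2x3", key -> new int[]{ 2, 3 });
    int[] value = entry.getValue(); // ...the value supplier is assumed to run at the latest here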
      -
    -
    -
      - -
    • -
      -

      Constructor Summary

      -
      Constructors
      -
      -
      Constructor
      -
      Description
      -
      LazyEntry(K directory, - Function<K,V> valueSupplier)
      -
       
      +
    • +
    - +
    + - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        LazyEntry

        -
        public LazyEntry(K directory, - Function<K,V> valueSupplier)
        -
        +
          +
        • + + +

          Constructor Detail

          + + + + + +
            +
          • +

            LazyEntry

            +
            public LazyEntry(K directory,
            +                 java.util.function.Function<K,V> valueSupplier)
          -
    • +
    -
  • -
    -

    Method Details

    -
      -
    • -
      -

      getValue

      -
      public V getValue()
      -
      +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          getValue

          +
          public V getValue()
        • -
        • -
          -

          equals

          -
          public boolean equals(Object o)
          -
          -
          Overrides:
          -
          equals in class Object
          +
        + + + +
          +
        • +

          equals

          +
          public boolean equals(java.lang.Object o)
          +
          +
          Overrides:
          +
          equals in class java.lang.Object
          -
  • -
  • -
    -

    hashCode

    -
    public int hashCode()
    -
    -
    Overrides:
    -
    hashCode in class Object
    + + + + +
      +
    • +

      hashCode

      +
      public int hashCode()
      +
      +
      Overrides:
      +
      hashCode in class java.lang.Object
      -
  • - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/common/utility/Cache.html b/docs/jdocs/neureka/common/utility/Cache.html index 5bf346d8a..5e7fc78c2 100644 --- a/docs/jdocs/neureka/common/utility/Cache.html +++ b/docs/jdocs/neureka/common/utility/Cache.html @@ -1,207 +1,333 @@ - + + - -Cache (neureka 1.0.0 API) - - - - + +Cache (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class Cache<O>

    -
    -
    java.lang.Object -
    neureka.common.utility.Cache<O>
    +
    neureka.common.utility
    +

    Class Cache<O>

    -
    -
    -
    Type Parameters:
    -
    O - The type that should be cached, this may be an NDConfiguration or int[] array.
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.common.utility.Cache<O>
      • +
      +
    • +
    +
    +
      +
    • +
      +
      Type Parameters:
      +
O - The type that should be cached; this may be an NDConfiguration or int[] array.

      -
      public final class Cache<O> -extends Object
      +
      +
      public final class Cache<O>
      +extends java.lang.Object
This is a simple, fixed size cache for immutable objects which are shared throughout the library runtime... This is an internal class which should not be used outside Neureka's internals.
      -
    -
    -
      + +
    +
    +
    +
      +
    • -
    • -
      -

      Nested Class Summary

      -
      Nested Classes
      -
      -
      Modifier and Type
      -
      Class
      -
      Description
      -
      static class 
      - -
      +
        +
      • + + +

        Nested Class Summary

        + + + + + + + + + + +
        Nested Classes 
        Modifier and TypeClass and Description
        static class Cache.LazyEntry<K,V>
        Lazy cache entries are entries whose values will be calculated only when the entry is being stored in the cache.
        - - - +
      • +
      -
    • -
      -

      Constructor Summary

      -
      Constructors
      -
      -
      Constructor
      -
      Description
      -
      Cache(int size)
      -
       
      -
      -
      +
        +
      • + + +

        Constructor Summary

        + + + + + + + + +
        Constructors 
        Constructor and Description
        Cache(int size) 
      • +
      -
    • -
      -

      Method Summary

      -
      -
      -
      -
      -
      Modifier and Type
      -
      Method
      -
      Description
      -
      boolean
      -
      has(O o)
      -
       
      -
      <T extends O>
      T
      -
      process(T newObject)
      -
       
      -
      int
      - -
       
      -
      -
      -
      -
      -

      Methods inherited from class java.lang.Object

      -clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
      -
      +
        +
      • + + +

        Method Summary

        + + + + + + + + + + + + + + + + + + +
        All Methods Instance Methods Concrete Methods 
        Modifier and TypeMethod and Description
        booleanhas(O o) 
        <T extends O>
        T
        process(T newObject) 
        intsize() 
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
        • +
      -
    • -
      -
        + +
      +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        Cache

        -
        public Cache(int size)
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            Cache

            +
            public Cache(int size)
          -
    • +
    -
  • -
    -

    Method Details

    -
      -
    • -
      -

      process

      -
      public <T extends O> T process(T newObject)
      -
      -
      Parameters:
      +
        +
      • + + +

        Method Detail

        + + + + + +
          +
        • +

          process

          +
          public <T extends O> T process(T newObject)
          +
          +
          Parameters:
          newObject - The object which may or may not be cached.
          -
          Returns:
          +
          Returns:
          Either the provided object or the object found inside the cache...
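A short usage sketch of the cache described above; the interning behaviour of process(T) is inferred from this description and not verified here:

    Cache<int[]> cache = new Cache<>(256);             // a fixed number of cache slots
    int[] shape = cache.process(new int[]{ 2, 3, 4 }); // either the argument itself or an equal, already cached array
    boolean known = cache.has(shape);                  // true once the object is present in the cache
    int entries = cache.size();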
          -
    • -
    • -
      -

      has

      -
      public boolean has(O o)
      -
      +
    + + + + + +
      +
    • +

      has

      +
      public boolean has(O o)
    • -
    • -
      -

      size

      -
      public int size()
      -
      +
    + + + +
      +
    • +

      size

      +
      public int size()
      +
    • +
  • - - +
    +
    - + + + + diff --git a/docs/jdocs/neureka/common/utility/DataConverter.ForTensor.html b/docs/jdocs/neureka/common/utility/DataConverter.ForTensor.html index af983e6d9..069e85421 100644 --- a/docs/jdocs/neureka/common/utility/DataConverter.ForTensor.html +++ b/docs/jdocs/neureka/common/utility/DataConverter.ForTensor.html @@ -1,220 +1,354 @@ - + + - -DataConverter.ForTensor (neureka 1.0.0 API) - - - - + +DataConverter.ForTensor (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class DataConverter.ForTensor

    +
    neureka.common.utility
    +

    Class DataConverter.ForTensor

    -
    java.lang.Object -
    neureka.common.utility.DataConverter.ForTensor
    -
    -
    -
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.common.utility.DataConverter.ForTensor
      • +
      +
    • +
    +
    +
      +
    • +
      Enclosing class:
      -
      DataConverter
      +
      DataConverter

      -
      public static class DataConverter.ForTensor -extends Object
      +
      +
      public static class DataConverter.ForTensor
      +extends java.lang.Object
This is a stateful and parallelized converter for converting the internal data array of a tensor to another data array based on a provided lambda. The converter will consider tensors with more complex access patterns, like for example those of slices.
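A hypothetical helper built on this converter is sketched below; the element accessor is left as a parameter because this page does not specify how source values are read from the tensor:

    // Converts the elements of a given tensor to a primitive float[] (illustrative only).
    static float[] asFloats(Tensor<?> t, java.util.function.Function<Integer, Number> elementAt) {
        DataConverter.ForTensor converter = new DataConverter.ForTensor(t);
        return converter.toFloatArray(elementAt); // 'elementAt' maps a flat index to the tensor value at that index
    }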
      -
    -
    -
      - -
    • -
      -

      Constructor Summary

      -
      Constructors
      -
      -
      Constructor
      -
      Description
      - -
       
      +
    • +
    - +
    + - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        ForTensor

        -
        public ForTensor(Tensor<?> t)
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            ForTensor

            +
            public ForTensor(Tensor<?> t)
          -
    • +
    -
  • -
    -

    Method Details

    -
      -
    • -
      -

      toFloatArray

      -
      public float[] toFloatArray(Function<Integer,Number> source)
      -
      +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          toFloatArray

          +
          public float[] toFloatArray(java.util.function.Function<java.lang.Integer,java.lang.Number> source)
        • -
        • -
          -

          toByteArray

          -
          public byte[] toByteArray(Function<Integer,Number> source)
          -
          +
        + + + +
          +
        • +

          toByteArray

          +
          public byte[] toByteArray(java.util.function.Function<java.lang.Integer,java.lang.Number> source)
        • -
        • -
          -

          toLongArray

          -
          public long[] toLongArray(Function<Integer,Number> source)
          -
          +
        + + + +
          +
        • +

          toLongArray

          +
          public long[] toLongArray(java.util.function.Function<java.lang.Integer,java.lang.Number> source)
        • -
        • -
          -

          toIntArray

          -
          public int[] toIntArray(Function<Integer,Number> source)
          -
          +
        + + + +
          +
        • +

          toIntArray

          +
          public int[] toIntArray(java.util.function.Function<java.lang.Integer,java.lang.Number> source)
        • -
        • -
          -

          toDoubleArray

          -
          public double[] toDoubleArray(Function<Integer,Number> source)
          -
          +
        + + + +
          +
        • +

          toDoubleArray

          +
          public double[] toDoubleArray(java.util.function.Function<java.lang.Integer,java.lang.Number> source)
        • -
        • -
          -

          toShortArray

          -
          public short[] toShortArray(Function<Integer,Number> source)
          -
          +
        + + + +
          +
        • +

          toShortArray

          +
          public short[] toShortArray(java.util.function.Function<java.lang.Integer,java.lang.Number> source)
        • -
        • -
          -

          toObjectArray

          -
          public Object[] toObjectArray(Function<Integer,Object> source)
          -
          +
        + + + +
          +
        • +

          toObjectArray

          +
          public java.lang.Object[] toObjectArray(java.util.function.Function<java.lang.Integer,java.lang.Object> source)
          +
        • +
      -
  • - +
    +
    - + + + + diff --git a/docs/jdocs/neureka/common/utility/DataConverter.Utility.html b/docs/jdocs/neureka/common/utility/DataConverter.Utility.html index ae53effdb..ff3d3d913 100644 --- a/docs/jdocs/neureka/common/utility/DataConverter.Utility.html +++ b/docs/jdocs/neureka/common/utility/DataConverter.Utility.html @@ -1,670 +1,992 @@ - + + - -DataConverter.Utility (neureka 1.0.0 API) - - - - + +DataConverter.Utility (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class DataConverter.Utility

    -
    -
    java.lang.Object -
    neureka.common.utility.DataConverter.Utility
    +
    neureka.common.utility
    +

    Class DataConverter.Utility

    -
    -
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.common.utility.DataConverter.Utility
      • +
      +
    • +
    +
    +
      +
    • +
      Enclosing class:
      -
      DataConverter
      +
      DataConverter

      -
      public static class DataConverter.Utility -extends Object
      +
      +
      public static class DataConverter.Utility
      +extends java.lang.Object
      This is a static utility class containing the actual conversion logic which is usually referenced by the Converter lambdas via method signatures... Other than that it also provides the ability to create seeded arrays of data.
      -
    -
    -
      - -
    • -
      -

      Constructor Summary

      -
      Constructors
      -
      -
      Constructor
      -
      Description
      - -
       
      +
    • +
    - +
    + - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        Utility

        -
        public Utility()
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            Utility

            +
            public Utility()
          -
    • +
    -
  • -
    -

    Method Details

    -
      -
    • -
      -

      objFloatsToPrimFloats

      -
      public static float[] objFloatsToPrimFloats(Float[] objects)
      -
      +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          objFloatsToPrimFloats

          +
          public static float[] objFloatsToPrimFloats(java.lang.Float[] objects)
        • -
        • -
          -

          objDoublesToPrimDoubles

          -
          public static double[] objDoublesToPrimDoubles(Double[] objects)
          -
          +
        + + + +
          +
        • +

          objDoublesToPrimDoubles

          +
          public static double[] objDoublesToPrimDoubles(java.lang.Double[] objects)
        • -
        • -
          -

          objIntsToPrimInts

          -
          public static int[] objIntsToPrimInts(Integer[] objects)
          -
          +
        + + + +
          +
        • +

          objIntsToPrimInts

          +
          public static int[] objIntsToPrimInts(java.lang.Integer[] objects)
        • -
        • -
          -

          objLongsToPrimLongs

          -
          public static long[] objLongsToPrimLongs(Long[] objects)
          -
          +
        + + + +
          +
        • +

          objLongsToPrimLongs

          +
          public static long[] objLongsToPrimLongs(java.lang.Long[] objects)
        • -
        • -
          -

          objShortsToPrimShorts

          -
          public static short[] objShortsToPrimShorts(Short[] objects)
          -
          +
        + + + +
          +
        • +

          objShortsToPrimShorts

          +
          public static short[] objShortsToPrimShorts(java.lang.Short[] objects)
        • -
        • -
          -

          objBytesToPrimBytes

          -
          public static byte[] objBytesToPrimBytes(Byte[] objects)
          -
          +
        + + + +
          +
        • +

          objBytesToPrimBytes

          +
          public static byte[] objBytesToPrimBytes(java.lang.Byte[] objects)
        • -
        • -
          -

          objBooleansToPrimBooleans

          -
          public static boolean[] objBooleansToPrimBooleans(Boolean[] objects)
          -
          +
        + + + +
          +
        • +

          objBooleansToPrimBooleans

          +
          public static boolean[] objBooleansToPrimBooleans(java.lang.Boolean[] objects)
        • -
        • -
          -

          objCharsToPrimChars

          -
          public static char[] objCharsToPrimChars(Character[] objects)
          -
          +
        + + + +
          +
        • +

          objCharsToPrimChars

          +
          public static char[] objCharsToPrimChars(java.lang.Character[] objects)
        • -
        • -
          -

          byteToShort

          -
          public static short[] byteToShort(byte[] data)
          -
          +
        + + + +
          +
        • +

          byteToShort

          +
          public static short[] byteToShort(byte[] data)
        • -
        • -
          -

          byteToBigInteger

          -
          public static BigInteger[] byteToBigInteger(byte[] data)
          -
          +
        + + + +
          +
        • +

          byteToBigInteger

          +
          public static java.math.BigInteger[] byteToBigInteger(byte[] data)
        • -
        • -
          -

          doubleToFloat

          -
          public static float[] doubleToFloat(double[] data)
          -
          +
        + + + +
          +
        • +

          doubleToFloat

          +
          public static float[] doubleToFloat(double[] data)
        • -
        • -
          -

          doubleToByte

          -
          public static byte[] doubleToByte(double[] data)
          -
          +
        + + + +
          +
        • +

          doubleToByte

          +
          public static byte[] doubleToByte(double[] data)
        • -
        • -
          -

          doubleToShort

          -
          public static short[] doubleToShort(double[] data)
          -
          +
        + + + +
          +
        • +

          doubleToShort

          +
          public static short[] doubleToShort(double[] data)
        • -
        • -
          -

          doubleToLong

          -
          public static long[] doubleToLong(double[] data)
          -
          +
        + + + +
          +
        • +

          doubleToLong

          +
          public static long[] doubleToLong(double[] data)
        • -
        • -
          -

          doubleToBool

          -
          public static boolean[] doubleToBool(double[] data)
          -
          +
        + + + +
          +
        • +

          doubleToBool

          +
          public static boolean[] doubleToBool(double[] data)
        • -
        • -
          -

          boolToDouble

          -
          public static double[] boolToDouble(boolean[] data)
          -
          +
        + + + +
          +
        • +

          boolToDouble

          +
          public static double[] boolToDouble(boolean[] data)
        • -
        • -
          -

          boolToFloat

          -
          public static float[] boolToFloat(boolean[] data)
          -
          +
        + + + +
          +
        • +

          boolToFloat

          +
          public static float[] boolToFloat(boolean[] data)
        • -
        • -
          -

          floatToDouble

          -
          public static double[] floatToDouble(float[] data)
          -
          +
        + + + +
          +
        • +

          floatToDouble

          +
          public static double[] floatToDouble(float[] data)
        • -
        • -
          -

          floatToByte

          -
          public static byte[] floatToByte(float[] data)
          -
          +
        + + + +
          +
        • +

          floatToByte

          +
          public static byte[] floatToByte(float[] data)
        • -
        • -
          -

          floatToShort

          -
          public static short[] floatToShort(float[] data)
          -
          +
        + + + +
          +
        • +

          floatToShort

          +
          public static short[] floatToShort(float[] data)
        • -
        • -
          -

          floatToLong

          -
          public static long[] floatToLong(float[] data)
          -
          +
        + + + +
          +
        • +

          floatToLong

          +
          public static long[] floatToLong(float[] data)
        • -
        • -
          -

          shortToDouble

          -
          public static double[] shortToDouble(short[] data)
          -
          +
        + + + +
          +
        • +

          shortToDouble

          +
          public static double[] shortToDouble(short[] data)
        • -
        • -
          -

          byteToDouble

          -
          public static double[] byteToDouble(byte[] data)
          -
          +
        + + + +
          +
        • +

          byteToDouble

          +
          public static double[] byteToDouble(byte[] data)
        • -
        • -
          -

          byteToFloat

          -
          public static float[] byteToFloat(byte[] data)
          -
          +
        + + + +
          +
        • +

          byteToFloat

          +
          public static float[] byteToFloat(byte[] data)
        • -
        • -
          -

          shortToFloat

          -
          public static float[] shortToFloat(short[] data)
          -
          +
        + + + +
          +
        • +

          shortToFloat

          +
          public static float[] shortToFloat(short[] data)
        • -
        • -
          -

          byteToInt

          -
          public static int[] byteToInt(byte[] data)
          -
          +
        + + + +
          +
        • +

          byteToInt

          +
          public static int[] byteToInt(byte[] data)
        • -
        • -
          -

          shortToInt

          -
          public static int[] shortToInt(short[] data)
          -
          +
        + + + +
          +
        • +

          shortToInt

          +
          public static int[] shortToInt(short[] data)
        • -
        • -
          -

          shortToByte

          -
          public static byte[] shortToByte(short[] data)
          -
          +
        + + + +
          +
        • +

          shortToByte

          +
          public static byte[] shortToByte(short[] data)
        • -
        • -
          -

          byteToLong

          -
          public static long[] byteToLong(byte[] data)
          -
          +
        + + + +
          +
        • +

          byteToLong

          +
          public static long[] byteToLong(byte[] data)
        • -
        • -
          -

          shortToLong

          -
          public static long[] shortToLong(short[] data)
          -
          +
        + + + +
          +
        • +

          shortToLong

          +
          public static long[] shortToLong(short[] data)
        • -
        • -
          -

          shortToBigInteger

          -
          public static BigInteger[] shortToBigInteger(short[] data)
          -
          +
        + + + +
          +
        • +

          shortToBigInteger

          +
          public static java.math.BigInteger[] shortToBigInteger(short[] data)
        • -
        • -
          -

          intToFloat

          -
          public static float[] intToFloat(int[] data)
          -
          +
        + + + +
          +
        • +

          intToFloat

          +
          public static float[] intToFloat(int[] data)
        • -
        • -
          -

          floatToInt

          -
          public static int[] floatToInt(float[] data)
          -
          +
        + + + +
          +
        • +

          floatToInt

          +
          public static int[] floatToInt(float[] data)
        • -
        • -
          -

          floatToBigInteger

          -
          public static BigInteger[] floatToBigInteger(float[] data)
          -
          +
        + + + +
          +
        • +

          floatToBigInteger

          +
          public static java.math.BigInteger[] floatToBigInteger(float[] data)
        • -
        • -
          -

          doubleToInt

          -
          public static int[] doubleToInt(double[] data)
          -
          +
        + + + +
          +
        • +

          doubleToInt

          +
          public static int[] doubleToInt(double[] data)
        • -
        • -
          -

          doubleToBigInteger

          -
          public static BigInteger[] doubleToBigInteger(double[] data)
          -
          +
        + + + +
          +
        • +

          doubleToBigInteger

          +
          public static java.math.BigInteger[] doubleToBigInteger(double[] data)
        • -
        • -
          -

          intToDouble

          -
          public static double[] intToDouble(int[] data)
          -
          +
        + + + +
          +
        • +

          intToDouble

          +
          public static double[] intToDouble(int[] data)
        • -
        • -
          -

          intToLong

          -
          public static long[] intToLong(int[] data)
          -
          +
        + + + +
          +
        • +

          intToLong

          +
          public static long[] intToLong(int[] data)
        • -
        • -
          -

          intToShort

          -
          public static short[] intToShort(int[] data)
          -
          +
        + + + +
          +
        • +

          intToShort

          +
          public static short[] intToShort(int[] data)
        • -
        • -
          -

          intToByte

          -
          public static byte[] intToByte(int[] data)
          -
          +
        + + + +
          +
        • +

          intToByte

          +
          public static byte[] intToByte(int[] data)
        • -
        • -
          -

          intToBigInteger

          -
          public static BigInteger[] intToBigInteger(int[] data)
          -
          +
        + + + +
          +
        • +

          intToBigInteger

          +
          public static java.math.BigInteger[] intToBigInteger(int[] data)
        • -
        • -
          -

          longToByte

          -
          public static byte[] longToByte(long[] data)
          -
          +
        + + + +
          +
        • +

          longToByte

          +
          public static byte[] longToByte(long[] data)
        • -
        • -
          -

          longToShort

          -
          public static short[] longToShort(long[] data)
          -
          +
        + + + +
          +
        • +

          longToShort

          +
          public static short[] longToShort(long[] data)
        • -
        • -
          -

          longToInt

          -
          public static int[] longToInt(long[] data)
          -
          +
        + + + +
          +
        • +

          longToInt

          +
          public static int[] longToInt(long[] data)
        • -
        • -
          -

          longToFloat

          -
          public static float[] longToFloat(long[] data)
          -
          +
        + + + +
          +
        • +

          longToFloat

          +
          public static float[] longToFloat(long[] data)
        • -
        • -
          -

          longToDouble

          -
          public static double[] longToDouble(long[] data)
          -
          +
        + + + +
          +
        • +

          longToDouble

          +
          public static double[] longToDouble(long[] data)
        • -
        • -
          -

          longToBigInteger

          -
          public static BigInteger[] longToBigInteger(long[] data)
          -
          +
        + + + +
          +
        • +

          longToBigInteger

          +
          public static java.math.BigInteger[] longToBigInteger(long[] data)
        • -
        • -
          -

          objectsToDoubles

          -
          public static double[] objectsToDoubles(Object[] objects, - int targetSize)
          -
          +
        + + + +
          +
        • +

          objectsToDoubles

          +
          public static double[] objectsToDoubles(java.lang.Object[] objects,
          +                                        int targetSize)
        • -
        • -
          -

          objectsToFloats

          -
          public static float[] objectsToFloats(Object[] objects, - int targetSize)
          -
          +
        + + + +
          +
        • +

          objectsToFloats

          +
          public static float[] objectsToFloats(java.lang.Object[] objects,
          +                                      int targetSize)
        • -
        • -
          -

          objectsToShorts

          -
          public static short[] objectsToShorts(Object[] objects, - int targetSize)
          -
          +
        + + + +
          +
        • +

          objectsToShorts

          +
          public static short[] objectsToShorts(java.lang.Object[] objects,
          +                                      int targetSize)
        • -
        • -
          -

          objectsToBytes

          -
          public static byte[] objectsToBytes(Object[] objects, - int targetSize)
          -
          +
        + + + +
          +
        • +

          objectsToBytes

          +
          public static byte[] objectsToBytes(java.lang.Object[] objects,
          +                                    int targetSize)
        • -
        • -
          -

          objectsToLongs

          -
          public static long[] objectsToLongs(Object[] objects, - int targetSize)
          -
          +
        + + + +
          +
        • +

          objectsToLongs

          +
          public static long[] objectsToLongs(java.lang.Object[] objects,
          +                                    int targetSize)
        • -
        • -
          -

          objectsToInts

          -
          public static int[] objectsToInts(Object[] objects, - int targetSize)
          -
          +
        + + + +
          +
        • +

          objectsToInts

          +
          public static int[] objectsToInts(java.lang.Object[] objects,
          +                                  int targetSize)
        • -
        • -
          -

          intStream

          -
          public static IntStream intStream(int parallelThreshold, - int workload)
          -
          Use this to create a range based IntStream +
        + + + +
          +
        • +

          intStream

          +
          public static java.util.stream.IntStream intStream(int parallelThreshold,
          +                                                   int workload)
          +
Use this to create a range based IntStream which is only parallel if the provided threshold is smaller than the provided workload size.
          -
          -
          Parameters:
          +
          +
          Parameters:
          parallelThreshold - If the workload is larger than the threshold then the returned stream will be parallel.
          workload - The number of integers processed by the returned stream.
          -
          Returns:
          -
          A sequential or parallel IntStream.
          +
          Returns:
          +
          A sequential or parallel IntStream.
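For example (a sketch of the threshold rule described above, together with one of the plain array conversions from this utility class):

    // Parallel, because the workload (10_000) is larger than the threshold (1_000):
    java.util.stream.IntStream big = DataConverter.Utility.intStream(1_000, 10_000);
    // Sequential, because the workload (100) does not exceed the threshold:
    java.util.stream.IntStream small = DataConverter.Utility.intStream(1_000, 100);
    // A simple widening conversion:
    double[] doubles = DataConverter.Utility.intToDouble(new int[]{ 1, 2, 3 });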
          -
  • - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/common/utility/DataConverter.html b/docs/jdocs/neureka/common/utility/DataConverter.html index 0d956c620..66a32b7f6 100644 --- a/docs/jdocs/neureka/common/utility/DataConverter.html +++ b/docs/jdocs/neureka/common/utility/DataConverter.html @@ -1,194 +1,306 @@ - + + - -DataConverter (neureka 1.0.0 API) - - - - + +DataConverter (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class DataConverter

    -
    -
    java.lang.Object -
    neureka.common.utility.DataConverter
    +
    neureka.common.utility
    +

    Class DataConverter

    -
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.common.utility.DataConverter
      • +
      +
    • +
    +
    +
      +

    • -
      public final class DataConverter -extends Object
      +
      +
      public final class DataConverter
      +extends java.lang.Object
This class is a singleton. Its sole job is to simply take in any kind of object and convert it into another object of a provided Class type... In essence the DataConverter is merely a utility class. It also contains a nested static class named DataConverter.Utility which provides useful methods to handle primitive data types and arrays of said types.
      -
    -
    -
      + +
    +
    +
    +
      +
    • -
    • -
      -

      Nested Class Summary

      -
      Nested Classes
      -
      -
      Modifier and Type
      -
      Class
      -
      Description
      -
      static class 
      - -
      +
        +
      • + + +

        Nested Class Summary

        + + + + + + + + + + + + + + +
        Nested Classes 
        Modifier and TypeClass and Description
        static class DataConverter.ForTensor
        This is a stateful and parallelized converter for converting the internal data array of a tensor to another data array based on a provided lambda.
        - -
        static class 
        - -
        +
        static class DataConverter.Utility
        This is a static utility class containing the actual conversion logic which is usually referenced by the Converter lambdas via method signatures...
        - - - +
      • +
      -
    • -
      -

      Method Summary

      -
      -
      -
      -
      -
      Modifier and Type
      -
      Method
      -
      Description
      -
      <T> T
      -
      convert(Object from, - Class<T> to)
      -
      +
    - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Method Details

      -
        -
      • -
        -

        get

        -
        public static DataConverter get()
        +
          +
        • + + +

          Method Detail

          + + + +
            +
          • +

            get

            +
            public static DataConverter get()
            This method returns the singleton.
            -
            -
            Returns:
            +
            +
            Returns:
            The singleton instance of this class.
            -
      • -
      • -
        -

        convert

        -
        public <T> T convert(Object from, - Class<T> to)
        +
      + + + +
        +
      • +

        convert

        +
        public <T> T convert(java.lang.Object from,
        +                     java.lang.Class<T> to)
        This method embodies the purpose of this class. It receives objects for type conversion and queries the request through the nested "_converters" Map instance.
        -
        -
        Type Parameters:
        +
        +
        Type Parameters:
        T - The type parameter of the "to" Class.
        -
        Parameters:
        +
        Parameters:
        from - The object which ought to be converted.
        to - The target type for the provided object.
        -
        Returns:
        +
        Returns:
        The target object created by a Converter lambda.
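A minimal sketch of such a conversion; whether a particular source/target pair is supported depends on the converter lambdas registered internally, which this page does not list:

    DataConverter converter = DataConverter.get();   // the singleton instance
    // Assumed to succeed only if a double[] -> float[] converter lambda is registered:
    float[] floats = converter.convert(new double[]{ 1.0, 2.0, 3.0 }, float[].class);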
        -
    - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/common/utility/ListReader.Result.html b/docs/jdocs/neureka/common/utility/ListReader.Result.html index ffdbe9ab5..279794b12 100644 --- a/docs/jdocs/neureka/common/utility/ListReader.Result.html +++ b/docs/jdocs/neureka/common/utility/ListReader.Result.html @@ -1,153 +1,264 @@ - + + - -ListReader.Result (neureka 1.0.0 API) - - - - + +ListReader.Result (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class ListReader.Result

    +
    neureka.common.utility
    +

    Class ListReader.Result

    -
    java.lang.Object -
    neureka.common.utility.ListReader.Result
    -
    -
    -
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.common.utility.ListReader.Result
      • +
      +
    • +
    +
    +
    -
    -
      - -
    • -
      -

      Method Summary

      -
      -
      -
      -
      -
      Modifier and Type
      -
      Method
      -
      Description
      - - -
       
      - - -
       
      - - -
       
      -
      -
      +
      +
      public static class ListReader.Result
      +extends java.lang.Object
      +
    • +
    -
    -

    Methods inherited from class java.lang.Object

    -clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
    - +
    +
      +
    • + +
        +
      • + + +

        Method Summary

        + + + + + + + + + + + + + + + + + + +
        All Methods Instance Methods Concrete Methods 
        Modifier and TypeMethod and Description
        java.util.List<java.lang.Object>getData() 
        java.util.List<java.lang.Integer>getShape() 
        java.lang.Class<?>getType() 
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
        • +
      - -
      -
        + +
      +
    +
    +
      +
    • -
    • -
      -

      Method Details

      -
        -
      • -
        -

        getType

        -
        public Class<?> getType()
        -
        +
          +
        • + + +

          Method Detail

          + + + +
            +
          • +

            getType

            +
            public java.lang.Class<?> getType()
          • -
          • -
            -

            getShape

            -
            public List<Integer> getShape()
            -
            +
          + + + +
            +
          • +

            getShape

            +
            public java.util.List<java.lang.Integer> getShape()
          • -
          • -
            -

            getData

            -
            public List<Object> getData()
            -
            +
          + + + +
            +
          • +

            getData

            +
            public java.util.List<java.lang.Object> getData()
            +
          • +
        -
    - +
    +
    - + + + + diff --git a/docs/jdocs/neureka/common/utility/ListReader.html b/docs/jdocs/neureka/common/utility/ListReader.html index 275ff5990..6bb593f9e 100644 --- a/docs/jdocs/neureka/common/utility/ListReader.html +++ b/docs/jdocs/neureka/common/utility/ListReader.html @@ -1,165 +1,272 @@ - + + - -ListReader (neureka 1.0.0 API) - - - - + +ListReader (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class ListReader

    +
    neureka.common.utility
    +

    Class ListReader

    -
    java.lang.Object -
    neureka.common.utility.ListReader
    -
    -
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.common.utility.ListReader
      • +
      +
    • +
    +
    +
      +

    • -
      public final class ListReader -extends Object
      +
      +
      public final class ListReader
      +extends java.lang.Object
      This is a simple utility class which traverses nested data structures and converts them into information which can be used to instantiate a tensor, namely: A flat data array, a shape array and a type class.
      -
    -
    -
      - -
    • -
      -

      Nested Class Summary

      -
      Nested Classes
      -
      -
      Modifier and Type
      -
      Class
      -
      Description
      -
      static class 
      - -
       
      +
    • +
    - +
    + - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Method Details

      -
        -
      • -
        -

        read

        -
        public static ListReader.Result read(List<Object> data, - Function<Object,Object> valueFilter)
        -
        Reads the provided data and turns it into a ListReader.Result object, +
          +
        • + + +

          Method Detail

          + + + +
            +
          • +

            read

            +
            public static ListReader.Result read(java.util.List<java.lang.Object> data,
            +                                     java.util.function.Function<java.lang.Object,java.lang.Object> valueFilter)
            +
            Reads the provided data and turns it into a ListReader.Result object, containing a flattened list of the data alongside its shape and data type.
            -
            -
            Parameters:
            +
            +
            Parameters:
            data - A list of data elements or nested lists with an arbitrary degree of nesting.
            valueFilter - A filter for the elements in the provided data list.
            -
            Returns:
            +
            Returns:
            The result object containing data, data type and shape information.
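For example, the nested list below should, going by the description above, be flattened to six values with shape [2, 3]; the identity filter keeps every element as it is (expected behaviour, not verified here):

    java.util.List<Object> nested = java.util.Arrays.asList(
            java.util.Arrays.asList(1, 2, 3),
            java.util.Arrays.asList(4, 5, 6)
    );
    ListReader.Result result = ListReader.read(nested, v -> v); // identity value filter
    // Expected: result.getShape() -> [2, 3], result.getType() -> Integer, result.getData() -> [1, 2, 3, 4, 5, 6]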
            -
      -
    - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/common/utility/LogUtil.html b/docs/jdocs/neureka/common/utility/LogUtil.html index 430a21ef6..80fec3791 100644 --- a/docs/jdocs/neureka/common/utility/LogUtil.html +++ b/docs/jdocs/neureka/common/utility/LogUtil.html @@ -1,185 +1,301 @@ - + + - -LogUtil (neureka 1.0.0 API) - - - - + +LogUtil (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class LogUtil

    +
    neureka.common.utility
    +

    Class LogUtil

    -
    java.lang.Object -
    neureka.common.utility.LogUtil
    -
    -
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.common.utility.LogUtil
      • +
      +
    • +
    +
    +
      +

    • -
      public final class LogUtil -extends Object
      +
      +
      public final class LogUtil
      +extends java.lang.Object
      A utility class for message formatting.
      -
    -
    -
      - -
    • -
      -

      Constructor Summary

      -
      Constructors
      -
      -
      Constructor
      -
      Description
      - -
       
      +
    • +
    - +
    +
      +
    • + +
        +
      • + + +

        Constructor Summary

        + + + + + + + + +
        Constructors 
        Constructor and Description
        LogUtil() 
      • +
      -
    • -
      -

      Method Summary

      -
      -
      -
      -
      -
      Modifier and Type
      -
      Method
      -
      Description
      -
      static String
      -
      format(String withPlaceholders, - Object... toBePutAtPlaceholders)
      -
       
      -
      static <T> void
      -
      nullArgCheck(T var, - String thing, - Class<?> type, - String... notes)
      -
       
      -
      -
      -
      -
      -

      Methods inherited from class java.lang.Object

      -clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
      -
      +
        +
      • + + +

        Method Summary

        + + + + + + + + + + + + + + +
        All Methods Static Methods Concrete Methods 
        Modifier and TypeMethod and Description
        static java.lang.Stringformat(java.lang.String withPlaceholders, + java.lang.Object... toBePutAtPlaceholders) 
        static <T> voidnullArgCheck(T var, + java.lang.String thing, + java.lang.Class<?> type, + java.lang.String... notes) 
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
        • +
        +
      • +
    - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        LogUtil

        -
        public LogUtil()
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            LogUtil

            +
            public LogUtil()
          -
    • +
    -
  • -
    -

    Method Details

    -
      -
    • -
      -

      format

      -
      public static String format(String withPlaceholders, - Object... toBePutAtPlaceholders)
      -
      -
      Parameters:
      -
      withPlaceholders - The String which may or may not contain placeholder in the for of "{}".
      -
      toBePutAtPlaceholders - Arbitrary Objects which will be turned into - Strings instead of the placeholder brackets.
      -
      Returns:
      -
      A String containing the actual String representations of th Objects +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          format

          +
          public static java.lang.String format(java.lang.String withPlaceholders,
          +                                      java.lang.Object... toBePutAtPlaceholders)
          +
          +
          Parameters:
          +
withPlaceholders - The String which may or may not contain placeholders in the form of "{}".
          +
          toBePutAtPlaceholders - Arbitrary Objects which will be turned into + Strings instead of the placeholder brackets.
          +
          Returns:
          +
A String containing the actual String representations of the Objects instead of the placeholder brackets within the first argument.
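For instance (the output shown in the comment is the expected substitution, based on the placeholder description above):

    String message = LogUtil.format(
            "Tensor of shape {} could not be stored on device '{}'.",
            "[2, 3]", "GPU"
    );
    // -> "Tensor of shape [2, 3] could not be stored on device 'GPU'."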
          -
    • -
    • -
      -

      nullArgCheck

      -
      public static <T> void nullArgCheck(T var, - String thing, - Class<?> type, - String... notes)
      -
      +
    + + + + + +
      +
    • +

      nullArgCheck

      +
      public static <T> void nullArgCheck(T var,
      +                                    java.lang.String thing,
      +                                    java.lang.Class<?> type,
      +                                    java.lang.String... notes)
      +
    • +
  • - - +
    +
    - + + + + diff --git a/docs/jdocs/neureka/common/utility/SettingsLoader.html b/docs/jdocs/neureka/common/utility/SettingsLoader.html index e7c7fbc70..9f29ce48f 100644 --- a/docs/jdocs/neureka/common/utility/SettingsLoader.html +++ b/docs/jdocs/neureka/common/utility/SettingsLoader.html @@ -1,166 +1,277 @@ - + + - -SettingsLoader (neureka 1.0.0 API) - - - - + +SettingsLoader (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class SettingsLoader

    +
    neureka.common.utility
    +

    Class SettingsLoader

    -
    java.lang.Object -
    neureka.common.utility.SettingsLoader
    -
    -
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.common.utility.SettingsLoader
      • +
      +
    • +
    +
    +
      +

    • -
      public final class SettingsLoader -extends Object
      -
      This class is a helper class for Neureka instances (Thread local singletons). +
      +
      public final class SettingsLoader
      +extends java.lang.Object
      +
This class is a helper class for Neureka instances (Thread local singletons). It loads the settings property file and interprets its contents, which are then translated to the Neureka.Settings.
      -
    -
    -
    + + +
    +
    +
    +
    +
      +
    • -
    • -
      -

      Method Details

      -
        -
      • -
        -

        loadProperties

        -
        public static void loadProperties(Neureka instance)
        -
        +
          +
        • + + +

          Method Detail

          + + + +
            +
          • +

            loadProperties

            +
            public static void loadProperties(Neureka instance)
          • -
          • -
            -

            tryGroovyClosureOn

            -
            public static Object tryGroovyClosureOn(Object closure, - Object delegate)
            +
          + + + +
            +
          • +

            tryGroovyClosureOn

            +
            public static java.lang.Object tryGroovyClosureOn(java.lang.Object closure,
            +                                                  java.lang.Object delegate)
            This method makes it possible to configure the library via a Groovy DSL!
            -
            -
            Parameters:
            +
            +
            Parameters:
            closure - A Groovy closure which should be called with the provided delegate.
            delegate - The delegate for the provided closure (Can be a settings object).
            -
            Returns:
            +
            Returns:
The result returned by the provided closure.
            -
    • -
    • -
      -

      tryGroovyScriptsOn

      -
      public static void tryGroovyScriptsOn(Neureka instance, - Consumer<String> scriptConsumer)
      -
      +
    + + + +
      +
    • +

      tryGroovyScriptsOn

      +
      public static void tryGroovyScriptsOn(Neureka instance,
      +                                      java.util.function.Consumer<java.lang.String> scriptConsumer)
    - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/common/utility/package-frame.html b/docs/jdocs/neureka/common/utility/package-frame.html new file mode 100644 index 000000000..5a195ac37 --- /dev/null +++ b/docs/jdocs/neureka/common/utility/package-frame.html @@ -0,0 +1,27 @@ + + + + + +neureka.common.utility (neureka 1.0.1 API) + + + + +

    neureka.common.utility

diff --git a/docs/jdocs/neureka/common/utility/package-summary.html b/docs/jdocs/neureka/common/utility/package-summary.html
index e7298c520..bd8e2d8f8 100644
[Regenerated Javadoc summary page for package neureka.common.utility; the title changes from "neureka 1.0.0 API" to "neureka 1.0.1 API" while the class summary stays the same:
  Cache<O>                - a simple, fixed size cache for immutable objects which are shared throughout the library runtime
  Cache.LazyEntry<K,V>    - lazy cache entries whose values are only calculated when the entry is stored in the cache
  DataConverter           - this class is a singleton
  DataConverter.ForTensor - a stateful, parallelized converter turning the internal data array of a tensor into another data array based on a provided lambda
  DataConverter.Utility   - a static utility class containing the actual conversion logic, usually referenced by the converter lambdas
  ListReader              - traverses nested data structures and converts them into the information needed to instantiate a tensor: a flat data array, a shape array and a type class
  ListReader.Result       - (no description)
  LogUtil                 - a utility class for message formatting
  SettingsLoader          - a helper class for Neureka instances (thread-local singletons)
Only the generated HTML layout differs between the two sides of the hunk.]
diff --git a/docs/jdocs/neureka/common/utility/package-tree.html b/docs/jdocs/neureka/common/utility/package-tree.html
index d8c6e0a37..ee2799270 100644
[Regenerated Javadoc class-hierarchy page "Hierarchy For Package neureka.common.utility"; the title changes from "neureka 1.0.0 API" to "neureka 1.0.1 API".]
diff --git a/docs/jdocs/neureka/devices/AbstractBaseDevice.html b/docs/jdocs/neureka/devices/AbstractBaseDevice.html
index 002d551b7..013f2838e 100644
[Regenerated Javadoc page for "public abstract class AbstractBaseDevice<V> extends java.lang.Object implements Device<V>" (title 1.0.0 -> 1.0.1). Both sides of the hunk document the same members:
  Fields:       protected int _numberOfTensors, protected int _numberOfDataObjects
  Constructor:  AbstractBaseDevice()
  Methods:
    int numberOfStored()                    - the number of nd-arrays stored on this device (specified by Storage<V>)
    int numberOfDataObjects()               - not necessarily equal to numberOfStored(), because multiple tensors may share a single Data object (specified by Device<V>)
    boolean isEmpty()                       - a device is empty if there are no tensors stored on it (specified by Storage<V>)
    boolean contains(Tensor<V> o)           - whether the provided tensor is stored on this device (specified by Storage<V>)
    <T extends V> boolean has(Tensor<T> t)  - checks if the passed tensor is stored on this device, i.e. its data was created by and is referenced on this device (specified by Device<V>)
The remaining differences are the generated HTML layout and the fully qualified java.* type names.]
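[The counting and containment queries above are easy to exercise from user code. A minimal sketch, assuming Tensor.of(...) as the usual factory entry point; that factory and the exact CPU type parameters are assumptions, not part of this hunk:]

    import neureka.Tensor;
    import neureka.devices.host.CPU;

    public class DeviceBookkeepingExample {
        public static void main(String[] args) {
            CPU cpu = CPU.get();                        // the default device, an AbstractBaseDevice subtype
            Tensor<Double> t = Tensor.of(1d, 2d, 3d);   // assumed factory method
            cpu.store(t);                               // Storage API: keep the tensor's data on this device

            System.out.println(cpu.isEmpty());              // false once something is stored
            System.out.println(cpu.has(t));                 // true: the tensor's data lives on this device
            System.out.println(cpu.numberOfStored());       // number of nd-arrays stored here
            System.out.println(cpu.numberOfDataObjects());  // may be smaller, since tensors can share one Data object
        }
    }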
diff --git a/docs/jdocs/neureka/devices/AbstractDevice.html b/docs/jdocs/neureka/devices/AbstractDevice.html
index 0c578e7ba..d70dd31d6 100644
[Regenerated Javadoc page for class AbstractDevice<V> (title 1.0.0 -> 1.0.1), the abstract base applicable to most concrete implementations of the Device interface. Both sides document the same members:
  Field:        protected org.slf4j.Logger _log
  Constructor:  protected AbstractDevice()
  Public API:
    boolean update(Component.OwnerChangeRequest<Tensor<V>> changeRequest)  - informs the device that it is being added to, removed from or replaced on a tensor
    Device<V> approve(ExecutionCall<? extends Device<?>> call)             - checks whether a provided ExecutionCall is suitable for this device, useful when implementing custom backend operations
    <T extends V> Storage<V> store(Tensor<T> tensor)                       - stores the tensor's data in whatever format suits the underlying implementation (e.g. OpenCLDevice, FileDevice)
    <T extends V> Device.Access<T> access(Tensor<T> tensor)                - exposes the read/write access API for a tensor stored on this device (may be null if unsupported)
  Protected and abstract internals:
    _approveExecutionOf(Tensor<?>[] tensors, int d, Operation type), _cleaning(Object o, Runnable action),
    _swap(Tensor<T> former, Tensor<T> replacement), _sizeOccupiedBy(Tensor<T>), _readAll(Tensor<T>, boolean clone),
    _readItem(Tensor<T>, int index), _readArray(Tensor<T>, Class<A>, int start, int size),
    _writeItem(Tensor<T>, T item, int start, int size), _writeArray(Tensor<T>, Object array, int offset, int start, int size),
    _actualize(Tensor<?>), _virtualize(Tensor<?>), _dataTypeOf(Object rawData)
The remaining differences are the generated HTML layout and fully qualified java.* type names.]
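[For writers of custom backends, the protected hooks listed above are the surface to implement. A skeletal sketch of a subclass, kept abstract so the remaining Device<V> methods can stay unimplemented; the class name and the approval policy are made up, and import paths not shown in this hunk are assumed:]

    import neureka.Tensor;
    import neureka.backend.api.Operation;
    import neureka.devices.AbstractDevice;

    // Illustrative only: a partial device showing where two of the hooks plug in.
    public abstract class MyToyDevice extends AbstractDevice<Number> {

        @Override
        protected boolean _approveExecutionOf(Tensor<?>[] tensors, int d, Operation type) {
            // Arbitrary example policy: approve calls that do not request a derivative
            // (a negative derivative index is assumed here to mean "no derivative").
            return d < 0;
        }

        @Override
        protected <T extends Number> void _swap(Tensor<T> former, Tensor<T> replacement) {
            // Hand the device-side data of `former` over to `replacement` here.
            throw new UnsupportedOperationException("not implemented in this sketch");
        }
    }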
diff --git a/docs/jdocs/neureka/devices/AbstractDeviceData.html b/docs/jdocs/neureka/devices/AbstractDeviceData.html
index d377d9d7c..7fd51baba 100644
[Regenerated Javadoc page for class AbstractDeviceData<T> (title 1.0.0 -> 1.0.1). Both sides document the same methods:
    Device<T> owner()           - the device which allocated the memory wrapped by this data array
    Object getOrNull()          - the raw, backend specific data object (e.g. OpenCL memory object or JVM array); an unbiased view on the raw data that exposes mutable state, so handle with care
    DataType<T> dataType()      - the data type of the raw data array
    void incrementUsageCount() / void decrementUsageCount()   (specified by DeviceData<T>)
    int usages()                - how many nd-arrays currently reference this data object; can exceed one because sliced, transposed and reshaped nd-arrays share their parent's data
Only the generated HTML layout and fully qualified java.* type names differ.]
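[A small sketch of the Data accessors above from the caller's side; how the Data<?> handle is obtained is deliberately left open, since this hunk only documents the accessor methods:]

    import neureka.Data;

    public final class DataInspection {
        // Purely illustrative: inspect any Data<?> handle, however it was obtained.
        static String describe(Data<?> data) {
            Object raw = data.getOrNull(); // raw backend object (JVM array, OpenCL buffer, ...); mutable, read-only use here
            return "allocated by " + data.owner()       // the Device that allocated the memory
                 + ", type " + data.dataType()          // DataType of the raw array
                 + ", raw " + (raw == null ? "absent" : raw.getClass().getSimpleName())
                 + ", shared by " + data.usages() + " nd-array(s)"; // slices/reshapes share their parent's data
        }
    }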
diff --git a/docs/jdocs/neureka/devices/Device.Access.html b/docs/jdocs/neureka/devices/Device.Access.html
index 01b8006ee..21ae4b671 100644
[Regenerated Javadoc page for the nested interface Device.Access<V> (title 1.0.0 -> 1.0.1), the per-tensor read/write API returned by Device.access(tensor). The page warns that it exposes the true underlying data of a tensor and does not take slice, permute or step information into account. Both sides document the same methods:
    Device.Writer write(V item)                               - write a single scalar item at one or more positions
    Device.Writer writeFrom(Object array, int offset)         - write data from an array, starting at the given offset within that array
    default void writeFrom(Object array)                      - write data to a tensor already stored on this device
    V readAt(int index)                                       - read one tensor item at the given location
    <A> A readArray(Class<A> arrayType, int start, int size)  - read a chunk of items as a (primitive) array of the given type
    Object readAll(boolean clone)                             - read the full data array, optionally as a copy
    int getDataSize()                                         - size of the underlying data array
    void cleanup(Runnable action)                             - custom memory cleanup to run when the accessed tensor gets garbage collected
    <T> Data<T> actualize() / <T> Data<T> virtualize()
Only the generated HTML layout and fully qualified java.* type names differ.]
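[A hedged usage sketch of the access API above, assuming a double-typed tensor t that is already stored on device; how t and device were created is outside this hunk:]

    import neureka.Tensor;
    import neureka.devices.Device;

    public final class AccessExample {
        // Illustrative: poke at the raw data of a tensor `t` stored on `device`.
        static void pokeAround(Device<Double> device, Tensor<Double> t) {
            Device.Access<Double> access = device.access(t);

            Double first = access.readAt(0);                        // one item, by flat index
            double[] chunk = access.readArray(double[].class, 0,    // a chunk of items as a primitive array
                                              Math.min(4, access.getDataSize()));
            Object all = access.readAll(true);                      // the full data array, cloned

            access.write(42.0).at(0);                               // overwrite the first item
            System.out.println(first + " / " + chunk.length + " / " + all);
        }
    }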
diff --git a/docs/jdocs/neureka/devices/Device.In.html b/docs/jdocs/neureka/devices/Device.In.html
index bd3ac3169..222e39031 100644
[Regenerated Javadoc page for the nested interface Device.In (title 1.0.0 -> 1.0.1), the second part of the fluent API for executing tensors on a device temporarily. Its single method is unchanged:
    <R> R in(Supplier<R> lambda)  - runs the lambda while the previously provided tensors are stored on this device and returns the lambda's result (which may be anything)
Only the generated HTML layout and fully qualified java.* type names differ. A usage sketch combining this with Device.borrow(...) follows the Device.html hunk below.]
diff --git a/docs/jdocs/neureka/devices/Device.Writer.html b/docs/jdocs/neureka/devices/Device.Writer.html
index fb65214cc..a5740b8a9 100644
[Regenerated Javadoc page for the nested interface Device.Writer (title 1.0.0 -> 1.0.1), which completes a write request started through Device.Access. Both sides document the same methods:
    default void at(int index)            - write the previously specified data at the position targeted by the index
    void intoRange(int start, int limit)  - write it into the half-open range [start, limit), limit being exclusive
    void fully()                          - write to the entire data array; equivalent to intoRange(0, tensor.size())
Only the generated HTML layout and fully qualified java.* type names differ.]
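[Putting the last two pages together, a write is first parameterised via Device.Access and then targeted via Device.Writer. A hedged sketch, again assuming a tensor t already stored on device:]

    import neureka.Tensor;
    import neureka.devices.Device;

    public final class WriterExample {
        // Illustrative: three ways to finish a write started with access(t).write(...) / writeFrom(...).
        static void fillExamples(Device<Double> device, Tensor<Double> t) {
            device.access(t).write(0.0).fully();                      // overwrite every item with 0.0
            device.access(t).write(1.0).intoRange(0, 4);              // items 0..3 become 1.0
            device.access(t).writeFrom(new double[]{2, 3}, 0).at(5);  // copy two items in at position 5
        }
    }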
diff --git a/docs/jdocs/neureka/devices/Device.html b/docs/jdocs/neureka/devices/Device.html
index d8f1db96b..d5b156ce0 100644
[Regenerated Javadoc page for "public interface Device<V> extends Component<Tensor<V>>, Storage<V>" (title 1.0.0 -> 1.0.1). Known implementing classes: AbstractBaseDevice, AbstractDevice, CPU, FileDevice, OpenCLDevice. Devices are computational devices for storing tensors which may also expose an API for executing operations on them; a tensor stored on a device has its "isOutsourced" property set to true. The documented API covers:
    static lookups:  find(String... searchKeys) and find(Class<D> deviceType, String... searchKeys)  - Optional results;
                     any(String... searchKeys)  - falls back to the CPU instance when nothing matches;
                     get(String... searchKeys) and get(Class<D>, String...)  - may return null when nothing matches
    bookkeeping:     numberOfDataObjects(), dispose(), has(Tensor<T>), free(Tensor<T>), access(Tensor<T>), approve(ExecutionCall<? extends Device<?>>)
    allocation:      allocate(DataType<T>, ...) variants and allocateFromAll(DataType<T>, NDConfiguration ndc, Object jvmData);
                     the old side of the hunk additionally shows allocate(DataType<T>, NDConfiguration) and allocateFromOne(DataType<T>, NDConfiguration, T initialValue)
    optimization:    optimizedOperationOf(Function function, String name) and optimizedFunctionOf(Function function, String name)
                     - let a device (e.g. OpenCL) compile a Function into a native kernel at runtime
    fluent API:      borrow(Tensor<V> first, Tensor<V>... rest)  - temporarily stores the given tensors on this device, runs a lambda via Device.In.in(...), then migrates the tensors back to their original devices
Apart from the allocation overloads shown, the differences are the generated HTML layout and fully qualified java.* type names.]
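[A hedged sketch of the lookup and fluent borrow APIs summarised above; the search keys and the plus(...) call are illustrative assumptions, not taken from this hunk:]

    import java.util.Optional;

    import neureka.Tensor;
    import neureka.devices.Device;
    import neureka.devices.opencl.OpenCLDevice;

    public final class DeviceLookupExample {

        static void lookups() {
            Device<Object> any = Device.any("first");                             // never null; falls back to the CPU instance
            Optional<OpenCLDevice> gpu = Device.find(OpenCLDevice.class, "gpu");  // empty Optional if nothing matches
            Device<Object> maybeNull = Device.get("gpu");                         // may be null if nothing matches
            System.out.println(any + " / " + gpu + " / " + maybeNull);
        }

        // Temporarily store a and b on `device`, run the lambda, then migrate both back to their original devices.
        static Tensor<Double> sumOn(Device<Double> device, Tensor<Double> a, Tensor<Double> b) {
            return device.borrow(a, b).in(() -> a.plus(b)); // `plus` assumed to be the elementwise add of the tensor API
        }
    }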
diff --git a/docs/jdocs/neureka/devices/DeviceCleaner.html b/docs/jdocs/neureka/devices/DeviceCleaner.html
index 444c68d0a..8c7bec67d 100644
--- a/docs/jdocs/neureka/devices/DeviceCleaner.html
+++ b/docs/jdocs/neureka/devices/DeviceCleaner.html
@@ -1,164 +1,269 @@
-DeviceCleaner (neureka 1.0.0 API)
+DeviceCleaner (neureka 1.0.1 API)

    neureka.devices

    Interface DeviceCleaner

    public interface DeviceCleaner

    Field Summary
    static final DeviceCleaner

    Method Detail

    getNewInstance
    static DeviceCleaner getNewInstance()

    register
    void register(java.lang.Object o, java.lang.Runnable action)
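    A small usage sketch for the two members documented above; the tracked object and the cleanup
    action are placeholders, and the "runs when the owner becomes unreachable" behaviour is the
    usual cleaner semantics rather than something stated in this excerpt.

        import neureka.devices.DeviceCleaner;

        public class CleanerExample {
            public static void main(String[] args) {
                DeviceCleaner cleaner = DeviceCleaner.getNewInstance();
                Object nativeBufferOwner = new Object(); // placeholder for an object holding native resources
                // Presumably runs once 'nativeBufferOwner' becomes unreachable (not stated in the excerpt above).
                cleaner.register(nativeBufferOwner, () -> System.out.println("native buffer released"));
            }
        }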
diff --git a/docs/jdocs/neureka/devices/DeviceData.html b/docs/jdocs/neureka/devices/DeviceData.html
index 55184b9f8..2b407692d 100644
--- a/docs/jdocs/neureka/devices/DeviceData.html
+++ b/docs/jdocs/neureka/devices/DeviceData.html
@@ -1,151 +1,253 @@
-DeviceData (neureka 1.0.0 API)
+DeviceData (neureka 1.0.1 API)

    neureka.devices

    Interface DeviceData<V>

    Type Parameters:
    V - The data type of the data.
    All Superinterfaces:
    Data<V>
    All Known Implementing Classes:
    AbstractDeviceData

    public interface DeviceData<V>
    extends Data<V>
    A sub-interface of the Data interface providing more device specific methods.

    Methods inherited from interface neureka.Data
    as, dataType, get, getOrNull, owner, usages

    Method Detail

    incrementUsageCount
    void incrementUsageCount()

    decrementUsageCount
    void decrementUsageCount()
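    The two counter methods suggest an acquire/release discipline. The helper below is a plausible
    pattern built only from the signatures above, not something taken from the library's own sources.

        import neureka.devices.DeviceData;

        public class UsageCountExample {
            // Keeps the device allocation referenced while 'work' runs, then releases it again.
            static <V> void useTemporarily(DeviceData<V> data, Runnable work) {
                data.incrementUsageCount();
                try {
                    work.run();
                } finally {
                    data.decrementUsageCount();
                }
            }
        }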
diff --git a/docs/jdocs/neureka/devices/ReferenceCounter.ChangeEvent.html b/docs/jdocs/neureka/devices/ReferenceCounter.ChangeEvent.html
index e16011359..b38ab95ae 100644
--- a/docs/jdocs/neureka/devices/ReferenceCounter.ChangeEvent.html
+++ b/docs/jdocs/neureka/devices/ReferenceCounter.ChangeEvent.html
@@ -1,184 +1,302 @@
-ReferenceCounter.ChangeEvent (neureka 1.0.0 API)
+ReferenceCounter.ChangeEvent (neureka 1.0.1 API)

    neureka.devices

    Class ReferenceCounter.ChangeEvent

    java.lang.Object
        neureka.devices.ReferenceCounter.ChangeEvent

    public static class ReferenceCounter.ChangeEvent
    extends java.lang.Object

    Constructor Summary
    ChangeEvent(ReferenceCounter.ChangeType changeType, int change, int count)
diff --git a/docs/jdocs/neureka/devices/ReferenceCounter.ChangeType.html b/docs/jdocs/neureka/devices/ReferenceCounter.ChangeType.html
index 8e2adb239..5f1b06289 100644
--- a/docs/jdocs/neureka/devices/ReferenceCounter.ChangeType.html
+++ b/docs/jdocs/neureka/devices/ReferenceCounter.ChangeType.html
@@ -1,229 +1,354 @@
-ReferenceCounter.ChangeType (neureka 1.0.0 API)
+ReferenceCounter.ChangeType (neureka 1.0.1 API)

    neureka.devices

    Enum ReferenceCounter.ChangeType

    java.lang.Object
        java.lang.Enum<ReferenceCounter.ChangeType>
            neureka.devices.ReferenceCounter.ChangeType

    Method Detail

    values
    public static ReferenceCounter.ChangeType[] values()
    Returns an array containing the constants of this enum type, in the order they are declared.
    This method may be used to iterate over the constants as follows:

        for (ReferenceCounter.ChangeType c : ReferenceCounter.ChangeType.values())
            System.out.println(c);

    Returns:
    an array containing the constants of this enum type, in the order they are declared

    valueOf
    public static ReferenceCounter.ChangeType valueOf(java.lang.String name)
    Returns the enum constant of this type with the specified name. The string must match exactly
    an identifier used to declare an enum constant in this type. (Extraneous whitespace characters
    are not permitted.)
    Parameters:
    name - the name of the enum constant to be returned.
    Returns:
    the enum constant with the specified name
    Throws:
    java.lang.IllegalArgumentException - if this enum type has no constant with the specified name
    java.lang.NullPointerException - if the argument is null
diff --git a/docs/jdocs/neureka/devices/ReferenceCounter.html b/docs/jdocs/neureka/devices/ReferenceCounter.html
index 21b0ada8b..0b7eae27d 100644
--- a/docs/jdocs/neureka/devices/ReferenceCounter.html
+++ b/docs/jdocs/neureka/devices/ReferenceCounter.html
@@ -1,203 +1,330 @@
-ReferenceCounter (neureka 1.0.0 API)
+ReferenceCounter (neureka 1.0.1 API)

    neureka.devices

    Class ReferenceCounter

    java.lang.Object
        neureka.devices.ReferenceCounter

    public final class ReferenceCounter
    extends java.lang.Object

    Method Detail

    increment
    public void increment()

    decrement
    public void decrement()

    fullDelete
    public void fullDelete()

    count
    public int count()
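    A sketch of the acquire/release cycle these four methods imply. The policy of calling
    fullDelete() once the count reaches zero is an assumption; the excerpt does not state how
    the library itself drives the counter.

        import neureka.devices.ReferenceCounter;

        public class ReferenceCounterExample {
            static void acquire(ReferenceCounter counter) {
                counter.increment();
            }

            static void release(ReferenceCounter counter) {
                counter.decrement();
                if (counter.count() == 0)
                    counter.fullDelete(); // assumed policy: drop the underlying resource once unused
            }
        }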
diff --git a/docs/jdocs/neureka/devices/Storage.html b/docs/jdocs/neureka/devices/Storage.html
index 3390df8f0..348c7c181 100644
--- a/docs/jdocs/neureka/devices/Storage.html
+++ b/docs/jdocs/neureka/devices/Storage.html
@@ -1,211 +1,321 @@
-Storage (neureka 1.0.0 API)
+Storage (neureka 1.0.1 API)

    neureka.devices

    Interface Storage<V>

    All Known Subinterfaces:
    Device<V>, FileHandle<FinalType,ValType>
    All Known Implementing Classes:
    AbstractBaseDevice, AbstractDevice, CPU, CSVHandle, FileDevice, IDXHandle, OpenCLDevice

    public interface Storage<V>
    This is an abstract interface which simply describes "a thing that stores tensors".
    Therefore, the expected method signatures defining this abstract entity boil down to a
    "store" and a "restore" method. Classes like "OpenCLDevice" or "FileDevice" implement
    this interface indirectly (via the Device interface) because they are in essence also
    just entities that store tensors!
    Besides the "Device" interface this interface is also extended by the FileHandle
    interface, which is an internal component of the FileDevice architecture...

    Method Detail

    store
    <T extends V> Storage<V> store(Tensor<T> tensor)
    Implementations of this method ought to store the data of the tensor in whatever format
    suits the underlying implementation and/or final type. Classes like "OpenCLDevice" or
    "FileDevice" for example are tensor storages.
    Type Parameters:
    T - A valid data type of the tensor which should be stored on this device.
    Parameters:
    tensor - The tensor whose data ought to be stored.
    Returns:
    This Storage instance, to allow for method chaining.

    restore
    Storage<V> restore(Tensor<V> tensor)
    Parameters:
    tensor - The tensor whose data ought to be restored (loaded to RAM/CPU device).
    Returns:
    This Storage instance, to allow for method chaining.

    numberOfStored
    int numberOfStored()
    Returns:
    The number of nd-arrays stored on this.

    isEmpty
    boolean isEmpty()
    Returns:
    The truth value determining if there are no tensors stored on this, or false if there are tensors stored.

    contains
    boolean contains(Tensor<V> o)
    Parameters:
    o - The tensor which may or may not be stored on this.
    Returns:
    The truth value determining if the provided tensor is stored on this.
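    A minimal round trip using only the five methods above; the storage and tensor instances are
    assumed to be supplied by the caller, and the neureka.Tensor import path is an assumption.

        import neureka.Tensor;
        import neureka.devices.Storage;

        public class StorageExample {
            static <V> void roundTrip(Storage<V> storage, Tensor<V> tensor) {
                storage.store(tensor);                        // data now lives on the storage
                System.out.println(storage.contains(tensor)); // true
                System.out.println(storage.numberOfStored()); // at least 1, so isEmpty() is false
                storage.restore(tensor);                      // data is loaded back to RAM/CPU
            }
        }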
diff --git a/docs/jdocs/neureka/devices/file/CSVHandle.html b/docs/jdocs/neureka/devices/file/CSVHandle.html
index 458925de3..c42cd962e 100644
--- a/docs/jdocs/neureka/devices/file/CSVHandle.html
+++ b/docs/jdocs/neureka/devices/file/CSVHandle.html
@@ -1,580 +1,805 @@
-CSVHandle (neureka 1.0.0 API)
+CSVHandle (neureka 1.0.1 API)

    neureka.devices.file

    Class CSVHandle

    java.lang.Object
        neureka.devices.file.CSVHandle

    All Implemented Interfaces:
    FileHandle<CSVHandle,java.lang.String>, Storage<java.lang.String>

    public final class CSVHandle
    extends java.lang.Object
    This class is one of many extensions of the AbstractFileHandle which is therefore ultimately
    an implementation of the FileHandle interface. Like other FileHandle implementations this
    class represents a file of a given type, in this case it represents a CSV file.

    Field Detail
    protected static org.slf4j.Logger _LOG
    protected final java.lang.String _fileName
    protected int _size
    Fields inherited from interface neureka.devices.file.FileHandle: FACTORY

    Constructor Detail
    public CSVHandle(java.lang.String fileName, java.util.Map<java.lang.String,java.lang.Object> settings)

    Method Detail

    store
    public <T extends java.lang.String> Storage<java.lang.String> store(Tensor<T> tensor)
    Description copied from interface: Storage
    Implementations of this method ought to store the data of the tensor in whatever format suits
    the underlying implementation and/or final type. Classes like "OpenCLDevice" or "FileDevice"
    for example are tensor storages.
    Type Parameters:
    T - A valid data type of the tensor which should be stored on this device.
    Parameters:
    tensor - The tensor whose data ought to be stored.
    Returns:
    This Storage instance, to allow for method chaining.

    _loadData
    protected java.lang.Object _loadData()

    load
    public Tensor<java.lang.String> load() throws java.io.IOException
    Description copied from interface: FileHandle
    An implementation of this method ought to create a new tensor instance containing the data
    which is stored in the file whose access this FileHandle manages.
    Returns:
    A new tensor filled with the data from the targeted file.
    Throws:
    java.io.IOException - If loading goes wrong an exception is being thrown.

    getValueSize
    public int getValueSize()
    Description copied from interface: FileHandle
    This method returns the size of the value which is stored in the tensor of the file which is
    managed by this FileHandle. The size however does not represent the byte size of the data.
    This means that the returned size is dependent on the data type of the underlying data of the file...
    Returns:
    The size of the value of the underlying tensor body.

    getDataSize
    public int getDataSize()
    Description copied from interface: FileHandle
    This method returns the byte size of the data which is stored in the tensor of the file which
    is managed by this FileHandle. The underlying datatype of the data within the file does not matter.
    Returns:
    The byte size of the data of the underlying tensor body.

    getTotalSize
    public int getTotalSize()
    Description copied from interface: FileHandle
    This method returns the number of bytes which are used to store the tensor in the file whose
    access is being managed by an implementation of the FileHandle interface.
    Metadata stored inside the file will also be included in this returned size.
    Returns:
    The byte size of all the bytes used to represent the tensor in the file.

    getDataType
    public DataType<?> getDataType()
    Returns:
    The data type of the tensor stored in the file which is managed by a FileHandle.

    getShape
    public Shape getShape()
    Returns:
    The shape of the tensor stored in the file which is managed by a FileHandle.

    getDelimiter
    public java.lang.String getDelimiter()

    isFirstRowIsLabels
    public boolean isFirstRowIsLabels()

    getColLabels
    public java.lang.String[] getColLabels()

    isFirstColIsIndex
    public boolean isFirstColIsIndex()

    getRowLabels
    public java.lang.String[] getRowLabels()

    getNumberOfRows
    public java.lang.Integer getNumberOfRows()

    getNumberOfColumns
    public java.lang.Integer getNumberOfColumns()

    numberOfStored
    public int numberOfStored()
    Specified by: numberOfStored in interface Storage<V>
    Returns:
    The number of nd-arrays stored on this.

    isEmpty
    public boolean isEmpty()
    Specified by: isEmpty in interface Storage<V>
    Returns:
    The truth value determining if there are no tensors stored on this, or false if there are tensors stored.

    contains
    public boolean contains(Tensor<V> o)
    Specified by: contains in interface Storage<V>
    Parameters:
    o - The tensor which may or may not be stored on this.
    Returns:
    The truth value determining if the provided tensor is stored on this.

    _loadFile
    protected File _loadFile()

    _loadFileInputStream
    protected java.io.FileInputStream _loadFileInputStream() throws java.io.IOException

    free
    public C free()
    Description copied from interface: FileHandle
    An implementation of this method ought to "free" up the memory used to store a tensor.
    Therefore, the method is expected to delete the underlying file whose access this very
    FileHandle implementation manages. The method also returns an instance of the final
    implementation of this class, meaning it adheres to the factory pattern.
    Specified by: free in interface FileHandle<C,V>
    Returns:
    A reference of this very object in order to enable method chaining.

    getLocation
    public java.lang.String getLocation()
    Specified by: getLocation in interface FileHandle<C,V>
    Returns:
    The full path as well as name of the file which stores a tensor.

    getFileName
    public java.lang.String getFileName()
    Specified by: getFileName in interface FileHandle<C,V>
    Returns:
    The name of the file which stores a tensor.

    restore
    public Storage<V> restore(Tensor<V> tensor)
    Specified by: restore in interface Storage<V>
    Parameters:
    tensor - The tensor whose data ought to be restored (loaded to RAM/CPU device).
    Returns:
    This Storage instance, to allow for method chaining.

    extension
    public final java.lang.String extension()
    Description copied from interface: FileHandle
    The file ending which comes after the '.' character...
    Specified by: extension in interface FileHandle<C,V>
    Returns:
    The file ending which implies the encoding of the data in the file.
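    A sketch of reading a CSV file through this handle. The file path and the empty settings map
    are placeholders; the supported settings keys are not listed in this excerpt, and the
    neureka.Tensor import path is an assumption.

        import java.io.IOException;
        import java.util.Collections;
        import neureka.Tensor;
        import neureka.devices.file.CSVHandle;

        public class CsvHandleExample {
            public static void main(String[] args) throws IOException {
                CSVHandle csv = new CSVHandle("data/table.csv", Collections.emptyMap());
                Tensor<String> table = csv.load();   // reads the file into a tensor of strings
                System.out.println(csv.getNumberOfRows() + " x " + csv.getNumberOfColumns());
                if (csv.isFirstRowIsLabels())
                    System.out.println(String.join(", ", csv.getColLabels()));
                System.out.println(csv.getShape());
                System.out.println(table);           // the loaded string tensor itself
            }
        }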
diff --git a/docs/jdocs/neureka/devices/file/FileDevice.html b/docs/jdocs/neureka/devices/file/FileDevice.html
index 0f7517d98..a60c25c06 100644
--- a/docs/jdocs/neureka/devices/file/FileDevice.html
+++ b/docs/jdocs/neureka/devices/file/FileDevice.html
@@ -1,510 +1,655 @@
-FileDevice (neureka 1.0.0 API)
+FileDevice (neureka 1.0.1 API)

    neureka.devices.file

    Class FileDevice

    java.lang.Object
        neureka.devices.AbstractBaseDevice<java.lang.Object>
            neureka.devices.file.FileDevice

    All Implemented Interfaces:
    Component<Tensor<java.lang.Object>>, Device<java.lang.Object>, Storage<java.lang.Object>

    public final class FileDevice
    extends AbstractBaseDevice<java.lang.Object>
    The FileDevice is a Device implementation responsible for reading tensors from and/or
    writing them to a given directory.

    The abstraction provided by the "Device" interface does not necessitate that concrete
    implementations represent accelerator hardware.
    Generally speaking a device is a thing that stores tensors and optionally also exposes the
    Device.Access API for data access as well as an API useful for implementing operations...
    But an implementation might also represent a simple storage device like your local SSD or
    HDD, or in this case, a directory...

    The directory which ought to be governed by an instance of this class has to be passed to
    the at(String) factory method (as a relative path), after which the files within this
    directory will be read, making potential tensors accessible.
    Tensors on a file device however are not loaded into memory entirely; instead a mere file
    handle for each "file tensor" is instantiated.
    Therefore, tensors that are stored on this device are not fit for computation.
    The restore(Tensor) method has to be called in order to load the provided tensor back into RAM.

    A FileDevice can load PNG, JPG and IDX files. By default, tensors will be stored as IDX
    files if not explicitly specified otherwise.

    Method Detail

    at
    public static FileDevice at(java.lang.String path)
    Parameters:
    path - The directory path for which the responsible FileDevice instance ought to be returned.
    Returns:
    A FileDevice instance representing the provided directory path and all compatible files within it.

    load
    public <V> java.util.Optional<Tensor<V>> load(java.lang.String filename) throws java.io.IOException

    load
    public <V> java.util.Optional<Tensor<V>> load(java.lang.String filename, java.util.Map<java.lang.String,java.lang.Object> conf) throws java.io.IOException

    fileHandleOf
    public FileHandle<?,?> fileHandleOf(Tensor<?> tensor)

    dispose
    public void dispose()
    Description copied from interface: Device
    This method signals the device to get ready for garbage collection. A given device may have
    resources which ought to be freed when it is no longer used. One may also choose to do
    resource freeing manually.

    restore
    public Device<java.lang.Object> restore(Tensor<java.lang.Object> tensor)
    Parameters:
    tensor - The tensor whose data ought to be restored (loaded to RAM/CPU device).
    Returns:
    This Storage instance, to allow for method chaining.

    store
    public <T> Device<java.lang.Object> store(Tensor<T> tensor)
    Implementations of this method ought to store the data of the tensor in whatever format suits
    the underlying implementation and/or final type. Classes like "OpenCLDevice" or "FileDevice"
    for example are tensor storages.
    Type Parameters:
    T - A valid data type of the tensor which should be stored on this device.
    Parameters:
    tensor - The tensor whose data ought to be stored.
    Returns:
    This Storage instance, to allow for method chaining.

    store
    public <T> FileDevice store(Tensor<T> tensor, java.lang.String filename)
    Stores the given tensor in the file system with the given filename.
    Type Parameters:
    T - The type of the tensor.
    Parameters:
    tensor - The tensor to store
    filename - The filename of the file containing the tensor.
    Returns:
    The file device itself.

    store
    public <T> FileDevice store(Tensor<T> tensor, java.lang.String filename, java.util.Map<java.lang.String,java.lang.Object> configurations)
    Stores the given tensor in the file system with the given filename.
    Type Parameters:
    T - The type of the tensor.
    Parameters:
    tensor - The tensor to store
    filename - The filename of the file containing the tensor.
    configurations - The configurations to use when storing the tensor.
    Returns:
    The file device itself.

    has
    public <T> boolean has(Tensor<T> tensor)
    Description copied from class: AbstractBaseDevice
    This method checks if the passed tensor is stored on this Device instance.
    "Stored" means that the data of the tensor was created by this device.
    This data is referenced inside the tensor...
    Specified by: has in interface Device<java.lang.Object>
    Overrides: has in class AbstractBaseDevice<java.lang.Object>
    Type Parameters:
    T - The type parameter for the value type of the tensor, which must be supported by this Device.
    Parameters:
    tensor - The tensor in question.
    Returns:
    The truth value of the fact that the provided tensor is on this device.

    free
    public <T> Device<java.lang.Object> free(Tensor<T> tensor)
    Description copied from interface: Device
    Use this to remove the provided tensor from this Device!
    Type Parameters:
    T - The type parameter for the value type of the tensor, which must be supported by this Device.
    Parameters:
    tensor - The tensor which ought to be removed from this Device.
    Returns:
    This very instance to allow for method chaining.

    access
    public <T> Device.Access<T> access(Tensor<T> tensor)
    Description copied from interface: Device
    This method exposes the tensor access API for reading from or writing to a tensor stored on
    this device. It may return null if this device does not support accessing stored tensors.
    Type Parameters:
    T - The type parameter of the tensor for which the access API should be returned.
    Parameters:
    tensor - The tensor whose data ought to be accessed.
    Returns:
    The tensor access API for reading from or writing to a tensor stored on this device.

    approve
    public Device<java.lang.Object> approve(ExecutionCall<? extends Device<?>> call)
    Description copied from interface: Device
    This method is used internally to give Device implementations the opportunity to perform some
    exception handling before the ExecutionCall will be dispatched.
    Use this for debugging when doing custom backend operations.
    Parameters:
    call - The ExecutionCall which should be approved by this Device before execution.
    Returns:
    This very instance to allow for method chaining.

    allocate
    public <V> Data<V> allocate(DataType<V> dataType, NDConfiguration ndc)

    allocateFromAll
    public <T> Data<T> allocateFromAll(DataType<T> dataType, NDConfiguration ndc, java.lang.Object jvmData)

    optimizedOperationOf
    public Operation optimizedOperationOf(Function function, java.lang.String name)
    Description copied from interface: Device
    This method tries to allow this device to produce an optimized Operation based on the provided
    function. This is especially useful in an OpenCL context which can compile the function into
    native GPU kernels at runtime.
    Parameters:
    function - The function which should be turned into an optimized operation.
    name - The name of the returned operation.
    Returns:
    An optimized operation based on the provided function, or null if optimization is not possible.

    update
    public boolean update(Component.OwnerChangeRequest<Tensor<java.lang.Object>> changeRequest)
    Description copied from interface: Component
    Components are not the slaves of their owners. If the owner registers any state changes related
    to a given component, then said component will be informed by the owner about the change as
    well as receive
@@ -514,54 +659,125 @@ update
    is being added to, or removed from, its current owner. If components hold references to their
    owners then this method gives them the ability to update said reference when a new owner takes
    over the components of an old one.
    The Component.OwnerChangeRequest implementation instance passed to this method informs this
    component about the current state change and its type (Component.OwnerChangeRequest.type()).
    If this method returns false then this means that this component rejects the proposed update.
    The component owner will then abort the proposed change.
    Parameters:
    changeRequest - A Component.OwnerChangeRequest implementation instance used to communicate the
    type of change, context information and the ability to execute the change directly.
    Returns:
    The truth value determining if the state change should be aborted or not.

    toString
    public java.lang.String toString()
    Overrides: toString in class java.lang.Object

    getDirectory
    public java.lang.String getDirectory()

    getLoadable
    public java.util.List<java.lang.String> getLoadable()

    getLoaded
    public java.util.List<java.lang.String> getLoaded()
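    A round trip through a FileDevice using only the documented at(...), store(...), and load(...)
    members; the directory and file names are placeholders, and the neureka.Tensor import path is
    an assumption.

        import java.io.IOException;
        import java.util.Optional;
        import neureka.Tensor;
        import neureka.devices.file.FileDevice;

        public class FileDeviceExample {
            // Persists a tensor into the "data/tensors" directory and reads it back.
            static <V> Optional<Tensor<V>> saveAndReload(Tensor<V> tensor) throws IOException {
                FileDevice dir = FileDevice.at("data/tensors");
                dir.store(tensor, "my-tensor.idx"); // stored as an IDX file by default
                return dir.load("my-tensor.idx");   // handle-backed; call restore(...) before computing with it
            }
        }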
diff --git a/docs/jdocs/neureka/devices/file/FileHandle.html b/docs/jdocs/neureka/devices/file/FileHandle.html
index 9d8ecb0a2..062ff2808 100644
--- a/docs/jdocs/neureka/devices/file/FileHandle.html
+++ b/docs/jdocs/neureka/devices/file/FileHandle.html
@@ -1,331 +1,472 @@
-FileHandle (neureka 1.0.0 API)
+FileHandle (neureka 1.0.1 API)

    neureka.devices.file

    Interface FileHandle<FinalType,ValType>

    public interface FileHandle<FinalType,ValType>
    extends Storage<ValType>

    Field Detail
    FACTORY
    static final neureka.devices.file.HandleFactory FACTORY

    Method Detail

    load
    Tensor<ValType> load() throws java.io.IOException
    An implementation of this method ought to create a new tensor instance containing the data
    which is stored in the file whose access this FileHandle manages.
    Returns:
    A new tensor filled with the data from the targeted file.
    Throws:
    java.io.IOException - If loading goes wrong an exception is being thrown.

    free
    FinalType free() throws java.io.IOException
    An implementation of this method ought to "free" up the memory used to store a tensor.
    Therefore, the method is expected to delete the underlying file whose access this very
    FileHandle implementation manages. The method also returns an instance of the final
    implementation of this class, meaning it adheres to the factory pattern.
    Returns:
    A reference of this very object in order to enable method chaining.
    Throws:
    java.io.IOException - Freeing / deleting resources might result in io exceptions.

    getValueSize
    int getValueSize()
    This method returns the size of the value which is stored in the tensor of the file which is
    managed by this FileHandle. The size however does not represent the byte size of the data.
    This means that the returned size is dependent on the data type of the underlying data of the file...
    Returns:
    The size of the value of the underlying tensor body.

    getDataSize
    int getDataSize()
    This method returns the byte size of the data which is stored in the tensor of the file which
    is managed by this FileHandle. The underlying datatype of the data within the file does not matter.
    Returns:
    The byte size of the data of the underlying tensor body.

    getTotalSize
    int getTotalSize()
    This method returns the number of bytes which are used to store the tensor in the file whose
    access is being managed by an implementation of the FileHandle interface.
    Metadata stored inside the file will also be included in this returned size.
    Returns:
    The byte size of all the bytes used to represent the tensor in the file.

    getLocation
    java.lang.String getLocation()
    Returns:
    The full path as well as name of the file which stores a tensor.

    getFileName
    java.lang.String getFileName()
    Returns:
    The name of the file which stores a tensor.

    getDataType
    DataType<?> getDataType()
    Returns:
    The data type of the tensor stored in the file which is managed by a FileHandle.

    getShape
    Shape getShape()
    Returns:
    The shape of the tensor stored in the file which is managed by a FileHandle.

    extension
    java.lang.String extension()
    The file ending which comes after the '.' character...
    Returns:
    The file ending which implies the encoding of the data in the file.
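    A helper that exercises only the accessor methods listed above, for example on a handle
    obtained from FileDevice.fileHandleOf(...).

        import java.io.IOException;
        import neureka.devices.file.FileHandle;

        public class FileHandleExample {
            // Prints the metadata a handle exposes without loading the tensor into memory.
            static void describe(FileHandle<?, ?> handle) {
                System.out.println("file      : " + handle.getLocation());
                System.out.println("extension : " + handle.extension());
                System.out.println("shape     : " + handle.getShape());
                System.out.println("data type : " + handle.getDataType());
                System.out.println("bytes     : " + handle.getTotalSize());
            }

            // Loads the tensor and then deletes the underlying file.
            static void loadAndDelete(FileHandle<?, ?> handle) throws IOException {
                System.out.println(handle.load());
                handle.free();
            }
        }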
    - + + + + diff --git a/docs/jdocs/neureka/devices/file/IDXHandle.html b/docs/jdocs/neureka/devices/file/IDXHandle.html index cab9712a2..bae26cff0 100644 --- a/docs/jdocs/neureka/devices/file/IDXHandle.html +++ b/docs/jdocs/neureka/devices/file/IDXHandle.html @@ -1,530 +1,731 @@ - + + - -IDXHandle (neureka 1.0.0 API) - - - - + +IDXHandle (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class IDXHandle

    +
    neureka.devices.file
    +

    Class IDXHandle

    -
    java.lang.Object -
    neureka.devices.file.IDXHandle
    -
    -
    -
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.devices.file.IDXHandle
      • +
      +
    • +
    +
    +
      +
    • +
      All Implemented Interfaces:
      -
      FileHandle<IDXHandle,Number>, Storage<Number>
      +
      FileHandle<IDXHandle,java.lang.Number>, Storage<java.lang.Number>

      -
      public final class IDXHandle -extends Object
      +
      +
      public final class IDXHandle
      +extends java.lang.Object
      This class is one of many extensions of the AbstractFileHandle which - is therefore ultimately an implementation of the FileHandle interface. - Like other FileHandle implementations this class represents a file + is therefore ultimately an implementation of the FileHandle interface. + Like other FileHandle implementations this class represents a file of a given type, in this case it represents a IDX file.
      -
    -
    -
      - -
    • -
      -

      Field Summary

      -
      Fields
      -
      -
      Modifier and Type
      -
      Field
      -
      Description
      -
      protected final String
      - -
       
      -
      protected static org.slf4j.Logger
      - -
       
      -
      protected int
      - -
       
      +
    • +
    -
    -

    Fields inherited from interface neureka.devices.file.FileHandle

    -FACTORY
    - +
    +
    +
    +
      +
    • -
    • -
      -

      Field Details

      -
        -
      • -
        -

        _LOG

        -
        protected static org.slf4j.Logger _LOG
        -
        +
          +
        • + + +

          Field Detail

          + + + +
            +
          • +

            _LOG

            +
            protected static org.slf4j.Logger _LOG
          • -
          • -
            -

            _fileName

            -
            protected final String _fileName
            -
            +
          + + + +
            +
          • +

            _fileName

            +
            protected final java.lang.String _fileName
          • -
          • -
            -

            _size

            -
            protected int _size
            -
            +
          + + + +
            +
          • +

            _size

            +
            protected int _size
          -
    • +
    -
  • -
    -

    Constructor Details

    -
      -
    • -
      -

      IDXHandle

      -
      public IDXHandle(String fileName)
      -
      +
        +
      • + + +

        Constructor Detail

        + + + +
          +
        • +

          IDXHandle

          +
          public IDXHandle(java.lang.String fileName)
        • -
        • -
          -

          IDXHandle

          -
          public IDXHandle(Tensor<Number> t, - String filename)
          -
          +
        + + + +
          +
        • +

          IDXHandle

          +
          public IDXHandle(Tensor<java.lang.Number> t,
          +                 java.lang.String filename)
        -
  • + -
  • -
    -

    Method Details

    -
      -
    • -
      -

      store

      -
      public <T extends Number> IDXHandle store(Tensor<T> tensor)
      -
      Description copied from interface: Storage
      +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          store

          +
          public <T extends java.lang.Number> IDXHandle store(Tensor<T> tensor)
          +
          Description copied from interface: Storage
Implementations of this method ought to store the data of the tensor in whatever format suits the underlying implementation and/or final type. Classes like "OpenCLDevice" or "FileDevice" for example are tensor storages.
          -
          -
          Type Parameters:
          +
          +
          Type Parameters:
          T - A valid data type of the tensor which should be stored on this device.
          -
          Parameters:
          +
          Parameters:
          tensor - The tensor whose data ought to be stored.
          -
          Returns:
          -
          This Storage instance, to allow for method chaining.
          +
          Returns:
          +
          This Storage instance, to allow for method chaining.
          -
    • -
    • -
      -

      _loadData

      -
      protected Object _loadData() - throws IOException
      -
      -
      Throws:
      -
      IOException
      +
    + + + +
      +
    • +

      _loadData

      +
      protected java.lang.Object _loadData()
      +                              throws java.io.IOException
      +
      +
      Throws:
      +
      java.io.IOException
      -
  • -
  • -
    -

    load

    -
    public Tensor<Number> load() - throws IOException
    -
    Description copied from interface: FileHandle
    + + + + +
      +
    • +

      load

      +
      public Tensor<java.lang.Number> load()
      +                              throws java.io.IOException
      +
      Description copied from interface: FileHandle
      An implementation of this method ought to create a new tensor instance containing the data which - is stored in the file whose access this FileHandle manages.
      -
      -
      Returns:
      + is stored in the file whose access this FileHandle manages.
  • +
    +
    Returns:
    A new tensor filled with the data from the targeted file.
    -
    Throws:
    -
    IOException - If loading goes wrong an exception is being thrown.
    +
    Throws:
    +
java.io.IOException - If loading goes wrong, an exception is thrown.
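Correspondingly, reading the tensor back might look like the sketch below; load() and the thrown java.io.IOException come from this page, while the path is again an assumption.

    import java.io.IOException;
    import neureka.Tensor;
    import neureka.devices.file.IDXHandle;

    final class IdxReadSketch {
        // Targets an existing IDX file and builds a new tensor from its contents.
        static Tensor<Number> read( String path ) throws IOException {
            IDXHandle handle = new IDXHandle( path ); // constructor documented above
            return handle.load();                     // may throw IOException on bad files
        }
    }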
    - -
  • -
    -

    getDataSize

    -
    public int getDataSize()
    -
    Description copied from interface: FileHandle
    + + + + +
      +
    • +

      getDataSize

      +
      public int getDataSize()
      +
      Description copied from interface: FileHandle
      This method returns the byte size of the data which is stored - in the tensor of the file which is managed by this FileHandle. + in the tensor of the file which is managed by this FileHandle. The underlying datatype of the data within the file does not matter.
      -
      -
      Returns:
      +
      +
      Returns:
      The byte size of the data of the underlying tensor body.
      -
  • -
  • -
    -

    getTotalSize

    -
    public int getTotalSize()
    -
    Description copied from interface: FileHandle
    + + + + +
      +
    • +

      getTotalSize

      +
      public int getTotalSize()
      +
      Description copied from interface: FileHandle
This method returns the number of bytes which are used to store the tensor in the file whose access is being managed by an implementation - of the FileHandle interface. + of the FileHandle interface. Metadata stored inside the file will also be included in this returned size.
      -
      -
      Returns:
      +
      +
      Returns:
      The byte size of all the bytes used to represent the tensor in the file.
      -
  • -
  • -
    -

    getDataType

    -
    public DataType<?> getDataType()
    -
    -
    Returns:
    -
    The data type of the tensor stored in the file which is managed by a FileHandle.
    + + + + +
      +
    • +

      getDataType

      +
      public DataType<?> getDataType()
      +
      +
      Returns:
      +
      The data type of the tensor stored in the file which is managed by a FileHandle.
      -
  • -
  • -
    -

    getValueSize

    -
    public int getValueSize()
    -
    Description copied from interface: FileHandle
    + + + + +
      +
    • +

      getValueSize

      +
      public int getValueSize()
      +
      Description copied from interface: FileHandle
This method returns the size of the value which is stored - in the tensor of the file which is managed by this FileHandle. + in the tensor of the file which is managed by this FileHandle. The size however does not represent the byte size of the data. This means that the returned size is dependent on the data type of the underlying data of the file...
      -
      -
      Returns:
      +
      +
      Returns:
      The size of the value of the underlying tensor body.
      -
  • -
  • -
    -

    getShape

    -
    public Shape getShape()
    -
    -
    Returns:
    -
    The shape of the tensor stored in the file which is managed by a FileHandle.
    + + + + +
      +
    • +

      getShape

      +
      public Shape getShape()
      +
      +
      Returns:
      +
      The shape of the tensor stored in the file which is managed by a FileHandle.
      -
  • -
  • -
    -

    numberOfStored

    -
    public int numberOfStored()
    -
    -
    Specified by:
    -
    numberOfStored in interface Storage<C>
    -
    Returns:
    + + + + +
      +
    • +

      numberOfStored

      +
      public int numberOfStored()
      +
      +
      Specified by:
      +
      numberOfStored in interface Storage<V>
      +
      Returns:
      The number of nd-array stored on this.
      -
  • -
  • -
    -

    isEmpty

    -
    public boolean isEmpty()
    -
    -
    Specified by:
    -
    isEmpty in interface Storage<C>
    -
    Returns:
    + + + + +
      +
    • +

      isEmpty

      +
      public boolean isEmpty()
      +
      +
      Specified by:
      +
      isEmpty in interface Storage<V>
      +
      Returns:
      The truth value determining if there are no tensors stored on this or false if there are tensors stored.
      -
  • -
  • -
    -

    contains

    -
    public boolean contains(Tensor<Number> o)
    -
    -
    Specified by:
    -
    contains in interface Storage<C>
    -
    Parameters:
    + + + + +
      +
    • +

      contains

      +
      public boolean contains(Tensor<V> o)
      +
      +
      Specified by:
      +
      contains in interface Storage<V>
      +
      Parameters:
      o - The tensor which may or may not be stored on this.
      -
      Returns:
      +
      Returns:
      The truth value determining if the provided tensor is stored on this.
      -
  • -
  • -
    -

    _loadFile

    -
    protected File _loadFile()
    -
    + + + + + + + + +
      +
    • +

      _loadFileInputStream

      +
      protected java.io.FileInputStream _loadFileInputStream()
      +                                                throws java.io.IOException
      +
      +
      Throws:
      +
      java.io.IOException
      -
    • -
    • -
      -

      free

      -
      public IDXHandle free()
      -
      Description copied from interface: FileHandle
      +
    + + + +
      +
    • +

      free

      +
      public C free()
      +
      Description copied from interface: FileHandle
      An implementation of this method ought to "free" up the memory used to store a tensor. Therefore, the method is expected to delete the underlying file - whose access this very FileHandle implementation manages. + whose access this very FileHandle implementation manages. The method also returns an instance of the final implementation of this class, meaning it adheres to the factory pattern.
      -
      -
      Specified by:
      -
      free in interface FileHandle<C,V>
      -
      Returns:
      +
      +
      Specified by:
      +
      free in interface FileHandle<C,V>
      +
      Returns:
      A reference of this very object in order to enable method chaining.
      -
    • -
    • -
      -

      getLocation

      -
      public String getLocation()
      -
      -
      Specified by:
      -
      getLocation in interface FileHandle<C,V>
      -
      Returns:
      +
    + + + +
      +
    • +

      getLocation

      +
      public java.lang.String getLocation()
      +
      +
      Specified by:
      +
      getLocation in interface FileHandle<C,V>
      +
      Returns:
      The full path as well as name of the file which stores a tensor.
      -
    • -
    • -
      -

      getFileName

      -
      public String getFileName()
      -
      -
      Specified by:
      -
      getFileName in interface FileHandle<C,V>
      -
      Returns:
      +
    + + + +
      +
    • +

      getFileName

      +
      public java.lang.String getFileName()
      +
      +
      Specified by:
      +
      getFileName in interface FileHandle<C,V>
      +
      Returns:
      The name of the file which stores a tensor.
      -
    • -
    • -
      -

      restore

      -
      public Storage<Number> restore(Tensor<Number> tensor)
      -
      -
      Specified by:
      -
      restore in interface Storage<C>
      -
      Parameters:
      +
    + + + +
      +
    • +

      restore

      +
      public Storage<V> restore(Tensor<V> tensor)
      +
      +
      Specified by:
      +
      restore in interface Storage<V>
      +
      Parameters:
      tensor - The tensor whose data ought to be restored (loaded to RAM/CPU device).
      -
      Returns:
      -
      This Storage instance, to allow for method chaining.
      +
      Returns:
      +
      This Storage instance, to allow for method chaining.
      -
    • -
    • -
      -

      extension

      -
      public final String extension()
      -
      Description copied from interface: FileHandle
      +
    + + + +
      +
    • +

      extension

      +
      public final java.lang.String extension()
      +
      Description copied from interface: FileHandle
      The file ending which comes after the '.' character...
      -
      -
      Specified by:
      -
      extension in interface FileHandle<C,V>
      -
      Returns:
      +
      +
      Specified by:
      +
      extension in interface FileHandle<C,V>
      +
      Returns:
      The file ending which implies the encoding of the data in the file.
      -
    -
  • - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/devices/file/package-frame.html b/docs/jdocs/neureka/devices/file/package-frame.html new file mode 100644 index 000000000..0e484ba7f --- /dev/null +++ b/docs/jdocs/neureka/devices/file/package-frame.html @@ -0,0 +1,25 @@ + + + + + +neureka.devices.file (neureka 1.0.1 API) + + + + +

    neureka.devices.file

    +
    +

    Interfaces

    + +

    Classes

    + +
    + + diff --git a/docs/jdocs/neureka/devices/file/package-summary.html b/docs/jdocs/neureka/devices/file/package-summary.html index 0275cdf80..67a14a44d 100644 --- a/docs/jdocs/neureka/devices/file/package-summary.html +++ b/docs/jdocs/neureka/devices/file/package-summary.html @@ -1,118 +1,171 @@ - + + - -neureka.devices.file (neureka 1.0.0 API) - - - - + +neureka.devices.file (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    -

    Package neureka.devices.file

    -
    -
    -
    package neureka.devices.file
    -
    -
      -
    • - +
      +
        +
      • + + + + + + + + + + + + +
        Interface Summary 
        InterfaceDescription
        FileHandle<FinalType,ValType> 
      • -
      • -
        -
        -
        -
        -
        Class
        -
        Description
        - -
        +
      • + + + + + + + + + + + + + + + + + + + + +
        Class Summary 
        ClassDescription
        CSVHandle
        This class is one of many extensions of the AbstractFileHandle which - is therefore ultimately an implementation of the FileHandle interface.
        - - -
        -
        The FileDevice is a Device implementation + is therefore ultimately an implementation of the FileHandle interface.
        +
        FileDevice +
        The FileDevice is a Device implementation responsible for reading tensors from and or writing them to a given directory.
        - -
        FileHandle<FinalType,ValType>
        -
         
        - -
        +
        IDXHandle
        This class is one of many extensions of the AbstractFileHandle which - is therefore ultimately an implementation of the FileHandle interface.
        - - - - + is therefore ultimately an implementation of the FileHandle interface. +
      -
    -
    + + + + diff --git a/docs/jdocs/neureka/devices/file/package-tree.html b/docs/jdocs/neureka/devices/file/package-tree.html index c37fdcf17..5dde2e17c 100644 --- a/docs/jdocs/neureka/devices/file/package-tree.html +++ b/docs/jdocs/neureka/devices/file/package-tree.html @@ -1,87 +1,148 @@ - + + - -neureka.devices.file Class Hierarchy (neureka 1.0.0 API) - - - - + +neureka.devices.file Class Hierarchy (neureka 1.0.1 API) - - - - - - + + -
    - -
    -

    Hierarchy For Package neureka.devices.file

    -Package Hierarchies: +Package Hierarchies:
    -
    +

    Class Hierarchy

    -
    -

    Interface Hierarchy

    -
    -
    + + + + diff --git a/docs/jdocs/neureka/devices/host/CPU.IndexedWorkload.html b/docs/jdocs/neureka/devices/host/CPU.IndexedWorkload.html index 3278b980e..fa48bd55c 100644 --- a/docs/jdocs/neureka/devices/host/CPU.IndexedWorkload.html +++ b/docs/jdocs/neureka/devices/host/CPU.IndexedWorkload.html @@ -1,133 +1,227 @@ - + + - -CPU.IndexedWorkload (neureka 1.0.0 API) - - - - + +CPU.IndexedWorkload (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Interface CPU.IndexedWorkload

    +
    neureka.devices.host
    +

    Interface CPU.IndexedWorkload

    -
    -
    +
    +
    +
      +
    • +
      Enclosing class:
      -
      CPU
      +
      CPU
      -
      +
      Functional Interface:
      This is a functional interface and can therefore be used as the assignment target for a lambda expression or method reference.

      -
      @FunctionalInterface -public static interface CPU.IndexedWorkload
      -
    -
    -
      - -
    • -
      -

      Method Summary

      -
      -
      -
      -
      -
      Modifier and Type
      -
      Method
      -
      Description
      -
      void
      -
      execute(int i)
      -
       
      -
      -
      +
      +
      @FunctionalInterface
      +public static interface CPU.IndexedWorkload
      +
    • +
    - +
    + - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Method Details

      -
        -
      • -
        -

        execute

        -
        void execute(int i)
        -
        +
          +
        • + + +

          Method Detail

          + + + +
            +
          • +

            execute

            +
            void execute(int i)
            +
          • +
        -
    - +
    +
    - + + + + diff --git a/docs/jdocs/neureka/devices/host/CPU.JVMExecutor.html b/docs/jdocs/neureka/devices/host/CPU.JVMExecutor.html index f8ea047ed..404328363 100644 --- a/docs/jdocs/neureka/devices/host/CPU.JVMExecutor.html +++ b/docs/jdocs/neureka/devices/host/CPU.JVMExecutor.html @@ -1,299 +1,433 @@ - + + - -CPU.JVMExecutor (neureka 1.0.0 API) - - - - + +CPU.JVMExecutor (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class CPU.JVMExecutor

    -
    -
    java.lang.Object -
    neureka.devices.host.CPU.JVMExecutor
    +
    neureka.devices.host
    +

    Class CPU.JVMExecutor

    -
    -
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.devices.host.CPU.JVMExecutor
      • +
      +
    • +
    +
    +
      +
    • +
      Enclosing class:
      -
      CPU
      +
      CPU

      -
      public static class CPU.JVMExecutor -extends Object
      -
      The CPU.JVMExecutor offers a similar functionality as the parallel stream API, - however it differs in that the CPU.JVMExecutor is processing CPU.RangeWorkload lambdas +
      +
      public static class CPU.JVMExecutor
      +extends java.lang.Object
      +
      The CPU.JVMExecutor offers a similar functionality as the parallel stream API, + however it differs in that the CPU.JVMExecutor is processing CPU.RangeWorkload lambdas instead of simply exposing a single index or concrete elements for a given workload size. - This means that a CPU.RangeWorkload lambda will be called with the work range of a single worker thread + This means that a CPU.RangeWorkload lambda will be called with the work range of a single worker thread processing its current workload. This range is dependent on the number of available threads as well as the size of the workload. If the workload is very small, then the current main thread will process the entire workload range - whereas the underlying ThreadPoolExecutor will not be used to avoid unnecessary overhead.
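As a rough sketch of the described behaviour (assuming the executor is obtained through CPU.get().getExecutor() as documented further down), a range workload could be dispatched like this; the array-filling body is purely illustrative:

    import neureka.devices.host.CPU;

    final class RangeWorkloadSketch {
        // Fills 'data' in parallel; each worker thread receives its own
        // [start, end) slice, or the main thread does all of it when the
        // workload is too small to justify the thread pool.
        static void fill( double[] data ) {
            CPU.get().getExecutor().threaded( data.length, ( start, end ) -> {
                for ( int i = start; i < end; i++ ) data[ i ] = i * 0.5;
            });
        }
    }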
      -
    -
    -
      - -
    • -
      -

      Constructor Summary

      -
      Constructors
      -
      -
      Constructor
      -
      Description
      - -
       
      + whereas the underlying ThreadPoolExecutor will not be used to avoid unnecessary overhead.
      +
    • +
    - +
    +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        JVMExecutor

        -
        public JVMExecutor()
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            JVMExecutor

            +
            public JVMExecutor()
          -
    • +
    -
  • -
    -

    Method Details

    -
      -
    • -
      -

      getActiveThreadCount

      -
      public int getActiveThreadCount()
      +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          getActiveThreadCount

          +
          public int getActiveThreadCount()
          Returns the approximate number of threads that are actively executing tasks.
          -
          -
          Returns:
          +
          +
          Returns:
          the number of threads
          -
    • -
    • -
      -

      getCorePoolSize

      -
      public int getCorePoolSize()
      +
    + + + +
      +
    • +

      getCorePoolSize

      +
      public int getCorePoolSize()
      Returns the core number of threads.
      -
      -
      Returns:
      +
      +
      Returns:
      the core number of threads
      -
  • -
  • -
    -

    getCompletedTaskCount

    -
    public long getCompletedTaskCount()
    + + + + +
      +
    • +

      getCompletedTaskCount

      +
      public long getCompletedTaskCount()
      Returns the approximate total number of tasks that have completed execution. Because the states of tasks and threads may change dynamically during computation, the returned value is only an approximation, but one that does not ever decrease across successive calls.
      -
      -
      Returns:
      +
      +
      Returns:
      the number of tasks
      -
  • -
  • -
    -

    threaded

    -
    public void threaded(int workloadSize, - CPU.RangeWorkload workload)
    + + + + +
      +
    • +

      threaded

      +
      public void threaded(int workloadSize,
      +                     CPU.RangeWorkload workload)
      This method slices the provided workload size into multiple ranges which can be executed in parallel.
      -
      -
      Parameters:
      +
      +
      Parameters:
      workloadSize - The total workload size which ought to be split into multiple ranges.
      workload - The range lambda which ought to be executed across multiple threads.
      -
  • -
  • -
    -

    threaded

    -
    public void threaded(int numberOfWorkloads, - CPU.IndexedWorkload workload)
    + + + + +
      +
    • +

      threaded

      +
      public void threaded(int numberOfWorkloads,
      +                     CPU.IndexedWorkload workload)
      Executes the provided workload lambda across multiple threads where the provided worker lambda will receive the index/id of the current worker.
      -
      -
      Parameters:
      +
      +
      Parameters:
      numberOfWorkloads - The total number of workloads to be executed.
      workload - The workload lambda to be executed.
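A small sketch of this indexed variant, assuming each invocation of the lambda receives the worker index as described above; the body is illustrative only:

    import neureka.devices.host.CPU;

    final class IndexedWorkloadSketch {
        static void run( int numberOfWorkers ) {
            // One call per workload; 'i' identifies the current worker.
            CPU.get().getExecutor().threaded( numberOfWorkers, i ->
                System.out.println( "worker " + i + " processing its share" )
            );
        }
    }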
      -
  • -
  • -
    -

    sequential

    -
    public void sequential(int workloadSize, - CPU.RangeWorkload workload)
    -
    This method will simply execute the provided CPU.RangeWorkload lambda sequentially + + + + +
      +
    • +

      sequential

      +
      public void sequential(int workloadSize,
      +                       CPU.RangeWorkload workload)
      +
      This method will simply execute the provided CPU.RangeWorkload lambda sequentially with 0 as the start index and workloadSize as the exclusive range.

      -
      -
      Parameters:
      -
      workloadSize - The workload size which will be passed to the provided CPU.RangeWorkload as second argument.
      -
      workload - The CPU.RangeWorkload which will be executed sequentially.
      +
      +
      Parameters:
      +
      workloadSize - The workload size which will be passed to the provided CPU.RangeWorkload as second argument.
      +
      workload - The CPU.RangeWorkload which will be executed sequentially.
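For comparison, the sequential variant runs the same kind of lambda once on the calling thread, from 0 (inclusive) to the workload size (exclusive); the summing body below is an illustrative assumption:

    import neureka.devices.host.CPU;

    final class SequentialSketch {
        static double sum( double[] data ) {
            double[] acc = { 0 };
            CPU.get().getExecutor().sequential( data.length, ( start, end ) -> {
                for ( int i = start; i < end; i++ ) acc[ 0 ] += data[ i ];
            });
            return acc[ 0 ]; // the lambda was called exactly once with (0, data.length)
        }
    }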
      -
  • -
  • -
    -

    threaded

    -
    public void threaded(int first, - int limit, - CPU.RangeWorkload rangeWorkload)
    + + + + +
      +
    • +

      threaded

      +
      public void threaded(int first,
      +                     int limit,
      +                     CPU.RangeWorkload rangeWorkload)
      Takes the provided range and divides it into multithreaded workloads.
      -
      -
      Parameters:
      +
      +
      Parameters:
      first - The start index of the threaded workload range.
      limit - The limit for the workload range, which is exclusive.
      rangeWorkload - A workload lambda which will be called by different threads with different sub-ranges.
      -
  • - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/devices/host/CPU.RangeWorkload.html b/docs/jdocs/neureka/devices/host/CPU.RangeWorkload.html index 38739629d..db0cf0ce0 100644 --- a/docs/jdocs/neureka/devices/host/CPU.RangeWorkload.html +++ b/docs/jdocs/neureka/devices/host/CPU.RangeWorkload.html @@ -1,138 +1,232 @@ - + + - -CPU.RangeWorkload (neureka 1.0.0 API) - - - - + +CPU.RangeWorkload (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Interface CPU.RangeWorkload

    +
    neureka.devices.host
    +

    Interface CPU.RangeWorkload

    -
    -
    +
    +
    +
      +
    • +
      Enclosing class:
      -
      CPU
      +
      CPU
      -
      +
      Functional Interface:
      This is a functional interface and can therefore be used as the assignment target for a lambda expression or method reference.

      -
      @FunctionalInterface -public static interface CPU.RangeWorkload
      +
      +
      @FunctionalInterface
      +public static interface CPU.RangeWorkload
      A simple functional interface for executing a range whose implementations will either be executed sequentially or they are being dispatched to a thread-pool, given that the provided workload is large enough.
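Since the interface only declares execute(int start, int end), a workload can be kept as a reusable lambda and handed to either sequential or threaded execution; a minimal sketch:

    import neureka.devices.host.CPU;

    final class RangeWorkloadShape {
        // 'start' is the first index of a worker's slice, 'end' is exclusive.
        static final CPU.RangeWorkload NO_OP = ( start, end ) -> { /* illustrative no-op */ };
    }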
      -
    -
    -
      - -
    • -
      -

      Method Summary

      -
      -
      -
      -
      -
      Modifier and Type
      -
      Method
      -
      Description
      -
      void
      -
      execute(int start, - int end)
      -
       
      -
      -
      +
    • +
    - +
    + - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Method Details

      -
        -
      • -
        -

        execute

        -
        void execute(int start, - int end)
        -
        +
          +
        • + + +

          Method Detail

          + + + +
            +
          • +

            execute

            +
            void execute(int start,
            +             int end)
            +
          • +
        -
    - +
    +
    - + + + + diff --git a/docs/jdocs/neureka/devices/host/CPU.html b/docs/jdocs/neureka/devices/host/CPU.html index 3c2d9c1b2..6f767035a 100644 --- a/docs/jdocs/neureka/devices/host/CPU.html +++ b/docs/jdocs/neureka/devices/host/CPU.html @@ -1,702 +1,950 @@ - + + - -CPU (neureka 1.0.0 API) - - - - + +CPU (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class CPU

    -
    -
    java.lang.Object -
    neureka.devices.AbstractBaseDevice<V> -
    neureka.devices.AbstractDevice<Object> -
    neureka.devices.host.CPU
    -
    +
    neureka.devices.host
    +

    Class CPU

    -
    -
    -
    +
    + +
    +
      +
    • +
      All Implemented Interfaces:
      -
      Component<Tensor<Object>>, Device<Object>, Storage<Object>
      +
      Component<Tensor<java.lang.Object>>, Device<java.lang.Object>, Storage<java.lang.Object>

      -
      public class CPU -extends AbstractDevice<Object>
      -
      The CPU class, one of many implementations of the Device interface, +
      +
      public class CPU
      +extends AbstractDevice<java.lang.Object>
      +
      The CPU class, one of many implementations of the Device interface, is simply supposed to be an API for dispatching threaded workloads onto the CPU as well as reading from or writing to tensors it stores. Contrary to other types of devices, the CPU will represent a tensors' data by default, simply because the tensors will be stored in RAM (JVM heap) by default if no device was specified. - This means that they are implicitly "stored" on the CPU device. - The class is also a singleton instead of being part of a BackendExtension.
      -
    -
    -
      + This means that they are implicitly "stored" on the CPU device. + The class is also a singleton instead of being part of a BackendExtension.
    + + +
    +
    +
    - - -
     
    -
    boolean
    - -
    -
    This method is part of the component system built into the Tensor class.
    -
    -
    -
    -
    -
    -

    Methods inherited from class neureka.devices.AbstractDevice

    -_cleaning, access, approve
    -
    -

    Methods inherited from class neureka.devices.AbstractBaseDevice

    -contains, has, isEmpty, numberOfDataObjects, numberOfStored
    -
    -

    Methods inherited from class java.lang.Object

    -clone, equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait
    -
    -

    Methods inherited from interface neureka.devices.Device

    -allocate, borrow, optimizedFunctionOf
    - - - - -
    -
      +
      +
        +
      • -
      • -
        -

        Field Details

        -
          -
        • -
          -

          PARALLELIZATION_THRESHOLD

          -
          public static final int PARALLELIZATION_THRESHOLD
          -
          -
          See Also:
          -
          - -
          +
            +
          • + + +

            Field Detail

            + + + +
              +
            • +

              PARALLELIZATION_THRESHOLD

              +
              public static final int PARALLELIZATION_THRESHOLD
              +
              +
              See Also:
              +
              Constant Field Values
              -
        • -
        • -
          -

          THREAD_PREFIX

          -
          public static final String THREAD_PREFIX
          -
          -
          See Also:
          -
          - -
          +
        + + + +
          +
        • +

          THREAD_PREFIX

          +
          public static final java.lang.String THREAD_PREFIX
          +
          +
          See Also:
          +
          Constant Field Values
          -
      -
    + -
  • -
    -

    Method Details

    -
      -
    • -
      -

      get

      -
      public static CPU get()
      -
      Use this method to access the singleton instance of this CPU class, - which is a Device type and default location for freshly instantiated Tensor instances. - Tensor instances located on the CPU device will reside in regular RAM +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          get

          +
          public static CPU get()
          +
          Use this method to access the singleton instance of this CPU class, + which is a Device type and default location for freshly instantiated Tensor instances. + Tensor instances located on the CPU device will reside in regular RAM causing operations to run on the JVM and thereby the CPU.
          -
          -
          Returns:
          -
          The singleton instance of this CPU class.
          +
          +
          Returns:
          +
          The singleton instance of this CPU class.
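A tiny sketch of accessing the singleton and one of its documented getters; nothing beyond the signatures on this page is assumed:

    import neureka.devices.host.CPU;

    final class CpuSketch {
        static void report() {
            CPU cpu = CPU.get();                       // always the same instance
            System.out.println( cpu.getCoreCount() );  // >= 1, may change over time
        }
    }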
          -
    • -
    • -
      -

      getExecutor

      -
      public CPU.JVMExecutor getExecutor()
      -
      The CPU.JVMExecutor offers a similar functionality as the parallel stream API, - however it differs in that the CPU.JVMExecutor is processing CPU.RangeWorkload lambdas +
    + + + +
      +
    • +

      getExecutor

      +
      public CPU.JVMExecutor getExecutor()
      +
      The CPU.JVMExecutor offers a similar functionality as the parallel stream API, + however it differs in that the CPU.JVMExecutor is processing CPU.RangeWorkload lambdas instead of simply exposing a single index or concrete elements for a given workload size.
      -
      -
      Returns:
      +
      +
      Returns:
      A parallel range based execution API running on the JVM.
      -
  • -
  • -
    -

    _approveExecutionOf

    -
    protected boolean _approveExecutionOf(Tensor<?>[] tensors, - int d, - Operation operation)
    -
    Description copied from class: AbstractDevice
    + + + + +
      +
    • +

      _approveExecutionOf

      +
      protected boolean _approveExecutionOf(Tensor<?>[] tensors,
      +                                      int d,
      +                                      Operation operation)
      +
      Description copied from class: AbstractDevice
This method is the internal approval routine called by its public counterpart and implemented by classes extending this very abstract class. - It may or may not be called by an Algorithm - in order to allow a Device to check if the provided arguments are suitable for execution.
      -
      -
      Specified by:
      -
      _approveExecutionOf in class AbstractDevice<Object>
      -
      Parameters:
      + It may or may not be called by an Algorithm + in order to allow a Device to checked if the provided arguments are suitable for execution.
  • +
    +
    Specified by:
    +
    _approveExecutionOf in class AbstractDevice<java.lang.Object>
    +
    Parameters:
    tensors - An array of input tensors.
    d - The index of the input which ought to be derived.
    operation - The type of operation.
    -
    Returns:
    +
    Returns:
    The truth value determining if the provided arguments can be executed.
    - -
  • -
    -

    dispose

    -
    public void dispose()
    + + + + +
      +
    • +

      dispose

      +
      public void dispose()
      This method will shut down the internal thread-pool used by this class to execute JVM/CPU based operations in parallel.
      -
  • -
  • -
    -

    restore

    -
    public CPU restore(Tensor<Object> tensor)
    -
    -
    Parameters:
    + + + + +
      +
    • +

      restore

      +
      public CPU restore(Tensor<java.lang.Object> tensor)
      +
      +
      Parameters:
      tensor - The tensor whose data ought to be restored (loaded to RAM/CPU device).
      -
      Returns:
      -
      This Storage instance, to allow for method chaining.
      +
      Returns:
      +
      This Storage instance, to allow for method chaining.
      -
  • -
  • -
    -

    store

    -
    public <T> CPU store(Tensor<T> tensor)
    -
    Description copied from class: AbstractDevice
    + + + + +
      +
    • +

      store

      +
      public <T> CPU store(Tensor<T> tensor)
      +
      Description copied from class: AbstractDevice
Implementations of this method ought to store the data of the tensor in whatever format suits the underlying implementation and/or final type. Classes like "OpenCLDevice" or "FileDevice" for example are tensor storages.
      -
      -
      Specified by:
      -
      store in interface Storage<Object>
      -
      Overrides:
      -
      store in class AbstractDevice<Object>
      -
      Type Parameters:
      +
      +
      Specified by:
      +
      store in interface Storage<java.lang.Object>
      +
      Overrides:
      +
      store in class AbstractDevice<java.lang.Object>
      +
      Type Parameters:
      T - A valid data type of the tensor which should be stored on this device.
      -
      Parameters:
      +
      Parameters:
      tensor - The tensor whose data ought to be stored.
      -
      Returns:
      -
      This Storage instance, to allow for method chaining.
      +
      Returns:
      +
      This Storage instance, to allow for method chaining.
      -
  • -
  • -
    -

    _sizeOccupiedBy

    -
    protected final <T> int _sizeOccupiedBy(Tensor<T> tensor)
    -
    -
    Specified by:
    -
    _sizeOccupiedBy in class AbstractDevice<Object>
    + + + + +
  • -
  • -
    -

    _readAll

    -
    protected final <T> Object _readAll(Tensor<T> tensor, - boolean clone)
    -
    -
    Specified by:
    -
    _readAll in class AbstractDevice<Object>
    + + + + +
      +
    • +

      _readAll

      +
      protected final <T> java.lang.Object _readAll(Tensor<T> tensor,
      +                                              boolean clone)
      +
      +
      Specified by:
      +
      _readAll in class AbstractDevice<java.lang.Object>
      -
  • -
  • -
    -

    _readItem

    -
    protected final <T> T _readItem(Tensor<T> tensor, - int index)
    -
    -
    Specified by:
    -
    _readItem in class AbstractDevice<Object>
    + + + + +
      +
    • +

      _readItem

      +
      protected final <T> T _readItem(Tensor<T> tensor,
      +                                int index)
      +
      +
      Specified by:
      +
      _readItem in class AbstractDevice<java.lang.Object>
      -
  • -
  • -
    -

    _readArray

    -
    protected final <T, -A> A _readArray(Tensor<T> tensor, - Class<A> arrayType, - int start, - int size)
    -
    -
    Specified by:
    -
    _readArray in class AbstractDevice<Object>
    + + + + +
      +
    • +

      _readArray

      +
      protected final <T,A> A _readArray(Tensor<T> tensor,
      +                                   java.lang.Class<A> arrayType,
      +                                   int start,
      +                                   int size)
      +
      +
      Specified by:
      +
      _readArray in class AbstractDevice<java.lang.Object>
      -
  • -
  • -
    -

    _writeItem

    -
    protected final <T> void _writeItem(Tensor<T> tensor, - T item, - int start, - int size)
    -
    -
    Specified by:
    -
    _writeItem in class AbstractDevice<Object>
    + + + + + + +
      +
    • +

      _writeItem

      +
      protected final <T> void _writeItem(Tensor<T> tensor,
      +                                    T item,
      +                                    int start,
      +                                    int size)
      +
      +
      Specified by:
      +
      _writeItem in class AbstractDevice<java.lang.Object>
      -
  • -
  • -
    -

    _writeArray

    -
    protected final <T> void _writeArray(Tensor<T> tensor, - Object array, - int offset, - int start, - int size)
    -
    -
    Specified by:
    -
    _writeArray in class AbstractDevice<Object>
    + + + + +
      +
    • +

      _writeArray

      +
      protected final <T> void _writeArray(Tensor<T> tensor,
      +                                     java.lang.Object array,
      +                                     int offset,
      +                                     int start,
      +                                     int size)
      +
      +
      Specified by:
      +
      _writeArray in class AbstractDevice<java.lang.Object>
      -
  • -
  • -
    -

    allocateFromOne

    -
    public <T> Data<T> allocateFromOne(DataType<T> dataType, - NDConfiguration ndc, - T initialValue)
    -
    + + + + + + + + + + +
      +
    • +

      allocateFromAll

      +
      public <T> Data<T> allocateFromAll(DataType<T> dataType,
      +                                   NDConfiguration ndc,
      +                                   java.lang.Object jvmData)
    • -
    • -
      -

      allocate

      -
      public final <T> Data<T> allocate(Class<T> type, - Object data)
      -
      +
    + + + +
      +
    • +

      allocate

      +
      public final <T> Data<T> allocate(java.lang.Class<T> type,
      +                                  java.lang.Object data)
    • -
    • -
      -

      allocate

      -
      public final <T> Data<T> allocate(Class<T> type, - int size, - Object source)
      -
      +
    + + + +
      +
    • +

      allocate

      +
      public final <T> Data<T> allocate(java.lang.Class<T> type,
      +                                  int size,
      +                                  java.lang.Object source)
    • -
    • -
      -

      _actualize

      -
      protected final Data<Object> _actualize(Tensor<?> tensor)
      -
      -
      Specified by:
      -
      _actualize in class AbstractDevice<Object>
      +
    + + + + + + + + + + + +
      +
    • +

      _dataTypeOf

      +
      protected final DataType<?> _dataTypeOf(java.lang.Object rawData)
      +
      +
      Specified by:
      +
      _dataTypeOf in class AbstractDevice<java.lang.Object>
      -
    • -
    • -
      -

      free

      -
      public <T> CPU free(Tensor<T> tensor)
      -
      Description copied from interface: Device
      -
      Use this to remove the provided tensor from this Device!

      -
      -
      Type Parameters:
      -
      T - The type parameter for the value type of the tensor, which must be supported by this Device.
      -
      Parameters:
      -
      tensor - The tensor which ought to be removed from this Device.
      -
      Returns:
      +
    + + + +
      +
    • +

      free

      +
      public <T> CPU free(Tensor<T> tensor)
      +
      Description copied from interface: Device
      +
      Use this to remove the provided tensor from this Device!

      +
      +
      Type Parameters:
      +
      T - The type parameter for the value type of the tensor, which must be supported by this Device.
      +
      Parameters:
      +
      tensor - The tensor which ought to be removed from this Device.
      +
      Returns:
      This very instance to allow for method chaining.
      -
    • -
    • -
      -

      _swap

      -
      protected <T> void _swap(Tensor<T> former, - Tensor<T> replacement)
      -
      Description copied from class: AbstractDevice
      +
    + + + +
      +
    • +

      _swap

      +
      protected <T> void _swap(Tensor<T> former,
      +                         Tensor<T> replacement)
      +
      Description copied from class: AbstractDevice
      This method is used internally mostly and should not be used in most cases.

      -
      -
      Specified by:
      -
      _swap in class AbstractDevice<Object>
      -
      Type Parameters:
      -
      T - The type parameter for the value type of the tensors, which must be supported by this Device.
      -
      Parameters:
      +
      +
      Specified by:
      +
      _swap in class AbstractDevice<java.lang.Object>
      +
      Type Parameters:
      +
      T - The type parameter for the value type of the tensors, which must be supported by this Device.
      +
      Parameters:
      former - The tensor whose associated data (on the device) ought to be assigned to the other tensor.
      replacement - The tensor which ought to receive the data of the former tensor internally.
      -
    • -
    • -
      -

      allocate

      -
      public <T> Data<T> allocate(DataType<T> dataType, - NDConfiguration ndc)
      -
      +
    + + + + + + + +
      +
    • +

      optimizedOperationOf

      +
      public Operation optimizedOperationOf(Function function,
      +                                      java.lang.String name)
      +
      Description copied from interface: Device
      +
      This method tries to allow this device to produce an optimized Operation based on the provided function. This is especially useful in an OpenCL context which can compile the function into native GPU kernels at runtime.
      -
      -
      Parameters:
      +
      +
      Parameters:
      function - The function which should be turned into an optimized operation.
      name - The name of the returned operation.
      -
      Returns:
      +
      Returns:
      An optimized operation based on the provided function, or null if optimization is not possible.
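A hedged sketch of how a caller might use this, guarding against the documented null result; the import locations of Function and Operation are inferred from this page's package layout and may differ:

    import neureka.backend.api.Operation;
    import neureka.devices.host.CPU;
    import neureka.math.Function;

    final class OptimizedOpSketch {
        static Operation tryOptimize( Function fn, String name ) {
            Operation op = CPU.get().optimizedOperationOf( fn, name );
            return op; // may be null when no optimization is possible
        }
    }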
      -
    • -
    • -
      -

      update

      -
      public boolean update(Component.OwnerChangeRequest<Tensor<Object>> changeRequest)
      -
      This method is part of the component system built into the Tensor class. +
    + + + +
      +
    • +

      update

      +
      public boolean update(Component.OwnerChangeRequest<Tensor<java.lang.Object>> changeRequest)
      +
      This method is part of the component system built into the Tensor class. Do not use this as part of anything but said component system.
      -
      -
      Specified by:
      -
      update in interface Component<Tensor<Object>>
      -
      Overrides:
      -
      update in class AbstractDevice<Object>
      -
      Parameters:
      +
      +
      Specified by:
      +
      update in interface Component<Tensor<java.lang.Object>>
      +
      Overrides:
      +
      update in class AbstractDevice<java.lang.Object>
      +
      Parameters:
      changeRequest - An API which describes the type of update and a method for executing said update.
      -
      Returns:
      -
      The truth value determining if this Device ought to be added to a tensor (Here always false!).
      +
      Returns:
      +
      The truth value determining if this Device ought to be added to a tensor (Here always false!).
      -
    • -
    • -
      -

      getCoreCount

      -
      public int getCoreCount()
      +
    + + + +
      +
    • +

      getCoreCount

      +
      public int getCoreCount()
      Returns the number of CPU cores available to the Java virtual machine. This value may change during a particular invocation of the virtual machine. Applications that are sensitive to the number of available processors should therefore occasionally poll this property and adjust their resource usage appropriately.
      -
      -
      Returns:
      +
      +
      Returns:
      The maximum number of CPU cores available to the JVM. This number is never smaller than one!
      -
    • -
    • -
      -

      toString

      -
      public String toString()
      -
      -
      Overrides:
      -
      toString in class Object
      +
    + + + +
      +
    • +

      toString

      +
      public java.lang.String toString()
      +
      +
      Overrides:
      +
      toString in class java.lang.Object
      -
    -
  • - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/devices/host/concurrent/Parallelism.html b/docs/jdocs/neureka/devices/host/concurrent/Parallelism.html index 31e396c6e..a67a037da 100644 --- a/docs/jdocs/neureka/devices/host/concurrent/Parallelism.html +++ b/docs/jdocs/neureka/devices/host/concurrent/Parallelism.html @@ -1,295 +1,440 @@ - + + - -Parallelism (neureka 1.0.0 API) - - - - + +Parallelism (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Enum Class Parallelism

    -
    -
    java.lang.Object -
    java.lang.Enum<Parallelism> -
    neureka.devices.host.concurrent.Parallelism
    -
    +
    neureka.devices.host.concurrent
    +

    Enum Parallelism

    -
    -
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • java.lang.Enum<Parallelism>
      • +
      • +
          +
        • neureka.devices.host.concurrent.Parallelism
        • +
        +
      • +
      +
    • +
    +
    +
    -
    -
    + +
    +
    +
      +
    • -
    • -
      -

      Enum Constant Summary

      -
      Enum Constants
      -
      -
      Enum Constant
      -
      Description
      - -
      +
        +
      • + + +

        Enum Constant Summary

        + + + + + + + + + + + + + + + + + + + + + + + + + + +
        Enum Constants 
        Enum Constant and Description
        CORES
        The number of CPU cores
        - - -
        +
        EIGHT
        8
        - - -
        +
        FOUR
        4
        - - -
        +
        ONE
        1
        - - -
        +
        THREADS
        The total number of threads (incl.
        - - -
        +
        TWO
        2
        - - -
        +
        UNITS
        The number of top level (L2 or L3) cache units
        - - - +
      • +
      -
    • -
      -

      Method Summary

      -
      -
      -
      -
      -
      Modifier and Type
      -
      Method
      -
      Description
      -
      int
      - -
       
      - - -
      -
      Returns the enum constant of this class with the specified name.
      -
      -
      static Parallelism[]
      - -
      -
      Returns an array containing the constants of this enum class, in + -
      -
      -
        + +
      +
    • +
      +
        +
      • -
      • -
        -

        Enum Constant Details

        -
          -
        • -
          -

          THREADS

          -
          public static final Parallelism THREADS
          +
            +
          • + + +

            Enum Constant Detail

            + + + +
              +
            • +

              THREADS

              +
              public static final Parallelism THREADS
              The total number of threads (incl. hyper-threads)
              -
        • -
        • -
          -

          CORES

          -
          public static final Parallelism CORES
          +
        + + + +
          +
        • +

          CORES

          +
          public static final Parallelism CORES
          The number of CPU cores
          -
      • -
      • -
        -

        UNITS

        -
        public static final Parallelism UNITS
        +
      + + + +
        +
      • +

        UNITS

        +
        public static final Parallelism UNITS
        The number of top level (L2 or L3) cache units
        -
    • -
    • -
      -

      EIGHT

      -
      public static final Parallelism EIGHT
      +
    + + + + + + + + + + + + + + + + - + -
  • -
    -

    Method Details

    -
      -
    • -
      -

      values

      -
      public static Parallelism[] values()
      -
      Returns an array containing the constants of this enum class, in -the order they are declared.
      -
      -
      Returns:
      -
      an array containing the constants of this enum class, in the order they are declared
      +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          values

          +
          public static Parallelism[] values()
          +
          Returns an array containing the constants of this enum type, in +the order they are declared. This method may be used to iterate +over the constants as follows: +
          +for (Parallelism c : Parallelism.values())
          +    System.out.println(c);
          +
          +
          +
          Returns:
          +
          an array containing the constants of this enum type, in the order they are declared
          -
    • -
    • -
      -

      valueOf

      -
      public static Parallelism valueOf(String name)
      -
      Returns the enum constant of this class with the specified name. +
    + + + +
      +
    • +

      valueOf

      +
      public static Parallelism valueOf(java.lang.String name)
      +
      Returns the enum constant of this type with the specified name. The string must match exactly an identifier used to declare an -enum constant in this class. (Extraneous whitespace characters are +enum constant in this type. (Extraneous whitespace characters are not permitted.)
      -
      -
      Parameters:
      +
      +
      Parameters:
      name - the name of the enum constant to be returned.
      -
      Returns:
      +
      Returns:
      the enum constant with the specified name
      -
      Throws:
      -
      IllegalArgumentException - if this enum class has no constant with the specified name
      -
      NullPointerException - if the argument is null
      +
      Throws:
      +
      java.lang.IllegalArgumentException - if this enum type has no constant with the specified name
      +
      java.lang.NullPointerException - if the argument is null
      -
  • -
  • -
    -

    getAsInt

    -
    public int getAsInt()
    -
    -
    Specified by:
    -
    getAsInt in interface IntSupplier
    + + + + +
      +
    • +

      getAsInt

      +
      public int getAsInt()
      +
      +
      Specified by:
      +
      getAsInt in interface java.util.function.IntSupplier
      -
  • - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/devices/host/concurrent/WorkScheduler.Divider.html b/docs/jdocs/neureka/devices/host/concurrent/WorkScheduler.Divider.html index dd224dc85..ab7037bf1 100644 --- a/docs/jdocs/neureka/devices/host/concurrent/WorkScheduler.Divider.html +++ b/docs/jdocs/neureka/devices/host/concurrent/WorkScheduler.Divider.html @@ -1,209 +1,335 @@ - + + - -WorkScheduler.Divider (neureka 1.0.0 API) - - - - + +WorkScheduler.Divider (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class WorkScheduler.Divider

    -
    -
    java.lang.Object -
    neureka.devices.host.concurrent.WorkScheduler.Divider
    +
    neureka.devices.host.concurrent
    +

    Class WorkScheduler.Divider

    -
    -
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.devices.host.concurrent.WorkScheduler.Divider
      • +
      +
    • +
    +
    +
      +
    • +
      Enclosing class:
      -
      WorkScheduler
      +
      WorkScheduler

      -
      public static final class WorkScheduler.Divider -extends Object
      +
      +
      public static final class WorkScheduler.Divider
      +extends java.lang.Object
      Divides workloads until they can be processed efficiently and then submits them to a thread pool for execution...
      This is a library internal class, do not depend on this.
      -
    -
    -
      - -
    • -
      -

      Constructor Summary

      -
      Constructors
      -
      -
      Constructor
      -
      Description
      - -
       
      +
    • +
    - +
    +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        Divider

        -
        public Divider(ExecutorService executor)
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            Divider

            +
            public Divider(java.util.concurrent.ExecutorService executor)
          -
    • +
    -
  • -
    -

    Method Details

    -
  • - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/devices/host/concurrent/WorkScheduler.html b/docs/jdocs/neureka/devices/host/concurrent/WorkScheduler.html index ab786507b..cbd3955c2 100644 --- a/docs/jdocs/neureka/devices/host/concurrent/WorkScheduler.html +++ b/docs/jdocs/neureka/devices/host/concurrent/WorkScheduler.html @@ -1,205 +1,323 @@ - + + - -WorkScheduler (neureka 1.0.0 API) - - - - + +WorkScheduler (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class WorkScheduler

    -
    -
    java.lang.Object -
    neureka.devices.host.concurrent.WorkScheduler
    +
    neureka.devices.host.concurrent
    +

    Class WorkScheduler

    -
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.devices.host.concurrent.WorkScheduler
      • +
      +
    • +
    +
    +
      +

    • -
      public abstract class WorkScheduler -extends Object
      +
      +
      public abstract class WorkScheduler
      +extends java.lang.Object
      An API for registering workloads which will be divided into smaller workloads so that they can be executed efficiently by a thread pool...
      This is a library internal class, do not depend on this.
      -
    -
    -
      + +
    +
    +
    +
      +
    • -
    • -
      -

      Nested Class Summary

      -
      Nested Classes
      -
      -
      Modifier and Type
      -
      Class
      -
      Description
      -
      static final class 
      - -
      +
        +
      • + + +

        Nested Class Summary

        + + + + + + + + + + +
        Nested Classes 
        Modifier and TypeClass and Description
        static class WorkScheduler.Divider
        Divides workloads until they can be processed efficiently and then submits them to a thread pool for execution...
        - - - +
      • +
      -
    • -
      -

      Constructor Summary

      -
      Constructors
      -
      -
      Constructor
      -
      Description
      - -
       
      -
      -
      +
        +
      • + + +

        Constructor Summary

        + + + + + + + + +
        Constructors 
        Constructor and Description
        WorkScheduler() 
      • +
      -
    • -
      -

      Method Summary

      -
      -
      -
      -
      -
      Modifier and Type
      -
      Method
      -
      Description
      -
      protected abstract void
      -
      _work(int first, - int limit)
      -
       
      -
      final void
      -
      invoke(ExecutorService executor, - int first, - int limit, - int threshold)
      -
      +
    - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        WorkScheduler

        -
        public WorkScheduler()
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            WorkScheduler

            +
            public WorkScheduler()
          -
    • +
    -
  • -
    -

    Method Details

    -
      -
    • -
      -

      invoke

      -
      public final void invoke(ExecutorService executor, - int first, - int limit, - int threshold)
      +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          invoke

          +
          public final void invoke(java.util.concurrent.ExecutorService executor,
          +                         int first,
          +                         int limit,
          +                         int threshold)
          Synchronous execution - wait until it's finished.
          -
          -
          Parameters:
          +
          +
          Parameters:
          first - The first index, in a range, to include.
          limit - The first index NOT to include - last (excl.) index in a range.
          threshold - The work size threshold.
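A sketch of the intended usage pattern, subclassing the scheduler and invoking it synchronously; keep in mind the class is documented as library-internal, so this is illustrative only and the pool size, range, and threshold values are assumptions:

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import neureka.devices.host.concurrent.WorkScheduler;

    final class SchedulerSketch extends WorkScheduler {
        @Override protected void _work( int first, int limit ) {
            // Process the half-open range [first, limit) - illustrative body.
        }

        static void demo() {
            ExecutorService pool = Executors.newFixedThreadPool( 4 );
            new SchedulerSketch().invoke( pool, 0, 1_000_000, 32_768 ); // waits for completion
            pool.shutdown();
        }
    }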
          -
    • -
    • -
      -

      _work

      -
      protected abstract void _work(int first, - int limit)
      -
      +
    + + + +
      +
    • +

      _work

      +
      protected abstract void _work(int first,
      +                              int limit)
    -
  • - + + +
    +
diff --git a/docs/jdocs/neureka/devices/host/concurrent/package-frame.html b/docs/jdocs/neureka/devices/host/concurrent/package-frame.html
new file mode 100644
index 000000000..11a6d7add

    neureka.devices.host.concurrent (neureka 1.0.1 API)

    neureka.devices.host.concurrent

    Classes
    Enums

diff --git a/docs/jdocs/neureka/devices/host/concurrent/package-summary.html b/docs/jdocs/neureka/devices/host/concurrent/package-summary.html
index ae564c810..575321503
Package neureka.devices.host.concurrent

package neureka.devices.host.concurrent

    Everything in this package should be considered library-private!
    DO NOT USE CLASSES INSIDE THIS PACKAGE!

See: Description

Class Summary

    WorkScheduler
        An API for registering workloads which will be divided into smaller workloads so that
        they can be executed efficiently by a thread pool...

    WorkScheduler.Divider
        Divides workloads until they can be processed efficiently and then submits them to a
        thread pool for execution...

Enum Summary

    Parallelism
        A set of standard levels of parallelism derived from the number of available cores and
        optionally capped by reserving a specified amount of memory per thread.

Package neureka.devices.host.concurrent Description

    Everything in this package should be considered library-private!
    DO NOT USE CLASSES INSIDE THIS PACKAGE!
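Since Parallelism implements java.util.function.IntSupplier (see the class hierarchy below),
each level can be asked for a concrete thread count. A hedged sketch follows; the constant
name CORES is an assumption for illustration only, as the enum constants are not listed on
this page and the package is library-private anyway:

    import java.util.function.IntSupplier;
    import neureka.devices.host.concurrent.Parallelism;

    // Each Parallelism level supplies an int derived from the available cores
    // (optionally capped by the memory reserved per thread, as described above).
    IntSupplier level = Parallelism.CORES; // constant name assumed, not confirmed here
    int workers = level.getAsInt();
    System.out.println("Suggested degree of parallelism: " + workers);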
diff --git a/docs/jdocs/neureka/devices/host/concurrent/package-tree.html b/docs/jdocs/neureka/devices/host/concurrent/package-tree.html
index fee31d0e8..479fd5652

    Hierarchy For Package neureka.devices.host.concurrent

    Package Hierarchies:

    Class Hierarchy

    Enum Hierarchy

        java.lang.Object
            java.lang.Enum<E> (implements java.lang.Comparable<T>, java.io.Serializable)
                neureka.devices.host.concurrent.Parallelism (implements java.util.function.IntSupplier)

diff --git a/docs/jdocs/neureka/devices/host/machine/BasicMachine.html b/docs/jdocs/neureka/devices/host/machine/BasicMachine.html
index e7a1df8c5..08524cb8c
Class BasicMachine

neureka.devices.host.machine

java.lang.Object
    neureka.devices.host.machine.BasicMachine

Direct Known Subclasses:
    CommonMachine

public class BasicMachine
extends java.lang.Object

    How much memory, and how many threads share that memory. This is used to describe either
    total system resources (system RAM and total number of threads handled by the processors)
    or a cache (processor's L1, L2 or L3 cache).

Field Summary

    long  memory
    int   threads

Constructor Summary

    BasicMachine(long memory, int threads)

Method Summary

    boolean           equals(java.lang.Object obj)
    int               hashCode()
    java.lang.String  toString()

    Methods inherited from class java.lang.Object:
        clone, finalize, getClass, notify, notifyAll, wait, wait, wait

Field Detail

    public final long memory
    public final int threads

Constructor Detail

    BasicMachine

    public BasicMachine(long memory,
                        int threads)

Method Detail

    equals

    public boolean equals(java.lang.Object obj)
        Overrides: equals in class java.lang.Object

    hashCode

    public int hashCode()
        Overrides: hashCode in class java.lang.Object

    toString

    public java.lang.String toString()
        Overrides: toString in class java.lang.Object
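A brief sketch of constructing such a description object directly (illustrative only, since
the package is marked library-private; the numbers are made-up example values):

    import neureka.devices.host.machine.BasicMachine;

    // 32 KiB of L1 cache shared by 2 hardware threads.
    BasicMachine l1 = new BasicMachine(32L * 1024L, 2);

    // 16 GiB of system RAM shared by 8 threads.
    BasicMachine system = new BasicMachine(16L * 1024 * 1024 * 1024, 8);

    System.out.println(system); // toString() is overridden, see above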
diff --git a/docs/jdocs/neureka/devices/host/machine/CommonMachine.html b/docs/jdocs/neureka/devices/host/machine/CommonMachine.html
index e2b8328d3..656a91608
Class CommonMachine

neureka.devices.host.machine

java.lang.Object
    neureka.devices.host.machine.BasicMachine
        neureka.devices.host.machine.CommonMachine

Field Summary

    java.lang.String       architecture
    long                   cache  - The size of one top level (L3 or L2) cache unit in bytes.
    int                    cores  - The total number of processor cores.
    protected static long  K
    int                    units  - The number of top level (L3 or L2) cache units.

    Fields inherited from class neureka.devices.host.machine.BasicMachine:
        memory, threads

Constructor Summary

    protected CommonMachine(Hardware hardware, java.lang.Runtime runtime)
    protected CommonMachine(java.lang.String architecture, BasicMachine[] levels)
        new MemoryThreads[] { SYSTEM, L3, L2, L1 } or new MemoryThreads[] { SYSTEM, L2, L1 }
        or in the worst case new MemoryThreads[] { SYSTEM, L1 }

Method Summary

    boolean  equals(java.lang.Object obj)
    int      hashCode()

    Methods inherited from class neureka.devices.host.machine.BasicMachine:
        toString
    Methods inherited from class java.lang.Object:
        clone, finalize, getClass, notify, notifyAll, wait, wait, wait

Field Detail

    public final java.lang.String architecture

    public final long cache
        The size of one top level (L3 or L2) cache unit in bytes.

    public final int cores
        The total number of processor cores.

    public final int units
        The number of top level (L3 or L2) cache units. With L3 cache defined, this
        corresponds to the number of CPUs.

Constructor Detail

    CommonMachine

    protected CommonMachine(Hardware hardware,
                            java.lang.Runtime runtime)

    CommonMachine

    protected CommonMachine(java.lang.String architecture,
                            BasicMachine[] levels)

        new MemoryThreads[] { SYSTEM, L3, L2, L1 } or new MemoryThreads[] { SYSTEM, L2, L1 }
        or in the worst case new MemoryThreads[] { SYSTEM, L1 }

        Parameters:
            architecture - The CPU architecture (like x86 for example).
            levels - Cache levels.
diff --git a/docs/jdocs/neureka/devices/host/machine/ConcreteMachine.html b/docs/jdocs/neureka/devices/host/machine/ConcreteMachine.html
index 70871ba6e..a812866fd
Class ConcreteMachine

neureka.devices.host.machine

public final class ConcreteMachine
extends CommonMachine

Method Detail

    getArchitecture

    public static java.lang.String getArchitecture()

    getMemory

    public static long getMemory()

    getThreads

    public static int getThreads()

    equals

    public boolean equals(java.lang.Object obj)
        Overrides: equals in class CommonMachine

    hashCode

    public int hashCode()
        Overrides: hashCode in class CommonMachine

    toString

    public java.lang.String toString()
        Overrides: toString in class BasicMachine
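A quick sketch of the static queries shown above (illustrative only; this is an internal
utility, and the printed values naturally depend on the machine running the code):

    import neureka.devices.host.machine.ConcreteMachine;

    // Static snapshot of the environment the JVM is running on.
    String arch    = ConcreteMachine.getArchitecture();
    long   memory  = ConcreteMachine.getMemory();
    int    threads = ConcreteMachine.getThreads();

    System.out.println(arch + ": " + memory + " bytes of memory, " + threads + " threads");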
diff --git a/docs/jdocs/neureka/devices/host/machine/Hardware.html b/docs/jdocs/neureka/devices/host/machine/Hardware.html
index 536a5a318..441dd68aa
Class Hardware

neureka.devices.host.machine

java.lang.Object
    neureka.devices.host.machine.BasicMachine
        neureka.devices.host.machine.CommonMachine
            neureka.devices.host.machine.Hardware

All Implemented Interfaces:
    java.lang.Comparable<Hardware>

public final class Hardware
extends CommonMachine
implements java.lang.Comparable<Hardware>

    This models the cache levels and threads of a CPU using an array in which each entry
    represents a memory level.

    - The first element in the array should correspond to total system resources: the total
      amount of RAM and the total number of threads (typically the same as what is returned
      by Runtime.availableProcessors()).
    - The last element in the array should describe the L1 cache. Typically Intel processors
      have 32k L1 cache and AMD 64k. 1 or maybe 2 threads use/share this cache.
    - Caches, all levels except L1, are described between the first and last elements in
      descending order (L3 before L2).
    - Define new entries using this pattern: [SYSTEM, L3, L2, L1] or [SYSTEM, L2, L1]
      or [SYSTEM, L1]

Constructor Detail

    Hardware

    public Hardware(java.lang.String architecture,
                    BasicMachine[] levels)

        new BasicMachine[] { SYSTEM, L3, L2, L1 } or new BasicMachine[] { SYSTEM, L2, L1 }
        or in the worst case new BasicMachine[] { SYSTEM, L1 }

        Parameters:
            architecture - The architecture code.
            levels - The cache levels.

Method Detail

    makeSimple

    public static Hardware makeSimple()

    makeSimple

    public static Hardware makeSimple(java.lang.String systemArchitecture,
                                      long systemMemory,
                                      int systemThreads)

    compareTo

    public int compareTo(Hardware other)
        Specified by: compareTo in interface java.lang.Comparable<Hardware>

    equals

    public boolean equals(java.lang.Object obj)
        Overrides: equals in class CommonMachine

    hashCode

    public int hashCode()
        Overrides: hashCode in class CommonMachine

    isL2Specified

    public boolean isL2Specified()

    isL3Specified

    public boolean isL3Specified()

    toString

    public java.lang.String toString()
        Overrides: toString in class BasicMachine
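A hedged sketch of how the constructor pattern and the helpers above fit together
(illustrative only; every numeric value is a made-up placeholder):

    import neureka.devices.host.machine.BasicMachine;
    import neureka.devices.host.machine.Hardware;

    // Levels are listed from the whole system down to L1, as described above.
    BasicMachine SYSTEM = new BasicMachine(16L * 1024 * 1024 * 1024, 8); // 16 GiB, 8 threads
    BasicMachine L2     = new BasicMachine(256L * 1024, 2);              // 256 KiB, 2 threads
    BasicMachine L1     = new BasicMachine(32L * 1024, 2);               // 32 KiB, 2 threads

    Hardware hw = new Hardware("x86_64", new BasicMachine[]{ SYSTEM, L2, L1 });

    System.out.println(hw.isL3Specified()); // presumably false for the [SYSTEM, L2, L1] pattern
    System.out.println(hw.isL2Specified()); // presumably true

    // Or let the library derive a simple description of the current machine:
    Hardware simple = Hardware.makeSimple();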
diff --git a/docs/jdocs/neureka/devices/host/machine/package-frame.html b/docs/jdocs/neureka/devices/host/machine/package-frame.html
new file mode 100644
index 000000000..8d55b337a

    neureka.devices.host.machine (neureka 1.0.1 API)
    neureka.devices.host.machine

diff --git a/docs/jdocs/neureka/devices/host/machine/package-summary.html b/docs/jdocs/neureka/devices/host/machine/package-summary.html
index 99ba3004b..2198f359d
Package neureka.devices.host.machine

package neureka.devices.host.machine

    Everything in this package should be considered library-private!
    DO NOT USE CLASSES INSIDE THIS PACKAGE!

See: Description

Package neureka.devices.host.machine Description

    Everything in this package should be considered library-private!
    DO NOT USE CLASSES INSIDE THIS PACKAGE!

diff --git a/docs/jdocs/neureka/devices/host/machine/package-tree.html b/docs/jdocs/neureka/devices/host/machine/package-tree.html
index 181fb9353..4a77511d1
    Hierarchy For Package neureka.devices.host.machine

    Package Hierarchies:

    Class Hierarchy

diff --git a/docs/jdocs/neureka/devices/host/package-frame.html b/docs/jdocs/neureka/devices/host/package-frame.html
new file mode 100644
index 000000000..eeeb06c9d

    neureka.devices.host (neureka 1.0.1 API)
    neureka.devices.host

    Interfaces
    Classes

diff --git a/docs/jdocs/neureka/devices/host/package-summary.html b/docs/jdocs/neureka/devices/host/package-summary.html
index 6a3b3bc73..833062d5a
Package neureka.devices.host

package neureka.devices.host

Interface Summary

    CPU.IndexedWorkload

    CPU.RangeWorkload
        A simple functional interface for executing a range whose implementations will either
        be executed sequentially or be dispatched to a thread-pool, given that the provided
        workload is large enough.

Class Summary

    CPU
        The CPU class, one of many implementations of the Device interface, is simply supposed
        to be an API for dispatching threaded workloads onto the CPU as well as reading from
        or writing to tensors it stores.

    CPU.JVMExecutor
        The CPU.JVMExecutor offers a similar functionality as the parallel stream API, however
        it differs in that the CPU.JVMExecutor processes CPU.RangeWorkload lambdas instead of
        simply exposing a single index or concrete elements for a given workload size.
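A hedged sketch of what a range workload looks like in practice; the exact name and shape of
the functional method are not shown on this page, so the (start, end) signature and the method
name execute below are assumptions based on the description, and the plain invocation stands
in for whatever dispatch mechanism CPU.JVMExecutor actually uses:

    import neureka.devices.host.CPU;

    // Assumed shape: the workload receives the start (inclusive) and end (exclusive)
    // of the sub-range it should handle.
    float[] data = new float[1_000_000];
    CPU.RangeWorkload workload = (start, end) -> {
        for (int i = start; i < end; i++) {
            data[i] = data[i] * 2f;
        }
    };

    // Sequential stand-in for dispatching; a large enough workload would instead be
    // split across the thread pool behind CPU.JVMExecutor.
    workload.execute(0, data.length); // method name 'execute' is an assumption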
diff --git a/docs/jdocs/neureka/devices/host/package-tree.html b/docs/jdocs/neureka/devices/host/package-tree.html
index f780b1506..8974988c5

    Hierarchy For Package neureka.devices.host

    Package Hierarchies:

    Class Hierarchy

    Interface Hierarchy

diff --git a/docs/jdocs/neureka/devices/opencl/KernelCache.html b/docs/jdocs/neureka/devices/opencl/KernelCache.html
index 0740c706f..839d54b58
Class KernelCache

neureka.devices.opencl

java.lang.Object
    neureka.devices.opencl.KernelCache

public final class KernelCache
extends java.lang.Object

    A fixed sized cache for ad-hoc (just in time compiled) OpenCLDevice kernels.
    This cache will mostly only be utilized when integrating with custom kernels
    or when Operations are being optimized for the OpenCLDevice.

    Warning: This is an internal class, meaning it should not be used anywhere but within
    this library. This class or its public methods might change or get removed in future
    versions!

Constructor Summary

    KernelCache()

Method Summary

    neureka.devices.opencl.OpenCLDevice.cl_ad_hoc  get(java.lang.String name)
    boolean                                        has(java.lang.String name)
    void                                           put(java.lang.String name,
                                                       neureka.devices.opencl.OpenCLDevice.cl_ad_hoc kernel)

    Methods inherited from class java.lang.Object:
        clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait

Constructor Detail

    KernelCache

    public KernelCache()

Method Detail

    put

    public void put(java.lang.String name,
                    neureka.devices.opencl.OpenCLDevice.cl_ad_hoc kernel)

    has

    public boolean has(java.lang.String name)

    get

    public neureka.devices.opencl.OpenCLDevice.cl_ad_hoc get(java.lang.String name)
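A small sketch of the intended lookup pattern (illustrative only: the class is internal, and
obtaining a cl_ad_hoc instance is up to the OpenCLDevice, so the 'compiled' variable below is
assumed to come from elsewhere):

    import neureka.devices.opencl.KernelCache;

    KernelCache cache = new KernelCache();

    neureka.devices.opencl.OpenCLDevice.cl_ad_hoc compiled = /* produced by an OpenCLDevice */ null;
    cache.put("my_custom_kernel", compiled);

    // Later: check before fetching, since the cache has a fixed size and may have evicted it.
    if (cache.has("my_custom_kernel")) {
        neureka.devices.opencl.OpenCLDevice.cl_ad_hoc hit = cache.get("my_custom_kernel");
        // ... hand the kernel back to the device for execution ...
    }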
diff --git a/docs/jdocs/neureka/devices/opencl/KernelCaller.html b/docs/jdocs/neureka/devices/opencl/KernelCaller.html
index 4e86894b5..9d87ff668
Class KernelCaller

neureka.devices.opencl

java.lang.Object
    neureka.devices.opencl.KernelCaller

public class KernelCaller
extends java.lang.Object

    Instances of this class are utility factories provided by OpenCLDevice instances.
    When building new operations for tensors, this KernelCaller class is essential for
    calling compiled kernels residing within the GPU.

Constructor Detail

    KernelCaller

    public KernelCaller(org.jocl.cl_kernel kernel,
                        org.jocl.cl_command_queue queue)

        Parameters:
            kernel - The kernel which ought to be called.
            queue - The queue on which calls ought to be dispatched.

Method Detail

    passAllOf

    public KernelCaller passAllOf(Tensor<java.lang.Number> tensor)

        This method passes 2 arguments to the kernel: one for the data of the tensor and one
        for its configuration data.

        Parameters:
            tensor - The tensor whose data and configuration ought to be passed to the kernel.
        Returns:
            This very KernelCaller instance (factory pattern).

    passConfOf

    public KernelCaller passConfOf(Tensor<java.lang.Number> tensor)

        This method passes the ND-Configuration in the form of a flattened int array to the
        kernel. Kernels can use this information for more complex indexing mechanisms, as one
        would expect them to be present in tensors which have been permuted or are simply
        slices of other tensors.

        Parameters:
            tensor - The tensor whose ND configuration ought to be passed to the kernel.
        Returns:
            This very KernelCaller instance (factory pattern).

    pass

    public <T extends java.lang.Number> KernelCaller pass(Tensor<T> tensor)

        This method passes 1 argument to the kernel, namely the data of the tensor.

        Parameters:
            tensor - The tensor whose data ought to be passed to the kernel.
        Returns:
            This very KernelCaller instance (factory pattern).

    pass

    public KernelCaller pass(int value)
    public KernelCaller pass(int... values)
    public KernelCaller pass(float value)
    public KernelCaller pass(float... values)
    public KernelCaller pass(byte... values)
    public KernelCaller pass(java.lang.Number value)

        Use these to pass single values or arrays of values to the kernel.
        Each overload returns this very KernelCaller instance (factory pattern).

    passLocalFloats

    public KernelCaller passLocalFloats(long size)

    call

    public void call(int globalWorkSize)

        Parameters:
            globalWorkSize - The number of global threads which will be dispatched.

    call

    public void call(long[] globalWorkSizes,
                     long[] localWorkSizes)

        Use this to call the kernel with 2 long arrays defining how the kernel should be
        indexed and parallelized. The globalWorkSizes span an n-dimensional grid of global
        threads, whereas the localWorkSizes define the dimensions of a grid of local work
        items (which are called "work groups").
        The global work sizes must be multiples of the corresponding local work sizes; this
        can usually be circumvented by padding the data to be a multiple of a more appropriate
        local work size or by introducing boundary checks in your kernel.

        Parameters:
            globalWorkSizes - An array of long values which spans an nd-grid of global threads.
            localWorkSizes - An array of long values which spans an nd-grid of local threads (work groups).
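A hedged sketch of the factory-style call chain described above (illustrative only: how one
obtains the org.jocl kernel and queue handles, and the tensors stored on the device, is
outside this page, so those variables are assumed to exist):

    // kernel, queue: org.jocl.cl_kernel / org.jocl.cl_command_queue obtained elsewhere
    // input, output: Tensor<Number> instances already stored on the OpenCLDevice
    KernelCaller caller = new KernelCaller(kernel, queue);

    caller.passAllOf(output)           // data + ND configuration of the output tensor
          .passAllOf(input)            // data + ND configuration of the input tensor
          .pass(0.5f)                  // a scalar kernel argument
          .call(new long[]{ 1024 },    // global work size: an nd-grid of 1024 threads
                new long[]{ 64 });     // local work size: work groups of 64 (1024 is a multiple of 64)

    // For the simple case, call(int globalWorkSize) dispatches a 1-dimensional grid instead.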
diff --git a/docs/jdocs/neureka/devices/opencl/KernelCode.html b/docs/jdocs/neureka/devices/opencl/KernelCode.html
index d2922cf69..f2b6a0ff6
Class KernelCode

neureka.devices.opencl

java.lang.Object
    neureka.devices.opencl.KernelCode

public final class KernelCode
extends java.lang.Object

Constructor Detail

    KernelCode

    public KernelCode(java.lang.String name,
                      java.lang.String code)

    KernelCode

    public KernelCode(java.lang.String name,
                      java.lang.String code,
                      DataType<?> dataType)

Method Detail

    getName

    public java.lang.String getName()

    getCode

    public java.lang.String getCode()

    getDataType

    public DataType<?> getDataType()

    equals

    public boolean equals(java.lang.Object o)
        Overrides: equals in class java.lang.Object

    hashCode

    public int hashCode()
        Overrides: hashCode in class java.lang.Object
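A minimal sketch of constructing such a value object (illustrative only; the OpenCL source
string is a made-up example and carries no special meaning to the library):

    import neureka.devices.opencl.KernelCode;

    String source =
        "__kernel void scale(__global float* data, float factor) {" +
        "    unsigned int i = get_global_id(0);                    " +
        "    data[i] = data[i] * factor;                           " +
        "}";

    KernelCode code = new KernelCode("scale", source);
    System.out.println(code.getName()); // "scale"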
diff --git a/docs/jdocs/neureka/devices/opencl/KernelSource.html b/docs/jdocs/neureka/devices/opencl/KernelSource.html
index 971762829..fb53ad38d
Interface KernelSource

neureka.devices.opencl

@FunctionalInterface
public interface KernelSource

    Provides kernel source code for a provided ExecutionCall.

diff --git a/docs/jdocs/neureka/devices/opencl/OpenCLDevice.Query.html b/docs/jdocs/neureka/devices/opencl/OpenCLDevice.Query.html
index f2d789daf..7a9916e5c
Class OpenCLDevice.Query

neureka.devices.opencl

java.lang.Object
    neureka.devices.opencl.OpenCLDevice.Query

public static class OpenCLDevice.Query
extends java.lang.Object

Constructor Detail

    Query

    public Query()

Method Detail

    getInt

    public static int getInt(org.jocl.cl_device_id device,
                             int paramName)

        Returns the value of the device info parameter with the given name.

        Parameters:
            device - The device
            paramName - The parameter name
        Returns:
            The value

    getInts

    public static int[] getInts(org.jocl.cl_device_id device,
                                int paramName,
                                int numValues)

        Returns the values of the device info parameter with the given name.

        Parameters:
            device - The device
            paramName - The parameter name
            numValues - The number of values
        Returns:
            The value

    getLong

    public static long getLong(org.jocl.cl_device_id device,
                               int paramName)

        Returns the value of the device info parameter with the given name.

    getLongs

    public static long[] getLongs(org.jocl.cl_device_id device,
                                  int paramName,
                                  int numValues)

        Returns the values of the device info parameter with the given name.

    getString

    public static java.lang.String getString(org.jocl.cl_device_id device,
                                             int paramName)

        Returns the value of the device info parameter with the given name.

    getString

    public static java.lang.String getString(org.jocl.cl_platform_id platform,
                                             int paramName)

        Returns the value of the platform info parameter with the given name.

    getSize

    public static long getSize(org.jocl.cl_device_id device,
                               int paramName)

        Returns the value of the device info parameter with the given name.

    getSizes

    public static long[] getSizes(org.jocl.cl_device_id device,
                                  int paramName,
                                  int numValues)

        Returns the values of the device info parameter with the given name.

    getLongs

    public static long[] getLongs(int numValues,
                                  java.nio.ByteBuffer buffer,
                                  long[] values)
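A hedged sketch of querying a device property (illustrative only: obtaining the
org.jocl.cl_device_id handle is outside this page, and CL_DEVICE_NAME /
CL_DEVICE_GLOBAL_MEM_SIZE are standard org.jocl.CL constants assumed to be the kind of
paramName these helpers expect):

    import org.jocl.CL;
    import org.jocl.cl_device_id;
    import neureka.devices.opencl.OpenCLDevice;

    cl_device_id device = /* obtained from the OpenCL platform elsewhere */ null;

    String name   = OpenCLDevice.Query.getString(device, CL.CL_DEVICE_NAME);
    long   memory = OpenCLDevice.Query.getLong(device, CL.CL_DEVICE_GLOBAL_MEM_SIZE);

    System.out.println(name + " has " + memory + " bytes of global memory");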
diff --git a/docs/jdocs/neureka/devices/opencl/OpenCLDevice.Type.html b/docs/jdocs/neureka/devices/opencl/OpenCLDevice.Type.html
index df65b2f21..bc02f699e
Enum OpenCLDevice.Type

neureka.devices.opencl

java.lang.Object
    java.lang.Enum<OpenCLDevice.Type>
        neureka.devices.opencl.OpenCLDevice.Type

Method Detail

    values

    public static OpenCLDevice.Type[] values()

        Returns an array containing the constants of this enum type, in the order they are
        declared. This method may be used to iterate over the constants as follows:

            for (OpenCLDevice.Type c : OpenCLDevice.Type.values())
                System.out.println(c);

        Returns:
            an array containing the constants of this enum type, in the order they are declared

    valueOf

    public static OpenCLDevice.Type valueOf(java.lang.String name)

        Returns the enum constant of this type with the specified name. The string must match
        exactly an identifier used to declare an enum constant in this type. (Extraneous
        whitespace characters are not permitted.)

        Parameters:
            name - the name of the enum constant to be returned.
        Returns:
            the enum constant with the specified name
        Throws:
            java.lang.IllegalArgumentException - if this enum type has no constant with the specified name
            java.lang.NullPointerException - if the argument is null
      -
  • - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/devices/opencl/OpenCLDevice.html b/docs/jdocs/neureka/devices/opencl/OpenCLDevice.html index af3237432..e18a6a183 100644 --- a/docs/jdocs/neureka/devices/opencl/OpenCLDevice.html +++ b/docs/jdocs/neureka/devices/opencl/OpenCLDevice.html @@ -1,485 +1,653 @@ - + + - -OpenCLDevice (neureka 1.0.0 API) - - - - + +OpenCLDevice (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

(Regenerated in the same way.)

Class OpenCLDevice (package neureka.devices.opencl)

Nested classes/interfaces inherited from interface neureka.common.composition.Component: Component.IsBeing, Component.OwnerChangeRequest<O>
Nested classes/interfaces inherited from interface neureka.devices.Device: Device.Access<V>, Device.In, Device.Writer
Methods inherited from class neureka.devices.AbstractDevice: _cleaning, access, approve, store
Methods inherited from class neureka.devices.AbstractBaseDevice: contains, has, isEmpty, numberOfDataObjects, numberOfStored
Methods inherited from class java.lang.Object: clone, equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait
Methods inherited from interface neureka.devices.Device: allocate, borrow, optimizedFunctionOf

Method detail:

  public final java.lang.String toString()
      Overrides: toString in class java.lang.Object

  public final org.jocl.cl_device_id getId()

  public final OpenCLPlatform getPlatform()

  public boolean hasAdHocKernel(java.lang.String name)
      Parameters: name - The name of the kernel whose presence should be checked.
      Returns: True if the kernel is present in the cache, false otherwise.

  public KernelCaller getAdHocKernel(java.lang.String name)
      Parameters: name - The name of the kernel which should be retrieved.
      Returns: The kernel with the given name if it is present in the cache, throws an exception otherwise.

  public java.util.Optional<KernelCaller> findAdHocKernel(java.lang.String name)
      Parameters: name - The name of the kernel which should be retrieved.
      Returns: An Optional containing the kernel with the given name if it is present in the cache, an empty optional otherwise.
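A small sketch of how the cache lookup methods above combine; the OpenCLDevice instance and the kernel name are assumed to come from elsewhere, and the import paths simply follow the packages shown on these pages:

    import java.util.Optional;
    import neureka.devices.opencl.KernelCaller;
    import neureka.devices.opencl.OpenCLDevice;

    final class AdHocKernelLookup {
        static void useIfCached(OpenCLDevice device, String kernelName) {
            if (device.hasAdHocKernel(kernelName)) {
                KernelCaller caller = device.getAdHocKernel(kernelName); // would throw if absent
                // ... pass arguments and run the kernel through the caller ...
            }
            // Exception free alternative:
            Optional<KernelCaller> found = device.findAdHocKernel(kernelName);
            found.ifPresent(caller -> { /* ... use the caller ... */ });
        }
    }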
  public KernelCaller findOrCompileAdHocKernel(java.lang.String name, java.util.function.Supplier<java.lang.String> source)
      Parameters: name - The name of the kernel which should be retrieved.
                  source - The source code of the kernel which should be compiled if it is not present in the cache.
      Returns: The kernel caller for the kernel of the requested name, either from cache, or compiled from the given
               source code if it was not present in the cache.

  public KernelCaller compileAndGetAdHocKernel(java.lang.String name, java.lang.String source)
      This method compiles and returns the KernelCaller for a so called "ad hoc" kernel.
      Ad hoc is a Latin phrase meaning literally 'to this'. In English, it generally signifies a solution
      designed for a specific problem or task, non-generalizable, and not intended to be adapted to other
      purposes. [...] unique kernels with a specific purpose created on the fly during runtime by operations.
      This might be useful for high performance operations on tensors with specific dimensions and/or
      possibly other variables / properties which might be taken into account...
      Parameters: name - The name of the kernel which ought to be compiled.
                  source - The source of the kernel which ought to be compiled.
      Returns: The KernelCaller for the compiled kernel.

  public OpenCLDevice compileAdHocKernel(java.lang.String name, java.lang.String source)
      This method compiles a so called "ad hoc" kernel (same meaning of "ad hoc" as above): [...] unique kernels
      with a specific purpose created on the fly during runtime by operations. This might be useful for high
      performance operations on tensors with specific dimensions and/or possibly other variables / properties
      which might be taken into account...
      Parameters: name - The name of the kernel which ought to be compiled.
                  source - The source of the kernel which ought to be compiled.
      Returns: This very instance in order to enable the factory pattern.
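The compile-or-reuse workflow described above can be sketched as follows; the kernel name and the OpenCL C source are invented for illustration, and how arguments are then passed to the returned KernelCaller is not covered by this page, so it is left out:

    import java.util.function.Supplier;
    import neureka.devices.opencl.KernelCaller;
    import neureka.devices.opencl.OpenCLDevice;

    final class AdHocCompileSketch {
        static KernelCaller scaleByTwo(OpenCLDevice device) {
            String name = "scale_by_two";       // illustrative kernel name
            Supplier<String> source = () ->     // only evaluated on a cache miss
                "__kernel void scale_by_two(__global float* data) {\n" +
                "    unsigned int i = get_global_id(0);\n" +
                "    data[i] = 2.0f * data[i];\n" +
                "}\n";
            // Returns the cached caller, or compiles the source and caches it first.
            return device.findOrCompileAdHocKernel(name, source);
        }
    }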
  public Operation optimizedOperationOf(Function function, java.lang.String name)
      Description copied from interface: Device
      This method tries to allow this device to produce an optimized Operation based on the provided function.
      This is especially useful in an OpenCL context which can compile the function into native GPU kernels at runtime.
      Parameters: function - The function which should be turned into an optimized operation.
                  name - The name of the returned operation.
      Returns: An optimized operation based on the provided function, or null if optimization is not possible.
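Because optimizedOperationOf may return null when no GPU kernel can be produced, callers should treat the result as optional. In the sketch below the Function argument is assumed to be built elsewhere, the operation name is invented, and the import paths are assumptions based on the package names listed in this documentation:

    import neureka.backend.api.Operation;
    import neureka.devices.opencl.OpenCLDevice;
    import neureka.math.Function;

    final class JitOperationSketch {
        static Operation tryOptimize(OpenCLDevice device, Function function) {
            Operation optimized = device.optimizedOperationOf(function, "my_fused_op"); // illustrative name
            if (optimized == null) {
                // Optimization was not possible; fall back to the regular execution path.
            }
            return optimized;
        }
    }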
  public void dispose()
      This method tells the device to restore all tensors stored on it and release all resources.

  public Device<java.lang.Number> restore(Tensor<java.lang.Number> tensor)
      This method assumes that the passed tensor is stored on this device instance.
      If the tensor is stored on the device then the method loads the outsourced data of the tensor
      back into primitive JVM arrays and restores the tensor freshly in RAM.
      Parameters: tensor - The tensor whose data ought to be restored (loaded to RAM).
      Returns: This device, which enables method chaining.
  public neureka.devices.opencl.OpenCLDevice.cl_config clConfigOf(Tensor<?> t)

  public neureka.devices.opencl.OpenCLDevice.cl_config clConfigOf(NDConfiguration ndc)

  public final <T extends java.lang.Number> Device<java.lang.Number> free(Tensor<T> tensor)
      Description copied from interface: Device
      Use this to remove the provided tensor from this Device!
      Type Parameters: T - The type parameter for the value type of the tensor, which must be supported by this Device.
      Parameters: tensor - The tensor which ought to be removed from this Device.
      Returns: This very instance to allow for method chaining.
  protected final <T extends java.lang.Number> T _readItem(Tensor<T> tensor, int index)
      Specified by: _readItem in class AbstractDevice<java.lang.Number>

  protected final <T extends java.lang.Number, A> A _readArray(Tensor<T> tensor, java.lang.Class<A> arrayType, int start, int size)
      Specified by: _readArray in class AbstractDevice<java.lang.Number>

  protected final <T extends java.lang.Number> void _writeItem(Tensor<T> tensor, T item, int start, int size)
      Specified by: _writeItem in class AbstractDevice<java.lang.Number>

  protected final <T extends java.lang.Number> void _writeArray(Tensor<T> tensor, java.lang.Object array, int offset, int start, int size)
      Specified by: _writeArray in class AbstractDevice<java.lang.Number>
  public <T extends java.lang.Number> Data<T> allocate(DataType<T> dataType, NDConfiguration ndc)

  protected final <T extends java.lang.Number> void _swap(Tensor<T> former, Tensor<T> replacement)
      Description copied from class: AbstractDevice
      This method is used internally mostly and should not be used in most cases.
      Specified by: _swap in class AbstractDevice<java.lang.Number>
      Type Parameters: T - The type parameter for the value type of the tensors, which must be supported by this Device.
      Parameters: former - The tensor whose associated data (on the device) ought to be assigned to the other tensor.
                  replacement - The tensor which ought to receive the data of the former tensor internally.

  public boolean update(Component.OwnerChangeRequest<Tensor<java.lang.Number>> changeRequest)
      Description copied from class: AbstractDevice
      A Device is a component of a tensor. This method is used to inform the device
      that the device is being added, removed or replaced (from the tensor).
      Specified by: update in interface Component<Tensor<java.lang.Number>>
      Overrides: update in class AbstractDevice<java.lang.Number>
      Parameters: changeRequest - An OwnerChangeRequest implementation instance used to communicate the type of change,
                  context information and the ability to execute the change directly.
      Returns: The truth value determining if the change should be executed.
  protected <T extends java.lang.Number> int _sizeOccupiedBy(Tensor<T> tensor)
      Specified by: _sizeOccupiedBy in class AbstractDevice<java.lang.Number>

  public KernelCaller getKernel(java.lang.String name)
      Parameters: name - The name of the kernel for which a KernelCaller should be returned.
      Returns: A KernelCaller for calling the requested kernel.

  protected boolean _approveExecutionOf(Tensor<?>[] tensors, int d, Operation type)
      Description copied from class: AbstractDevice
      This method is the internal approval routine called by its public counterpart and implemented by classes
      extending this very abstract class. It may or may not be called by an Algorithm in order to allow a Device
      to check if the provided arguments are suitable for execution.
      Specified by: _approveExecutionOf in class AbstractDevice<java.lang.Number>
      Parameters: tensors - An array of input tensors.
                  d - The index of the input which ought to be derived.
                  type - The type of operation.
      Returns: The truth value determining if the provided arguments can be executed.
  Device property queries (all regenerated in the same way, signatures unchanged):

  public java.lang.String name()
  public java.lang.String vendor()
  public java.lang.String version()
  public OpenCLDevice.Type type()
  public int maxComputeUnits()
  public long maxWorkItemSimensions()
  public long[] maxWorkItemSizes()
  public long maxWorkGroupSize()
  public long maxClockFrequenzy()
  public int maxAddressBits()
  public long maxMemAllocSize()
  public long globalMemSize()
  public int errorCorrectionSupport()
  public int localMemType()
  public long localMemSize()
  public long maxConstantBufferSize()
  public long maxConstantBufferSizeKB()
  public int imageSupport()
  public int maxReadImageArgs()
  public int maxWriteImageArgs()
  public long singleFPConfig()
  public long image2DMaxWidth()
  public long image2DMaxHeight()
  public long image3DMaxWidth()
  public long image3DMaxHeight()
  public long image3DMaxDepth()
  public int prefVecWidthChar()
  public int prefVecWidthShort()
  public int prefVecWidthInt()
  public int prefVecWidthLong()
  public int prefVecWidthFloat()
  public int prefVecWidthDouble()
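The property getters above make it easy to print a compact capability report; this sketch uses only methods documented on this page (the formatting is arbitrary):

    import neureka.devices.opencl.OpenCLDevice;

    final class DeviceReportSketch {
        static String report(OpenCLDevice device) {
            return "Device:              " + device.name()             + "\n" +
                   "Vendor:              " + device.vendor()           + "\n" +
                   "OpenCL version:      " + device.version()          + "\n" +
                   "Type:                " + device.type()             + "\n" +
                   "Compute units:       " + device.maxComputeUnits()  + "\n" +
                   "Max work group size: " + device.maxWorkGroupSize() + "\n" +
                   "Global memory (B):   " + device.globalMemSize()    + "\n" +
                   "Local memory (B):    " + device.localMemSize();
        }
    }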
diff --git a/docs/jdocs/neureka/devices/opencl/OpenCLPlatform.html b/docs/jdocs/neureka/devices/opencl/OpenCLPlatform.html
index 9968108d1..9798b587d 100644
--- a/docs/jdocs/neureka/devices/opencl/OpenCLPlatform.html
+++ b/docs/jdocs/neureka/devices/opencl/OpenCLPlatform.html
(Regenerated in the same way.)

Class OpenCLPlatform (package neureka.devices.opencl)

public class OpenCLPlatform
extends java.lang.Object
    This class models the OpenCL concept of platforms, which refer to device vendors or vendor OpenCL runtime drivers.
    For example, in a system with 1 Intel CPU, 1 Nvidia GPU and 2 AMD GPUs, [...]
    Then you have 2 Platforms (Intel and POCL), each with the same Intel CPU as device.
    For every platform exposed by the OpenCL runtime (modelled by a CLBackend instance),
    there will be an OpenCLPlatform instance.

Constructor detail:

  public OpenCLPlatform(org.jocl.cl_platform_id pid)

Method detail:

  public void recompile()

  public java.util.List<OpenCLDevice> getDevices()

  public boolean has(org.jocl.cl_device_id did)
      Parameters: did - The cl_device_id representing an OpenCL supporting device.
      Returns: The truth value determining if this platform hosts the device represented by the provided id.

  public OpenCLDevice get(org.jocl.cl_device_id did)

  public org.jocl.cl_kernel getKernel(java.lang.String kernelName)

  public boolean hasKernel(java.lang.String kernelName)

  public final long getId()

  public org.jocl.cl_context getContext()

  public void dispose()

  public java.lang.String toString()
      Overrides: toString in class java.lang.Object
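Given an OpenCLPlatform (how platforms are obtained from the backend is not covered on this page, so one is taken as a parameter here), its devices can be listed with getDevices() together with the OpenCLDevice getters shown earlier:

    import neureka.devices.opencl.OpenCLDevice;
    import neureka.devices.opencl.OpenCLPlatform;

    final class PlatformDevicesSketch {
        static void printDevices(OpenCLPlatform platform) {
            System.out.println("Platform: " + platform); // toString() is overridden, see above
            for (OpenCLDevice device : platform.getDevices())
                System.out.println("  - " + device.name() + " (" + device.type() + ")");
        }
    }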
diff --git a/docs/jdocs/neureka/devices/opencl/StaticKernelSource.html b/docs/jdocs/neureka/devices/opencl/StaticKernelSource.html
index 1d9d8721e..221ca0f21 100644
--- a/docs/jdocs/neureka/devices/opencl/StaticKernelSource.html
+++ b/docs/jdocs/neureka/devices/opencl/StaticKernelSource.html
(Regenerated in the same way.)

Interface StaticKernelSource (package neureka.devices.opencl)

public interface StaticKernelSource
extends KernelSource
    Methods inherited from interface neureka.devices.opencl.KernelSource: getKernelFor
diff --git a/docs/jdocs/neureka/devices/opencl/package-frame.html b/docs/jdocs/neureka/devices/opencl/package-frame.html
new file mode 100644
index 000000000..602495fc1
--- /dev/null
+++ b/docs/jdocs/neureka/devices/opencl/package-frame.html
(New frame page "neureka.devices.opencl (neureka 1.0.1 API)"; frame pages are part of the older doclet layout.)
diff --git a/docs/jdocs/neureka/devices/opencl/package-summary.html b/docs/jdocs/neureka/devices/opencl/package-summary.html
index 274b0ba70..4b42425b3 100644
--- a/docs/jdocs/neureka/devices/opencl/package-summary.html
+++ b/docs/jdocs/neureka/devices/opencl/package-summary.html
(Regenerated summary page for package neureka.devices.opencl.)
diff --git a/docs/jdocs/neureka/devices/opencl/package-tree.html b/docs/jdocs/neureka/devices/opencl/package-tree.html
index 4f247fd50..6bb9b9c25 100644
--- a/docs/jdocs/neureka/devices/opencl/package-tree.html
+++ b/docs/jdocs/neureka/devices/opencl/package-tree.html
(Regenerated "Hierarchy For Package neureka.devices.opencl" page; the heading "Enum Class Hierarchy" becomes "Enum Hierarchy".)
diff --git a/docs/jdocs/neureka/devices/opencl/utility/CLFunctionCompiler.html b/docs/jdocs/neureka/devices/opencl/utility/CLFunctionCompiler.html
index ea3740a59..c508a427f 100644
--- a/docs/jdocs/neureka/devices/opencl/utility/CLFunctionCompiler.html
+++ b/docs/jdocs/neureka/devices/opencl/utility/CLFunctionCompiler.html
(Regenerated in the same way.)

Class CLFunctionCompiler (package neureka.devices.opencl.utility)

public final class CLFunctionCompiler
extends java.lang.Object
    Turns a Function into OpenCL kernel code to make optimized just in time compilation possible.

Constructor detail:

  public CLFunctionCompiler(OpenCLDevice device, Function toBeOptimized, java.lang.String functionName)
diff --git a/docs/jdocs/neureka/devices/opencl/utility/DeviceQuery.html b/docs/jdocs/neureka/devices/opencl/utility/DeviceQuery.html
index a73be3ba2..fa2bd1625 100644
--- a/docs/jdocs/neureka/devices/opencl/utility/DeviceQuery.html
+++ b/docs/jdocs/neureka/devices/opencl/utility/DeviceQuery.html
(Regenerated in the same way.)

Class DeviceQuery (package neureka.devices.opencl.utility)

public final class DeviceQuery
extends java.lang.Object
    A program that queries and prints information about all available devices.

Method detail:

  public static java.lang.String query()
      The entry point of this program.
      Returns: A String containing a detailed summary of all OpenCL related...
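Since query() is static and takes no arguments, the whole report can be printed in one line; the import path follows the package shown above:

    import neureka.devices.opencl.utility.DeviceQuery;

    final class DeviceQueryDemo {
        public static void main(String[] args) {
            // Prints a detailed summary of all OpenCL platforms and devices found on this machine.
            System.out.println(DeviceQuery.query());
        }
    }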
diff --git a/docs/jdocs/neureka/devices/opencl/utility/Messages.Tips.html b/docs/jdocs/neureka/devices/opencl/utility/Messages.Tips.html
index 6c7dbe7ea..72f828a46 100644
--- a/docs/jdocs/neureka/devices/opencl/utility/Messages.Tips.html
+++ b/docs/jdocs/neureka/devices/opencl/utility/Messages.Tips.html
(Regenerated in the same way; heading "Enum Class Messages.Tips" becomes "Enum Messages.Tips".)

Enum Messages.Tips (package neureka.devices.opencl.utility)
    java.lang.Object
      java.lang.Enum<Messages.Tips>
        neureka.devices.opencl.utility.Messages.Tips

public static enum Messages.Tips
extends java.lang.Enum<Messages.Tips>

Field detail:

  public final java.lang.String HOW_TO_INSTALL_OPENCL
  public final java.lang.String HOW_TO_INSTALL_OPENCL_DRIVERS

Method detail:

  public static Messages.Tips[] values()
      Returns an array containing the constants of this enum type, in the order they are declared.
      This method may be used to iterate over the constants as follows:
          for (Messages.Tips c : Messages.Tips.values())
              System.out.println(c);
      Returns: an array containing the constants of this enum type, in the order they are declared

  public static Messages.Tips valueOf(java.lang.String name)
      Returns the enum constant of this type with the specified name. The string must match exactly an
      identifier used to declare an enum constant in this type. (Extraneous whitespace characters are not permitted.)
      Parameters: name - the name of the enum constant to be returned.
      Returns: the enum constant with the specified name
      Throws: java.lang.IllegalArgumentException - if this enum type has no constant with the specified name
              java.lang.NullPointerException - if the argument is null

  public java.lang.String bootstrapTip()
diff --git a/docs/jdocs/neureka/devices/opencl/utility/Messages.html b/docs/jdocs/neureka/devices/opencl/utility/Messages.html
index 8c5f0a13f..23e95e41f 100644
--- a/docs/jdocs/neureka/devices/opencl/utility/Messages.html
+++ b/docs/jdocs/neureka/devices/opencl/utility/Messages.html
(Regenerated in the same way.)

Class Messages (package neureka.devices.opencl.utility)

public final class Messages
extends java.lang.Object
    Nested classes: static enum Messages.Tips

Method detail:

  public static java.lang.String clContextCreationFailed()

  public static java.lang.String clContextCouldNotFindAnyDevices()

  public static Messages.Tips findTip()
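The helpers above appear to be aimed at diagnostics when OpenCL is unavailable; a short sketch combining them with the Messages.Tips members documented in the previous section (what findTip() selects internally is not specified here):

    import neureka.devices.opencl.utility.Messages;

    final class OpenClDiagnosticsSketch {
        static void explainMissingOpenCl() {
            System.out.println(Messages.clContextCouldNotFindAnyDevices());
            Messages.Tips tip = Messages.findTip();                 // one of the Tips constants
            System.out.println(tip.bootstrapTip());
            System.out.println(tip.HOW_TO_INSTALL_OPENCL_DRIVERS);  // public final String field, see above
        }
    }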
diff --git a/docs/jdocs/neureka/devices/opencl/utility/package-frame.html b/docs/jdocs/neureka/devices/opencl/utility/package-frame.html
new file mode 100644
index 000000000..1d2b9f216
--- /dev/null
+++ b/docs/jdocs/neureka/devices/opencl/utility/package-frame.html
(New frame page "neureka.devices.opencl.utility (neureka 1.0.1 API)" listing the package's Classes and Enums.)
diff --git a/docs/jdocs/neureka/devices/opencl/utility/package-summary.html b/docs/jdocs/neureka/devices/opencl/utility/package-summary.html
index dcb0c66d2..f01e93c23 100644
--- a/docs/jdocs/neureka/devices/opencl/utility/package-summary.html
+++ b/docs/jdocs/neureka/devices/opencl/utility/package-summary.html
(Regenerated summary page for package neureka.devices.opencl.utility; the class table still describes CLFunctionCompiler as "Turns a Function into OpenCL kernel code to make optimized just in time compilation possible.")
diff --git a/docs/jdocs/neureka/devices/opencl/utility/package-tree.html b/docs/jdocs/neureka/devices/opencl/utility/package-tree.html
index 76c7320e9..b913b8e38 100644
--- a/docs/jdocs/neureka/devices/opencl/utility/package-tree.html
+++ b/docs/jdocs/neureka/devices/opencl/utility/package-tree.html
(Regenerated "Hierarchy For Package neureka.devices.opencl.utility" page; "Enum Class Hierarchy" becomes "Enum Hierarchy".)
diff --git a/docs/jdocs/neureka/devices/package-frame.html b/docs/jdocs/neureka/devices/package-frame.html
new file mode 100644
index 000000000..a8ee1dc2f
--- /dev/null
+++ b/docs/jdocs/neureka/devices/package-frame.html
(New frame page "neureka.devices (neureka 1.0.1 API)".)
diff --git a/docs/jdocs/neureka/devices/package-summary.html b/docs/jdocs/neureka/devices/package-summary.html
index 1ae3f49e1..9311d12d3 100644
--- a/docs/jdocs/neureka/devices/package-summary.html
+++ b/docs/jdocs/neureka/devices/package-summary.html
(Regenerated summary page for package neureka.devices.)
diff --git a/docs/jdocs/neureka/devices/package-tree.html b/docs/jdocs/neureka/devices/package-tree.html
index d52de7790..9cc77afbe 100644
--- a/docs/jdocs/neureka/devices/package-tree.html
+++ b/docs/jdocs/neureka/devices/package-tree.html
(Regenerated "Hierarchy For Package neureka.devices" page: class, interface and enum hierarchies re-rendered; "Enum Class Hierarchy" becomes "Enum Hierarchy".)
diff --git a/docs/jdocs/neureka/dtype/DataType.html b/docs/jdocs/neureka/dtype/DataType.html
index 4339f6026..7bcb39ea3 100644
--- a/docs/jdocs/neureka/dtype/DataType.html
+++ b/docs/jdocs/neureka/dtype/DataType.html
(Regenerated in the same way.)

Class DataType<T> (package neureka.dtype)

public final class DataType<T>
extends java.lang.Object
    Type Parameters: T - The type parameter of the type class whose instances ought to be represented.
    This class is a Multiton implementation for wrapping and representing type classes.
    Every DataType instance uniquely wraps a Class instance which will always differ
    from instances wrapped by other DataType instances.
    This is because the Multiton implementation utilizes a hash map where classes are the keys
    and their corresponding values are DataType instances.

Method detail:

  public static <T> DataType<T> of(java.lang.Class<T> typeClass)

  public <T extends NumericType<?,?,?,?>> T getTypeClassInstance(java.lang.Class<T> type)
      Returns: An instance of the type class if possible.

  public boolean typeClassImplements(java.lang.Class<?> interfaceClass)
      Parameters: interfaceClass - The type class which ought to be checked for compatibility.
      Returns: True if the provided type is a sub-type of the type represented by this instance.

  public java.lang.Class<?> dataArrayType()

  public boolean equals(java.lang.Object o)
      Overrides: equals in class java.lang.Object

  public int hashCode()
      Overrides: hashCode in class java.lang.Object

  public java.lang.String toString()
      Overrides: toString in class java.lang.Object

  public java.lang.Class<?> getRepresentativeType()

  public java.lang.Class<T> getItemTypeClass()
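Because DataType is a Multiton keyed by the wrapped Class, requesting the same class twice should yield the same instance; the snippet below uses only the members documented above, with Float.class as an arbitrary example key:

    import neureka.dtype.DataType;

    final class DataTypeSketch {
        static void demo() {
            DataType<Float> floats = DataType.of(Float.class);
            // Multiton behaviour: the same Class key maps to the same DataType instance.
            System.out.println("same instance: " + (floats == DataType.of(Float.class)));
            System.out.println("item type:     " + floats.getItemTypeClass());
            System.out.println("array type:    " + floats.dataArrayType());
            System.out.println("is Comparable: " + floats.typeClassImplements(Comparable.class));
        }
    }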
diff --git a/docs/jdocs/neureka/dtype/NumericType.html b/docs/jdocs/neureka/dtype/NumericType.html
index 6ec4f8f27..1755aef91 100644
--- a/docs/jdocs/neureka/dtype/NumericType.html
+++ b/docs/jdocs/neureka/dtype/NumericType.html
    Interface NumericType<TargetType,TargetArrayType,HolderType,HolderArrayType>

    +
    neureka.dtype
    +

    Interface NumericType<TargetType,TargetArrayType,HolderType,HolderArrayType>

    -
    -
    -
    Type Parameters:
    +
    +
    +
      +
    • +
      +
      Type Parameters:
      TargetType - The target type is the targeted JVM data-type which can represent the holder type.
      TargetArrayType - The target array type is the targeted JVM array data-type which can represent the holder array type.
      HolderType - The holder type is the JVM type which can hold the data but not necessarily represent it (int cant represent uint).
      HolderArrayType - The holder array type is the JVM array type which can hold the data but not necessarily represent it (int[] cant represent uint[]).
      -
      +
      All Known Implementing Classes:
      -
      F32, F64, I16, I32, I64, I8, UI16, UI32, UI64, UI8
      +
      F32, F64, I16, I32, I64, I8, UI16, UI32, UI64, UI8

      -
      public interface NumericType<TargetType,TargetArrayType,HolderType,HolderArrayType>
      +
      +
      public interface NumericType<TargetType,TargetArrayType,HolderType,HolderArrayType>
      This interface enables "Polymorphic" utility by defining common functionalities used for handling various numeric types. Implementations of this interface are utility classes which represent the numeric data types @@ -92,410 +118,554 @@

      Interface NumericType<TargetT v

      Instances of concrete sub-types do not embody data types themselves, they simply provide standardized methods which handle the type represented by the class.

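To make the interface contract above concrete, here is a minimal sketch (not part of the generated docs) that queries one of the listed implementing classes, F32 from neureka.dtype.custom, against the signatures shown in this diff. The type parameters and the expected values in the comments are assumptions based on the Javadoc for F32 further down, namely that it models signed 32-bit floats with Float/float[] as both target and holder types.

```java
import neureka.dtype.NumericType;
import neureka.dtype.custom.F32;

public class NumericTypeIntrospection {
    public static void main(String[] args) {
        // F32 is assumed to bind the interface as NumericType<Float, float[], Float, float[]>.
        NumericType<Float, float[], Float, float[]> f32 = new F32();

        System.out.println(f32.signed());          // expected: true  (floats are signed)
        System.out.println(f32.numberOfBytes());   // expected: 4     (32 bits)
        System.out.println(f32.targetType());      // expected: class java.lang.Float
        System.out.println(f32.targetArrayType()); // expected: class [F  (float[])
        System.out.println(f32.holderType());      // expected: class java.lang.Float
    }
}
```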
-    Method Details
+    Method Detail

    signed

    boolean signed()
    This boolean value tells if the data-type represented by concrete instances of implementations of this interface is signed!
    Returns:
    The truth value which defines if the represented data-type is signed.

    numberOfBytes

    int numberOfBytes()
    Returns:
    The number of bytes which it takes to represent the data-type.

    targetType

-    Class<TargetType> targetType()
+    java.lang.Class<TargetType> targetType()
    The target type is the targeted JVM data-type which can represent the holder type. This method returns the class object of this target type.
    Returns:
    The targeted JVM data-type class which can represent the holder type.

    targetArrayType

-    Class<TargetArrayType> targetArrayType()
+    java.lang.Class<TargetArrayType> targetArrayType()
    The target type is the targeted JVM data-type which can represent the holder type. This method returns the class object of an array of this target type.
    Returns:
    The targeted JVM data-type array class which can represent the holder array type.

    holderType

-    Class<HolderType> holderType()
+    java.lang.Class<HolderType> holderType()
    The holder type is the JVM type which can hold the data but not necessarily represent it (int can't represent uint). This method returns the class object of this holder type.
    Returns:
    The holder type class which can hold the data type but not necessarily represent it (int can't represent uint).

    holderArrayType

-    Class<HolderArrayType> holderArrayType()
+    java.lang.Class<HolderArrayType> holderArrayType()
    The holder array type is the JVM type which can hold the data but not necessarily represent it (int[] can't represent uint[]). This method returns the array class object of an array of this holder type.
    Returns:
    The holder array type class which can hold the data type array but not necessarily represent it (int[] can't represent uint[]).

    getNumericTypeTarget

+    java.lang.Class<NumericType<TargetType,TargetArrayType,TargetType,TargetArrayType>> getNumericTypeTarget()
    This method returns the NumericType representation of the target type of this class. An example would be UI32 (32 bit integer), which can be represented by the NumericType implementation I64 (64 bit integer). If this NumericType representation can represent itself, meaning its target type is also the holder type as for instance in I32, then this method will simply return its own class!
    Returns:
    The NumericType representation of the target type class which can represent the data type of this class.

    foreignHolderBytesToTarget

    TargetType foreignHolderBytesToTarget(byte[] bytes)
    Parameters:
    bytes - The raw bytes of the holder type.
    Returns:
    An instance of a target type built based on the provided holder bytes.

    toTarget

    TargetType toTarget(HolderType original)
    Parameters:
    original - An instance of a holder type which ought to be converted to an instance of the target type.
    Returns:
    An instance of the target type converted from the provided holder type.

    targetToForeignHolderBytes

    byte[] targetToForeignHolderBytes(TargetType number)
    Parameters:
    number - An instance of a target type which ought to be converted to holder bytes.
    Returns:
    Holder bytes converted from an instance of the target type.

    readAndConvertForeignDataFrom

-    TargetArrayType readAndConvertForeignDataFrom(DataInput stream, int size) throws IOException
+    TargetArrayType readAndConvertForeignDataFrom(java.io.DataInput stream, int size) throws java.io.IOException
    This method expects the provided stream to produce bytes which can be read as holder type elements. It then ought to convert these to an array of target types of the specified size.
    Parameters:
    stream - A DataInput stream whose data ought to be read as holder type elements.
    size - The number of elements which ought to be read.
    Returns:
    An array of target types converted from the holder type elements read from the stream.
    Throws:
-    IOException - If reading from the stream was not successful.
+    java.io.IOException - If reading from the stream was not successful.

    readAndConvertForeignDataFrom

-    <T> TargetArrayType readAndConvertForeignDataFrom(Iterator<T> iterator, int size)
+    <T> TargetArrayType readAndConvertForeignDataFrom(java.util.Iterator<T> iterator, int size)
    This method expects the provided iterator to return elements which can be read as holder type elements. It then ought to convert these to an array of target types of the specified size.
    Type Parameters:
    T - The generic type parameter for the iterator.
    Parameters:
    iterator - An iterator whose elements ought to be understood as holder type elements.
    size - The number of elements which ought to be read.
    Returns:
    An array of target types converted from the holder type elements read from the iterator.

    readForeignDataFrom

-    HolderArrayType readForeignDataFrom(DataInput stream, int size) throws IOException
+    HolderArrayType readForeignDataFrom(java.io.DataInput stream, int size) throws java.io.IOException
    This method expects the provided stream to produce bytes which can be read as target type elements. It then ought to convert these to an array of holder types of the specified size.
    Parameters:
    stream - A DataInput stream whose data ought to be read as target type elements.
    size - The number of elements which ought to be read.
    Returns:
    An array of holder types converted from the target type elements read from the stream.
    Throws:
-    IOException - If reading from the stream was not successful.
+    java.io.IOException - If reading from the stream was not successful.

    readForeignDataFrom

-    <T> HolderArrayType readForeignDataFrom(Iterator<T> iterator, int size)
+    <T> HolderArrayType readForeignDataFrom(java.util.Iterator<T> iterator, int size)
    This method expects the provided iterator to return elements which can be read as holder type elements. It then writes them into an array of holder types of the specified size.
    Type Parameters:
    T - The generic type parameter for the iterator.
    Parameters:
    iterator - An iterator whose elements ought to be understood as holder type elements.
    size - The number of elements which ought to be read and then written into an array.
    Returns:
    An array of holder types populated by holder elements read from the iterator.

    writeDataTo

-    void writeDataTo(DataOutput stream, Iterator<TargetType> iterator) throws IOException
+    void writeDataTo(java.io.DataOutput stream, java.util.Iterator<TargetType> iterator) throws java.io.IOException
    This method writes all the target type elements returned by the provided iterator into the provided "DataOutput" stream as bytes.
    Parameters:
    stream - The output stream which ought to be fed with bytes of the elements provided by the iterator.
    iterator - The iterator whose returned elements ought to be translated into bytes for the output stream.
    Throws:
-    IOException - An IOException forcing the caller to handle the case where the conversion fails.
+    java.io.IOException - An IOException forcing the caller to handle the case where the conversion fails.

    convertToHolder

-    HolderType convertToHolder(Object from)
+    HolderType convertToHolder(java.lang.Object from)
    This method is a generic converter from any object to an instance of the HolderType parameter specified by an implementation of this interface.
    Parameters:
    from - The object which ought to be converted to an instance of the holder type.
    Returns:
    An instance of the holder type based on the conversion of the provided object.

    convertToHolderArray

-    HolderArrayType convertToHolderArray(Object from)
+    HolderArrayType convertToHolderArray(java.lang.Object from)
    This method is a generic converter from any object to an instance of the HolderArrayType parameter specified by an implementation of this interface.
    Parameters:
    from - The object which ought to be converted to an instance of the holder array type.
    Returns:
    An instance of the holder array type based on the conversion of the provided object.

    convertToTarget

-    TargetType convertToTarget(Object from)
+    TargetType convertToTarget(java.lang.Object from)
    This method is a generic converter from any object to an instance of the TargetType parameter specified by an implementation of this interface.
    Parameters:
    from - The object which ought to be converted to an instance of the target type.
    Returns:
    An instance of the target type based on the conversion of the provided object.

    convertToTargetArray

-    TargetArrayType convertToTargetArray(Object from)
+    TargetArrayType convertToTargetArray(java.lang.Object from)
    This method is a generic converter from any object to an instance of the TargetArrayType parameter specified by an implementation of this interface.
    Parameters:
    from - The object which ought to be converted to an instance of the target array type.
    Returns:
    An instance of the target array type based on the conversion of the provided object.
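The read and write methods documented above can be combined into a simple round trip. The following sketch is illustrative only; it assumes that for F32 the holder and target byte layouts coincide (both are plain 32-bit floats), so bytes produced by writeDataTo can be read back with readAndConvertForeignDataFrom, and the expected output in the comment is an assumption rather than recorded program output.

```java
import neureka.dtype.custom.F32;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.Arrays;

public class NumericTypeStreamRoundTrip {
    public static void main(String[] args) throws IOException {
        F32 f32 = new F32();

        // Write two target type elements (floats) into an in-memory DataOutput stream.
        ByteArrayOutputStream buffer = new ByteArrayOutputStream();
        f32.writeDataTo(new DataOutputStream(buffer), Arrays.asList(1.5f, -2.0f).iterator());

        // Read the same bytes back as a target array of the given size.
        float[] readBack = f32.readAndConvertForeignDataFrom(
                new DataInputStream(new ByteArrayInputStream(buffer.toByteArray())), 2);

        System.out.println(Arrays.toString(readBack)); // expected: [1.5, -2.0]
    }
}
```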
diff --git a/docs/jdocs/neureka/dtype/custom/F32.html b/docs/jdocs/neureka/dtype/custom/F32.html
index e9c75249b..ddfafc57e 100644
--- a/docs/jdocs/neureka/dtype/custom/F32.html
+++ b/docs/jdocs/neureka/dtype/custom/F32.html
@@ -1,540 +1,722 @@
-F32 (neureka 1.0.0 API)
+F32 (neureka 1.0.1 API)

    Package neureka.dtype.custom
    Class F32
    java.lang.Object
        neureka.dtype.custom.F32

+    public final class F32
+    extends java.lang.Object

    Constructor Detail

    public F32()

    Method Detail (descriptions copied from interface: NumericType)

    public boolean signed()
    public int numberOfBytes()
-    public Class<Float> targetType()
+    public java.lang.Class<java.lang.Float> targetType()
-    public Class<float[]> targetArrayType()
+    public java.lang.Class<float[]> targetArrayType()
-    public Class<Float> holderType()
+    public java.lang.Class<java.lang.Float> holderType()
-    public Class<float[]> holderArrayType()
+    public java.lang.Class<float[]> holderArrayType()
-    public Float foreignHolderBytesToTarget(byte[] bytes)
+    public java.lang.Float foreignHolderBytesToTarget(byte[] bytes)
-    public Float toTarget(Float original)
+    public java.lang.Float toTarget(java.lang.Float original)
-    public byte[] targetToForeignHolderBytes(Float number)
+    public byte[] targetToForeignHolderBytes(java.lang.Float number)
-    public float[] readAndConvertForeignDataFrom(DataInput stream, int size) throws IOException
+    public float[] readAndConvertForeignDataFrom(java.io.DataInput stream, int size) throws java.io.IOException
-    public <T> float[] readAndConvertForeignDataFrom(Iterator<T> iterator, int size)
+    public <T> float[] readAndConvertForeignDataFrom(java.util.Iterator<T> iterator, int size)
-    public float[] readForeignDataFrom(DataInput stream, int size) throws IOException
+    public float[] readForeignDataFrom(java.io.DataInput stream, int size) throws java.io.IOException
-    public <T> float[] readForeignDataFrom(Iterator<T> iterator, int size)
+    public <T> float[] readForeignDataFrom(java.util.Iterator<T> iterator, int size)
-    public Float convertToHolder(Object from)
+    public java.lang.Float convertToHolder(java.lang.Object from)
-    public float[] convertToHolderArray(Object from)
+    public float[] convertToHolderArray(java.lang.Object from)
-    public Float convertToTarget(Object from)
+    public java.lang.Float convertToTarget(java.lang.Object from)
-    public float[] convertToTargetArray(Object from)
+    public float[] convertToTargetArray(java.lang.Object from)
-    public Class<NumericType<Float,float[],Float,float[]>> getNumericTypeTarget()
+    public java.lang.Class<NumericType<TargetType,TargetArrayType,TargetType,TargetArrayType>> getNumericTypeTarget()
-    public void writeDataTo(DataOutput stream, Iterator<Float> iterator) throws IOException
+    public void writeDataTo(java.io.DataOutput stream, java.util.Iterator<TargetType> iterator) throws java.io.IOException
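The byte-level conversion pair documented for F32 can be sketched as below. This is a hypothetical example against the documented signatures; it assumes targetToForeignHolderBytes and foreignHolderBytesToTarget are inverse operations and that the returned byte count matches numberOfBytes(), so the expected values in the comments are assumptions, not recorded output.

```java
import neureka.dtype.custom.F32;

public class F32ByteRoundTrip {
    public static void main(String[] args) {
        F32 f32 = new F32();

        // Turn a single target value into its raw holder bytes ...
        byte[] raw = f32.targetToForeignHolderBytes(3.14f);
        System.out.println(raw.length);   // expected: 4, matching numberOfBytes()

        // ... and rebuild the target value from those bytes.
        Float restored = f32.foreignHolderBytesToTarget(raw);
        System.out.println(restored);     // expected: 3.14
    }
}
```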
diff --git a/docs/jdocs/neureka/dtype/custom/F64.html b/docs/jdocs/neureka/dtype/custom/F64.html
index e8d90c2ba..ae8bcb6ea 100644
--- a/docs/jdocs/neureka/dtype/custom/F64.html
+++ b/docs/jdocs/neureka/dtype/custom/F64.html
@@ -1,540 +1,722 @@
-F64 (neureka 1.0.0 API)
+F64 (neureka 1.0.1 API)

    Package neureka.dtype.custom
    Class F64
    java.lang.Object
        neureka.dtype.custom.F64

+    public final class F64
+    extends java.lang.Object

    Constructor Detail

    public F64()

    Method Detail (descriptions copied from interface: NumericType)

    public boolean signed()
    public int numberOfBytes()
-    public Class<Double> targetType()
+    public java.lang.Class<java.lang.Double> targetType()
-    public Class<double[]> targetArrayType()
+    public java.lang.Class<double[]> targetArrayType()
-    public Class<Double> holderType()
+    public java.lang.Class<java.lang.Double> holderType()
-    public Class<double[]> holderArrayType()
+    public java.lang.Class<double[]> holderArrayType()
-    public Double foreignHolderBytesToTarget(byte[] bytes)
+    public java.lang.Double foreignHolderBytesToTarget(byte[] bytes)
-    public Double toTarget(Double original)
+    public java.lang.Double toTarget(java.lang.Double original)
-    public byte[] targetToForeignHolderBytes(Double number)
+    public byte[] targetToForeignHolderBytes(java.lang.Double number)
-    public double[] readAndConvertForeignDataFrom(DataInput stream, int size) throws IOException
+    public double[] readAndConvertForeignDataFrom(java.io.DataInput stream, int size) throws java.io.IOException
-    public <T> double[] readAndConvertForeignDataFrom(Iterator<T> iterator, int size)
+    public <T> double[] readAndConvertForeignDataFrom(java.util.Iterator<T> iterator, int size)
-    public double[] readForeignDataFrom(DataInput stream, int size) throws IOException
+    public double[] readForeignDataFrom(java.io.DataInput stream, int size) throws java.io.IOException
-    public <T> double[] readForeignDataFrom(Iterator<T> iterator, int size)
+    public <T> double[] readForeignDataFrom(java.util.Iterator<T> iterator, int size)
-    public Double convertToHolder(Object from)
+    public java.lang.Double convertToHolder(java.lang.Object from)
-    public double[] convertToHolderArray(Object from)
+    public double[] convertToHolderArray(java.lang.Object from)
-    public Double convertToTarget(Object from)
+    public java.lang.Double convertToTarget(java.lang.Object from)
-    public double[] convertToTargetArray(Object from)
+    public double[] convertToTargetArray(java.lang.Object from)
-    public Class<NumericType<Double,double[],Double,double[]>> getNumericTypeTarget()
+    public java.lang.Class<NumericType<TargetType,TargetArrayType,TargetType,TargetArrayType>> getNumericTypeTarget()
-    public void writeDataTo(DataOutput stream, Iterator<Double> iterator) throws IOException
+    public void writeDataTo(java.io.DataOutput stream, java.util.Iterator<TargetType> iterator) throws java.io.IOException
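The iterator-based read path documented for F64 can be sketched as follows. This is only an illustration against the documented signature; it assumes the implementation accepts boxed Double elements as holder values, and the expected output in the comment is an assumption.

```java
import neureka.dtype.custom.F64;

import java.util.Arrays;
import java.util.List;

public class F64IteratorRead {
    public static void main(String[] args) {
        F64 f64 = new F64();

        // Holder type elements supplied through an iterator ...
        List<Double> source = Arrays.asList(0.5, 1.25, -2.0);

        // ... are converted into a primitive target array of the requested size.
        double[] data = f64.readAndConvertForeignDataFrom(source.iterator(), 3);

        System.out.println(Arrays.toString(data)); // expected: [0.5, 1.25, -2.0]
    }
}
```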
    - + + + + diff --git a/docs/jdocs/neureka/dtype/custom/I16.html b/docs/jdocs/neureka/dtype/custom/I16.html index 506d88fd5..3362e7290 100644 --- a/docs/jdocs/neureka/dtype/custom/I16.html +++ b/docs/jdocs/neureka/dtype/custom/I16.html @@ -1,540 +1,722 @@ - + + - -I16 (neureka 1.0.0 API) - - - - + +I16 (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class I16

    +
    neureka.dtype.custom
    +

    Class I16

    -
    java.lang.Object -
    neureka.dtype.custom.I16
    -
    -
    -
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.dtype.custom.I16
      • +
      +
    • +
    +
    +
    -
    -
      - -
    • -
      -

      Constructor Summary

      -
      Constructors
      -
      -
      Constructor
      -
      Description
      -
      I16()
      -
       
      +
      +
      public final class I16
      +extends java.lang.Object
      +
    • +
    - +
    + - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        I16

        -
        public I16()
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            I16

            +
            public I16()
          -
    • +
    -
  • -
    -

    Method Details

    -
      -
    • -
      -

      signed

      -
      public boolean signed()
      -
      Description copied from interface: NumericType
      +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          signed

          +
          public boolean signed()
          +
          Description copied from interface: NumericType
          This boolean value tells if the data-type represented by concrete instances of implementations of this interface is signed!
          -
          -
          Returns:
          +
          +
          Returns:
          The truth value which defines if the represented data-type is signed.
          -
    • -
    • -
      -

      numberOfBytes

      -
      public int numberOfBytes()
      -
      -
      Returns:
      +
    + + + +
      +
    • +

      numberOfBytes

      +
      public int numberOfBytes()
      +
      +
      Returns:
      The number of bytes which it takes to represent the data-type.
      -
  • -
  • -
    -

    targetType

    -
    public Class<Short> targetType()
    -
    Description copied from interface: NumericType
    + + + + +
      +
    • +

      targetType

      +
      public java.lang.Class<java.lang.Short> targetType()
      +
      Description copied from interface: NumericType
      The target type is the targeted JVM data-type which can represent the holder type. This method returns the class object of this target type.
      -
      -
      Returns:
      +
      +
      Returns:
      The targeted JVM data-type class which can represent the holder type.
      -
  • -
  • -
    -

    targetArrayType

    -
    public Class<short[]> targetArrayType()
    -
    Description copied from interface: NumericType
    + + + + +
      +
    • +

      targetArrayType

      +
      public java.lang.Class<short[]> targetArrayType()
      +
      Description copied from interface: NumericType
      The target type is the targeted JVM data-type which can represent the holder type. This method returns the class object of an array of this target type.
      -
      -
      Returns:
      +
      +
      Returns:
      The targeted JVM data-type array class which can represent the holder array type.
      -
  • -
  • -
    -

    holderType

    -
    public Class<Short> holderType()
    -
    Description copied from interface: NumericType
    + + + + +
      +
    • +

      holderType

      +
      public java.lang.Class<java.lang.Short> holderType()
      +
      Description copied from interface: NumericType
      The holder type is the JVM type which can hold the data but not necessarily represent it (int cant represent uint). This method returns the class object of this holder type.
      -
      -
      Returns:
      +
      +
      Returns:
      The holder type class which can hold the data type but not necessarily represent it (int cant represent uint).
      -
  • -
  • -
    -

    holderArrayType

    -
    public Class<short[]> holderArrayType()
    -
    Description copied from interface: NumericType
    + + + + +
      +
    • +

      holderArrayType

      +
      public java.lang.Class<short[]> holderArrayType()
      +
      Description copied from interface: NumericType
      The holder array type is the JVM type which can hold the data but not necessarily represent it (int[] cant represent uint[]). This method returns the array class object of an array of this holder type.
      -
      -
      Returns:
      +
      +
      Returns:
      The holder array type class which can hold the data type array but not necessarily represent it (int[] cant represent uint[]).
      -
  • -
  • -
    -

    foreignHolderBytesToTarget

    -
    public Short foreignHolderBytesToTarget(byte[] bytes)
    -
    -
    Parameters:
    + + + + +
      +
    • +

      foreignHolderBytesToTarget

      +
      public java.lang.Short foreignHolderBytesToTarget(byte[] bytes)
      +
      +
      Parameters:
      bytes - The raw bytes of the holder type.
      -
      Returns:
      +
      Returns:
      An instance of a target type built based on the provided holder bytes.
      -
  • -
  • -
    -

    toTarget

    -
    public Short toTarget(Short original)
    -
    -
    Parameters:
    + + + + +
      +
    • +

      toTarget

      +
      public java.lang.Short toTarget(java.lang.Short original)
      +
      +
      Parameters:
      original - An instance of a holder type which ought to be converted to an instance of the target type.
      -
      Returns:
      +
      Returns:
      An instance of the target type converted from the provided holder type.
      -
  • -
  • -
    -

    targetToForeignHolderBytes

    -
    public byte[] targetToForeignHolderBytes(Short number)
    -
    -
    Parameters:
    + + + + +
      +
    • +

      targetToForeignHolderBytes

      +
      public byte[] targetToForeignHolderBytes(java.lang.Short number)
      +
      +
      Parameters:
      number - An instance of a target type which ought to be converted to holder bytes.
      -
      Returns:
      +
      Returns:
      Holder bytes converted from an instance of the target type.

readAndConvertForeignDataFrom

-public short[] readAndConvertForeignDataFrom(DataInput stream, int size) throws IOException
+public short[] readAndConvertForeignDataFrom(java.io.DataInput stream,
+                                             int size)
+                                      throws java.io.IOException

Description copied from interface: NumericType
This method expects the provided stream to spit out bytes which can be read as holder type elements. It then ought to convert these to an array of target types of the specified size.

Parameters:
stream - A DataInput stream whose data ought to be read as holder type elements.
size - The number of elements which ought to be read.
Returns:
An array of target types converted from the holder type elements read from the stream.
Throws:
java.io.IOException - If reading from the stream was not successful.
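
For instance, a DataInput backed by an in-memory buffer can be decoded into a short[]. The sketch below uses only the signature above plus standard JDK stream classes; whether the bytes produced by DataOutputStream.writeShort match the layout I16 expects is an assumption on my part.

    import neureka.dtype.custom.I16;
    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;
    import java.util.Arrays;

    public class I16StreamRead {
        public static void main(String[] args) throws IOException {
            // Write three 16 bit values as raw bytes...
            ByteArrayOutputStream buffer = new ByteArrayOutputStream();
            DataOutputStream out = new DataOutputStream(buffer);
            out.writeShort(1);
            out.writeShort(2);
            out.writeShort(3);

            // ...and read them back through the numeric type:
            DataInputStream in = new DataInputStream(new ByteArrayInputStream(buffer.toByteArray()));
            short[] targets = new I16().readAndConvertForeignDataFrom(in, 3);
            System.out.println(Arrays.toString(targets)); // expected: [1, 2, 3] if the byte-order assumption holds
        }
    }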

readAndConvertForeignDataFrom

-public <T> short[] readAndConvertForeignDataFrom(Iterator<T> iterator, int size)
+public <T> short[] readAndConvertForeignDataFrom(java.util.Iterator<T> iterator,
+                                                 int size)

Description copied from interface: NumericType
This method expects the provided iterator to return elements which can be read as holder type elements. It then ought to convert these to an array of target types of the specified size.

Type Parameters:
T - The generic type parameter for the iterator.
Parameters:
iterator - An iterator whose elements ought to be understood as holder type elements.
size - The number of elements which ought to be read.
Returns:
An array of target types converted from the holder type elements read from the iterator.

readForeignDataFrom

-public short[] readForeignDataFrom(DataInput stream, int size) throws IOException
+public short[] readForeignDataFrom(java.io.DataInput stream,
+                                   int size)
+                            throws java.io.IOException

Description copied from interface: NumericType
This method expects the provided stream to spit out bytes which can be read as target type elements. It then ought to convert these to an array of holder types of the specified size.

Parameters:
stream - A DataInput stream whose data ought to be read as target type elements.
size - The number of elements which ought to be read.
Returns:
An array of holder types converted from the target type elements read from the stream.
Throws:
java.io.IOException - If reading from the stream was not successful.

readForeignDataFrom

-public <T> short[] readForeignDataFrom(Iterator<T> iterator, int size)
+public <T> short[] readForeignDataFrom(java.util.Iterator<T> iterator,
+                                       int size)

Description copied from interface: NumericType
This method expects the provided iterator to return elements which can be read as holder type elements. It then will write them into an array of holder types of the specified size.

Type Parameters:
T - The generic type parameter for the iterator.
Parameters:
iterator - An iterator whose elements ought to be understood as holder type elements.
size - The number of elements which ought to be read and then written into an array.
Returns:
An array of holder types populated by holder elements read from the iterator.
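
The iterator variants need no stream at all. A small sketch, assuming the iterator's boxed Short elements are accepted as holder values:

    import neureka.dtype.custom.I16;
    import java.util.Arrays;
    import java.util.List;

    public class I16IteratorRead {
        public static void main(String[] args) {
            List<Short> holders = Arrays.asList((short) 10, (short) 20, (short) 30);
            short[] array = new I16().readForeignDataFrom(holders.iterator(), 3);
            System.out.println(Arrays.toString(array)); // expected: [10, 20, 30]
        }
    }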

convertToHolder

-public Short convertToHolder(Object from)
+public java.lang.Short convertToHolder(java.lang.Object from)

Description copied from interface: NumericType
This method is a generic converter from any object to an instance of the HolderType parameter specified by an implementation of this interface.

Parameters:
from - The object which ought to be converted to an instance of the holder type.
Returns:
An instance of the holder type based on the conversion of the provided object.

convertToHolderArray

-public short[] convertToHolderArray(Object from)
+public short[] convertToHolderArray(java.lang.Object from)

Description copied from interface: NumericType
This method is a generic converter from any object to an instance of the HolderType parameter specified by an implementation of this interface.

Parameters:
from - The object which ought to be converted to an instance of the holder array type.
Returns:
An instance of the holder array type based on the conversion of the provided object.

convertToTarget

-public Short convertToTarget(Object from)
+public java.lang.Short convertToTarget(java.lang.Object from)

Description copied from interface: NumericType
This method is a generic converter from any object to an instance of the TargetType parameter specified by an implementation of this interface.

Parameters:
from - The object which ought to be converted to an instance of the target type.
Returns:
An instance of the target type based on the conversion of the provided object.

convertToTargetArray

-public short[] convertToTargetArray(Object from)
+public short[] convertToTargetArray(java.lang.Object from)

Description copied from interface: NumericType
This method is a generic converter from any object to an instance of the TargetArrayType parameter specified by an implementation of this interface.

Parameters:
from - The object which ought to be converted to an instance of the target array type.
Returns:
An instance of the target array type based on the conversion of the provided object.
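
The convertTo… methods act as loose, Object-accepting converters. The following sketch assumes, and this is not stated above, that boxed numbers and primitive arrays of a compatible numeric type are accepted as input; if the implementation is stricter, the calls would need matching Short/short[] arguments instead.

    import neureka.dtype.custom.I16;
    import java.util.Arrays;

    public class I16GenericConversion {
        public static void main(String[] args) {
            I16 i16 = new I16();
            // Assumption: a boxed Integer can be narrowed to the Short target type.
            Short single = i16.convertToTarget(42);
            // Assumption: an int[] can be narrowed to the short[] target array type.
            short[] many = i16.convertToTargetArray(new int[]{ 1, 2, 3 });
            System.out.println(single + " / " + Arrays.toString(many));
        }
    }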

getNumericTypeTarget

-public Class<NumericType<Short,short[],Short,short[]>> getNumericTypeTarget()
+public java.lang.Class<NumericType<TargetType,TargetArrayType,TargetType,TargetArrayType>> getNumericTypeTarget()

Description copied from interface: NumericType
This method returns the NumericType representation of the target type of this class. An example would be the UI32 (32 bit integer) which can be represented by the NumericType implementation I64 (64 bit integer). If this NumericType representation can represent itself, meaning its target type is also the holder type like for instance I32, then this method will simply return its own class!

Specified by:
getNumericTypeTarget in interface NumericType<TargetType,TargetArrayType,HolderType,HolderArrayType>
Returns:
The NumericType representation of the target type class which can represent the data type of this class.
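
Because I16's target type and holder type are both java.lang.Short (see targetType() and holderType() above), I16 represents itself, so by the contract just quoted the returned class is expected to be I16 itself. A quick sanity-check sketch:

    import neureka.dtype.custom.I16;

    public class I16SelfRepresentation {
        public static void main(String[] args) {
            Class<?> target = new I16().getNumericTypeTarget();
            System.out.println(target); // expected to print the I16 class itself
        }
    }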

writeDataTo

-public void writeDataTo(DataOutput stream, Iterator<Short> iterator) throws IOException
+public void writeDataTo(java.io.DataOutput stream,
+                        java.util.Iterator<TargetType> iterator)
+                 throws java.io.IOException

Description copied from interface: NumericType
This method takes all the target type elements returned by the provided iterator and writes them into the provided "DataOutput" stream as bytes.

Specified by:
writeDataTo in interface NumericType<TargetType,TargetArrayType,HolderType,HolderArrayType>
Parameters:
stream - The output stream which ought to be fed with bytes of the elements provided by the iterator.
iterator - The iterator whose returned elements ought to be translated into bytes for the output stream.
Throws:
java.io.IOException - An IOException forcing the caller to handle in case the conversion fails.
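
A short end-to-end sketch: write two Short values through writeDataTo and read them back with readAndConvertForeignDataFrom. It assumes the write and read paths use the same byte layout, which is the natural expectation but not spelled out above.

    import neureka.dtype.custom.I16;
    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;
    import java.util.Arrays;

    public class I16WriteReadRoundTrip {
        public static void main(String[] args) throws IOException {
            I16 i16 = new I16();

            // Serialize two target values to an in-memory byte buffer:
            ByteArrayOutputStream buffer = new ByteArrayOutputStream();
            i16.writeDataTo(new DataOutputStream(buffer), Arrays.asList((short) 7, (short) 8).iterator());

            // Deserialize them again:
            short[] back = i16.readAndConvertForeignDataFrom(
                    new DataInputStream(new ByteArrayInputStream(buffer.toByteArray())), 2);
            System.out.println(Arrays.toString(back)); // expected: [7, 8]
        }
    }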

diff --git a/docs/jdocs/neureka/dtype/custom/I32.html b/docs/jdocs/neureka/dtype/custom/I32.html
index 05d09491c..f3049c269 100644
--- a/docs/jdocs/neureka/dtype/custom/I32.html
+++ b/docs/jdocs/neureka/dtype/custom/I32.html
@@ -1,540 +1,722 @@
-I32 (neureka 1.0.0 API)
+I32 (neureka 1.0.1 API)

neureka.dtype.custom
Class I32

java.lang.Object
    neureka.dtype.custom.I32

public final class I32
extends java.lang.Object

Constructor Detail

I32()

Method Detail

The 1.0.1 signatures are listed below. The 1.0.0 versions differ only cosmetically: type names were not fully qualified (e.g. Class<Integer> instead of java.lang.Class<java.lang.Integer>), and getNumericTypeTarget()/writeDataTo(...) spelled out the concrete type arguments (Integer, int[]) instead of the TargetType/TargetArrayType type variables. The per-method descriptions, parameters, return values and exceptions are copied from the NumericType interface and match those reproduced for I16 above, with Integer/int[] in place of Short/short[].

public boolean signed()
public int numberOfBytes()
public java.lang.Class<java.lang.Integer> targetType()
public java.lang.Class<int[]> targetArrayType()
public java.lang.Class<java.lang.Integer> holderType()
public java.lang.Class<int[]> holderArrayType()
public java.lang.Integer foreignHolderBytesToTarget(byte[] bytes)
public java.lang.Integer toTarget(java.lang.Integer original)
public byte[] targetToForeignHolderBytes(java.lang.Integer number)
public int[] readAndConvertForeignDataFrom(java.io.DataInput stream, int size) throws java.io.IOException
public <T> int[] readAndConvertForeignDataFrom(java.util.Iterator<T> iterator, int size)
public int[] readForeignDataFrom(java.io.DataInput stream, int size) throws java.io.IOException
public <T> int[] readForeignDataFrom(java.util.Iterator<T> iterator, int size)
public java.lang.Integer convertToHolder(java.lang.Object from)
public int[] convertToHolderArray(java.lang.Object from)
public java.lang.Integer convertToTarget(java.lang.Object from)
public int[] convertToTargetArray(java.lang.Object from)
public java.lang.Class<NumericType<TargetType,TargetArrayType,TargetType,TargetArrayType>> getNumericTypeTarget()
public void writeDataTo(java.io.DataOutput stream, java.util.Iterator<TargetType> iterator) throws java.io.IOException
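
The usage pattern for I32 mirrors the I16 sketches above, just with Integer and int[]. A minimal sketch, with the same byte-order caveat as before:

    import neureka.dtype.custom.I32;
    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;
    import java.util.Arrays;

    public class I32RoundTrip {
        public static void main(String[] args) throws IOException {
            I32 i32 = new I32();

            // Write three Integer targets as bytes and read them back as an int[]:
            ByteArrayOutputStream buffer = new ByteArrayOutputStream();
            i32.writeDataTo(new DataOutputStream(buffer), Arrays.asList(100, 200, 300).iterator());

            int[] back = i32.readAndConvertForeignDataFrom(
                    new DataInputStream(new ByteArrayInputStream(buffer.toByteArray())), 3);
            System.out.println(Arrays.toString(back)); // expected: [100, 200, 300]
        }
    }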

diff --git a/docs/jdocs/neureka/dtype/custom/I64.html b/docs/jdocs/neureka/dtype/custom/I64.html
index 55068a8b2..c84a942f1 100644
--- a/docs/jdocs/neureka/dtype/custom/I64.html
+++ b/docs/jdocs/neureka/dtype/custom/I64.html
@@ -1,540 +1,722 @@
-I64 (neureka 1.0.0 API)
+I64 (neureka 1.0.1 API)

neureka.dtype.custom
Class I64

java.lang.Object
    neureka.dtype.custom.I64

public final class I64
extends java.lang.Object

Constructor Detail

I64()

Method Detail

The 1.0.1 signatures are listed below. The 1.0.0 versions differ only cosmetically: type names were not fully qualified (e.g. Class<Long> instead of java.lang.Class<java.lang.Long>), and getNumericTypeTarget()/writeDataTo(...) spelled out the concrete type arguments (Long, long[]) instead of the TargetType/TargetArrayType type variables. The per-method descriptions, parameters, return values and exceptions are copied from the NumericType interface and match those reproduced for I16 above, with Long/long[] in place of Short/short[].

public boolean signed()
public int numberOfBytes()
public java.lang.Class<java.lang.Long> targetType()
public java.lang.Class<long[]> targetArrayType()
public java.lang.Class<java.lang.Long> holderType()
public java.lang.Class<long[]> holderArrayType()
public java.lang.Long foreignHolderBytesToTarget(byte[] bytes)
public java.lang.Long toTarget(java.lang.Long original)
public byte[] targetToForeignHolderBytes(java.lang.Long number)
public long[] readAndConvertForeignDataFrom(java.io.DataInput stream, int size) throws java.io.IOException
public <T> long[] readAndConvertForeignDataFrom(java.util.Iterator<T> iterator, int size)
public long[] readForeignDataFrom(java.io.DataInput stream, int size) throws java.io.IOException
public <T> long[] readForeignDataFrom(java.util.Iterator<T> iterator, int size)
public java.lang.Long convertToHolder(java.lang.Object from)
public long[] convertToHolderArray(java.lang.Object from)
public java.lang.Long convertToTarget(java.lang.Object from)
public long[] convertToTargetArray(java.lang.Object from)
public java.lang.Class<NumericType<TargetType,TargetArrayType,TargetType,TargetArrayType>> getNumericTypeTarget()
public void writeDataTo(java.io.DataOutput stream, java.util.Iterator<TargetType> iterator) throws java.io.IOException

diff --git a/docs/jdocs/neureka/dtype/custom/I8.html b/docs/jdocs/neureka/dtype/custom/I8.html
index 6f0d6ce0d..974403b28 100644
--- a/docs/jdocs/neureka/dtype/custom/I8.html
+++ b/docs/jdocs/neureka/dtype/custom/I8.html
@@ -1,542 +1,724 @@
-I8 (neureka 1.0.0 API)
+I8 (neureka 1.0.1 API)

neureka.dtype.custom
Class I8

java.lang.Object
    neureka.dtype.custom.I8

All Implemented Interfaces:
-NumericType<Byte,byte[],Byte,byte[]>
+NumericType<java.lang.Byte,byte[],java.lang.Byte,byte[]>

public final class I8
extends java.lang.Object

The following abstract class implements some basic logic which is applicable across all final concrete classes extending this abstract one.

Constructor Detail

I8()

Method Detail

The 1.0.1 signatures are listed below. The 1.0.0 versions differ only cosmetically: type names were not fully qualified (e.g. Class<Byte> instead of java.lang.Class<java.lang.Byte>), and getNumericTypeTarget() spelled out the concrete type arguments (Byte, byte[]) instead of the TargetType/TargetArrayType type variables. The per-method descriptions, parameters, return values and exceptions are copied from the NumericType interface and match those reproduced for I16 above, with Byte/byte[] in place of Short/short[].

public boolean signed()
public int numberOfBytes()
public java.lang.Class<java.lang.Byte> targetType()
public java.lang.Class<byte[]> targetArrayType()
public java.lang.Class<java.lang.Byte> holderType()
public java.lang.Class<byte[]> holderArrayType()
public java.lang.Byte foreignHolderBytesToTarget(byte[] bytes)
public java.lang.Byte toTarget(java.lang.Byte original)
public byte[] targetToForeignHolderBytes(java.lang.Byte number)
public byte[] readAndConvertForeignDataFrom(java.io.DataInput stream, int size) throws java.io.IOException
public <T> byte[] readAndConvertForeignDataFrom(java.util.Iterator<T> iterator, int size)
public byte[] readForeignDataFrom(java.io.DataInput stream, int size) throws java.io.IOException
public <T> byte[] readForeignDataFrom(java.util.Iterator<T> iterator, int size)
public java.lang.Byte convertToHolder(java.lang.Object from)
public byte[] convertToHolderArray(java.lang.Object from)
public java.lang.Byte convertToTarget(java.lang.Object from)
public byte[] convertToTargetArray(java.lang.Object from)
public java.lang.Class<NumericType<TargetType,TargetArrayType,TargetType,TargetArrayType>> getNumericTypeTarget()
      +
      Specified by:
      +
      getNumericTypeTarget in interface NumericType<TargetType,TargetArrayType,HolderType,HolderArrayType>
      +
      Returns:
      The NumericType representation of the target type class which can represent the data type of this class.
      -
writeDataTo

-public void writeDataTo(DataOutput stream,
-                        Iterator<Byte> iterator)
-                 throws IOException
+public void writeDataTo(java.io.DataOutput stream,
+                        java.util.Iterator<TargetType> iterator)
+                 throws java.io.IOException

Description copied from interface: NumericType
This method takes all the target type elements returned by the provided iterator and writes them into the provided "DataOutput" stream as bytes.

Specified by:
writeDataTo in interface NumericType<TargetType,TargetArrayType,HolderType,HolderArrayType>
Parameters:
stream - The output stream which ought to be fed with bytes of the elements provided by the iterator.
iterator - The iterator whose returned elements ought to be translated into bytes for the output stream.
Throws:
java.io.IOException - An IOException forcing the caller to handle in case the conversion fails.
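The write direction admits an equally small sketch: drain the iterator and push each Byte into the DataOutput. This is a JDK-only illustration of the contract, not the library's code.

    import java.io.ByteArrayOutputStream;
    import java.io.DataOutput;
    import java.io.DataOutputStream;
    import java.io.IOException;
    import java.util.Arrays;
    import java.util.Iterator;

    public class WriteDataSketch {
        // Writes every Byte produced by the iterator into the stream, one byte at a time.
        static void writeDataTo(DataOutput stream, Iterator<Byte> iterator) throws IOException {
            while (iterator.hasNext()) {
                stream.writeByte(iterator.next());
            }
        }

        public static void main(String[] args) throws IOException {
            ByteArrayOutputStream sink = new ByteArrayOutputStream();
            writeDataTo(new DataOutputStream(sink), Arrays.asList((byte) 1, (byte) 2).iterator());
            System.out.println(sink.size()); // 2
        }
    }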
      -
diff --git a/docs/jdocs/neureka/dtype/custom/UI16.html b/docs/jdocs/neureka/dtype/custom/UI16.html
index 1da34f8b1..69e83637d 100644
--- a/docs/jdocs/neureka/dtype/custom/UI16.html
+++ b/docs/jdocs/neureka/dtype/custom/UI16.html
@@ -1,540 +1,722 @@
-UI16 (neureka 1.0.0 API)
+UI16 (neureka 1.0.1 API)

    Class UI16

    +
    neureka.dtype.custom
    +

    Class UI16

    -
    java.lang.Object -
    neureka.dtype.custom.UI16
    -
    -
    -
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.dtype.custom.UI16
      • +
      +
    • +
    +
    +
    -
    -
      - -
    • -
      -

      Constructor Summary

      -
      Constructors
      -
      -
      Constructor
      -
      Description
      - -
       
      +
      +
      public final class UI16
      +extends java.lang.Object
      +
    • +
    - +
    + - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        UI16

        -
        public UI16()
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            UI16

            +
            public UI16()
          -
    • +
    -
  • -
    -

    Method Details

    -
      -
    • -
      -

      signed

      -
      public boolean signed()
      -
      Description copied from interface: NumericType
      +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          signed

          +
          public boolean signed()
          +
          Description copied from interface: NumericType
          This boolean value tells if the data-type represented by concrete instances of implementations of this interface is signed!
          -
          -
          Returns:
          +
          +
          Returns:
          The truth value which defines if the represented data-type is signed.
          -
    • -
    • -
      -

      numberOfBytes

      -
      public int numberOfBytes()
      -
      -
      Returns:
      +
    + + + +
      +
    • +

      numberOfBytes

      +
      public int numberOfBytes()
      +
      +
      Returns:
      The number of bytes which it takes to represent the data-type.
      -
  • -
  • -
    -

    targetType

    -
    public Class<Integer> targetType()
    -
    Description copied from interface: NumericType
    + + + + +
      +
    • +

      targetType

      +
      public java.lang.Class<java.lang.Integer> targetType()
      +
      Description copied from interface: NumericType
      The target type is the targeted JVM data-type which can represent the holder type. This method returns the class object of this target type.
      -
      -
      Returns:
      +
      +
      Returns:
      The targeted JVM data-type class which can represent the holder type.
      -
  • -
  • -
    -

    targetArrayType

    -
    public Class<int[]> targetArrayType()
    -
    Description copied from interface: NumericType
    + + + + +
      +
    • +

      targetArrayType

      +
      public java.lang.Class<int[]> targetArrayType()
      +
      Description copied from interface: NumericType
      The target type is the targeted JVM data-type which can represent the holder type. This method returns the class object of an array of this target type.
      -
      -
      Returns:
      +
      +
      Returns:
      The targeted JVM data-type array class which can represent the holder array type.
      -
  • -
  • -
    -

    holderType

    -
    public Class<Short> holderType()
    -
    Description copied from interface: NumericType
    + + + + +
      +
    • +

      holderType

      +
      public java.lang.Class<java.lang.Short> holderType()
      +
      Description copied from interface: NumericType
      The holder type is the JVM type which can hold the data but not necessarily represent it (int cant represent uint). This method returns the class object of this holder type.
      -
      -
      Returns:
      +
      +
      Returns:
      The holder type class which can hold the data type but not necessarily represent it (int cant represent uint).
      -
  • -
  • -
    -

    holderArrayType

    -
    public Class<short[]> holderArrayType()
    -
    Description copied from interface: NumericType
    + + + + +
      +
    • +

      holderArrayType

      +
      public java.lang.Class<short[]> holderArrayType()
      +
      Description copied from interface: NumericType
      The holder array type is the JVM type which can hold the data but not necessarily represent it (int[] cant represent uint[]). This method returns the array class object of an array of this holder type.
      -
      -
      Returns:
      +
      +
      Returns:
      The holder array type class which can hold the data type array but not necessarily represent it (int[] cant represent uint[]).
      -
  • -
  • -
    -

    foreignHolderBytesToTarget

    -
    public Integer foreignHolderBytesToTarget(byte[] b)
    -
    -
    Parameters:
    + + + + +
      +
    • +

      foreignHolderBytesToTarget

      +
      public java.lang.Integer foreignHolderBytesToTarget(byte[] b)
      +
      +
      Parameters:
      b - The raw bytes of the holder type.
      -
      Returns:
      +
      Returns:
      An instance of a target type built based on the provided holder bytes.
      -
  • -
  • -
    -

    toTarget

    -
    public Integer toTarget(Short original)
    -
    -
    Parameters:
    + + + + +
      +
    • +

      toTarget

      +
      public java.lang.Integer toTarget(java.lang.Short original)
      +
      +
      Parameters:
      original - An instance of a holder type which ought to be converted to an instance of the target type.
      -
      Returns:
      +
      Returns:
      An instance of the target type converted from the provided holder type.
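For UI16 the holder is a Java short and the target is an int, so this conversion amounts to an unsigned widening. A JDK-only illustration; the library's own implementation may differ.

    public class UnsignedShortToIntSketch {
        public static void main(String[] args) {
            short holder = (short) 0xFFFF;                 // reads as -1 when treated as signed
            int target = Short.toUnsignedInt(holder);      // 65535 when treated as unsigned
            System.out.println(holder + " -> " + target);  // -1 -> 65535
        }
    }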
      -
  • -
  • -
    -

    targetToForeignHolderBytes

    -
    public byte[] targetToForeignHolderBytes(Integer number)
    -
    -
    Parameters:
    + + + + +
      +
    • +

      targetToForeignHolderBytes

      +
      public byte[] targetToForeignHolderBytes(java.lang.Integer number)
      +
      +
      Parameters:
      number - An instance of a target type which ought to be converted to holder bytes.
      -
      Returns:
      +
      Returns:
      Holder bytes converted from an instance of the target type.
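Going back from the int target to the two holder bytes is a matter of splitting off the low 16 bits; big-endian byte order is an assumption in this sketch, and toHolderBytes is an illustrative helper name.

    import java.util.Arrays;

    public class UnsignedShortToBytesSketch {
        // Keeps only the low 16 bits of the target value, high byte first (assumed order).
        static byte[] toHolderBytes(int number) {
            return new byte[]{ (byte) (number >>> 8), (byte) number };
        }

        public static void main(String[] args) {
            System.out.println(Arrays.toString(toHolderBytes(65535))); // [-1, -1]
        }
    }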
      -
  • -
  • -
    -

    readAndConvertForeignDataFrom

    -
-public int[] readAndConvertForeignDataFrom(DataInput stream,
-                                           int size)
-                                    throws IOException
    -
    Description copied from interface: NumericType
    + + + + +
      +
    • +

      readAndConvertForeignDataFrom

      +
      public int[] readAndConvertForeignDataFrom(java.io.DataInput stream,
      +                                           int size)
      +                                    throws java.io.IOException
      +
      Description copied from interface: NumericType
      This method expects the provided stream to spit out bytes which can be read as holder type elements. It then ought to convert these to an array of target types of the specified size.
      -
      -
      Parameters:
      +
      +
      Parameters:
      stream - A DataInput stream whose data ought to be read as holder type elements.
      size - The number of elements which ought to be read.
      -
      Returns:
      +
      Returns:
      An array of target types converted from the holder type elements read from the stream.
      -
      Throws:
      -
      IOException - If reading from the stream was not successful.
      +
      Throws:
      +
      java.io.IOException - If reading from the stream was not successful.
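Reading holder elements straight into the int target array can lean on DataInput.readUnsignedShort(), which already performs the unsigned widening; the helper below is only a sketch of the documented contract, not the library's code.

    import java.io.ByteArrayInputStream;
    import java.io.DataInput;
    import java.io.DataInputStream;
    import java.io.IOException;
    import java.util.Arrays;

    public class ReadUnsignedShortsSketch {
        // Reads `size` big-endian 16 bit values and widens each one into 0..65535.
        static int[] readAndConvert(DataInput stream, int size) throws IOException {
            int[] result = new int[size];
            for (int i = 0; i < size; i++) result[i] = stream.readUnsignedShort();
            return result;
        }

        public static void main(String[] args) throws IOException {
            byte[] raw = { (byte) 0xFF, (byte) 0xFF, 0x00, 0x01 };
            DataInput in = new DataInputStream(new ByteArrayInputStream(raw));
            System.out.println(Arrays.toString(readAndConvert(in, 2))); // [65535, 1]
        }
    }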
      -
  • -
  • -
    -

    readAndConvertForeignDataFrom

    -
    public <T> int[] readAndConvertForeignDataFrom(Iterator<T> iterator, - int size)
    -
    Description copied from interface: NumericType
    + + + + +
      +
    • +

      readAndConvertForeignDataFrom

      +
      public <T> int[] readAndConvertForeignDataFrom(java.util.Iterator<T> iterator,
      +                                               int size)
      +
      Description copied from interface: NumericType
      This method expects the provided iterator to return elements which can be read as holder type elements. It then ought to convert these to an array of target types of the specified size.
      -
      -
      Type Parameters:
      +
      +
      Type Parameters:
      T - The generic type parameter for the iterator.
      -
      Parameters:
      +
      Parameters:
      iterator - An iterator whose elements ought to be understood as holder type elements.
      size - The number of elements which ought to be read.
      -
      Returns:
      +
      Returns:
      An array of target types converted from the holder type elements read from the iterator.
      -
  • -
  • -
    -

    readForeignDataFrom

    -
-public short[] readForeignDataFrom(DataInput stream,
-                                   int size)
-                            throws IOException
    -
    Description copied from interface: NumericType
    + + + + +
      +
    • +

      readForeignDataFrom

      +
      public short[] readForeignDataFrom(java.io.DataInput stream,
      +                                   int size)
      +                            throws java.io.IOException
      +
      Description copied from interface: NumericType
      This method expects the provided stream to spit out bytes which can be read as target type elements. It then ought to convert these to an array of holder types of the specified size.
      -
      -
      Parameters:
      +
      +
      Parameters:
      stream - A DataInput stream whose data ought to be read as target type elements.
      size - The number of elements which ought to be read.
      -
      Returns:
      +
      Returns:
      An array of holder types converted from the target type elements read from the stream.
      -
      Throws:
      -
      IOException - If reading from the stream was not successful.
      +
      Throws:
      +
      java.io.IOException - If reading from the stream was not successful.
      -
  • -
  • -
    -

    readForeignDataFrom

    -
    public <T> short[] readForeignDataFrom(Iterator<T> iterator, - int size)
    -
    Description copied from interface: NumericType
    + + + + +
      +
    • +

      readForeignDataFrom

      +
      public <T> short[] readForeignDataFrom(java.util.Iterator<T> iterator,
      +                                       int size)
      +
      Description copied from interface: NumericType
      This method expects the provided iterator to return elements which can be read as holder type elements. It then will write them into an array of holder types of the specified size.
      -
      -
      Type Parameters:
      +
      +
      Type Parameters:
      T - The generic type parameter for the iterator.
      -
      Parameters:
      +
      Parameters:
      iterator - An iterator whose elements ought to be understood as holder type elements.
      size - The number of elements which ought to be read and then written into an array.
      -
      Returns:
      +
      Returns:
      An array of holder types populated by holder elements read from the iterator.
      -
  • -
  • -
    -

    convertToHolder

    -
    public Short convertToHolder(Object from)
    -
    Description copied from interface: NumericType
    + + + + +
      +
    • +

      convertToHolder

      +
      public java.lang.Short convertToHolder(java.lang.Object from)
      +
      Description copied from interface: NumericType
      This method is a generic converter from any object to an instance of the HolderType parameter specified by an implementation of this interface.
      -
      -
      Parameters:
      +
      +
      Parameters:
      from - The object which ought to be converted to an instance of the holder type.
      -
      Returns:
      +
      Returns:
      An instance of the holder type based on the conversion of the provided object.
      -
  • -
  • -
    -

    convertToHolderArray

    -
    public short[] convertToHolderArray(Object from)
    -
    Description copied from interface: NumericType
    + + + + +
      +
    • +

      convertToHolderArray

      +
      public short[] convertToHolderArray(java.lang.Object from)
      +
      Description copied from interface: NumericType
      This method is a generic converter from any object to an instance of the HolderType parameter specified by an implementation of this interface.
      -
      -
      Parameters:
      +
      +
      Parameters:
      from - The object which ought to be converted to an instance of the holder array type.
      -
      Returns:
      +
      Returns:
      An instance of the holder array type based on the conversion of the provided object.
      -
  • -
  • -
    -

    convertToTarget

    -
    public Integer convertToTarget(Object from)
    -
    Description copied from interface: NumericType
    + + + + +
      +
    • +

      convertToTarget

      +
      public java.lang.Integer convertToTarget(java.lang.Object from)
      +
      Description copied from interface: NumericType
      This method is a generic converter from any object to an instance of the TargetType parameter specified by an implementation of this interface.
      -
      -
      Parameters:
      +
      +
      Parameters:
      from - The object which ought to be converted to an instance of the target type.
      -
      Returns:
      +
      Returns:
      An instance of the target type based on the conversion of the provided object.
      -
  • -
  • -
    -

    convertToTargetArray

    -
    public int[] convertToTargetArray(Object from)
    -
    Description copied from interface: NumericType
    + + + + +
      +
    • +

      convertToTargetArray

      +
      public int[] convertToTargetArray(java.lang.Object from)
      +
      Description copied from interface: NumericType
      This method is a generic converter from any object to an instance of the TargetArrayType parameter specified by an implementation of this interface.
      -
      -
      Parameters:
      +
      +
      Parameters:
      from - The object which ought to be converted to an instance of the target array type.
      -
      Returns:
      +
      Returns:
      An instance of the target array type based on the conversion of the provided object.
      -
  • -
  • -
    -

    getNumericTypeTarget

    -
    public Class<NumericType<Integer,int[],Integer,int[]>> getNumericTypeTarget()
    -
    Description copied from interface: NumericType
    + + + + +
      +
    • +

      getNumericTypeTarget

      +
      public java.lang.Class<NumericType<TargetType,TargetArrayType,TargetType,TargetArrayType>> getNumericTypeTarget()
      +
      Description copied from interface: NumericType
      This method returns the NumericType representation of the target type of this class. An example would be the UI32 (32 bit integer) which can be represented by the NumericType implementation I64 (64 bit integer). If this NumericType representation can represent itself, meaning its target type is also the holder type like for instance I32, then this method will simply return its own class!
      -
      -
      Specified by:
      -
      getNumericTypeTarget in interface NumericType<TargetType,TargetArrayType,HolderType,HolderArrayType>
      -
      Returns:
      +
      +
      Specified by:
      +
      getNumericTypeTarget in interface NumericType<TargetType,TargetArrayType,HolderType,HolderArrayType>
      +
      Returns:
      The NumericType representation of the target type class which can represent the data type of this class.
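Taken together, the accessors documented in this file can be exercised directly; the expected values in the comments follow from the signatures shown above (Class<Integer> target, Class<Short> holder) and from UI16 being an unsigned 16 bit type.

    import neureka.dtype.custom.UI16;

    public class InspectUI16 {
        public static void main(String[] args) {
            UI16 ui16 = new UI16();
            System.out.println(ui16.signed());        // expected: false
            System.out.println(ui16.numberOfBytes()); // expected: 2
            System.out.println(ui16.targetType());    // expected: class java.lang.Integer
            System.out.println(ui16.holderType());    // expected: class java.lang.Short
        }
    }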
      -
  • -
  • -
    -

    writeDataTo

    -
-public void writeDataTo(DataOutput stream,
-                        Iterator<Integer> iterator)
-                 throws IOException
    -
    Description copied from interface: NumericType
    + + + + +
      +
    • +

      writeDataTo

      +
      public void writeDataTo(java.io.DataOutput stream,
      +                        java.util.Iterator<TargetType> iterator)
      +                 throws java.io.IOException
      +
      Description copied from interface: NumericType
This method takes all the target type elements returned by the provided iterator and writes them into the provided "DataOutput" stream as bytes.
      -
      -
      Specified by:
      -
      writeDataTo in interface NumericType<TargetType,TargetArrayType,HolderType,HolderArrayType>
      -
      Parameters:
      +
      +
      Specified by:
      +
      writeDataTo in interface NumericType<TargetType,TargetArrayType,HolderType,HolderArrayType>
      +
      Parameters:
      stream - The output stream which ought to be feed with bytes of the elements provided by the iterator.
      iterator - The iterator whose returned elements ought to be translated into bytes for the output stream.
      -
      Throws:
      -
      IOException - An IOException forcing the caller to handle in case the conversion fails.
      +
      Throws:
      +
      java.io.IOException - An IOException forcing the caller to handle in case the conversion fails.
      -
diff --git a/docs/jdocs/neureka/dtype/custom/UI32.html b/docs/jdocs/neureka/dtype/custom/UI32.html
index 90194d8c3..068108a8e 100644
--- a/docs/jdocs/neureka/dtype/custom/UI32.html
+++ b/docs/jdocs/neureka/dtype/custom/UI32.html
@@ -1,540 +1,722 @@
-UI32 (neureka 1.0.0 API)
+UI32 (neureka 1.0.1 API)

    Class UI32

    +
    neureka.dtype.custom
    +

    Class UI32

    -
    java.lang.Object -
    neureka.dtype.custom.UI32
    -
    -
    -
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.dtype.custom.UI32
      • +
      +
    • +
    +
    +
    -
    -
      - -
    • -
      -

      Constructor Summary

      -
      Constructors
      -
      -
      Constructor
      -
      Description
      - -
       
      +
      +
      public final class UI32
      +extends java.lang.Object
      +
    • +
    - +
    + - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        UI32

        -
        public UI32()
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            UI32

            +
            public UI32()
          -
    • +
    -
  • -
    -

    Method Details

    -
      -
    • -
      -

      signed

      -
      public boolean signed()
      -
      Description copied from interface: NumericType
      +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          signed

          +
          public boolean signed()
          +
          Description copied from interface: NumericType
          This boolean value tells if the data-type represented by concrete instances of implementations of this interface is signed!
          -
          -
          Returns:
          +
          +
          Returns:
          The truth value which defines if the represented data-type is signed.
          -
    • -
    • -
      -

      numberOfBytes

      -
      public int numberOfBytes()
      -
      -
      Returns:
      +
    + + + +
      +
    • +

      numberOfBytes

      +
      public int numberOfBytes()
      +
      +
      Returns:
      The number of bytes which it takes to represent the data-type.
      -
  • -
  • -
    -

    targetType

    -
    public Class<Long> targetType()
    -
    Description copied from interface: NumericType
    + + + + +
      +
    • +

      targetType

      +
      public java.lang.Class<java.lang.Long> targetType()
      +
      Description copied from interface: NumericType
      The target type is the targeted JVM data-type which can represent the holder type. This method returns the class object of this target type.
      -
      -
      Returns:
      +
      +
      Returns:
      The targeted JVM data-type class which can represent the holder type.
      -
  • -
  • -
    -

    targetArrayType

    -
    public Class<long[]> targetArrayType()
    -
    Description copied from interface: NumericType
    + + + + +
      +
    • +

      targetArrayType

      +
      public java.lang.Class<long[]> targetArrayType()
      +
      Description copied from interface: NumericType
      The target type is the targeted JVM data-type which can represent the holder type. This method returns the class object of an array of this target type.
      -
      -
      Returns:
      +
      +
      Returns:
      The targeted JVM data-type array class which can represent the holder array type.
      -
  • -
  • -
    -

    holderType

    -
    public Class<Integer> holderType()
    -
    Description copied from interface: NumericType
    + + + + +
      +
    • +

      holderType

      +
      public java.lang.Class<java.lang.Integer> holderType()
      +
      Description copied from interface: NumericType
      The holder type is the JVM type which can hold the data but not necessarily represent it (int cant represent uint). This method returns the class object of this holder type.
      -
      -
      Returns:
      +
      +
      Returns:
      The holder type class which can hold the data type but not necessarily represent it (int cant represent uint).
      -
  • -
  • -
    -

    holderArrayType

    -
    public Class<int[]> holderArrayType()
    -
    Description copied from interface: NumericType
    + + + + +
      +
    • +

      holderArrayType

      +
      public java.lang.Class<int[]> holderArrayType()
      +
      Description copied from interface: NumericType
      The holder array type is the JVM type which can hold the data but not necessarily represent it (int[] cant represent uint[]). This method returns the array class object of an array of this holder type.
      -
      -
      Returns:
      +
      +
      Returns:
      The holder array type class which can hold the data type array but not necessarily represent it (int[] cant represent uint[]).
      -
  • -
  • -
    -

    foreignHolderBytesToTarget

    -
    public Long foreignHolderBytesToTarget(byte[] bytes)
    -
    -
    Parameters:
    + + + + +
      +
    • +

      foreignHolderBytesToTarget

      +
      public java.lang.Long foreignHolderBytesToTarget(byte[] bytes)
      +
      +
      Parameters:
      bytes - The raw bytes of the holder type.
      -
      Returns:
      +
      Returns:
      An instance of a target type built based on the provided holder bytes.
      -
  • -
  • -
    -

    toTarget

    -
    public Long toTarget(Integer original)
    -
    -
    Parameters:
    + + + + +
      +
    • +

      toTarget

      +
      public java.lang.Long toTarget(java.lang.Integer original)
      +
      +
      Parameters:
      original - An instance of a holder type which ought to be converted to an instance of the target type.
      -
      Returns:
      +
      Returns:
      An instance of the target type converted from the provided holder type.
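UI32 follows the same pattern one size up: the int holder is widened into the long target. A JDK-only illustration, not the library's code.

    public class UnsignedIntToLongSketch {
        public static void main(String[] args) {
            int holder = 0xFFFFFFFF;                       // reads as -1 when treated as signed
            long target = Integer.toUnsignedLong(holder);  // 4294967295 when treated as unsigned
            System.out.println(holder + " -> " + target);  // -1 -> 4294967295
        }
    }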
      -
  • -
  • -
    -

    targetToForeignHolderBytes

    -
    public byte[] targetToForeignHolderBytes(Long number)
    -
    -
    Parameters:
    + + + + +
      +
    • +

      targetToForeignHolderBytes

      +
      public byte[] targetToForeignHolderBytes(java.lang.Long number)
      +
      +
      Parameters:
      number - An instance of a target type which ought to be converted to holder bytes.
      -
      Returns:
      +
      Returns:
      Holder bytes converted from an instance of the target type.
      -
  • -
  • -
    -

    readAndConvertForeignDataFrom

    -
-public long[] readAndConvertForeignDataFrom(DataInput stream,
-                                            int size)
-                                     throws IOException
    -
    Description copied from interface: NumericType
    + + + + +
      +
    • +

      readAndConvertForeignDataFrom

      +
      public long[] readAndConvertForeignDataFrom(java.io.DataInput stream,
      +                                            int size)
      +                                     throws java.io.IOException
      +
      Description copied from interface: NumericType
      This method expects the provided stream to spit out bytes which can be read as holder type elements. It then ought to convert these to an array of target types of the specified size.
      -
      -
      Parameters:
      +
      +
      Parameters:
      stream - A DataInput stream whose data ought to be read as holder type elements.
      size - The number of elements which ought to be read.
      -
      Returns:
      +
      Returns:
      An array of target types converted from the holder type elements read from the stream.
      -
      Throws:
      -
      IOException - If reading from the stream was not successful.
      +
      Throws:
      +
      java.io.IOException - If reading from the stream was not successful.
      -
  • -
  • -
    -

    readAndConvertForeignDataFrom

    -
    public <T> long[] readAndConvertForeignDataFrom(Iterator<T> iterator, - int size)
    -
    Description copied from interface: NumericType
    + + + + +
      +
    • +

      readAndConvertForeignDataFrom

      +
      public <T> long[] readAndConvertForeignDataFrom(java.util.Iterator<T> iterator,
      +                                                int size)
      +
      Description copied from interface: NumericType
      This method expects the provided iterator to return elements which can be read as holder type elements. It then ought to convert these to an array of target types of the specified size.
      -
      -
      Type Parameters:
      +
      +
      Type Parameters:
      T - The generic type parameter for the iterator.
      -
      Parameters:
      +
      Parameters:
      iterator - An iterator whose elements ought to be understood as holder type elements.
      size - The number of elements which ought to be read.
      -
      Returns:
      +
      Returns:
      An array of target types converted from the holder type elements read from the iterator.
      -
  • -
  • -
    -

    readForeignDataFrom

    -
    public int[] readForeignDataFrom(DataInput stream, - int size) - throws IOException
    -
    Description copied from interface: NumericType
    + + + + +
      +
    • +

      readForeignDataFrom

      +
      public int[] readForeignDataFrom(java.io.DataInput stream,
      +                                 int size)
      +                          throws java.io.IOException
      +
      Description copied from interface: NumericType
      This method expects the provided stream to spit out bytes which can be read as target type elements. It then ought to convert these to an array of holder types of the specified size.
      -
      -
      Parameters:
      +
      +
      Parameters:
      stream - A DataInput stream whose data ought to be read as target type elements.
      size - The number of elements which ought to be read.
      -
      Returns:
      +
      Returns:
      An array of holder types converted from the target type elements read from the stream.
      -
      Throws:
      -
      IOException - If reading from the stream was not successful.
      +
      Throws:
      +
      java.io.IOException - If reading from the stream was not successful.
      -
  • -
  • -
    -

    readForeignDataFrom

    -
    public <T> int[] readForeignDataFrom(Iterator<T> iterator, - int size)
    -
    Description copied from interface: NumericType
    + + + + +
      +
    • +

      readForeignDataFrom

      +
      public <T> int[] readForeignDataFrom(java.util.Iterator<T> iterator,
      +                                     int size)
      +
      Description copied from interface: NumericType
      This method expects the provided iterator to return elements which can be read as holder type elements. It then will write them into an array of holder types of the specified size.
      -
      -
      Type Parameters:
      +
      +
      Type Parameters:
      T - The generic type parameter for the iterator.
      -
      Parameters:
      +
      Parameters:
      iterator - An iterator whose elements ought to be understood as holder type elements.
      size - The number of elements which ought to be read and then written into an array.
      -
      Returns:
      +
      Returns:
      An array of holder types populated by holder elements read from the iterator.
      -
  • -
  • -
    -

    convertToHolder

    -
    public Integer convertToHolder(Object from)
    -
    Description copied from interface: NumericType
    + + + + +
      +
    • +

      convertToHolder

      +
      public java.lang.Integer convertToHolder(java.lang.Object from)
      +
      Description copied from interface: NumericType
      This method is a generic converter from any object to an instance of the HolderType parameter specified by an implementation of this interface.
      -
      -
      Parameters:
      +
      +
      Parameters:
      from - The object which ought to be converted to an instance of the holder type.
      -
      Returns:
      +
      Returns:
      An instance of the holder type based on the conversion of the provided object.
      -
  • -
  • -
    -

    convertToHolderArray

    -
    public int[] convertToHolderArray(Object from)
    -
    Description copied from interface: NumericType
    + + + + +
      +
    • +

      convertToHolderArray

      +
      public int[] convertToHolderArray(java.lang.Object from)
      +
      Description copied from interface: NumericType
      This method is a generic converter from any object to an instance of the HolderType parameter specified by an implementation of this interface.
      -
      -
      Parameters:
      +
      +
      Parameters:
      from - The object which ought to be converted to an instance of the holder array type.
      -
      Returns:
      +
      Returns:
      An instance of the holder array type based on the conversion of the provided object.
      -
  • -
  • -
    -

    convertToTarget

    -
    public Long convertToTarget(Object from)
    -
    Description copied from interface: NumericType
    + + + + +
      +
    • +

      convertToTarget

      +
      public java.lang.Long convertToTarget(java.lang.Object from)
      +
      Description copied from interface: NumericType
      This method is a generic converter from any object to an instance of the TargetType parameter specified by an implementation of this interface.
      -
      -
      Parameters:
      +
      +
      Parameters:
      from - The object which ought to be converted to an instance of the target type.
      -
      Returns:
      +
      Returns:
      An instance of the target type based on the conversion of the provided object.
      -
  • -
  • -
    -

    convertToTargetArray

    -
    public long[] convertToTargetArray(Object from)
    -
    Description copied from interface: NumericType
    + + + + +
      +
    • +

      convertToTargetArray

      +
      public long[] convertToTargetArray(java.lang.Object from)
      +
      Description copied from interface: NumericType
      This method is a generic converter from any object to an instance of the TargetArrayType parameter specified by an implementation of this interface.
      -
      -
      Parameters:
      +
      +
      Parameters:
      from - The object which ought to be converted to an instance of the target array type.
      -
      Returns:
      +
      Returns:
      An instance of the target array type based on the conversion of the provided object.
      -
  • -
  • -
    -

    getNumericTypeTarget

    -
    public Class<NumericType<Long,long[],Long,long[]>> getNumericTypeTarget()
    -
    Description copied from interface: NumericType
    + + + + +
      +
    • +

      getNumericTypeTarget

      +
      public java.lang.Class<NumericType<TargetType,TargetArrayType,TargetType,TargetArrayType>> getNumericTypeTarget()
      +
      Description copied from interface: NumericType
      This method returns the NumericType representation of the target type of this class. An example would be the UI32 (32 bit integer) which can be represented by the NumericType implementation I64 (64 bit integer). If this NumericType representation can represent itself, meaning its target type is also the holder type like for instance I32, then this method will simply return its own class!
      -
      -
      Specified by:
      -
      getNumericTypeTarget in interface NumericType<TargetType,TargetArrayType,HolderType,HolderArrayType>
      -
      Returns:
      +
      +
      Specified by:
      +
      getNumericTypeTarget in interface NumericType<TargetType,TargetArrayType,HolderType,HolderArrayType>
      +
      Returns:
      The NumericType representation of the target type class which can represent the data type of this class.
      -
  • -
  • -
    -

    writeDataTo

    -
    public void writeDataTo(DataOutput stream, - Iterator<Long> iterator) - throws IOException
    -
    Description copied from interface: NumericType
    + + + + +
      +
    • +

      writeDataTo

      +
      public void writeDataTo(java.io.DataOutput stream,
      +                        java.util.Iterator<TargetType> iterator)
      +                 throws java.io.IOException
      +
      Description copied from interface: NumericType
      This method writes all the target type elements returned by the provided iterator and write them into the provided "DataOutput" stream as bytes.
      -
      -
      Specified by:
      -
      writeDataTo in interface NumericType<TargetType,TargetArrayType,HolderType,HolderArrayType>
      -
      Parameters:
      +
      +
      Specified by:
      +
      writeDataTo in interface NumericType<TargetType,TargetArrayType,HolderType,HolderArrayType>
      +
      Parameters:
      stream - The output stream which ought to be feed with bytes of the elements provided by the iterator.
      iterator - The iterator whose returned elements ought to be translated into bytes for the output stream.
      -
      Throws:
      -
      IOException - An IOException forcing the caller to handle in case the conversion fails.
      +
      Throws:
      +
      java.io.IOException - An IOException forcing the caller to handle in case the conversion fails.
      -
diff --git a/docs/jdocs/neureka/dtype/custom/UI64.html b/docs/jdocs/neureka/dtype/custom/UI64.html
index 0fb734e46..f05ac6631 100644
--- a/docs/jdocs/neureka/dtype/custom/UI64.html
+++ b/docs/jdocs/neureka/dtype/custom/UI64.html
@@ -1,540 +1,722 @@
-UI64 (neureka 1.0.0 API)
+UI64 (neureka 1.0.1 API)

    Class UI64

    +
    neureka.dtype.custom
    +

    Class UI64

    -
    java.lang.Object -
    neureka.dtype.custom.UI64
    -
    -
    -
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.dtype.custom.UI64
      • +
      +
    • +
    +
    +
    -
    -
      - -
    • -
      -

      Constructor Summary

      -
      Constructors
      -
      -
      Constructor
      -
      Description
      - -
       
      +
      +
      public final class UI64
      +extends java.lang.Object
      +
    • +
    - +
    + - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        UI64

        -
        public UI64()
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            UI64

            +
            public UI64()
          -
    • +
    -
  • -
    -

    Method Details

    -
      -
    • -
      -

      signed

      -
      public boolean signed()
      -
      Description copied from interface: NumericType
      +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          signed

          +
          public boolean signed()
          +
          Description copied from interface: NumericType
          This boolean value tells if the data-type represented by concrete instances of implementations of this interface is signed!
          -
          -
          Returns:
          +
          +
          Returns:
          The truth value which defines if the represented data-type is signed.
          -
    • -
    • -
      -

      numberOfBytes

      -
      public int numberOfBytes()
      -
      -
      Returns:
      +
    + + + +
      +
    • +

      numberOfBytes

      +
      public int numberOfBytes()
      +
      +
      Returns:
      The number of bytes which it takes to represent the data-type.
      -
  • -
  • -
    -

    targetType

    -
    public Class<BigInteger> targetType()
    -
    Description copied from interface: NumericType
    + + + + +
      +
    • +

      targetType

      +
      public java.lang.Class<java.math.BigInteger> targetType()
      +
      Description copied from interface: NumericType
      The target type is the targeted JVM data-type which can represent the holder type. This method returns the class object of this target type.
      -
      -
      Returns:
      +
      +
      Returns:
      The targeted JVM data-type class which can represent the holder type.
      -
  • -
  • -
    -

    targetArrayType

    -
    public Class<BigInteger[]> targetArrayType()
    -
    Description copied from interface: NumericType
    + + + + +
      +
    • +

      targetArrayType

      +
      public java.lang.Class<java.math.BigInteger[]> targetArrayType()
      +
      Description copied from interface: NumericType
      The target type is the targeted JVM data-type which can represent the holder type. This method returns the class object of an array of this target type.
      -
      -
      Returns:
      +
      +
      Returns:
      The targeted JVM data-type array class which can represent the holder array type.
      -
  • -
  • -
    -

    holderType

    -
    public Class<Long> holderType()
    -
    Description copied from interface: NumericType
    + + + + +
      +
    • +

      holderType

      +
      public java.lang.Class<java.lang.Long> holderType()
      +
      Description copied from interface: NumericType
      The holder type is the JVM type which can hold the data but not necessarily represent it (int cant represent uint). This method returns the class object of this holder type.
      -
      -
      Returns:
      +
      +
      Returns:
      The holder type class which can hold the data type but not necessarily represent it (int cant represent uint).
      -
  • -
  • -
    -

    holderArrayType

    -
    public Class<long[]> holderArrayType()
    -
    Description copied from interface: NumericType
    + + + + +
      +
    • +

      holderArrayType

      +
      public java.lang.Class<long[]> holderArrayType()
      +
      Description copied from interface: NumericType
      The holder array type is the JVM type which can hold the data but not necessarily represent it (int[] cant represent uint[]). This method returns the array class object of an array of this holder type.
      -
      -
      Returns:
      +
      +
      Returns:
      The holder array type class which can hold the data type array but not necessarily represent it (int[] cant represent uint[]).
      -
  • -
  • -
    -

    foreignHolderBytesToTarget

    -
    public BigInteger foreignHolderBytesToTarget(byte[] bytes)
    -
    -
    Parameters:
    + + + + +
      +
    • +

      foreignHolderBytesToTarget

      +
      public java.math.BigInteger foreignHolderBytesToTarget(byte[] bytes)
      +
      +
      Parameters:
      bytes - The raw bytes of the holder type.
      -
      Returns:
      +
      Returns:
      An instance of a target type built based on the provided holder bytes.
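For UI64 no primitive can represent the full unsigned range, which is why the target is BigInteger. The positive-signum BigInteger constructor gives the intended reading of eight raw holder bytes; this is a JDK-only sketch of the idea, not the library's implementation.

    import java.math.BigInteger;

    public class UnsignedLongBytesSketch {
        public static void main(String[] args) {
            // Eight 0xFF bytes: -1 as a signed long, but a perfectly valid uint64.
            byte[] raw = { (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF,
                           (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF };
            BigInteger target = new BigInteger(1, raw);  // signum 1: treat the bytes as an unsigned magnitude
            System.out.println(target);                  // 18446744073709551615
        }
    }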
      -
  • -
  • -
    -

    toTarget

    -
    public BigInteger toTarget(Long original)
    -
    -
    Parameters:
    + + + + +
      +
    • +

      toTarget

      +
      public java.math.BigInteger toTarget(java.lang.Long original)
      +
      +
      Parameters:
      original - An instance of a holder type which ought to be converted to an instance of the target type.
      -
      Returns:
      +
      Returns:
      An instance of the target type converted from the provided holder type.
      -
  • -
  • -
    -

    targetToForeignHolderBytes

    -
    public byte[] targetToForeignHolderBytes(BigInteger b)
    -
    -
    Parameters:
    + + + + +
      +
    • +

      targetToForeignHolderBytes

      +
      public byte[] targetToForeignHolderBytes(java.math.BigInteger b)
      +
      +
      Parameters:
      b - An instance of a target type which ought to be converted to holder bytes.
      -
      Returns:
      +
      Returns:
      Holder bytes converted from an instance of the target type.
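The reverse direction has a wrinkle: BigInteger.toByteArray() may emit a leading sign byte or fewer than eight bytes, so the magnitude has to be right-aligned into a fixed-width buffer. The helper below is an illustrative sketch under that assumption (big-endian order), not the documented method.

    import java.math.BigInteger;
    import java.util.Arrays;

    public class BigIntegerToLongBytesSketch {
        // Right-aligns the magnitude into exactly eight bytes (big-endian assumed).
        static byte[] toHolderBytes(BigInteger number) {
            byte[] src = number.toByteArray();
            byte[] out = new byte[8];
            int copy = Math.min(src.length, 8);
            System.arraycopy(src, src.length - copy, out, 8 - copy, copy);
            return out;
        }

        public static void main(String[] args) {
            System.out.println(Arrays.toString(toHolderBytes(BigInteger.valueOf(256)))); // [0, 0, 0, 0, 0, 0, 1, 0]
        }
    }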
      -
  • -
  • -
    -

    readAndConvertForeignDataFrom

    -
-public BigInteger[] readAndConvertForeignDataFrom(DataInput stream,
-                                                  int size)
-                                           throws IOException
    -
    Description copied from interface: NumericType
    + + + + +
      +
    • +

      readAndConvertForeignDataFrom

      +
      public java.math.BigInteger[] readAndConvertForeignDataFrom(java.io.DataInput stream,
      +                                                            int size)
      +                                                     throws java.io.IOException
      +
      Description copied from interface: NumericType
      This method expects the provided stream to spit out bytes which can be read as holder type elements. It then ought to convert these to an array of target types of the specified size.
      -
      -
      Parameters:
      +
      +
      Parameters:
      stream - A DataInput stream whose data ought to be read as holder type elements.
      size - The number of elements which ought to be read.
      -
      Returns:
      +
      Returns:
      An array of target types converted from the holder type elements read from the stream.
      -
      Throws:
      -
      IOException - If reading from the stream was not successful.
      +
      Throws:
      +
      java.io.IOException - If reading from the stream was not successful.
      -
  • -
  • -
    -

    readAndConvertForeignDataFrom

    -
    public <T> BigInteger[] readAndConvertForeignDataFrom(Iterator<T> iterator, - int size)
    -
    Description copied from interface: NumericType
    + + + + +
      +
    • +

      readAndConvertForeignDataFrom

      +
      public <T> java.math.BigInteger[] readAndConvertForeignDataFrom(java.util.Iterator<T> iterator,
      +                                                                int size)
      +
      Description copied from interface: NumericType
      This method expects the provided iterator to return elements which can be read as holder type elements. It then ought to convert these to an array of target types of the specified size.
      -
      -
      Type Parameters:
      +
      +
      Type Parameters:
      T - The generic type parameter for the iterator.
      -
      Parameters:
      +
      Parameters:
      iterator - An iterator whose elements ought to be understood as holder type elements.
      size - The number of elements which ought to be read.
      -
      Returns:
      +
      Returns:
      An array of target types converted from the holder type elements read from the iterator.
      -
  • -
  • -
    -

    readForeignDataFrom

    -
    public long[] readForeignDataFrom(DataInput stream, - int size) - throws IOException
    -
    Description copied from interface: NumericType
    + + + + +
      +
    • +

      readForeignDataFrom

      +
      public long[] readForeignDataFrom(java.io.DataInput stream,
      +                                  int size)
      +                           throws java.io.IOException
      +
      Description copied from interface: NumericType
      This method expects the provided stream to spit out bytes which can be read as target type elements. It then ought to convert these to an array of holder types of the specified size.
      -
      -
      Parameters:
      +
      +
      Parameters:
      stream - A DataInput stream whose data ought to be read as target type elements.
      size - The number of elements which ought to be read.
      -
      Returns:
      +
      Returns:
      An array of holder types converted from the target type elements read from the stream.
      -
      Throws:
      -
      IOException - If reading from the stream was not successful.
      +
      Throws:
      +
      java.io.IOException - If reading from the stream was not successful.
      -
  • -
  • -
    -

    readForeignDataFrom

    -
    public <T> long[] readForeignDataFrom(Iterator<T> iterator, - int size)
    -
    Description copied from interface: NumericType
    + + + + +
      +
    • +

      readForeignDataFrom

      +
      public <T> long[] readForeignDataFrom(java.util.Iterator<T> iterator,
      +                                      int size)
      +
      Description copied from interface: NumericType
      This method expects the provided iterator to return elements which can be read as holder type elements. It then will write them into an array of holder types of the specified size.
      -
      -
      Type Parameters:
      +
      +
      Type Parameters:
      T - The generic type parameter for the iterator.
      -
      Parameters:
      +
      Parameters:
      iterator - An iterator whose elements ought to be understood as holder type elements.
      size - The number of elements which ought to be read and then written into an array.
      -
      Returns:
      +
      Returns:
      An array of holder types populated by holder elements read from the iterator.
      -
  • -
  • -
    -

    convertToHolder

    -
    public Long convertToHolder(Object from)
    -
    Description copied from interface: NumericType
    + + + + +
      +
    • +

      convertToHolder

      +
      public java.lang.Long convertToHolder(java.lang.Object from)
      +
      Description copied from interface: NumericType
      This method is a generic converter from any object to an instance of the HolderType parameter specified by an implementation of this interface.
      -
      -
      Parameters:
      +
      +
      Parameters:
      from - The object which ought to be converted to an instance of the holder type.
      -
      Returns:
      +
      Returns:
      An instance of the holder type based on the conversion of the provided object.
      -
  • -
  • -
    -

    convertToHolderArray

    -
    public long[] convertToHolderArray(Object from)
    -
    Description copied from interface: NumericType
    + + + + +
      +
    • +

      convertToHolderArray

      +
      public long[] convertToHolderArray(java.lang.Object from)
      +
      Description copied from interface: NumericType
      This method is a generic converter from any object to an instance of the HolderType parameter specified by an implementation of this interface.
      -
      -
      Parameters:
      +
      +
      Parameters:
      from - The object which ought to be converted to an instance of the holder array type.
      -
      Returns:
      +
      Returns:
      An instance of the holder array type based on the conversion of the provided object.
      -
  • -
  • -
    -

    convertToTarget

    -
convertToTarget

public java.math.BigInteger convertToTarget(java.lang.Object from)

Description copied from interface: NumericType
This method is a generic converter from any object to an instance of the TargetType parameter specified by an implementation of this interface.

Parameters:
from - The object which ought to be converted to an instance of the target type.
Returns:
An instance of the target type based on the conversion of the provided object.

convertToTargetArray

public java.math.BigInteger[] convertToTargetArray(java.lang.Object from)

Description copied from interface: NumericType
This method is a generic converter from any object to an instance of the TargetArrayType parameter specified by an implementation of this interface.

Parameters:
from - The object which ought to be converted to an instance of the target array type.
Returns:
An instance of the target array type based on the conversion of the provided object.

getNumericTypeTarget

public java.lang.Class<NumericType<TargetType,TargetArrayType,TargetType,TargetArrayType>> getNumericTypeTarget()

Description copied from interface: NumericType
This method returns the NumericType representation of the target type of this class. An example would be the UI32 (32 bit integer) which can be represented by the NumericType implementation I64 (64 bit integer). If this NumericType representation can represent itself, meaning its target type is also the holder type like for instance I32, then this method will simply return its own class!

Specified by:
getNumericTypeTarget in interface NumericType<TargetType,TargetArrayType,HolderType,HolderArrayType>
Returns:
The NumericType representation of the target type class which can represent the data type of this class.

writeDataTo

public void writeDataTo(java.io.DataOutput stream,
                        java.util.Iterator<TargetType> iterator)
                 throws java.io.IOException

Description copied from interface: NumericType
This method writes all the target type elements returned by the provided iterator into the provided "DataOutput" stream as bytes.

Specified by:
writeDataTo in interface NumericType<TargetType,TargetArrayType,HolderType,HolderArrayType>
Parameters:
stream - The output stream which ought to be fed with bytes of the elements provided by the iterator.
iterator - The iterator whose returned elements ought to be translated into bytes for the output stream.
Throws:
java.io.IOException - An IOException forcing the caller to handle in case the conversion fails.
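To make the conversion contract above concrete, here is a minimal usage sketch. It is not part of the generated docs: it assumes UI64 has a public no-argument constructor like the other classes in this package and that convertToTarget / convertToTargetArray accept ordinary boxed numbers and primitive arrays, which the Javadoc implies but does not enumerate.

```java
import java.math.BigInteger;
import neureka.dtype.custom.UI64;

public class UI64ConversionSketch {
    public static void main(String[] args) {
        UI64 ui64 = new UI64(); // assumed no-arg constructor, mirroring UI8() documented below
        // Generic conversion of an arbitrary object to the target type (BigInteger):
        BigInteger one = ui64.convertToTarget(42L);
        // Generic conversion of an arbitrary object to the target array type (BigInteger[]):
        BigInteger[] many = ui64.convertToTargetArray(new long[]{ 1L, 2L, 3L });
        System.out.println(one + " / " + many.length + " converted elements");
    }
}
```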
diff --git a/docs/jdocs/neureka/dtype/custom/UI8.html b/docs/jdocs/neureka/dtype/custom/UI8.html
(Regenerated Javadoc page; title "UI8 (neureka 1.0.0 API)" -> "UI8 (neureka 1.0.1 API)", signatures now use fully qualified type names.)

neureka.dtype.custom
Class UI8

java.lang.Object
    neureka.dtype.custom.UI8

public final class UI8

Constructor Detail

public UI8()

Method Detail

public boolean signed()
    This boolean value tells if the data-type represented by concrete instances of implementations of this interface is signed!

public int numberOfBytes()
    Returns: The number of bytes which it takes to represent the data-type.

public java.lang.Class<java.lang.Short> targetType()
    The target type is the targeted JVM data-type which can represent the holder type. This method returns the class object of this target type.

public java.lang.Class<short[]> targetArrayType()
    The target type is the targeted JVM data-type which can represent the holder type. This method returns the class object of an array of this target type.

public java.lang.Class<java.lang.Byte> holderType()
    The holder type is the JVM type which can hold the data but not necessarily represent it (int cannot represent uint). This method returns the class object of this holder type.

public java.lang.Class<byte[]> holderArrayType()
    The holder array type is the JVM type which can hold the data but not necessarily represent it (int[] cannot represent uint[]). This method returns the array class object of an array of this holder type.

public java.lang.Short foreignHolderBytesToTarget(byte[] bytes)
    Returns: An instance of the target type built based on the provided holder bytes.

public java.lang.Short toTarget(java.lang.Byte original)
    Returns: An instance of the target type converted from the provided holder type.

public byte[] targetToForeignHolderBytes(java.lang.Short number)
    Returns: Holder bytes converted from an instance of the target type.

public short[] readAndConvertForeignDataFrom(java.io.DataInput stream, int size) throws java.io.IOException
    This method expects the provided stream to spit out bytes which can be read as holder type elements. It then converts these to an array of target types of the specified size.

public <T> short[] readAndConvertForeignDataFrom(java.util.Iterator<T> iterator, int size)
    This method expects the provided iterator to return elements which can be read as holder type elements. It then converts these to an array of target types of the specified size.

public byte[] readForeignDataFrom(java.io.DataInput stream, int size) throws java.io.IOException
    This method expects the provided stream to spit out bytes which can be read as target type elements. It then converts these to an array of holder types of the specified size.

public <T> byte[] readForeignDataFrom(java.util.Iterator<T> iterator, int size)
    This method expects the provided iterator to return elements which can be read as holder type elements. It then writes them into an array of holder types of the specified size.

public java.lang.Byte convertToHolder(java.lang.Object from)
    This method is a generic converter from any object to an instance of the HolderType parameter specified by an implementation of this interface.

public byte[] convertToHolderArray(java.lang.Object from)
    This method is a generic converter from any object to an instance of the HolderArrayType parameter specified by an implementation of this interface.

public java.lang.Short convertToTarget(java.lang.Object from)
    This method is a generic converter from any object to an instance of the TargetType parameter specified by an implementation of this interface.

public short[] convertToTargetArray(java.lang.Object from)
    This method is a generic converter from any object to an instance of the TargetArrayType parameter specified by an implementation of this interface.

public java.lang.Class<NumericType<TargetType,TargetArrayType,TargetType,TargetArrayType>> getNumericTypeTarget()
    This method returns the NumericType representation of the target type of this class. If this NumericType representation can represent itself, then this method will simply return its own class!

public void writeDataTo(java.io.DataOutput stream, java.util.Iterator<TargetType> iterator) throws java.io.IOException
    This method writes all the target type elements returned by the provided iterator into the provided "DataOutput" stream as bytes.
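The UI8 page illustrates the holder/target split that all of these NumericType implementations share: the unsigned 8 bit value is held in a byte but represented as a short. Below is a small sketch of that round trip; the concrete 255 result is an assumption based on the unsigned reinterpretation the class name implies, not something this page states.

```java
import neureka.dtype.custom.UI8;

public class UI8RoundTripSketch {
    public static void main(String[] args) {
        UI8 ui8 = new UI8();
        // byte is the holder type: it can store the 8 bits but cannot represent values above 127.
        byte heldBits = (byte) 0xFF;
        // short is the target type: it can represent the unsigned value (expected: 255, see lead-in).
        short represented = ui8.toTarget(heldBits);
        // ...and back to raw holder bytes:
        byte[] holderBytes = ui8.targetToForeignHolderBytes(represented);
        System.out.println(represented + " <-> " + holderBytes.length + " holder byte(s)");
    }
}
```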
diff --git a/docs/jdocs/neureka/dtype/custom/package-frame.html b/docs/jdocs/neureka/dtype/custom/package-frame.html (new file)
New frame page "neureka.dtype.custom (neureka 1.0.1 API)" listing the classes of the package (see the class summary below).
diff --git a/docs/jdocs/neureka/dtype/custom/package-summary.html b/docs/jdocs/neureka/dtype/custom/package-summary.html
(Regenerated; title "neureka.dtype.custom (neureka 1.0.0 API)" -> "neureka.dtype.custom (neureka 1.0.1 API)".)

Package neureka.dtype.custom

Everything in this package should be considered library-private!
DO NOT DEPEND ON CLASSES INSIDE THIS PACKAGE!
Code inside this package or any sub-packages might change frequently...

Class Summary:
    F32, F64, I16, I32, I64, UI16, UI32, UI64, UI8
    I8 - The following abstract class implements some basic logic which is applicable across all final concrete classes extending this abstract one.
diff --git a/docs/jdocs/neureka/dtype/custom/package-tree.html b/docs/jdocs/neureka/dtype/custom/package-tree.html
(Regenerated; title "neureka.dtype.custom Class Hierarchy (neureka 1.0.0 API)" -> "... (neureka 1.0.1 API)".)

Hierarchy For Package neureka.dtype.custom

Class Hierarchy
    java.lang.Object
        neureka.dtype.custom.F32
        neureka.dtype.custom.F64
        neureka.dtype.custom.I16
        neureka.dtype.custom.I32
        neureka.dtype.custom.I64
        neureka.dtype.custom.I8
        neureka.dtype.custom.UI16
        neureka.dtype.custom.UI32
        neureka.dtype.custom.UI64
        neureka.dtype.custom.UI8
diff --git a/docs/jdocs/neureka/dtype/package-frame.html b/docs/jdocs/neureka/dtype/package-frame.html (new file)
New frame page "neureka.dtype (neureka 1.0.1 API)" listing the package's interfaces and classes (see the package summary below).
diff --git a/docs/jdocs/neureka/dtype/package-summary.html b/docs/jdocs/neureka/dtype/package-summary.html
(Regenerated; title "neureka.dtype (neureka 1.0.0 API)" -> "neureka.dtype (neureka 1.0.1 API)".)

Package neureka.dtype

Interface Summary:
    NumericType<TargetType,TargetArrayType,HolderType,HolderArrayType> - This interface enables "Polymorphic" utility by defining common functionalities used for handling various numeric types.

Class Summary:
    DataType<T> - This class is a Multiton implementation for wrapping and representing type classes.
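As a rough illustration of the Multiton idea mentioned for DataType, the sketch below assumes a static DataType.of(Class) lookup method; that entry point is not shown on this summary page, so treat it as hypothetical.

```java
import neureka.dtype.DataType;

public class DataTypeMultitonSketch {
    public static void main(String[] args) {
        // Hypothetical multiton lookup: the same Class key should map to the same cached instance.
        DataType<?> first  = DataType.of(Float.class);
        DataType<?> second = DataType.of(Float.class);
        System.out.println(first == second); // expected: true for a Multiton
    }
}
```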
diff --git a/docs/jdocs/neureka/dtype/package-tree.html b/docs/jdocs/neureka/dtype/package-tree.html
(Regenerated; title "neureka.dtype Class Hierarchy (neureka 1.0.0 API)" -> "... (neureka 1.0.1 API)".)

Hierarchy For Package neureka.dtype

Interface Hierarchy
    neureka.dtype.NumericType<TargetType,TargetArrayType,HolderType,HolderArrayType>
diff --git a/docs/jdocs/neureka/fluent/building/NdaBuilder.html b/docs/jdocs/neureka/fluent/building/NdaBuilder.html
(Regenerated; title "NdaBuilder (neureka 1.0.0 API)" -> "NdaBuilder (neureka 1.0.1 API)", signatures now use fully qualified type names.)

neureka.fluent.building
Class NdaBuilder<V>

java.lang.Object
    neureka.fluent.building.NdaBuilder<V>

Type Parameters:
V - The type of the values which ought to be represented by the Tensor built by this NdaBuilder.

Constructor Detail

public NdaBuilder(java.lang.Class<V> typeClass)
    typeClass - The type of the values which ought to be represented by the Tensor built by this NdaBuilder.

Method Detail

@SafeVarargs
public final Tensor<V> andFill(V... values)
    Provide an array of values which will be used to fill the Tensor instance returned by this last fluent builder method. If the configured tensor is larger than the number of provided elements, then they will simply be read multiple times until the result has been sufficiently populated.

public Tensor<V> andWhere(Filler<V> filler)
    This method receives a Filler lambda which will be used to populate the Tensor instance produced by this API with values.

public ToForTensor<V> andFillFrom(V index)
    This part of the builder API allows for specifying a range which starts from the provided value and will end at the value specified in the next builder step returned by this method. If the created range is not large enough to fully populate the final Tensor instance built by this API, then the resulting range will fill the underlying data array of the tensor recurrently.

public Tensor<V> all(V value)
    This method creates and returns a Tensor instance which will be homogeneously filled by the provided value irrespective of the previously defined shape.

public Tensor<V> andSeed(java.lang.Object seed)
    This method creates and returns a Tensor instance which will be filled based on the provided seed object.

public IterByOrIterFromOrAllTensor<V> withShape(int... shape)
    Description copied from interface: WithShapeOrScalarOrVectorTensor.

public Tensor<V> vector(java.lang.Object[] values)
    This method creates and returns a vector Tensor instance (description copied from interface: WithShapeOrScalarOrVectorTensor).

public Tensor<V> scalar(V value)
    This method creates and returns a scalar Tensor instance (description copied from interface: WithShapeOrScalarOrVectorTensor).

public StepForTensor<V> to(V index)
    index - The end point of the range previously specified in IterByOrIterFromOrAll.andFillFrom(Object).
    Returns: The last step in the call transition graph of the fluent builder API for building range based Tensor instances.

public Tensor<V> step(double size)
    size - The size of the step within the range defined by this fluent builder API.
    Returns: A new Tensor instance whose contents are filled based on the provided range.

public WithShapeOrScalarOrVectorTensor<V> on(Device<V> device)
    Description copied from interface: WithShapeOrScalarOrVectorOnDevice.
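Read as a whole, the NdaBuilder methods above form a single fluent call chain: shape (or scalar/vector) first, then one population strategy. A minimal sketch, assuming the chain is started through Tensor.of(Class) as in Neureka's fluent API (the entry point itself is not documented on this page):

```java
import neureka.Tensor;

public class FluentBuilderSketch {
    public static void main(String[] args) {
        // Explicit values; the three numbers are re-read until the 2x3 tensor is fully populated.
        Tensor<Double> filled = Tensor.of(Double.class)
                                      .withShape(2, 3)
                                      .andFill(1.0, 2.0, 3.0);

        // Range based population: start value, end value, then the step size.
        Tensor<Double> ranged = Tensor.of(Double.class)
                                      .withShape(2, 3)
                                      .andFillFrom(0.0).to(5.0).step(0.5);

        System.out.println(filled);
        System.out.println(ranged);
    }
}
```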
diff --git a/docs/jdocs/neureka/fluent/building/package-frame.html b/docs/jdocs/neureka/fluent/building/package-frame.html (new file)
diff --git a/docs/jdocs/neureka/fluent/building/package-summary.html b/docs/jdocs/neureka/fluent/building/package-summary.html
diff --git a/docs/jdocs/neureka/fluent/building/package-tree.html b/docs/jdocs/neureka/fluent/building/package-tree.html
(Regenerated navigation pages for Package neureka.fluent.building: a new frame page listing the package's classes, the package summary page, which carries no package description, and the "Hierarchy For Package neureka.fluent.building" page; titles changed from "neureka 1.0.0 API" to "neureka 1.0.1 API".)
diff --git a/docs/jdocs/neureka/fluent/building/states/IterByOrIterFromOrAll.html b/docs/jdocs/neureka/fluent/building/states/IterByOrIterFromOrAll.html
(Regenerated; title "IterByOrIterFromOrAll (neureka 1.0.0 API)" -> "IterByOrIterFromOrAll (neureka 1.0.1 API)".)

neureka.fluent.building.states
Interface IterByOrIterFromOrAll<V>

public interface IterByOrIterFromOrAll<V>

Method Detail

Nda<V> andFill(V... values)
    Provide an array of values which will be used to fill the Tensor instance returned by this last fluent builder method. If the configured tensor is larger than the number of provided elements, then they will simply be read multiple times until the result has been sufficiently populated.
    values - The values which will be used to populate the Tensor instance returned by this method.
    Returns: The final result of the fluent tensor builder API having a tensor filled with custom values.

Nda<V> andFill(java.util.List<V> values)
    Provide a list of values which will be used to fill the Tensor instance returned by this last fluent builder method, with the same repetition semantics as the array overload above.

Nda<V> andWhere(Filler<V> filler)
    Pass a lambda to this method which will be used to populate the Tensor built by this fluent builder API based on the indices of the tensor. The lambda will receive the absolute index ranging from 0 to NDimensional.size() as well as an array of shape based nd-indices which can be used to initialize the underlying data of the Tensor more selectively.
    filler - A data element provider lambda mapping the indices to custom values.
    Returns: The resulting Tensor instance populated by the provided Filler.

To<V> andFillFrom(V index)
    This part of the builder API allows for specifying a range which starts from the provided value and will end at the value specified in the next builder step returned by this method. If the created range is not large enough to fully populate the final Tensor instance built by this API, then the resulting range will fill the underlying data array of the tensor recurrently.
    index - The start of the range which ought to supply the Tensor instance built by this API.
    Returns: The next step in the builder method chain which expects to receive the end point of the range.

Nda<V> all(V value)
    This method creates and returns a Tensor instance which will be homogeneously filled by the provided value irrespective of the previously defined shape.
    value - The value which ought to populate the entire Tensor.
    Returns: The homogeneously populated Tensor instance.

Nda<V> andSeed(java.lang.Object seed)
    This method creates and returns a Tensor instance which will be filled based on the provided seed object. The seed can be any object whose hash will serve as a basis for supplying the tensor with random data...
    seed - The seed based on which the value for populating the entire Tensor will be generated.
    Returns: The pseudo randomly populated Tensor instance.
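Of the population strategies above, andWhere(Filler) is the most selective, because the lambda sees both the absolute index and the nd-index array. A small sketch, assuming Filler<V> is a functional interface whose single method takes those two arguments in that order, and that the chain starts with Tensor.of(Class):

```java
import neureka.Tensor;

public class FillerSketch {
    public static void main(String[] args) {
        // Every element is derived from its nd-indices: value = row * 10 + column.
        Tensor<Integer> grid = Tensor.of(Integer.class)
                                     .withShape(3, 4)
                                     .andWhere((absoluteIndex, ndIndices) -> ndIndices[0] * 10 + ndIndices[1]);
        System.out.println(grid);
    }
}
```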
diff --git a/docs/jdocs/neureka/fluent/building/states/IterByOrIterFromOrAllTensor.html b/docs/jdocs/neureka/fluent/building/states/IterByOrIterFromOrAllTensor.html
(Regenerated; title "IterByOrIterFromOrAllTensor (neureka 1.0.0 API)" -> "IterByOrIterFromOrAllTensor (neureka 1.0.1 API)".)

neureka.fluent.building.states
Interface IterByOrIterFromOrAllTensor<V>

This interface narrows the return types of IterByOrIterFromOrAll<V> from Nda<V> to Tensor<V>; each method is "Specified by" the parent interface and its documentation is otherwise identical to the entries above:

Tensor<V> andFill(V... values)
default Tensor<V> andFill(java.util.List<V> values)
Tensor<V> andWhere(Filler<V> filler)
ToForTensor<V> andFillFrom(V index)
Tensor<V> all(V value)
Tensor<V> andSeed(java.lang.Object seed)
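Since this stage of the builder returns Tensor<V> directly, the homogeneous and seeded variants read as one-liners. Another small sketch under the same Tensor.of(Class) assumption as before:

```java
import neureka.Tensor;

public class SeededBuilderSketch {
    public static void main(String[] args) {
        // Homogeneously filled: every element of the 2x2 tensor is 0.0.
        Tensor<Double> zeros = Tensor.of(Double.class).withShape(2, 2).all(0.0);

        // Pseudo random fill: the hash of the seed object drives the generated data.
        Tensor<Double> seeded = Tensor.of(Double.class).withShape(2, 2).andSeed("my-seed");

        System.out.println(zeros);
        System.out.println(seeded);
    }
}
```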
diff --git a/docs/jdocs/neureka/fluent/building/states/IterByOrIterFromOrAllTsr.html b/docs/jdocs/neureka/fluent/building/states/IterByOrIterFromOrAllTsr.html (deleted file)
Removed the old "IterByOrIterFromOrAllTsr (neureka 0.21.0 API)" page. It documented the same builder methods (all, andFill array/list, andFillFrom, andSeed, andWhere) for the legacy Tsr<V> type, which is superseded by the IterByOrIterFromOrAllTensor<V> page above.
          -
          all in interface IterByOrIterFromOrAll<V>
          -
          Parameters:
          -
          value - The value which ought to populate the entire Tsr.
          -
          Returns:
          -
          The homogeneously populated Tsr instance.
          -
          -
        • -
        - - - -
          -
        • -

          andSeed

          -
          Tsr<V> andSeed​(java.lang.Object seed)
          -
          This method creates and return a Tsr instance which - will be filled based on the provided seed object. - The seed can be any object whose hash will serve as a basis for - supplying the tensor with random data...
          -
          -
          Specified by:
          -
          andSeed in interface IterByOrIterFromOrAll<V>
          -
          Parameters:
          -
          seed - The seed based on which the value for populating the entire Tsr will be generated.
          -
          Returns:
          -
          The pseudo randomly populated Tsr instance.
          -
          -
        • -
        -
      • -
      -
      -
    • -
    -
    -
    -
    - -
    - -
    - - diff --git a/docs/jdocs/neureka/fluent/building/states/Step.html b/docs/jdocs/neureka/fluent/building/states/Step.html index e10c21ee6..525e1efcf 100644 --- a/docs/jdocs/neureka/fluent/building/states/Step.html +++ b/docs/jdocs/neureka/fluent/building/states/Step.html @@ -1,159 +1,253 @@ - + + - -Step (neureka 1.0.0 API) - - - - + +Step (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Interface Step<V>

    +
    neureka.fluent.building.states
    +

    Interface Step<V>

    -
    -
    -
    Type Parameters:
    -
    V - The type of the values wrapped by the Tensor which is about to be instantiated.
    +
    +
    +
      +
    • +
      +
      Type Parameters:
      +
      V - The type of the values wrapped by the Tensor which is about to be instantiated.
      -
      +
      All Known Subinterfaces:
      -
      StepForTensor<V>
      +
      StepForTensor<V>
      -
      +
      All Known Implementing Classes:
      -
      NdaBuilder
      +
      NdaBuilder

      -
      public interface Step<V>
      +
      +
      public interface Step<V>
      This interface defines the last step in the call transition graph of the fluent builder API when - building a Tensor instance populated based on the values within a defined range. + building a Tensor instance populated based on the values within a defined range. This method embodies the last part of this range definition which consists of the chained - methods IterByOrIterFromOrAll.andFillFrom(Object), To.to(Object) - and lastly the method defined in this interface, namely: step(double).
      -
    -
    -
    + + +
    +
    +
      +
    • -
    • -
      -

      Method Summary

      -
      -
      -
      -
      -
      Modifier and Type
      -
      Method
      -
      Description
      - -
      step(double size)
      -
      +
        +
      • + + +

        Method Summary

        + + + + + + + + + + +
        All Methods Instance Methods Abstract Methods 
        Modifier and TypeMethod and Description
        Nda<V>step(double size)
        This is the last step in the call transition graph of the fluent builder API when - building a Tensor instance populated based on the values within a defined range.
        - - - - - + building a Tensor instance populated based on the values within a defined range. +
        +
      • +
    - -
    -
      +
    +
    +
    +
    +
    Parameters:
    size - The size of the step within the range defined by this fluent builder API.
    -
    Returns:
    -
    A new Tensor instance whose contents are filled based on the provided range.
    +
    Returns:
    +
    A new Tensor instance whose contents are filled based on the provided range.
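A hedged sketch of the full range chain that step(double) concludes, namely andFillFrom(..).to(..).step(..). The Tensor.of(Class) entry point and the concrete numbers are assumptions; the chained methods themselves are the ones documented above.

```java
import neureka.Tensor;

public class RangeBuilderSketch {
    public static void main(String[] args) {
        // A 1x6 tensor filled from the range starting at 0.0, ending at 2.5, spaced by 0.5.
        // andFillFrom(..) opens the range, to(..) closes it, and step(..) sets the spacing
        // and triggers the actual build, as documented for Step.step(double).
        Tensor<Double> range = Tensor.of(Double.class)   // assumed entry point
                                     .withShape(1, 6)
                                     .andFillFrom(0.0).to(2.5).step(0.5);
        System.out.println(range);
    }
}
```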
    - - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/fluent/building/states/StepForTensor.html b/docs/jdocs/neureka/fluent/building/states/StepForTensor.html index c3c9a4cc8..78ee326e9 100644 --- a/docs/jdocs/neureka/fluent/building/states/StepForTensor.html +++ b/docs/jdocs/neureka/fluent/building/states/StepForTensor.html @@ -1,153 +1,247 @@ - + + - -StepForTensor (neureka 1.0.0 API) - - - - + +StepForTensor (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Interface StepForTensor<V>

    +
    neureka.fluent.building.states
    +

    Interface StepForTensor<V>

    -
    -
    +
    +
    +
    -
    -
      +
      +
      public interface StepForTensor<V>
      +extends Step<V>
      + +
    +
    +
    +
      +
    • -
    • -
      -

      Method Summary

      -
      -
      -
      -
      -
      Modifier and Type
      -
      Method
      -
      Description
      - -
      step(double size)
      -
      +
        +
      • + + +

        Method Summary

        + + + + + + + + + + +
        All Methods Instance Methods Abstract Methods 
        Modifier and TypeMethod and Description
        Tensor<V>step(double size)
        This is the last step in the call transition graph of the fluent builder API when - building a Tensor instance populated based on the values within a defined range.
        - - - - - + building a Tensor instance populated based on the values within a defined range. +
        +
      • +
    - -
    -
      +
    +
    +
    +
    +
    Specified by:
    +
    step in interface Step<V>
    +
    Parameters:
    size - The size of the step within the range defined by this fluent builder API.
    -
    Returns:
    -
    A new Tensor instance whose contents are filled based on the provided range.
    +
    Returns:
    +
    A new Tensor instance whose contents are filled based on the provided range.
    - - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/fluent/building/states/StepForTsr.html b/docs/jdocs/neureka/fluent/building/states/StepForTsr.html deleted file mode 100644 index 626b71b7e..000000000 --- a/docs/jdocs/neureka/fluent/building/states/StepForTsr.html +++ /dev/null @@ -1,276 +0,0 @@ - - - - - -StepForTsr (neureka 0.21.0 API) - - - - - - - - - - - - - -
    - -
    - -
    -
    - -

    Interface StepForTsr<V>

    -
    -
    -
    -
      -
    • -
      -
      All Superinterfaces:
      -
      Step<V>
      -
      -
      -
      All Known Implementing Classes:
      -
      NdaBuilder
      -
      -
      -
      public interface StepForTsr<V>
      -extends Step<V>
      -
    • -
    -
    -
    -
      -
    • - -
      -
        -
      • - - -

        Method Summary

        - - - - - - - - - - - - -
        All Methods Instance Methods Abstract Methods 
        Modifier and TypeMethodDescription
        Tsr<V>step​(double size) -
        This is the last step in the call transition graph of the fluent builder API when - building a Tsr instance populated based on the values within a defined range.
        -
        -
      • -
      -
      -
    • -
    -
    -
    -
      -
    • - -
      -
        -
      • - - -

        Method Detail

        - - - -
          -
        • -

          step

          -
          Tsr<V> step​(double size)
          -
          This is the last step in the call transition graph of the fluent builder API when - building a Tsr instance populated based on the values within a defined range. - This method embodies the last part of this range definition which consists of the chained - methods IterByOrIterFromOrAll.andFillFrom(Object), To.to(Object) - and lastly this very method Step.step(double). - This method allows one to set the step size used to space the entries within - the previously defined span between what has - been passed to IterByOrIterFromOrAll.andFillFrom(Object) - and what has been passed to To.to(Object)...
          -
          -
          Specified by:
          -
          step in interface Step<V>
          -
          Parameters:
          -
          size - The size of the step within the range defined by this fluent builder API.
          -
          Returns:
          -
          A new Tsr instance whose contents are filled based on the provided range.
          -
          -
        • -
        -
      • -
      -
      -
    • -
    -
    -
    -
    - -
    - -
    - - diff --git a/docs/jdocs/neureka/fluent/building/states/To.html b/docs/jdocs/neureka/fluent/building/states/To.html index 1383b824e..a58617eed 100644 --- a/docs/jdocs/neureka/fluent/building/states/To.html +++ b/docs/jdocs/neureka/fluent/building/states/To.html @@ -1,156 +1,252 @@ - + + - -To (neureka 1.0.0 API) - - - - + +To (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Interface To<V>

    +
    neureka.fluent.building.states
    +

    Interface To<V>

    -
    -
    -
    Type Parameters:
    +
    +
    +
    -
    -
    + + +
    +
    + - -
    -
      +
    +
    +
    +
    +
    Parameters:
    +
    index - The end point of the range previously specified in IterByOrIterFromOrAll.andFillFrom(Object).
    +
    Returns:
    +
    The last step in the call transition graph of the fluent builder API for building range based Tensor instances.
    - - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/fluent/building/states/ToForTensor.html b/docs/jdocs/neureka/fluent/building/states/ToForTensor.html index 40dd34a9d..97f65ca15 100644 --- a/docs/jdocs/neureka/fluent/building/states/ToForTensor.html +++ b/docs/jdocs/neureka/fluent/building/states/ToForTensor.html @@ -1,150 +1,246 @@ - + + - -ToForTensor (neureka 1.0.0 API) - - - - + +ToForTensor (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Interface ToForTensor<V>

    +
    neureka.fluent.building.states
    +

    Interface ToForTensor<V>

    -
    -
    +
    +
    +
      +
    • +
      All Superinterfaces:
      -
      To<V>
      +
      To<V>
      -
      +
      All Known Implementing Classes:
      -
      NdaBuilder
      +
      NdaBuilder

      -
      public interface ToForTensor<V> -extends To<V>
      -
    -
    -
      +
      +
      public interface ToForTensor<V>
      +extends To<V>
      + +
    +
    +
    + - -
    -
      +
    +
    +
    +
    +
    Specified by:
    +
    to in interface To<V>
    +
    Parameters:
    +
    index - The end point of the range previously specified in IterByOrIterFromOrAll.andFillFrom(Object).
    +
    Returns:
    +
    The last step in the call transition graph of the fluent builder API for building range based Tensor instances.
    - - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/fluent/building/states/ToForTsr.html b/docs/jdocs/neureka/fluent/building/states/ToForTsr.html deleted file mode 100644 index 959b33457..000000000 --- a/docs/jdocs/neureka/fluent/building/states/ToForTsr.html +++ /dev/null @@ -1,275 +0,0 @@ - - - - - -ToForTsr (neureka 0.21.0 API) - - - - - - - - - - - - - -
    - -
    - -
    -
    - -

    Interface ToForTsr<V>

    -
    -
    -
    -
      -
    • -
      -
      All Superinterfaces:
      -
      To<V>
      -
      -
      -
      All Known Implementing Classes:
      -
      NdaBuilder
      -
      -
      -
      public interface ToForTsr<V>
      -extends To<V>
      -
    • -
    -
    -
    - -
    -
    - -
    -
    -
    - -
    - -
    - - diff --git a/docs/jdocs/neureka/fluent/building/states/WithShapeOrScalarOrVector.html b/docs/jdocs/neureka/fluent/building/states/WithShapeOrScalarOrVector.html index 7257f9635..bb15a2c7e 100644 --- a/docs/jdocs/neureka/fluent/building/states/WithShapeOrScalarOrVector.html +++ b/docs/jdocs/neureka/fluent/building/states/WithShapeOrScalarOrVector.html @@ -1,245 +1,363 @@ - + + - -WithShapeOrScalarOrVector (neureka 1.0.0 API) - - - - + +WithShapeOrScalarOrVector (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Interface WithShapeOrScalarOrVector<V>

    +
    neureka.fluent.building.states
    +

    Interface WithShapeOrScalarOrVector<V>

    -
    -
    +
    +
    +
    -
    -
      +
      +
      public interface WithShapeOrScalarOrVector<V>
      + +
    +
    +
    +
      +
    • -
    • -
      -

      Method Summary

      -
      -
      -
      -
      -
      Modifier and Type
      -
      Method
      -
      Description
      - -
      scalar(V value)
      -
      -
      This method created and return a scalar Tensor instance +
        +
      • + + +

        Method Summary

        + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        All Methods Instance Methods Abstract Methods 
        Modifier and TypeMethod and Description
        Nda<V>scalar(V value) +
        This method created and return a scalar Tensor instance which wraps the provided value.
        - - -
        vector(Iterable<V> values)
        -
        -
        This method creates and returns a vector Tensor instance +
        Nda<V>vector(java.lang.Iterable<V> values) +
        This method creates and returns a vector Tensor instance which wraps the provided values.
        - - -
        vector(List<V> values)
        -
        -
        This method creates and returns a vector Tensor instance +
        Nda<V>vector(java.util.List<V> values) +
        This method creates and returns a vector Tensor instance which wraps the provided values.
        - - -
        vector(V... values)
        -
        -
        This method creates and returns a vector Tensor instance +
        Nda<V>vector(V... values) +
        This method creates and returns a vector Tensor instance which wraps the provided values.
        - - -
        withShape(int... shape)
        -
        +
        IterByOrIterFromOrAll<V>withShape(int... shape)
        Define a tensor shape by passing an array of int values to this method, - which represent the shape of the Tensor that should be built.
        - - -
        withShape(List<N> shape)
        -
        + which represent the shape of the Tensor that should be built.
        +
        <N extends java.lang.Number>
        IterByOrIterFromOrAll<V>
        withShape(java.util.List<N> shape)
        Define a tensor shape by passing a list of numbers to this method, - which represent the shape of the Tensor that should be built.
        - - - - - + which represent the shape of the Tensor that should be built. +
      -
      -
      -
        + +
      +
    +
    +
      +
    • -
    • -
      -

      Method Details

      -
        -
      • -
        -

        withShape

        -
        IterByOrIterFromOrAll<V> withShape(int... shape)
        +
          +
        • + + +

          Method Detail

          + + + +
            +
          • +

            withShape

            +
            IterByOrIterFromOrAll<V> withShape(int... shape)
            Define a tensor shape by passing an array of int values to this method, - which represent the shape of the Tensor that should be built. + which represent the shape of the Tensor that should be built. This should be called immediately after having specified the type of the tensor.
            -
            -
            Parameters:
            -
            shape - The shape array of the Tensor that should be built.
            -
            Returns:
            +
            +
            Parameters:
            +
            shape - The shape array of the Tensor that should be built.
            +
            Returns:
            The next step in the call transition graph of this fluent builder API.
            -
      • -
      • -
        -

        withShape

        -
        <N extends Number> IterByOrIterFromOrAll<V> withShape(List<N> shape)
        +
      + + + +
        +
      • +

        withShape

        +
        <N extends java.lang.Number> IterByOrIterFromOrAll<V> withShape(java.util.List<N> shape)
        Define a tensor shape by passing a list of numbers to this method, - which represent the shape of the Tensor that should be built. + which represent the shape of the Tensor that should be built. This should be called immediately after having specified the type of the tensor.
        -
        -
        Parameters:
        -
        shape - The shape list of the Tensor that should be built.
        -
        Returns:
        +
        +
        Parameters:
        +
        shape - The shape list of the Tensor that should be built.
        +
        Returns:
        The next step in the call transition graph of this fluent builder API.
        -
    • -
    • -
      -

      vector

      -
      Nda<V> vector(V... values)
      -
      This method creates and returns a vector Tensor instance +
    + + + + + +
      +
    • +

      vector

      +
      Nda<V> vector(V... values)
      +
      This method creates and returns a vector Tensor instance which wraps the provided values.
      -
      -
      Parameters:
      -
      values - The values which ought to be wrapped by a new vector Tensor instance.
      -
      Returns:
      -
      A vector Tensor instance wrapping the provided values.
      +
      +
      Parameters:
      +
      values - The values which ought to be wrapped by a new vector Tensor instance.
      +
      Returns:
      +
      A vector Tensor instance wrapping the provided values.
      -
    • -
    • -
      -

      vector

      -
      Nda<V> vector(List<V> values)
      -
      This method creates and returns a vector Tensor instance +
    + + + +
      +
    • +

      vector

      +
      Nda<V> vector(java.util.List<V> values)
      +
      This method creates and returns a vector Tensor instance which wraps the provided values.
      -
      -
      Parameters:
      +
      +
      Parameters:
      values - The list of values which ought to be turned into a vector.
      -
      Returns:
      +
      Returns:
      A vector representing the provided values.
      -
    • -
    • -
      -

      vector

      -
      Nda<V> vector(Iterable<V> values)
      -
      This method creates and returns a vector Tensor instance +
    + + + +
      +
    • +

      vector

      +
      Nda<V> vector(java.lang.Iterable<V> values)
      +
      This method creates and returns a vector Tensor instance which wraps the provided values.
      -
      -
      Parameters:
      +
      +
      Parameters:
      values - The list of values which ought to be turned into a vector.
      -
      Returns:
      +
      Returns:
      A vector representing the provided values.
      -
    • -
    • -
      -

      scalar

      -
      Nda<V> scalar(V value)
      -
      This method created and return a scalar Tensor instance +
    + + + + + +
      +
    • +

      scalar

      +
      Nda<V> scalar(V value)
      +
      This method created and return a scalar Tensor instance which wraps the provided value.
      -
      -
      Parameters:
      -
      value - The value which ought to be wrapped by a new scalar Tensor instance.
      -
      Returns:
      -
      A scala Tensor instance wrapping the provided value.
      +
      +
      Parameters:
      +
      value - The value which ought to be wrapped by a new scalar Tensor instance.
      +
      Returns:
      +
      A scalar Tensor instance wrapping the provided value.
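A rough sketch combining the three entry shapes documented in this interface (withShape, vector, scalar). Obtaining the builder through Tensor.of(Class), and completing the shaped build with andFill(..), are assumptions made for the sake of the example.

```java
import java.util.Arrays;
import neureka.Nda;
import neureka.Tensor;

public class ShapeScalarVectorSketch {
    public static void main(String[] args) {
        // vector(..) wraps the given values in a rank-1 tensor.
        Nda<Integer> v = Tensor.of(Integer.class).vector(1, 2, 3);

        // scalar(..) wraps a single value.
        Nda<Integer> s = Tensor.of(Integer.class).scalar(42);

        // withShape(..) continues the fluent chain; andFill(..) (assumed here) repeats
        // the provided values until the 2x2 tensor is fully populated.
        Nda<Integer> m = Tensor.of(Integer.class)
                               .withShape(2, 2)
                               .andFill(1, 2);

        System.out.println(Arrays.asList(v, s, m));
    }
}
```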
      -
    - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/fluent/building/states/WithShapeOrScalarOrVectorOnDevice.html b/docs/jdocs/neureka/fluent/building/states/WithShapeOrScalarOrVectorOnDevice.html index 59af01ac5..a636b1b03 100644 --- a/docs/jdocs/neureka/fluent/building/states/WithShapeOrScalarOrVectorOnDevice.html +++ b/docs/jdocs/neureka/fluent/building/states/WithShapeOrScalarOrVectorOnDevice.html @@ -1,145 +1,243 @@ - + + - -WithShapeOrScalarOrVectorOnDevice (neureka 1.0.0 API) - - - - + +WithShapeOrScalarOrVectorOnDevice (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Interface WithShapeOrScalarOrVectorOnDevice<V>

    +
    neureka.fluent.building.states
    +

    Interface WithShapeOrScalarOrVectorOnDevice<V>

    -
    -
    +
    +
    +
    -
    - +
    +
    + - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Method Details

      -
        -
      • -
        -

        on

        - +
          +
        • + + +

          Method Detail

          + + + +
            +
          • +

            on

            +
            WithShapeOrScalarOrVectorTensor<V> on(Device<? super V> device)
            Use this to specify the device onto which the tensor should be stored.
            -
            -
            Parameters:
            -
            device - The Device which should host the tensor built by this builder.
            -
            Returns:
            +
            +
            Parameters:
            +
            device - The Device which should host the tensor built by this builder.
            +
            Returns:
            The next fluent builder API step, which requires the definition of the shape of the tensor.
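A hedged sketch of the device step documented above. The CPU.get() singleton lookup and the Tensor.of(Class) entry point are assumptions that do not appear in this diff; on(..), withShape(..) and all(..) are the documented builder steps.

```java
import neureka.Tensor;
import neureka.devices.host.CPU;

public class DeviceBuilderSketch {
    public static void main(String[] args) {
        // The device is chosen first, then the shape, then the fill strategy.
        CPU host = CPU.get(); // assumed singleton accessor for the host (CPU) device
        Tensor<Float> t = Tensor.of(Float.class)   // assumed entry point
                                .on(host)          // host the tensor on the chosen device
                                .withShape(3, 3)
                                .all(0f);          // homogeneous fill, see IterByOrIterFromOrAll.all(V)
        System.out.println(t);
    }
}
```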
            -
      -
    - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/fluent/building/states/WithShapeOrScalarOrVectorTensor.html b/docs/jdocs/neureka/fluent/building/states/WithShapeOrScalarOrVectorTensor.html index 014aafc24..a15aedcc1 100644 --- a/docs/jdocs/neureka/fluent/building/states/WithShapeOrScalarOrVectorTensor.html +++ b/docs/jdocs/neureka/fluent/building/states/WithShapeOrScalarOrVectorTensor.html @@ -1,263 +1,380 @@ - + + - -WithShapeOrScalarOrVectorTensor (neureka 1.0.0 API) - - - - + +WithShapeOrScalarOrVectorTensor (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Interface WithShapeOrScalarOrVectorTensor<V>

    +
    neureka.fluent.building.states
    +

    Interface WithShapeOrScalarOrVectorTensor<V>

    -
    -
    +
    +
    +
    -
    - +
    +
    +
    +
    +
      +
    • -
    • -
      -

      Method Details

      - + + + +
        +
      • +

        withShape

        +
        default <N extends java.lang.Number> IterByOrIterFromOrAllTensor<V> withShape(java.util.List<N> shape)
        Define a tensor shape by passing a list of numbers to this method, - which represent the shape of the Tensor that should be built. + which represent the shape of the Tensor that should be built. This should be called immediately after having specified the type of the tensor.
        -
        -
        Specified by:
        -
        withShape in interface WithShapeOrScalarOrVector<V>
        -
        Parameters:
        -
        shape - The shape list of the Tensor that should be built.
        -
        Returns:
        +
        +
        Specified by:
        +
        withShape in interface WithShapeOrScalarOrVector<V>
        +
        Parameters:
        +
        shape - The shape list of the Tensor that should be built.
        +
        Returns:
        The next step in the call transition graph of this fluent builder API.
        -
    • -
    • -
      -

      vector

      -
      Tensor<V> vector(V... values)
      -
      This method creates and returns a vector Tensor instance +
    + + + + + +
      +
    • +

      vector

      +
      Tensor<V> vector(V... values)
      +
      This method creates and returns a vector Tensor instance which wraps the provided values.
      -
      -
      Specified by:
      -
      vector in interface WithShapeOrScalarOrVector<V>
      -
      Parameters:
      -
      values - The values which ought to be wrapped by a new vector Tensor instance.
      -
      Returns:
      -
      A vector Tensor instance wrapping the provided values.
      +
      +
      Specified by:
      +
      vector in interface WithShapeOrScalarOrVector<V>
      +
      Parameters:
      +
      values - The values which ought to be wrapped by a new vector Tensor instance.
      +
      Returns:
      +
      A vector Tensor instance wrapping the provided values.
      -
    • -
    • -
      -

      vector

      -
      default Tensor<V> vector(List<V> values)
      -
      This method creates and returns a vector Tensor instance +
    + + + +
      +
    • +

      vector

      +
      default Tensor<V> vector(java.util.List<V> values)
      +
      This method creates and returns a vector Tensor instance which wraps the provided values.
      -
      -
      Specified by:
      -
      vector in interface WithShapeOrScalarOrVector<V>
      -
      Parameters:
      +
      +
      Specified by:
      +
      vector in interface WithShapeOrScalarOrVector<V>
      +
      Parameters:
      values - The list of values which ought to be turned into a vector.
      -
      Returns:
      +
      Returns:
      A vector representing the provided values.
      -
    • -
    • -
      -

      vector

      -
      default Tensor<V> vector(Iterable<V> values)
      -
      This method creates and returns a vector Tensor instance +
    + + + +
      +
    • +

      vector

      +
      default Tensor<V> vector(java.lang.Iterable<V> values)
      +
      This method creates and returns a vector Tensor instance which wraps the provided values.
      -
      -
      Specified by:
      -
      vector in interface WithShapeOrScalarOrVector<V>
      -
      Parameters:
      +
      +
      Specified by:
      +
      vector in interface WithShapeOrScalarOrVector<V>
      +
      Parameters:
      values - The list of values which ought to be turned into a vector.
      -
      Returns:
      +
      Returns:
      A vector representing the provided values.
      -
    • -
    • -
      -

      scalar

      -
      Tensor<V> scalar(V value)
      -
      This method created and return a scalar Tensor instance +
    + + + + + +
      +
    • +

      scalar

      +
      Tensor<V> scalar(V value)
      +
      This method created and return a scalar Tensor instance which wraps the provided value.
      -
      -
      Specified by:
      -
      scalar in interface WithShapeOrScalarOrVector<V>
      -
      Parameters:
      -
      value - The value which ought to be wrapped by a new scalar Tensor instance.
      -
      Returns:
      -
      A scala Tensor instance wrapping the provided value.
      +
      +
      Specified by:
      +
      scalar in interface WithShapeOrScalarOrVector<V>
      +
      Parameters:
      +
      value - The value which ought to be wrapped by a new scalar Tensor instance.
      +
      Returns:
      +
      A scalar Tensor instance wrapping the provided value.
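A short sketch of the Tensor typed builder state using the list based overloads documented above; the Tensor.of(Class) entry point and the List.of(..) literals are assumptions for illustration.

```java
import java.util.List;
import neureka.Tensor;

public class TensorBuilderSketch {
    public static void main(String[] args) {
        // withShape(List<N>) accepts any list of numbers; andFill(List<V>) (assumed here)
        // repeats the provided values until the 2x3 tensor is fully populated.
        Tensor<Double> t = Tensor.of(Double.class)               // assumed entry point
                                 .withShape(List.of(2, 3))
                                 .andFill(List.of(1.0, 2.0, 3.0));

        // scalar(..) and vector(..) short-circuit the shape step entirely.
        Tensor<Double> pi = Tensor.of(Double.class).scalar(3.14159);
        Tensor<Double> xs = Tensor.of(Double.class).vector(List.of(0.5, 1.5, 2.5));

        System.out.println(t + " " + pi + " " + xs);
    }
}
```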
      -
    - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/fluent/building/states/WithShapeOrScalarOrVectorTsr.html b/docs/jdocs/neureka/fluent/building/states/WithShapeOrScalarOrVectorTsr.html deleted file mode 100644 index efe220807..000000000 --- a/docs/jdocs/neureka/fluent/building/states/WithShapeOrScalarOrVectorTsr.html +++ /dev/null @@ -1,414 +0,0 @@ - - - - - -WithShapeOrScalarOrVectorTsr (neureka 0.21.0 API) - - - - - - - - - - - - - -
    - -
    - -
    -
    - -

    Interface WithShapeOrScalarOrVectorTsr<V>

    -
    -
    -
    - -
    -
    -
      -
    • - -
      -
        -
      • - - -

        Method Summary

        - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        All Methods Instance Methods Abstract Methods Default Methods 
        Modifier and TypeMethodDescription
        Tsr<V>scalar​(V value) -
        This method created and return a scalar Tsr instance - which wraps the provided value.
        -
        default Tsr<V>vector​(java.lang.Iterable<V> values) -
        This method creates and returns a vector Tsr instance - which wraps the provided values.
        -
        default Tsr<V>vector​(java.util.List<V> values) -
        This method creates and returns a vector Tsr instance - which wraps the provided values.
        -
        Tsr<V>vector​(V... values) -
        This method creates and returns a vector Tsr instance - which wraps the provided values.
        -
        IterByOrIterFromOrAllTsr<V>withShape​(int... shape) -
        Define a tensor shape by passing an array of int values to this method, - which represent the shape of the Tsr that should be built.
        -
        default <N extends java.lang.Number>
        IterByOrIterFromOrAllTsr<V>
        withShape​(java.util.List<N> shape) -
        Define a tensor shape by passing a list of numbers to this method, - which represent the shape of the Tsr that should be built.
        -
        -
      • -
      -
      -
    • -
    -
    -
    -
      -
    • - -
      -
        -
      • - - -

        Method Detail

        - - - -
          -
        • -

          withShape

          -
          IterByOrIterFromOrAllTsr<V> withShape​(int... shape)
          -
          Define a tensor shape by passing an array of int values to this method, - which represent the shape of the Tsr that should be built. - This should be called immediately after having specified the type of the tensor.
          -
          -
          Specified by:
          -
          withShape in interface WithShapeOrScalarOrVector<V>
          -
          Parameters:
          -
          shape - The shape array of the Tsr that should be built.
          -
          Returns:
          -
          The next step in the call transition graph of this fluent builder API.
          -
          -
        • -
        - - - -
          -
        • -

          withShape

          -
          default <N extends java.lang.Number> IterByOrIterFromOrAllTsr<V> withShape​(java.util.List<N> shape)
          -
          Define a tensor shape by passing a list of numbers to this method, - which represent the shape of the Tsr that should be built. - This should be called immediately after having specified the type of the tensor.
          -
          -
          Specified by:
          -
          withShape in interface WithShapeOrScalarOrVector<V>
          -
          Parameters:
          -
          shape - The shape list of the Tsr that should be built.
          -
          Returns:
          -
          The next step in the call transition graph of this fluent builder API.
          -
          -
        • -
        - - - - - -
          -
        • -

          vector

          -
          Tsr<V> vector​(V... values)
          -
          This method creates and returns a vector Tsr instance - which wraps the provided values.
          -
          -
          Specified by:
          -
          vector in interface WithShapeOrScalarOrVector<V>
          -
          Parameters:
          -
          values - The values which ought to be wrapped by a new vector Tsr instance.
          -
          Returns:
          -
          A vector Tsr instance wrapping the provided values.
          -
          -
        • -
        - - - -
          -
        • -

          vector

          -
          default Tsr<V> vector​(java.util.List<V> values)
          -
          This method creates and returns a vector Tsr instance - which wraps the provided values.
          -
          -
          Specified by:
          -
          vector in interface WithShapeOrScalarOrVector<V>
          -
          Parameters:
          -
          values - The list of values which ought to be turned into a vector.
          -
          Returns:
          -
          A vector representing the provided values.
          -
          -
        • -
        - - - -
          -
        • -

          vector

          -
          default Tsr<V> vector​(java.lang.Iterable<V> values)
          -
          This method creates and returns a vector Tsr instance - which wraps the provided values.
          -
          -
          Specified by:
          -
          vector in interface WithShapeOrScalarOrVector<V>
          -
          Parameters:
          -
          values - The list of values which ought to be turned into a vector.
          -
          Returns:
          -
          A vector representing the provided values.
          -
          -
        • -
        - - - - - -
          -
        • -

          scalar

          -
          Tsr<V> scalar​(V value)
          -
          This method created and return a scalar Tsr instance - which wraps the provided value.
          -
          -
          Specified by:
          -
          scalar in interface WithShapeOrScalarOrVector<V>
          -
          Parameters:
          -
          value - The value which ought to be wrapped by a new scalar Tsr instance.
          -
          Returns:
          -
          A scala Tsr instance wrapping the provided value.
          -
          -
        • -
        -
      • -
      -
      -
    • -
    -
    -
    -
    - -
    - -
    - - diff --git a/docs/jdocs/neureka/fluent/building/states/package-frame.html b/docs/jdocs/neureka/fluent/building/states/package-frame.html new file mode 100644 index 000000000..acdbb92e0 --- /dev/null +++ b/docs/jdocs/neureka/fluent/building/states/package-frame.html @@ -0,0 +1,27 @@ + + + + + +neureka.fluent.building.states (neureka 1.0.1 API) + + + + +

    neureka.fluent.building.states

    + + + diff --git a/docs/jdocs/neureka/fluent/building/states/package-summary.html b/docs/jdocs/neureka/fluent/building/states/package-summary.html index 14dee598e..253959636 100644 --- a/docs/jdocs/neureka/fluent/building/states/package-summary.html +++ b/docs/jdocs/neureka/fluent/building/states/package-summary.html @@ -1,116 +1,178 @@ - + + - -neureka.fluent.building.states (neureka 1.0.0 API) - - - - + +neureka.fluent.building.states (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    -

    Package neureka.fluent.building.states

    +

    Package neureka.fluent.building.states

    -
    -
    package neureka.fluent.building.states
    -
    -
    -
    + + + + diff --git a/docs/jdocs/neureka/fluent/building/states/package-tree.html b/docs/jdocs/neureka/fluent/building/states/package-tree.html index c37fa94ed..b3d9e217e 100644 --- a/docs/jdocs/neureka/fluent/building/states/package-tree.html +++ b/docs/jdocs/neureka/fluent/building/states/package-tree.html @@ -1,90 +1,153 @@ - + + - -neureka.fluent.building.states Class Hierarchy (neureka 1.0.0 API) - - - - + +neureka.fluent.building.states Class Hierarchy (neureka 1.0.1 API) - - - - - - + + -
    - -
    -

    Hierarchy For Package neureka.fluent.building.states

    -Package Hierarchies: +Package Hierarchies:
    -
    +

    Interface Hierarchy

    -
    -
    + + + + diff --git a/docs/jdocs/neureka/fluent/slicing/AxisSliceBuilder.html b/docs/jdocs/neureka/fluent/slicing/AxisSliceBuilder.html index 3e09ae806..cf2cd3e03 100644 --- a/docs/jdocs/neureka/fluent/slicing/AxisSliceBuilder.html +++ b/docs/jdocs/neureka/fluent/slicing/AxisSliceBuilder.html @@ -1,342 +1,477 @@ - + + - -AxisSliceBuilder (neureka 1.0.0 API) - - - - + +AxisSliceBuilder (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class AxisSliceBuilder<V>

    -
    -
    java.lang.Object -
    neureka.fluent.slicing.AxisSliceBuilder<V>
    +
    neureka.fluent.slicing
    +

    Class AxisSliceBuilder<V>

    -
    -
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.fluent.slicing.AxisSliceBuilder<V>
      • +
      +
    • +
    +
    +
    -
    - +
    +
    +
    +
    + + + + + + + + + + + + +
      +
    • +

      all

      +
      public AxisOrGetTensor<V> all()
      +
      Description copied from interface: FromOrAtTensor
      This is a convenience method replacing "from(0).to(axisSize-1)", meaning that it simply slices the whole current axis from the original tensor.
      -
      -
      Specified by:
      -
      all in interface FromOrAt<V>
      -
      Specified by:
      -
      all in interface FromOrAtTensor<V>
      -
      Returns:
      +
      +
      Specified by:
      +
      all in interface FromOrAt<V>
      +
      Specified by:
      +
      all in interface FromOrAtTensor<V>
      +
      Returns:
      The next step in the slicing API which allows one to slice another axis or simply perform the actual slicing and get the tensor.
      -
    • -
    • -
      -

      axis

      -
      public FromOrAtTensor<V> axis(int axis)
      -
      This method returns an instance of the AxisSliceBuilder targeted by the provided index.
      -
      -
      Specified by:
      -
      axis in interface AxisOrGet<V>
      -
      Specified by:
      -
      axis in interface AxisOrGetTensor<V>
      -
      Parameters:
      +
    + + + + + + + + + + + +
      +
    • +

      detached

      +
      public Tensor<V> detached()
      +
      Description copied from interface: StepsOrAxisOrGetTensor
      This method concludes the slicing API by performing the actual slicing and - returning the resulting Tensor instance based on the previously + returning the resulting Tensor instance based on the previously specified slice configuration... - Contrary to the AxisOrGet.get() method, this method returns a slice which + Contrary to the AxisOrGet.get() method, this method returns a slice which is not part of the computation graph of the original tensor (meaning no autograd).
      -
      -
      Specified by:
      -
      detached in interface AxisOrGet<V>
      -
      Specified by:
      -
      detached in interface AxisOrGetTensor<V>
      -
      Specified by:
      -
      detached in interface StepsOrAxisOrGetTensor<V>
      -
      Returns:
      -
      A new Tensor instance which is a slice of the original tensor without autograd.
      +
      +
      Specified by:
      +
      detached in interface AxisOrGet<V>
      +
      Specified by:
      +
      detached in interface AxisOrGetTensor<V>
      +
      Specified by:
      +
      detached in interface StepsOrAxisOrGetTensor<V>
      +
      Returns:
      +
      A new Tensor instance which is a slice of the original tensor without autograd.
      -
    • -
    • -
      -

      resolve

      -
      public void resolve()
      -
      +
    + + + +
      +
    • +

      resolve

      +
      public void resolve()
      +
    • +
    - - +
    +
    - + + + + diff --git a/docs/jdocs/neureka/fluent/slicing/SliceBuilder.html b/docs/jdocs/neureka/fluent/slicing/SliceBuilder.html index 7815ee103..1e532ef25 100644 --- a/docs/jdocs/neureka/fluent/slicing/SliceBuilder.html +++ b/docs/jdocs/neureka/fluent/slicing/SliceBuilder.html @@ -1,252 +1,370 @@ - + + - -SliceBuilder (neureka 1.0.0 API) - - - - + +SliceBuilder (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class SliceBuilder<V>

    -
    -
    java.lang.Object -
    neureka.fluent.slicing.SliceBuilder<V>
    +
    neureka.fluent.slicing
    +

    Class SliceBuilder<V>

    -
    -
    -
    Type Parameters:
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.fluent.slicing.SliceBuilder<V>
      • +
      +
    • +
    +
    +
      +
    • +
      +
      Type Parameters:
      V - The type of the value(s) held by the tensor which ought to be sliced with the help of this builder.
      -
      +
      All Implemented Interfaces:
      -
      AxisOrGet<V>, AxisOrGetTensor<V>
      +
      AxisOrGet<V>, AxisOrGetTensor<V>

      -
      public class SliceBuilder<V> -extends Object -implements AxisOrGetTensor<V>
      +
      +
      public class SliceBuilder<V>
      +extends java.lang.Object
      +implements AxisOrGetTensor<V>
      This class is the heart of the slice builder API, collecting range configurations by exposing an API consisting of multiple interfaces which form a call state transition graph. - Instances of this class do not perform the actual slicing of a Tensor instance themselves, + Instances of this class do not perform the actual slicing of a Tensor instance themselves, however instead they merely serve as collectors of slice configuration data. - The API exposed by the SliceBuilder uses method chaining as well as a set of implemented interfaces + The API exposed by the SliceBuilder uses method chaining as well as a set of implemented interfaces which reference themselves in the form of the return types defined by the method signatures of said interfaces. A user of the API can only call methods exposed by the current "view" of the builder, namely a interface. This ensures a controlled order of calls to the API...
      -
    -
    -
      + +
    +
    +
    + + + + +
      +
    • +

      detached

      +
      public Tensor<V> detached()
      +
      Description copied from interface: AxisOrGetTensor
      This method concludes the slicing API by performing the actual slicing and - returning the resulting Tensor instance based on the previously + returning the resulting Tensor instance based on the previously specified slice configuration... - Contrary to the AxisOrGet.get() method, this method returns a slice which + Contrary to the AxisOrGet.get() method, this method returns a slice which is not part of the computation graph of the original tensor (meaning no autograd).
      -
      -
      Specified by:
      -
      detached in interface AxisOrGet<V>
      -
      Specified by:
      -
      detached in interface AxisOrGetTensor<V>
      -
      Returns:
      -
      A new Tensor instance which is a slice of the original tensor without autograd.
      +
      +
      Specified by:
      +
      detached in interface AxisOrGet<V>
      +
      Specified by:
      +
      detached in interface AxisOrGetTensor<V>
      +
      Returns:
      +
      A new Tensor instance which is a slice of the original tensor without autograd.
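Since SliceBuilder only collects configuration, a typical call chain looks roughly like the following. The slice() entry point on Tensor, the int based bounds and the concrete values are assumptions; axis(..), from(..)/to(..), all() and detached() are the documented steps.

```java
import neureka.Tensor;

public class SliceBuilderSketch {
    public static void main(String[] args) {
        Tensor<Double> source = Tensor.of(Double.class)  // assumed entry point
                                      .withShape(4, 4)
                                      .andSeed(42);

        // axis(0).from(1).to(2) keeps rows 1..2, axis(1).all() keeps every column;
        // detached() performs the slice without attaching it to the autograd graph.
        Tensor<Double> slice = source.slice()            // assumed entry point returning a SliceBuilder
                                     .axis(0).from(1).to(2)
                                     .axis(1).all()
                                     .detached();

        System.out.println(slice);
    }
}
```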
      -
    - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/fluent/slicing/SmartSlicer.html b/docs/jdocs/neureka/fluent/slicing/SmartSlicer.html index 207ab9c16..be044847f 100644 --- a/docs/jdocs/neureka/fluent/slicing/SmartSlicer.html +++ b/docs/jdocs/neureka/fluent/slicing/SmartSlicer.html @@ -1,162 +1,272 @@ - + + - -SmartSlicer (neureka 1.0.0 API) - - - - + +SmartSlicer (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class SmartSlicer

    +
    neureka.fluent.slicing
    +

    Class SmartSlicer

    -
    java.lang.Object -
    neureka.fluent.slicing.SmartSlicer
    -
    -
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.fluent.slicing.SmartSlicer
      • +
      +
    • +
    +
    +
      +

    • -
      public class SmartSlicer -extends Object
      +
      +
      public class SmartSlicer
      +extends java.lang.Object
      This class is responsible for receiving any input and trying to interpret it so that a slice can be formed.
      -
    -
    -
      - -
    • -
      -

      Constructor Summary

      -
      Constructors
      -
      -
      Constructor
      -
      Description
      - -
       
      +
    • +
    - +
    + - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        SmartSlicer

        -
        public SmartSlicer()
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            SmartSlicer

            +
            public SmartSlicer()
          -
    • +
    -
  • -
    -

    Method Details

    -
      -
    • -
      -

      slice

      -
      public static <ValType> Tensor<ValType> slice(Object[] ranges, - Tensor<ValType> source)
      -
      +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          slice

          +
          public static <ValType> Tensor<ValType> slice(java.lang.Object[] ranges,
          +                                              Tensor<ValType> source)
        -
  • - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/fluent/slicing/package-frame.html b/docs/jdocs/neureka/fluent/slicing/package-frame.html new file mode 100644 index 000000000..2169502a4 --- /dev/null +++ b/docs/jdocs/neureka/fluent/slicing/package-frame.html @@ -0,0 +1,21 @@ + + + + + +neureka.fluent.slicing (neureka 1.0.1 API) + + + + +

    neureka.fluent.slicing

    + + + diff --git a/docs/jdocs/neureka/fluent/slicing/package-summary.html b/docs/jdocs/neureka/fluent/slicing/package-summary.html index 10050adf3..1fe09cce6 100644 --- a/docs/jdocs/neureka/fluent/slicing/package-summary.html +++ b/docs/jdocs/neureka/fluent/slicing/package-summary.html @@ -1,103 +1,153 @@ - + + - -neureka.fluent.slicing (neureka 1.0.0 API) - - - - + +neureka.fluent.slicing (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    -

    Package neureka.fluent.slicing

    +

    Package neureka.fluent.slicing

    -
    -
    package neureka.fluent.slicing
    -
    -
      -
    • - -
    • -
    • -
      -
      Classes
      -
      -
      Class
      -
      Description
      - -
       
      - -
      +
      +
        +
      • + + + + + + + + + + + + + + + + + + + + +
        Class Summary 
        ClassDescription
        AxisSliceBuilder<V> 
        SliceBuilder<V>
        This class is the heart of the slice builder API, collecting range configurations by exposing an API consisting of multiple interfaces which form a call state transition graph.
        - - -
        +
        SmartSlicer
        This class is responsible for receiving any input and trying to interpret it so that a slice can be formed.
        - - - +
      -
    -
    + + + + diff --git a/docs/jdocs/neureka/fluent/slicing/package-tree.html b/docs/jdocs/neureka/fluent/slicing/package-tree.html index 5540d96e9..4ae61fbd5 100644 --- a/docs/jdocs/neureka/fluent/slicing/package-tree.html +++ b/docs/jdocs/neureka/fluent/slicing/package-tree.html @@ -1,73 +1,136 @@ - + + - -neureka.fluent.slicing Class Hierarchy (neureka 1.0.0 API) - - - - + +neureka.fluent.slicing Class Hierarchy (neureka 1.0.1 API) - - - - - - + + -
    - -
    -

    Hierarchy For Package neureka.fluent.slicing

    -Package Hierarchies: +Package Hierarchies:
    -
    +

    Class Hierarchy

    -
    -
    + + + + diff --git a/docs/jdocs/neureka/fluent/slicing/states/AxisOrGet.html b/docs/jdocs/neureka/fluent/slicing/states/AxisOrGet.html index c697a5136..d9acf1f63 100644 --- a/docs/jdocs/neureka/fluent/slicing/states/AxisOrGet.html +++ b/docs/jdocs/neureka/fluent/slicing/states/AxisOrGet.html @@ -1,194 +1,296 @@ - + + - -AxisOrGet (neureka 1.0.0 API) - - - - + +AxisOrGet (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Interface AxisOrGet<V>

    +
    neureka.fluent.slicing.states
    +

    Interface AxisOrGet<V>

    -
    -
    -
    Type Parameters:
    -
    V - The type parameter for items of the Tensor which ought to be sliced.
    +
    +
    +
    -
    -
      + triggers the slicing and returns the resulting Tensor instance... or a call to + the FromOrAt interface which is the starting point for slicing individual axis of a tensor...
    + + +
    +
    +
      +
    • -
    • -
      -

      Method Summary

      -
      -
      -
      -
      -
      Modifier and Type
      -
      Method
      -
      Description
      - -
      axis(int axis)
      -
      +
        +
      • + + +

        Method Summary

        + + + + + + + + + + + + + + + + + + +
        All Methods Instance Methods Abstract Methods 
        Modifier and TypeMethod and Description
        FromOrAt<V>axis(int axis)
        Slicing a tensor ultimately means slicing one or more of its axes! This method allows one to specify which axis should be sliced next.
        - - - -
        +
        Nda<V>detached()
        This method concludes the slicing API by performing the actual slicing and - returning the resulting Tensor instance based on the previously + returning the resulting Tensor instance based on the previously specified slice configuration...
        - - -
        get()
        -
        +
        Nda<V>get()
        This method concludes the slicing API by performing the actual slicing and - returning the resulting Tensor instance based on the previously + returning the resulting Tensor instance based on the previously specified slice configuration...
        - - - - - +
        +
      • +
    - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Method Details

      -
        -
      • -
        -

        axis

        -
        FromOrAt<V> axis(int axis)
        +
          +
        • + + +

          Method Detail

          + + + +
            +
          • +

            axis

            +
            FromOrAt<V> axis(int axis)
            Slicing a tensor ultimately means slicing one or more of its axes! This method allows one to specify which axis should be sliced next.
            -
            -
            Parameters:
            +
            +
            Parameters:
            axis - The axis which ought to be sliced next.
            -
            Returns:
            +
            Returns:
            The fluent axis slicing API.
            -
      • -
      • -
        -

        get

        -
        Nda<V> get()
        +
      + + + +
        +
      • +

        get

        +
        Nda<V> get()
        This method concludes the slicing API by performing the actual slicing and - returning the resulting Tensor instance based on the previously + returning the resulting Tensor instance based on the previously specified slice configuration...
        -
        -
        Returns:
        -
        A new Tensor instance which is a slice of the original tensor.
        +
        +
        Returns:
        +
        A new Tensor instance which is a slice of the original tensor.
        -
    • -
    • -
      -

      detached

      -
      Nda<V> detached()
      +
    + + + +
      +
    • +

      detached

      +
      Nda<V> detached()
      This method concludes the slicing API by performing the actual slicing and - returning the resulting Tensor instance based on the previously + returning the resulting Tensor instance based on the previously specified slice configuration... - Contrary to the get() method, this method returns a slice which + Contrary to the get() method, this method returns a slice which is not part of the computation graph of the original tensor (meaning no autograd).
      -
      -
      Returns:
      -
      A new Tensor instance which is a slice of the original tensor without autograd.
      +
      +
      Returns:
      +
      A new Tensor instance which is a slice of the original tensor without autograd.
      -
    - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/fluent/slicing/states/AxisOrGetTensor.html b/docs/jdocs/neureka/fluent/slicing/states/AxisOrGetTensor.html index 8415c2a1a..d1ab63881 100644 --- a/docs/jdocs/neureka/fluent/slicing/states/AxisOrGetTensor.html +++ b/docs/jdocs/neureka/fluent/slicing/states/AxisOrGetTensor.html @@ -1,196 +1,298 @@ - + + - -AxisOrGetTensor (neureka 1.0.0 API) - - - - + +AxisOrGetTensor (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Interface AxisOrGetTensor<V>

    +
    neureka.fluent.slicing.states
    +

    Interface AxisOrGetTensor<V>

    -
    -
    +
    +
    +
    -
    -
      +
      +
      public interface AxisOrGetTensor<V>
      +extends AxisOrGet<V>
      + +
    +
    +
    +
      +
    • -
    • -
      -

      Method Summary

      -
      -
      -
      -
      -
      Modifier and Type
      -
      Method
      -
      Description
      - -
      axis(int axis)
      -
      +
        +
      • + + +

        Method Summary

        + + + + + + + + + + + + + + + + + + +
        All Methods Instance Methods Abstract Methods 
        Modifier and TypeMethod and Description
        FromOrAtTensor<V>axis(int axis)
        Slicing a tensor ultimately means slicing one or more of its axes! This method allows one to specify which axis should be sliced next.
        - - - -
        +
        Tensor<V>detached()
        This method concludes the slicing API by performing the actual slicing and - returning the resulting Tensor instance based on the previously + returning the resulting Tensor instance based on the previously specified slice configuration...
        - - -
        get()
        -
        +
        Tensor<V>get()
        This method concludes the slicing API by performing the actual slicing and - returning the resulting Tensor instance based on the previously + returning the resulting Tensor instance based on the previously specified slice configuration...
        - - - - - +
        +
      • +
    - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Method Details

      -
        -
      • -
        -

        axis

        -
        FromOrAtTensor<V> axis(int axis)
        +
          +
        • + + +

          Method Detail

          + + + +
            +
          • +

            axis

            +
            FromOrAtTensor<V> axis(int axis)
            Slicing a tensor ultimately means slicing one or more of its axes! This method allows one to specify which axis should be sliced next.
            -
            -
            Specified by:
            -
            axis in interface AxisOrGet<V>
            -
            Parameters:
            +
            +
            Specified by:
            +
            axis in interface AxisOrGet<V>
            +
            Parameters:
            axis - The axis which ought to be sliced next.
            -
            Returns:
            +
            Returns:
            The fluent axis slicing API.
            -
      • -
      • -
        -

        get

        -
        Tensor<V> get()
        +
      + + + +
        +
      • +

        get

        +
        Tensor<V> get()
        This method concludes the slicing API by performing the actual slicing and - returning the resulting Tensor instance based on the previously + returning the resulting Tensor instance based on the previously specified slice configuration...
        -
        -
        Specified by:
        -
        get in interface AxisOrGet<V>
        -
        Returns:
        -
        A new Tensor instance which is a slice of the original tensor.
        +
        +
        Specified by:
        +
        get in interface AxisOrGet<V>
        +
        Returns:
        +
        A new Tensor instance which is a slice of the original tensor.
        -
    • -
    • -
      -

      detached

      -
      Tensor<V> detached()
      +
    + + + +
      +
    • +

      detached

      +
      Tensor<V> detached()
      This method concludes the slicing API by performing the actual slicing and - returning the resulting Tensor instance based on the previously + returning the resulting Tensor instance based on the previously specified slice configuration... - Contrary to the AxisOrGet.get() method, this method returns a slice which + Contrary to the AxisOrGet.get() method, this method returns a slice which is not part of the computation graph of the original tensor (meaning no autograd).
      -
      -
      Specified by:
      -
      detached in interface AxisOrGet<V>
      -
      Returns:
      -
      A new Tensor instance which is a slice of the original tensor without autograd.
      +
      +
      Specified by:
      +
      detached in interface AxisOrGet<V>
      +
      Returns:
      +
      A new Tensor instance which is a slice of the original tensor without autograd.
      -
    - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/fluent/slicing/states/AxisOrGetTsr.html b/docs/jdocs/neureka/fluent/slicing/states/AxisOrGetTsr.html deleted file mode 100644 index c881c0d55..000000000 --- a/docs/jdocs/neureka/fluent/slicing/states/AxisOrGetTsr.html +++ /dev/null @@ -1,329 +0,0 @@ - - - - - -AxisOrGetTsr (neureka 0.21.0 API) - - - - - - - - - - - - - -
    - -
    - -
    -
    - -

    Interface AxisOrGetTsr<V>

    -
    -
    -
    - -
    -
    -
      -
    • - -
      -
        -
      • - - -

        Method Summary

        - - - - - - - - - - - - - - - - - - - - - - -
        All Methods Instance Methods Abstract Methods 
        Modifier and TypeMethodDescription
        FromOrAtTsr<V>axis​(int axis) -
        Slicing a tensor ultimately means slicing one or more of its axes! - This method allows one to specify which axis should be sliced next.
        -
        Tsr<V>detached() -
        This method concludes the slicing API by performing the actual slicing and - returning the resulting Tsr instance based on the previously - specified slice configuration...
        -
        Tsr<V>get() -
        This method concludes the slicing API by performing the actual slicing and - returning the resulting Tsr instance based on the previously - specified slice configuration...
        -
        -
      • -
      -
      -
    • -
    -
    -
    -
      -
    • - -
      -
        -
      • - - -

        Method Detail

        - - - -
          -
        • -

          axis

          -
          FromOrAtTsr<V> axis​(int axis)
          -
          Slicing a tensor ultimately means slicing one or more of its axes! - This method allows one to specify which axis should be sliced next.
          -
          -
          Specified by:
          -
          axis in interface AxisOrGet<V>
          -
          Parameters:
          -
          axis - The axis which ought to be sliced next.
          -
          Returns:
          -
          The fluent axis slicing API.
          -
          -
        • -
        - - - -
          -
        • -

          get

          -
          Tsr<V> get()
          -
          This method concludes the slicing API by performing the actual slicing and - returning the resulting Tsr instance based on the previously - specified slice configuration...
          -
          -
          Specified by:
          -
          get in interface AxisOrGet<V>
          -
          Returns:
          -
          A new Tsr instance which is a slice of the original tensor.
          -
          -
        • -
        - - - -
          -
        • -

          detached

          -
          Tsr<V> detached()
          -
          This method concludes the slicing API by performing the actual slicing and - returning the resulting Tsr instance based on the previously - specified slice configuration... - Contrary to the AxisOrGet.get() method, this method returns a slice which - is not part of the computation graph of the original tensor (meaning no autograd).
          -
          -
          Specified by:
          -
          detached in interface AxisOrGet<V>
          -
          Returns:
          -
          A new Tsr instance which is a slice of the original tensor without autograd.
          -
          -
        • -
        -
      • -
      -
      -
    • -
    -
    -
    -
    - -
    - -
    - - diff --git a/docs/jdocs/neureka/fluent/slicing/states/FromOrAt.html b/docs/jdocs/neureka/fluent/slicing/states/FromOrAt.html index 0ea84812c..b8d53aa09 100644 --- a/docs/jdocs/neureka/fluent/slicing/states/FromOrAt.html +++ b/docs/jdocs/neureka/fluent/slicing/states/FromOrAt.html @@ -1,195 +1,297 @@ - + + - -FromOrAt (neureka 1.0.0 API) - - - - + +FromOrAt (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Interface FromOrAt<V>

    +
    neureka.fluent.slicing.states
    +

    Interface FromOrAt<V>

    -
    -
    -
    Type Parameters:
    -
    V - The type parameter for items of the Tensor which ought to be sliced.
    +
    +
    +
      +
    • +
      +
      Type Parameters:
      +
      V - The type parameter for items of the Tensor which ought to be sliced.
      -
      +
      All Known Subinterfaces:
      -
      FromOrAtTensor<V>
      +
      FromOrAtTensor<V>
      -
      +
      All Known Implementing Classes:
      -
      AxisSliceBuilder
      +
      AxisSliceBuilder

      -
      public interface FromOrAt<V>
      +
      +
      public interface FromOrAt<V>
      This is the starting point for defining the slice range of a specified axis within the call transition graph exposed by the slice builder API. - This interface defines 2 transition paths, namely a route to the To interface + This interface defines 2 transition paths, namely a route to the To interface of the call transition graph, which expects a range to be defined by - calling the methods from(int) and To.to(int), - or a call to the "at" method, which is a shortcut for calling from(int) and To.to(int) + calling the methods from(int) and To.to(int), + or a call to the "at" method, which is a shortcut for calling from(int) and To.to(int) with the same arguments.
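To make the two transition paths described above concrete, here is a small editorial sketch (reusing the hypothetical 3x4 tensor t and the assumed t.slice() entry point from the sketch further up; these lines belong inside a method body and are not a verbatim excerpt from the library docs):

    // Range path: from(0).to(1) selects rows 0..1 of axis 0.
    Tensor<Double> rows = t.slice().axis(0).from(0).to(1).axis(1).all().get();

    // Shortcut path: at(2) is equivalent to from(2).to(2) on the current axis.
    Tensor<Double> column = t.slice().axis(1).at(2).get();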
      -
    -
    -
      + +
    +
    +
    +
      +
    • -
    • -
      -

      Method Summary

      -
      -
      -
      -
      -
      Modifier and Type
      -
      Method
      -
      Description
      - -
      all()
      -
      +
        +
      • + + +

        Method Summary

        + + + + + + + + + + + + + + + + + + +
        All Methods Instance Methods Abstract Methods 
        Modifier and TypeMethod and Description
        AxisOrGet<V>all()
        This is a convenience method replacing "from(0).to(axisSize-1)", meaning that it simply slices the whole current axis from the original tensor.
        - - -
        at(int index)
        -
        +
        AxisOrGet<V>at(int index)
        This is a convenience method replacing "from(i).to(i)", meaning that it simply slices a single axis from the original tensor at the specified index.
        - -
        To<V>
        -
        from(int index)
        -
        +
        To<V>from(int index)
        This is the starting point for defining the slice range of a specified axis within the method chain/graph exposed by the slice builder API.
        - - - - - +
        +
      • +
    - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Method Details

      -
        -
      • -
        -

        from

        -
        To<V> from(int index)
        +
          +
        • + + +

          Method Detail

          + + + +
            +
          • +

            from

            +
            To<V> from(int index)
            This is the starting point for defining the slice range of a specified axis within the method chain/graph exposed by the slice builder API. It receives the index at which the slice range should start.
            -
            -
            Parameters:
            +
            +
            Parameters:
            index - A valid index in the current axis from which the slice should start.
            -
            Returns:
            +
            Returns:
            The next step in the slicing API which expects one to specify the end of the slice range.
            -
      • -
      • -
        -

        at

        -
        AxisOrGet<V> at(int index)
        +
      + + + +
        +
      • +

        at

        +
        AxisOrGet<V> at(int index)
        This is a convenience method replacing "from(i).to(i)", meaning that it simply slices a single axis from the original tensor at the specified index.
        -
        -
        Parameters:
        +
        +
        Parameters:
        index - The index which ought to be sliced.
        -
        Returns:
        +
        Returns:
        The next step in the slicing API which allows one to slice another axis or simply perform the actual slicing and get the tensor.
        -
    • -
    • -
      -

      all

      -
      AxisOrGet<V> all()
      +
    + + + +
      +
    • +

      all

      +
      AxisOrGet<V> all()
      This is a convenience method replacing "from(0).to(axisSize-1)", meaning that it simply slices the whole current axis from the original tensor.
      -
      -
      Returns:
      +
      +
      Returns:
      The next step in the slicing API which allows one to slice another axis or simply perform the actual slicing and get the tensor.
      -
    - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/fluent/slicing/states/FromOrAtTensor.html b/docs/jdocs/neureka/fluent/slicing/states/FromOrAtTensor.html index 16eaa669e..0b7bc0416 100644 --- a/docs/jdocs/neureka/fluent/slicing/states/FromOrAtTensor.html +++ b/docs/jdocs/neureka/fluent/slicing/states/FromOrAtTensor.html @@ -1,191 +1,293 @@ - + + - -FromOrAtTensor (neureka 1.0.0 API) - - - - + +FromOrAtTensor (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Interface FromOrAtTensor<V>

    +
    neureka.fluent.slicing.states
    +

    Interface FromOrAtTensor<V>

    -
    -
    +
    +
    +
    -
    -
      +
      +
      public interface FromOrAtTensor<V>
      +extends FromOrAt<V>
      + +
    +
    +
    +
      +
    • -
    • -
      -

      Method Summary

      -
      -
      -
      -
      -
      Modifier and Type
      -
      Method
      -
      Description
      - -
      all()
      -
      +
        +
      • + + +

        Method Summary

        + + + + + + + + + + + + + + + + + + +
        All Methods Instance Methods Abstract Methods 
        Modifier and TypeMethod and Description
        AxisOrGetTensor<V>all()
        This is a convenience method replacing "from(0).to(axisSize-1)", meaning that it simply slices the whole current axis from the original tensor.
        - - -
        at(int index)
        -
        +
        AxisOrGetTensor<V>at(int index)
        This is a convenience method replacing "from(i).to(i)", meaning that it simply slices a single axis from the original tensor at the specified index.
        - - -
        from(int index)
        -
        +
        ToForTensor<V>from(int index)
        This is the starting point for defining the slice range of a specified axis within the method chain/graph exposed by the slice builder API.
        - - - - - +
        +
      • +
    - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Method Details

      -
        -
      • -
        -

        from

        -
        ToForTensor<V> from(int index)
        +
          +
        • + + +

          Method Detail

          + + + +
            +
          • +

            from

            +
            ToForTensor<V> from(int index)
            This is the starting point for defining the slice range of a specified axis within the method chain/graph exposed by the slice builder API. It receives the index at which the slice range should start.
            -
            -
            Specified by:
            -
            from in interface FromOrAt<V>
            -
            Parameters:
            +
            +
            Specified by:
            +
            from in interface FromOrAt<V>
            +
            Parameters:
            index - A valid index in the current axis from which the slice should start.
            -
            Returns:
            +
            Returns:
            The next step in the slicing API which expects one to specify the end of the slice range.
            -
      • -
      • -
        -

        at

        -
        AxisOrGetTensor<V> at(int index)
        +
      + + + +
        +
      • +

        at

        +
        AxisOrGetTensor<V> at(int index)
        This is a convenience method replacing "from(i).to(i)", meaning that it simply slices a single axis from the original tensor at the specified index.
        -
        -
        Specified by:
        -
        at in interface FromOrAt<V>
        -
        Parameters:
        +
        +
        Specified by:
        +
        at in interface FromOrAt<V>
        +
        Parameters:
        index - The index which ought to be sliced.
        -
        Returns:
        +
        Returns:
        The next step in the slicing API which allows one to slice another axis or simply perform the actual slicing and get the tensor.
        -
    • -
    • -
      -

      all

      - +
    + + + +
      +
    • +

      all

      +
      AxisOrGetTensor<V> all()
      This is a convenience method replacing "from(0).to(axisSize-1)", meaning that it simply slices the whole current axis from the original tensor.
      -
      -
      Specified by:
      -
      all in interface FromOrAt<V>
      -
      Returns:
      +
      +
      Specified by:
      +
      all in interface FromOrAt<V>
      +
      Returns:
      The next step in the slicing API which allows one to slice another axis or simply perform the actual slicing and get the tensor.
      -
    - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/fluent/slicing/states/FromOrAtTsr.html b/docs/jdocs/neureka/fluent/slicing/states/FromOrAtTsr.html deleted file mode 100644 index 3b0bf8eab..000000000 --- a/docs/jdocs/neureka/fluent/slicing/states/FromOrAtTsr.html +++ /dev/null @@ -1,324 +0,0 @@ - - - - - -FromOrAtTsr (neureka 0.21.0 API) - - - - - - - - - - - - - -
    - -
    - -
    -
    - -

    Interface FromOrAtTsr<V>

    -
    -
    -
    - -
    -
    -
      -
    • - -
      -
        -
      • - - -

        Method Summary

        - - - - - - - - - - - - - - - - - - - - - - -
        All Methods Instance Methods Abstract Methods 
        Modifier and TypeMethodDescription
        AxisOrGetTsr<V>all() -
        This is a convenience method replacing "from(0).to(axisSize-1)", meaning that - it simply slices the whole current axis from the original tensor.
        -
        AxisOrGetTsr<V>at​(int index) -
        This is a convenience method replacing "from(i).to(i)", meaning that - it simply slices a single axis from the original tensor at the specified index.
        -
        ToForTsr<V>from​(int index) -
        This is the starting point for defining the slice range of a specified axis within - the method chain/graph exposed by the slice builder API.
        -
        -
      • -
      -
      -
    • -
    -
    -
    -
      -
    • - -
      -
        -
      • - - -

        Method Detail

        - - - -
          -
        • -

          from

          -
          ToForTsr<V> from​(int index)
          -
          This is the starting point for defining the slice range of a specified axis within - the method chain/graph exposed by the slice builder API. - It receives the index at which the slice range should start.
          -
          -
          Specified by:
          -
          from in interface FromOrAt<V>
          -
          Parameters:
          -
          index - A valid index in the current axis from which the slice should start.
          -
          Returns:
          -
          The next step in the slicing API which expects one to specify the end of the slice range.
          -
          -
        • -
        - - - -
          -
        • -

          at

          -
          AxisOrGetTsr<V> at​(int index)
          -
          This is a convenience method replacing "from(i).to(i)", meaning that - it simply slices a single axis from the original tensor at the specified index.
          -
          -
          Specified by:
          -
          at in interface FromOrAt<V>
          -
          Parameters:
          -
          index - The index which ought to be sliced.
          -
          Returns:
          -
          The next step in the slicing API which allows one to slice another axis or simply - perform the actual slicing and get the tensor.
          -
          -
        • -
        - - - -
          -
        • -

          all

          -
          AxisOrGetTsr<V> all()
          -
          This is a convenience method replacing "from(0).to(axisSize-1)", meaning that - it simply slices the whole current axis from the original tensor.
          -
          -
          Specified by:
          -
          all in interface FromOrAt<V>
          -
          Returns:
          -
          The next step in the slicing API which allows one to slice another axis or simply - perform the actual slicing and get the tensor.
          -
          -
        • -
        -
      • -
      -
      -
    • -
    -
    -
    -
    - -
    - -
    - - diff --git a/docs/jdocs/neureka/fluent/slicing/states/StepsOrAxisOrGet.html b/docs/jdocs/neureka/fluent/slicing/states/StepsOrAxisOrGet.html index df878c38a..a58ba1f66 100644 --- a/docs/jdocs/neureka/fluent/slicing/states/StepsOrAxisOrGet.html +++ b/docs/jdocs/neureka/fluent/slicing/states/StepsOrAxisOrGet.html @@ -1,162 +1,260 @@ - + + - -StepsOrAxisOrGet (neureka 1.0.0 API) - - - - + +StepsOrAxisOrGet (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Interface StepsOrAxisOrGet<V>

    +
    neureka.fluent.slicing.states
    +

    Interface StepsOrAxisOrGet<V>

    -
    -
    -
    Type Parameters:
    -
    V - The type parameter for items of the Tensor which ought to be sliced.
    +
    +
    +
      +
    • +
      +
      Type Parameters:
      +
      V - The type parameter for items of the Tensor which ought to be sliced.
      -
      +
      All Superinterfaces:
      -
      AxisOrGet<V>
      +
      AxisOrGet<V>
      -
      +
      All Known Subinterfaces:
      -
      StepsOrAxisOrGetTensor<V>
      +
      StepsOrAxisOrGetTensor<V>
      -
      +
      All Known Implementing Classes:
      -
      AxisSliceBuilder
      +
      AxisSliceBuilder

      -
      public interface StepsOrAxisOrGet<V> -extends AxisOrGet<V>
      -
      This interface extends the AxisOrGet interface which provides the option to either continue +
      +
      public interface StepsOrAxisOrGet<V>
      +extends AxisOrGet<V>
      +
      This interface extends the AxisOrGet interface which provides the option to either continue slicing another axis or simply trigger the creation and return of a slice instance based on the already provided slice configuration. The method signature introduced in this interface provides the possibility to set a step size - for the previously defined range (FromOrAt.from(int) and To.to(int)). + for the previously defined range (FromOrAt.from(int) and To.to(int)). This step size will be used to create spread steps within the sliced axis.
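A short editorial sketch of the step size described above (again reusing the hypothetical 3x4 tensor t and the assumed t.slice() entry point; fragment for a method body):

    // step(2) walks the previously defined range 0..3 in strides of two,
    // i.e. it keeps positions 0 and 2 of axis 1.
    Tensor<Double> strided = t.slice()
                              .axis(1).from(0).to(3).step(2)
                              .axis(0).all()
                              .get();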
      -
    -
    -
      + +
    +
    +
    +
      +
    • -
    • -
      -

      Method Summary

      -
      -
      -
      -
      -
      Modifier and Type
      -
      Method
      -
      Description
      - -
      step(int size)
      -
      +
    - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Method Details

      -
        -
      • -
        -

        step

        -
        AxisOrGet<V> step(int size)
        +
          +
        • + + +

          Method Detail

          + + + +
            +
          • +

            step

            +
            AxisOrGet<V> step(int size)
            This method allows one to specify a step size within the slice range previously specified for the currently sliced axis.
            -
            -
            Parameters:
            -
            size - The step size of the iterator slicing the underlying Tensor shape.
            -
            Returns:
            +
            +
            Parameters:
            +
            size - The step size of the iterator slicing the underlying Tensor shape.
            +
            Returns:
            The next step in the slicing API which allows one to slice another axis or simply perform the actual slicing and get the tensor.
            -
      -
    - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/fluent/slicing/states/StepsOrAxisOrGetTensor.html b/docs/jdocs/neureka/fluent/slicing/states/StepsOrAxisOrGetTensor.html index 5faa8e8fa..779edefb5 100644 --- a/docs/jdocs/neureka/fluent/slicing/states/StepsOrAxisOrGetTensor.html +++ b/docs/jdocs/neureka/fluent/slicing/states/StepsOrAxisOrGetTensor.html @@ -1,200 +1,306 @@ - + + - -StepsOrAxisOrGetTensor (neureka 1.0.0 API) - - - - + +StepsOrAxisOrGetTensor (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Interface StepsOrAxisOrGetTensor<V>

    +
    neureka.fluent.slicing.states
    +

    Interface StepsOrAxisOrGetTensor<V>

    -
    -
    +
    +
    +
    -
    - +
    +
    +
      +
    • -
    • -
      -

      Method Summary

      -
      -
      -
      -
      -
      Modifier and Type
      -
      Method
      -
      Description
      - - -
      +
        +
      • + + +

        Method Summary

        + + + + + + + + + + + + + + + + + + +
        All Methods Instance Methods Abstract Methods 
        Modifier and TypeMethod and Description
        Tensor<V>detached()
        This method concludes the slicing API by performing the actual slicing and - returning the resulting Tensor instance based on the previously + returning the resulting Tensor instance based on the previously specified slice configuration...
        - - -
        get()
        -
        +
        Tensor<V>get()
        This method concludes the slicing API by performing the actual slicing and - returning the resulting Tensor instance based on the previously + returning the resulting Tensor instance based on the previously specified slice configuration...
        - - -
        step(int size)
        -
        +
        AxisOrGetTensor<V>step(int size)
        This method allows one to specify a step size within the slice range previously specified for the currently sliced axis.
        - - - - -
        -

        Methods inherited from interface neureka.fluent.slicing.states.AxisOrGetTensor

        -axis
        - +
        +
      -
      -
      -
        + +
      +
    +
    +
      +
    • -
    • -
      -

      Method Details

      -
        -
      • -
        -

        step

        -
        AxisOrGetTensor<V> step(int size)
        +
          +
        • + + +

          Method Detail

          + + + +
            +
          • +

            step

            +
            AxisOrGetTensor<V> step(int size)
            This method allows one to specify a step size within the slice range previously specified for the currently sliced axis.
            -
            -
            Specified by:
            -
            step in interface StepsOrAxisOrGet<V>
            -
            Parameters:
            -
            size - The step size of the iterator slicing the underlying Tensor shape.
            -
            Returns:
            +
            +
            Specified by:
            +
            step in interface StepsOrAxisOrGet<V>
            +
            Parameters:
            +
            size - The step size of the iterator slicing the underlying Tensor shape.
            +
            Returns:
            The next step in the slicing API which allows one to slice another axis or simply perform the actual slicing and get the tensor.
            -
      • -
      • -
        -

        get

        -
        Tensor<V> get()
        +
      + + + +
        +
      • +

        get

        +
        Tensor<V> get()
        This method concludes the slicing API by performing the actual slicing and - returning the resulting Tensor instance based on the previously + returning the resulting Tensor instance based on the previously specified slice configuration...
        -
        -
        Specified by:
        -
        get in interface AxisOrGet<V>
        -
        Specified by:
        -
        get in interface AxisOrGetTensor<V>
        -
        Returns:
        -
        A new Tensor instance which is a slice of the original tensor.
        +
        +
        Specified by:
        +
        get in interface AxisOrGet<V>
        +
        Specified by:
        +
        get in interface AxisOrGetTensor<V>
        +
        Returns:
        +
        A new Tensor instance which is a slice of the original tensor.
        -
    • -
    • -
      -

      detached

      -
      Tensor<V> detached()
      +
    + + + +
      +
    • +

      detached

      +
      Tensor<V> detached()
      This method concludes the slicing API by performing the actual slicing and - returning the resulting Tensor instance based on the previously + returning the resulting Tensor instance based on the previously specified slice configuration... - Contrary to the AxisOrGet.get() method, this method returns a slice which + Contrary to the AxisOrGet.get() method, this method returns a slice which is not part of the computation graph of the original tensor (meaning no autograd).
      -
      -
      Specified by:
      -
      detached in interface AxisOrGet<V>
      -
      Specified by:
      -
      detached in interface AxisOrGetTensor<V>
      -
      Returns:
      -
      A new Tensor instance which is a slice of the original tensor without autograd.
      +
      +
      Specified by:
      +
      detached in interface AxisOrGet<V>
      +
      Specified by:
      +
      detached in interface AxisOrGetTensor<V>
      +
      Returns:
      +
      A new Tensor instance which is a slice of the original tensor without autograd.
      -
    - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/fluent/slicing/states/StepsOrAxisOrGetTsr.html b/docs/jdocs/neureka/fluent/slicing/states/StepsOrAxisOrGetTsr.html deleted file mode 100644 index 267cb2883..000000000 --- a/docs/jdocs/neureka/fluent/slicing/states/StepsOrAxisOrGetTsr.html +++ /dev/null @@ -1,337 +0,0 @@ - - - - - -StepsOrAxisOrGetTsr (neureka 0.21.0 API) - - - - - - - - - - - - - -
    - -
    - -
    -
    - -

    Interface StepsOrAxisOrGetTsr<V>

    -
    -
    -
    - -
    -
    -
      -
    • - -
      -
        -
      • - - -

        Method Summary

        - - - - - - - - - - - - - - - - - - - - - - -
        All Methods Instance Methods Abstract Methods 
        Modifier and TypeMethodDescription
        Tsr<V>detached() -
        This method concludes the slicing API by performing the actual slicing and - returning the resulting Tsr instance based on the previously - specified slice configuration...
        -
        Tsr<V>get() -
        This method concludes the slicing API by performing the actual slicing and - returning the resulting Tsr instance based on the previously - specified slice configuration...
        -
        AxisOrGetTsr<V>step​(int size) -
        This method allows one to specify a step size within the slice range - previously specified for the currently sliced axis.
        -
        - -
      • -
      -
      -
    • -
    -
    -
    -
      -
    • - -
      -
        -
      • - - -

        Method Detail

        - - - -
          -
        • -

          step

          -
          AxisOrGetTsr<V> step​(int size)
          -
          This method allows one to specify a step size within the slice range - previously specified for the currently sliced axis.
          -
          -
          Specified by:
          -
          step in interface StepsOrAxisOrGet<V>
          -
          Parameters:
          -
          size - The step size of the iterator slicing the underlying Tsr shape.
          -
          Returns:
          -
          The next step in the slicing API which allows one to slice another axis or simply - perform the actual slicing and get the tensor.
          -
          -
        • -
        - - - -
          -
        • -

          get

          -
          Tsr<V> get()
          -
          This method concludes the slicing API by performing the actual slicing and - returning the resulting Tsr instance based on the previously - specified slice configuration...
          -
          -
          Specified by:
          -
          get in interface AxisOrGet<V>
          -
          Specified by:
          -
          get in interface AxisOrGetTsr<V>
          -
          Returns:
          -
          A new Tsr instance which is a slice of the original tensor.
          -
          -
        • -
        - - - -
          -
        • -

          detached

          -
          Tsr<V> detached()
          -
          This method concludes the slicing API by performing the actual slicing and - returning the resulting Tsr instance based on the previously - specified slice configuration... - Contrary to the AxisOrGet.get() method, this method returns a slice which - is not part of the computation graph of the original tensor (meaning no autograd).
          -
          -
          Specified by:
          -
          detached in interface AxisOrGet<V>
          -
          Specified by:
          -
          detached in interface AxisOrGetTsr<V>
          -
          Returns:
          -
          A new Tsr instance which is a slice of the original tensor without autograd.
          -
          -
        • -
        -
      • -
      -
      -
    • -
    -
    -
    -
    - -
    - -
    - - diff --git a/docs/jdocs/neureka/fluent/slicing/states/To.html b/docs/jdocs/neureka/fluent/slicing/states/To.html index 21e60abcc..8d7c9598c 100644 --- a/docs/jdocs/neureka/fluent/slicing/states/To.html +++ b/docs/jdocs/neureka/fluent/slicing/states/To.html @@ -1,155 +1,249 @@ - + + - -To (neureka 1.0.0 API) - - - - + +To (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Interface To<V>

    +
    neureka.fluent.slicing.states
    +

    Interface To<V>

    -
    -
    -
    Type Parameters:
    -
    V - The type parameter for items of the Tensor which ought to be sliced.
    +
    +
    +
      +
    • +
      +
      Type Parameters:
      +
      V - The type parameter for items of the Tensor which ought to be sliced.
      -
      +
      All Known Subinterfaces:
      -
      ToForTensor<V>
      +
      ToForTensor<V>
      -
      +
      All Known Implementing Classes:
      -
      AxisSliceBuilder
      +
      AxisSliceBuilder

      -
      public interface To<V>
      +
      +
      public interface To<V>
      This is the second part for defining the slice range of a specified axis within the call transition graph exposed by the slice builder API. - This interface defines only 1 transition path, namely a route to the StepsOrAxisOrGet interface + This interface defines only 1 transition path, namely a route to the StepsOrAxisOrGet interface which, as the name suggests, offers 3 further call transition paths.
      -
    -
    -
      + +
    +
    +
    +
      +
    • -
    • -
      -

      Method Summary

      -
      -
      -
      -
      -
      Modifier and Type
      -
      Method
      -
      Description
      - -
      to(int index)
      -
      +
        +
      • + + +

        Method Summary

        + + + + + + + + + + +
        All Methods Instance Methods Abstract Methods 
        Modifier and TypeMethod and Description
        StepsOrAxisOrGet<V>to(int index)
        This is the second part for defining the slice range of a specified axis within the call transition graph exposed by the slice fluent builder API.
        - - - - - +
        +
      • +
    - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Method Details

      -
        -
      • -
        -

        to

        -
        StepsOrAxisOrGet<V> to(int index)
        +
          +
        • + + +

          Method Detail

          + + + +
            +
          • +

            to

            +
            StepsOrAxisOrGet<V> to(int index)
This is the second part for defining the slice range of a specified axis within the call transition graph exposed by the slice fluent builder API. This method is the only transition path possible for this interface. - It leads to the StepsOrAxisOrGet interface + It leads to the StepsOrAxisOrGet interface which, as the name suggests, offers 3 further call transition paths. This method simply expects the completion of a specified slice range for the current axis.
            -
            -
            Parameters:
            +
            +
            Parameters:
            index - The position where the range should end.
            -
            Returns:
            +
            Returns:
            The next step in the call transition graph of this fluent slice builder API.
            -
      -
    - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/fluent/slicing/states/ToForTensor.html b/docs/jdocs/neureka/fluent/slicing/states/ToForTensor.html index 46be721a9..7900e802f 100644 --- a/docs/jdocs/neureka/fluent/slicing/states/ToForTensor.html +++ b/docs/jdocs/neureka/fluent/slicing/states/ToForTensor.html @@ -1,150 +1,244 @@ - + + - -ToForTensor (neureka 1.0.0 API) - - - - + +ToForTensor (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Interface ToForTensor<V>

    +
    neureka.fluent.slicing.states
    +

    Interface ToForTensor<V>

    -
    -
    +
    +
    +
    -
    -
      +
      +
      public interface ToForTensor<V>
      +extends To<V>
      + +
    +
    +
    +
      +
    • -
    • -
      -

      Method Summary

      -
      -
      -
      -
      -
      Modifier and Type
      -
      Method
      -
      Description
      - -
      to(int index)
      -
      +
        +
      • + + +

        Method Summary

        + + + + + + + + + + +
        All Methods Instance Methods Abstract Methods 
        Modifier and TypeMethod and Description
        StepsOrAxisOrGetTensor<V>to(int index)
        This is the second part for defining the slice range of a specified axis within the call transition graph exposed by the slice fluent builder API.
        - - - - - +
        +
      • +
    - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Method Details

      -
        -
      • -
        -

        to

        -
        StepsOrAxisOrGetTensor<V> to(int index)
        +
          +
        • + + +

          Method Detail

          + + + +
            +
          • +

            to

            +
            StepsOrAxisOrGetTensor<V> to(int index)
This is the second part for defining the slice range of a specified axis within the call transition graph exposed by the slice fluent builder API. This method is the only transition path possible for this interface. - It leads to the StepsOrAxisOrGet interface + It leads to the StepsOrAxisOrGet interface which, as the name suggests, offers 3 further call transition paths. This method simply expects the completion of a specified slice range for the current axis.
            -
            -
            Specified by:
            -
            to in interface To<V>
            -
            Parameters:
            +
            +
            Specified by:
            +
            to in interface To<V>
            +
            Parameters:
            index - The position where the range should end.
            -
            Returns:
            +
            Returns:
            The next step in the call transition graph of this fluent slice builder API.
            -
      -
    - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/fluent/slicing/states/ToForTsr.html b/docs/jdocs/neureka/fluent/slicing/states/ToForTsr.html deleted file mode 100644 index fc2171b28..000000000 --- a/docs/jdocs/neureka/fluent/slicing/states/ToForTsr.html +++ /dev/null @@ -1,273 +0,0 @@ - - - - - -ToForTsr (neureka 0.21.0 API) - - - - - - - - - - - - - -
    - -
    - -
    -
    - -

    Interface ToForTsr<V>

    -
    -
    -
    -
      -
    • -
      -
      All Superinterfaces:
      -
      To<V>
      -
      -
      -
      All Known Implementing Classes:
      -
      AxisSliceBuilder
      -
      -
      -
      public interface ToForTsr<V>
      -extends To<V>
      -
    • -
    -
    -
    -
      -
    • - -
      -
        -
      • - - -

        Method Summary

        - - - - - - - - - - - - -
        All Methods Instance Methods Abstract Methods 
        Modifier and TypeMethodDescription
        StepsOrAxisOrGetTsr<V>to​(int index) -
        This is the second part for defining the slice range of a specified axis within - the call transition graph exposed by the slice fluent builder API.
        -
        -
      • -
      -
      -
    • -
    -
    -
    -
      -
    • - -
      -
        -
      • - - -

        Method Detail

        - - - -
          -
        • -

          to

          -
          StepsOrAxisOrGetTsr<V> to​(int index)
          -
This is the second part for defining the slice range of a specified axis within - the call transition graph exposed by the slice fluent builder API. - This method is the only transition path possible for this interface. - It leads to the StepsOrAxisOrGet interface - which, as the name suggests, offers 3 further call transition paths. - This method simply expects the completion of a specified slice range for the current axis.
          -
          -
          Specified by:
          -
          to in interface To<V>
          -
          Parameters:
          -
          index - The position where the range should end.
          -
          Returns:
          -
          The next step in the call transition graph of this fluent slice builder API.
          -
          -
        • -
        -
      • -
      -
      -
    • -
    -
    -
    -
    - -
    - -
    - - diff --git a/docs/jdocs/neureka/fluent/slicing/states/package-frame.html b/docs/jdocs/neureka/fluent/slicing/states/package-frame.html new file mode 100644 index 000000000..632649ca3 --- /dev/null +++ b/docs/jdocs/neureka/fluent/slicing/states/package-frame.html @@ -0,0 +1,26 @@ + + + + + +neureka.fluent.slicing.states (neureka 1.0.1 API) + + + + +

    neureka.fluent.slicing.states

    + + + diff --git a/docs/jdocs/neureka/fluent/slicing/states/package-summary.html b/docs/jdocs/neureka/fluent/slicing/states/package-summary.html index fe2ed600e..942d2c0cc 100644 --- a/docs/jdocs/neureka/fluent/slicing/states/package-summary.html +++ b/docs/jdocs/neureka/fluent/slicing/states/package-summary.html @@ -1,119 +1,179 @@ - + + - -neureka.fluent.slicing.states (neureka 1.0.0 API) - - - - + +neureka.fluent.slicing.states (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    -

    Package neureka.fluent.slicing.states

    -
    -
    -
    package neureka.fluent.slicing.states
    -
    -
      -
    • - -
    • -
    • -
      -
      Interfaces
      -
      -
      Class
      -
      Description
      - -
      +
      +
        +
      • + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        Interface Summary 
        InterfaceDescription
        AxisOrGet<V>
        This is the starting point of the call transition graph exposed by the slice builder API.
        - - -
         
        - -
        +
        AxisOrGetTensor<V> 
        FromOrAt<V>
        This is the starting point for defining the slice range of a specified axis within the call transition graph exposed by the slice builder API.
        - - -
         
        - -
        -
        This interface extends the AxisOrGet interface which provides the option to either continue +
        FromOrAtTensor<V> 
        StepsOrAxisOrGet<V> +
        This interface extends the AxisOrGet interface which provides the option to either continue slicing another axis or simply trigger the creation and return of a slice instance based on the already provided slice configuration.
        - - -
         
        -
        To<V>
        -
        +
        StepsOrAxisOrGetTensor<V> 
        To<V>
        This is the second part for defining the slice range of a specified axis within the call transition graph exposed by the slice builder API.
        - - -
         
        - - +
        ToForTensor<V> 
      -
    -
    + + + + diff --git a/docs/jdocs/neureka/fluent/slicing/states/package-tree.html b/docs/jdocs/neureka/fluent/slicing/states/package-tree.html index ff709238a..318ea113b 100644 --- a/docs/jdocs/neureka/fluent/slicing/states/package-tree.html +++ b/docs/jdocs/neureka/fluent/slicing/states/package-tree.html @@ -1,90 +1,153 @@ - + + - -neureka.fluent.slicing.states Class Hierarchy (neureka 1.0.0 API) - - - - + +neureka.fluent.slicing.states Class Hierarchy (neureka 1.0.1 API) - - - - - - + + -
    - -
    -

    Hierarchy For Package neureka.fluent.slicing.states

    -Package Hierarchies: +Package Hierarchies:
    -
    +

    Interface Hierarchy

    -
    -
    + + + + diff --git a/docs/jdocs/neureka/framing/NDFrame.html b/docs/jdocs/neureka/framing/NDFrame.html index 8530f6c42..156eb3fc9 100644 --- a/docs/jdocs/neureka/framing/NDFrame.html +++ b/docs/jdocs/neureka/framing/NDFrame.html @@ -1,92 +1,123 @@ - + + - -NDFrame (neureka 1.0.0 API) - - - - + +NDFrame (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class NDFrame<V>

    -
    -
    java.lang.Object -
    neureka.framing.NDFrame<V>
    +
    neureka.framing
    +

    Class NDFrame<V>

    -
    -
    -
    Type Parameters:
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.framing.NDFrame<V>
      • +
      +
    • +
    +
    +
      +
    • +
      +
      Type Parameters:
      V - The type parameter of the value type of the tensor type to whom this component should belong.
      -
      +
      All Implemented Interfaces:
      -
      Component<Tensor<V>>
      +
      Component<Tensor<V>>

      -
      public final class NDFrame<V> -extends Object -implements Component<Tensor<V>>
      +
      +
      public final class NDFrame<V>
      +extends java.lang.Object
      +implements Component<Tensor<V>>
      Instances of this class are components of tensors, which store aliases for the indices of the tensor. These indices aliases can be anything that has an identity, meaning any plain old object.
      There are two layers of aliasing/labeling provided by this class: @@ -99,202 +130,266 @@

      Class NDFrame<V>

Let's for example imagine a tensor of rank 2 with the shape (3, 4); then the axes could for example be labeled - with a tuple of two String instances like: ("a","b").
    + with a tuple of two String instances like: ("a","b").
    Labeling the indices of the axis for this example requires 2 arrays whose length matches the axis sizes.
    The following mapping would be able to label both the axis and their indices:

    "a" : ["first", "second", "third"],
    "b" : ["one", "two", "three", "four"]

    -
    -
    -
      + +
    +
    +
    + - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        NDFrame

        -
        public NDFrame(List<List<Object>> labels, - Tensor<V> host, - String mainLabel)
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            NDFrame

            +
            public NDFrame(java.util.List<java.util.List<java.lang.Object>> labels,
            +               Tensor<V> host,
            +               java.lang.String mainLabel)
          • -
          • -
            -

            NDFrame

            -
            public NDFrame(Tensor<V> host, - String tensorName)
            -
            +
          + + + + + + + +
            +
          • +

            NDFrame

            +
            public NDFrame(java.util.Map<java.lang.Object,java.util.List<java.lang.Object>> labels,
            +               Tensor<V> host,
            +               java.lang.String ndaMainLabel)
          -
    • +
    -
  • -
    -

    Method Details

    -
      -
    • -
      -

      withLabel

      -
      public NDFrame<V> withLabel(String newLabel)
      -
      +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          withLabel

          +
          public NDFrame<V> withLabel(java.lang.String newLabel)
        • -
        • -
          -

          withAxesLabels

          -
          public NDFrame<V> withAxesLabels(List<List<Object>> labels)
          -
          +
        + + + +
          +
        • +

          withAxesLabels

          +
          public NDFrame<V> withAxesLabels(java.util.List<java.util.List<java.lang.Object>> labels)
        • -
        • -
          -

          get

          -
          public int[] get(List<Object> keys)
          -
          +
        + + + +
          +
        • +

          get

          +
          public int[] get(java.util.List<java.lang.Object> keys)
        • -
        • -
          -

          get

          -
          public int[] get(Object... keys)
          -
          +
        + + + +
          +
        • +

          get

          +
          public int[] get(java.lang.Object... keys)
        • -
        • -
          -

          hasLabelsForAxis

          -
          public boolean hasLabelsForAxis(Object axisAlias)
          -
          +
        + + + +
          +
        • +

          hasLabelsForAxis

          +
          public boolean hasLabelsForAxis(java.lang.Object axisAlias)
        • -
        • -
          -

          atAxis

          -
          public AxisFrame<Integer,V> atAxis(Object axisAlias)
          -
          A NDFrame exposes aliases for axes as well as aliases for individual positions within an axis. +
        + + + +
          +
        • +

          atAxis

          +
          public AxisFrame<java.lang.Integer,V> atAxis(java.lang.Object axisAlias)
          +
An NDFrame exposes aliases for axes as well as aliases for individual positions within an axis. This method returns a view on an axis which is targeted by an axis alias as key. - This view is an instance of the AxisFrame class which provides useful methods + This view is an instance of the AxisFrame class which provides useful methods for getting or setting alias objects for individual positions of the given axis. This is useful when, for example, replacing certain aliases or simply taking a look at them.
          -
          -
          Parameters:
          -
          axisAlias - The axis alias object which targets an AxisFrame of NDFrame.
          -
          Returns:
          -
A view of the targeted axis in the form of an AxisFrame which provides getters and setters for aliases.
          +
          +
          Parameters:
          +
          axisAlias - The axis alias object which targets an AxisFrame of NDFrame.
          +
          Returns:
          +
A view of the targeted axis in the form of an AxisFrame which provides getters and setters for aliases.
          -
  • -
  • -
    -

    toString

    -
    public String toString()
    -
    -
    Overrides:
    -
    toString in class Object
    + + + + +
      +
    • +

      toString

      +
      public java.lang.String toString()
      +
      +
      Overrides:
      +
      toString in class java.lang.Object
      -
  • -
  • -
    -

    update

    -
    public boolean update(Component.OwnerChangeRequest<Tensor<V>> changeRequest)
    -
    Description copied from interface: Component
    + + + + +
      +
    • +

      update

      +
      public boolean update(Component.OwnerChangeRequest<Tensor<V>> changeRequest)
      +
      Description copied from interface: Component
      Components are not the slaves of their owners. If the owner registers any state changes related to a given component, then said component will be informed by the owner about the change as well as receive @@ -304,40 +399,105 @@

      update

      is being added to, or removed from, its current owner. If components hold references to their owners then this method gives them the ability to update said reference when a new owner takes over the components of an old one. - The Component.OwnerChangeRequest implementation instance passed to this method - informs this component about the current state change and its type (Component.OwnerChangeRequest.type()). + The Component.OwnerChangeRequest implementation instance passed to this method + informs this component about the current state change and its type (Component.OwnerChangeRequest.type()). If this method returns false then this means that this component rejects the proposed update. The component owner will then abort the proposed change.
      -
      -
      Specified by:
      -
      update in interface Component<V>
      -
      Parameters:
      -
      changeRequest - An Component.OwnerChangeRequest implementation instance used to communicate the type of change, context information and the ability to execute the change directly.
      -
      Returns:
      +
      +
      Specified by:
      +
      update in interface Component<Tensor<V>>
      +
      Parameters:
      +
      changeRequest - An Component.OwnerChangeRequest implementation instance used to communicate the type of change, context information and the ability to execute the change directly.
      +
      Returns:
      The truth value determining if the state change should be aborted or not.
      -
  • -
  • -
    -

    getState

    -
    public Map<Object,List<Object>> getState()
    -
    + + + + +
      +
    • +

      getState

      +
      public java.util.Map<java.lang.Object,java.util.List<java.lang.Object>> getState()
    • -
    • -
      -

      getLabel

      -
      public String getLabel()
      -
      +
    + + + +
      +
    • +

      getLabel

      +
      public java.lang.String getLabel()
      +
    • +
  • - - +
    +
    - + + + + diff --git a/docs/jdocs/neureka/framing/Relation.html b/docs/jdocs/neureka/framing/Relation.html index bf3d69331..8f4e0b16f 100644 --- a/docs/jdocs/neureka/framing/Relation.html +++ b/docs/jdocs/neureka/framing/Relation.html @@ -1,214 +1,283 @@ - + + - -Relation (neureka 1.0.0 API) - - - - + +Relation (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class Relation<V>

    -
    -
    java.lang.Object -
    neureka.framing.Relation<V>
    +
    neureka.framing
    +

    Class Relation<V>

    -
    -
    -
    Type Parameters:
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.framing.Relation<V>
      • +
      +
    • +
    +
    +
      +
    • +
      +
      Type Parameters:
      V - The data type class of the elements of the tensor to which this Relation belongs to.
      -
      +
      All Implemented Interfaces:
      -
      Component<Tensor<V>>
      +
      Component<Tensor<V>>

      -
      public class Relation<V> -extends Object -implements Component<Tensor<V>>
      +
      +
      public class Relation<V>
      +extends java.lang.Object
      +implements Component<Tensor<V>>
      This class is an important tensor component responsible for managing the relationships between slices and the tensors from which they have been derived. - In case a tensor is a slice then it will have a Relation instance as + In case a tensor is a slice then it will have a Relation instance as component which will reference the parent tensor strongly (so that its data will not be lost). However, in case a tensor is the "parent" of a slice tensor then it will - contain a Relation instance which references the slices weakly (so that they can be garbage collected).
      + contain a Relation instance which references the slices weakly (so that they can be garbage collected).
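A rough editorial sketch of those two directions, using only the factory and bookkeeping methods documented further down this page (parent and slice are hypothetical tensors, and the fragment belongs inside a method body):

    // Parent side: a Relation that tracks slices weakly.
    Relation<Double> parentSide = Relation.newParentToChildren();
    parentSide.addChild(slice);                 // register a (hypothetical) slice tensor
    boolean any = parentSide.hasChildren();     // expected to be true while `slice` is still strongly reachable

    // Slice side: a Relation that holds on to its parent strongly.
    Relation<Double> sliceSide = Relation.newChildToParent(parent);
    boolean hasParent = sliceSide.hasParent();  // expected to be true, since a parent was supplied
    var root = sliceSide.findRootTensor();      // Optional resolving the tensor that actually owns the data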

      -
    -
    -
      + +
    +
    +
    + - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Method Details

      -
        -
      • -
        -

        newParentToChildren

        -
        public static <T> Relation<T> newParentToChildren()
        -
        +
          +
        • + + +

          Method Detail

          + + + +
            +
          • +

            newParentToChildren

            +
            public static <T> Relation<T> newParentToChildren()
          • -
          • -
            -

            newChildToParent

            -
            public static <T> Relation<T> newChildToParent(Tensor<T> parent)
            -
            +
          + + + + + + + +
            +
          • +

            update

            +
            public boolean update(Component.OwnerChangeRequest<Tensor<V>> changeRequest)
            +
            Description copied from interface: Component
            Components are not the slaves of their owners. If the owner registers any state changes related to a given component, then said component will be informed by the owner about the change as well as receive @@ -218,31 +287,37 @@

            update

            is being added to, or removed from, its current owner. If components hold references to their owners then this method gives them the ability to update said reference when a new owner takes over the components of an old one. - The Component.OwnerChangeRequest implementation instance passed to this method - informs this component about the current state change and its type (Component.OwnerChangeRequest.type()). + The Component.OwnerChangeRequest implementation instance passed to this method + informs this component about the current state change and its type (Component.OwnerChangeRequest.type()). If this method returns false then this means that this component rejects the proposed update. The component owner will then abort the proposed change.
            -
            -
            Specified by:
            -
            update in interface Component<V>
            -
            Parameters:
            -
            changeRequest - An Component.OwnerChangeRequest implementation instance used to communicate the type of change, context information and the ability to execute the change directly.
            -
            Returns:
            +
            +
            Specified by:
            +
            update in interface Component<Tensor<V>>
            +
            Parameters:
            +
            changeRequest - An Component.OwnerChangeRequest implementation instance used to communicate the type of change, context information and the ability to execute the change directly.
            +
            Returns:
            The truth value determining if the state change should be aborted or not.
            -
    • -
    • -
      -

      addChild

      -
      public Relation<V> addChild(Tensor<V> child)
      -
      +
    + + + +
      +
    • +

      addChild

      +
      public Relation<V> addChild(Tensor<V> child)
    • -
    • -
      -

      addPermuteRelationFor

      -
      public void addPermuteRelationFor(Tensor<V> child, - int[] permuteOrder)
      +
    + + + +
      +
    • +

      addPermuteRelationFor

      +
      public void addPermuteRelationFor(Tensor<V> child,
      +                                  int[] permuteOrder)
      When creating permuted versions of slices then there must be a translation between the shape configuration of this new slice and the original parent tensor from which both slices
@@ -256,17 +331,20 @@

      addPermuteRelationFor

      to a slice, which is also the "child" of the tensor to which this Relation component belongs!

      -
      -
      Parameters:
      +
      +
      Parameters:
      child - The child (slice) tensor which has a shape whose dimensions are in a different order.
      permuteOrder - The int array defining the axis order (dimension index mapping).
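      To make the meaning of permuteOrder concrete, here is a minimal hedged sketch (relation and permutedChild are hypothetical variables; axis i of the permuted child corresponds to axis permuteOrder[i] of the original):

          // If the original slice has shape (2, 3, 4) and the permuted child was
          // created with order {2, 0, 1}, then the child's shape reads (4, 2, 3).
          int[] permuteOrder = { 2, 0, 1 };
          relation.addPermuteRelationFor(permutedChild, permuteOrder);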
      -
    • -
    • -
      -

      getPermuteRelationFor

      -
      public int[] getPermuteRelationFor(Tensor<V> child)
      +
    + + + +
      +
    • +

      getPermuteRelationFor

      +
      public int[] getPermuteRelationFor(Tensor<V> child)
      When creating permuted versions of slices then there must be a translation between the shape configuration of this new slice and the original parent tensor from which both slices
@@ -280,81 +358,164 @@

      getPermuteRelationFor

      from the order of the parent tensor (which is the component owner of this Relation) and the passed slice (which is a weakly referenced child tensor...).

      -
      -
      Parameters:
      +
      +
      Parameters:
      child - The child (slice) tensor which has a shape whose dimensions are in a different order.
      -
      Returns:
      +
      Returns:
      The int array defining the reshaping (dimension index mapping).
      -
    • -
    • -
      -

      getChildren

      -
      public List<Tensor<?>> getChildren()
      -
      +
    + + + +
      +
    • +

      getChildren

      +
      public java.util.List<Tensor<?>> getChildren()
    • -
    • -
      -

      findRootTensor

      -
      public Optional<Tensor<V>> findRootTensor()
      +
    + + + +
      +
    • +

      findRootTensor

      +
      public java.util.Optional<Tensor<V>> findRootTensor()
      This method tries to find the root data ancestor of this tensor. If this tensor is not a slice of another tensor, then it cannot have data parents and therefore also no root tensor, in which case the method will return an empty Optional!
      -
      -
      Returns:
      +
      +
      Returns:
      The root data parent which actually owns the data of the slice, or an empty Optional if the tensor is not a slice.
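      A hedged usage sketch (slicing method names assumed as before, and retrieving the component via get(Relation.class) is also an assumption): a slice of a slice should report the original tensor as its root data ancestor.

          Tensor<Double> root   = Tensor.of(Double.class).withShape(8).all(1d);
          Tensor<Double> slice  = root.slice().axis(0).from(0).to(3).get();
          Tensor<Double> nested = slice.slice().axis(0).from(0).to(1).get();
          nested.get(Relation.class)
                .findRootTensor()
                .ifPresent(r -> System.out.println(r == root)); // expected: true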
      -
    • -
    • -
      -

      hasParent

      -
      public boolean hasParent()
      -
      +
    + + + +
      +
    • +

      hasParent

      +
      public boolean hasParent()
    • -
    • -
      -

      hasChildren

      -
      public boolean hasChildren()
      -
      +
    + + + +
      +
    • +

      hasChildren

      +
      public boolean hasChildren()
    • -
    • -
      -

      childCount

      -
      public int childCount()
      -
      +
    + + + +
      +
    • +

      childCount

      +
      public int childCount()
    • -
    • -
      -

      removeChild

      -
      public void removeChild(Tensor<V> child)
      -
      +
    + + + +
      +
    • +

      removeChild

      +
      public void removeChild(Tensor<V> child)
    • -
    • -
      -

      toString

      -
      public String toString()
      -
      -
      Overrides:
      -
      toString in class Object
      +
    + + + +
      +
    • +

      toString

      +
      public java.lang.String toString()
      +
      +
      Overrides:
      +
      toString in class java.lang.Object
      -
    • -
    • -
      -

      getParent

      -
      public Optional<Tensor<V>> getParent()
      -
      +
    + + + +
      +
    • +

      getParent

      +
      public java.util.Optional<Tensor<V>> getParent()
    - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/framing/fluent/At.html b/docs/jdocs/neureka/framing/fluent/At.html index e4517a9d4..1c1a553ff 100644 --- a/docs/jdocs/neureka/framing/fluent/At.html +++ b/docs/jdocs/neureka/framing/fluent/At.html @@ -1,129 +1,225 @@ - + + - -At (neureka 1.0.0 API) - - - - + +At (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Interface At<K,R>

    +
    neureka.framing.fluent
    +

    Interface At<K,R>

    -
    -
    -
    Type Parameters:
    +
    +
    +
      +
    • +
      +
      Type Parameters:
      K - The key type which will be provided by the user of this method.
      R - The return type which will be provided by an implementation of this method.

      -
      public interface At<K,R>
      -
    -
    -
      - -
    • -
      -

      Method Summary

      -
      -
      -
      -
      -
      Modifier and Type
      -
      Method
      -
      Description
      - -
      at(K key)
      -
       
      -
      -
      +
      +
      public interface At<K,R>
      +
    • +
    - +
    + - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Method Details

      -
        -
      • -
        -

        at

        -
        R at(K key)
        -
        + -
    - +
    +
    - + + + + diff --git a/docs/jdocs/neureka/framing/fluent/AxisFrame.Builder.html b/docs/jdocs/neureka/framing/fluent/AxisFrame.Builder.html index b98ab2f21..6eb6aab54 100644 --- a/docs/jdocs/neureka/framing/fluent/AxisFrame.Builder.html +++ b/docs/jdocs/neureka/framing/fluent/AxisFrame.Builder.html @@ -1,180 +1,303 @@ - + + - -AxisFrame.Builder (neureka 1.0.0 API) - - - - + +AxisFrame.Builder (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class AxisFrame.Builder<SetType,GetType,ValueType>

    -
    -
    java.lang.Object -
    neureka.framing.fluent.AxisFrame.Builder<SetType,GetType,ValueType>
    +
    neureka.framing.fluent
    +

    Class AxisFrame.Builder<SetType,GetType,ValueType>

    -
    -
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.framing.fluent.AxisFrame.Builder<SetType,GetType,ValueType>
      • +
      +
    • +
    +
    +
    -
    -
    -
    -

    Methods inherited from class java.lang.Object

    -clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
    - +
    + - -
    -
      +
    +
    + - +
    +
    - + + + + diff --git a/docs/jdocs/neureka/framing/fluent/AxisFrame.Set.html b/docs/jdocs/neureka/framing/fluent/AxisFrame.Set.html index d88c18d28..350bf4ccd 100644 --- a/docs/jdocs/neureka/framing/fluent/AxisFrame.Set.html +++ b/docs/jdocs/neureka/framing/fluent/AxisFrame.Set.html @@ -1,128 +1,222 @@ - + + - -AxisFrame.Set (neureka 1.0.0 API) - - - - + +AxisFrame.Set (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Interface AxisFrame.Set<V>

    +
    neureka.framing.fluent
    +

    Interface AxisFrame.Set<V>

    -
    -
    +
    +
    +
    -
    -
      - -
    • -
      -

      Method Summary

      -
      -
      -
      -
      -
      Modifier and Type
      -
      Method
      -
      Description
      - -
      setIndex(int value)
      -
       
      -
      -
      +
      +
      public static interface AxisFrame.Set<V>
      +
    • +
    - +
    + - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Method Details

      -
        -
      • -
        -

        setIndex

        -
        NDFrame<V> setIndex(int value)
        -
        +
          +
        • + + +

          Method Detail

          + + + +
            +
          • +

            setIndex

            +
            NDFrame<V> setIndex(int value)
            +
          • +
        -
    - +
    +
    - + + + + diff --git a/docs/jdocs/neureka/framing/fluent/AxisFrame.html b/docs/jdocs/neureka/framing/fluent/AxisFrame.html index 5b31ff2aa..8353fb7c3 100644 --- a/docs/jdocs/neureka/framing/fluent/AxisFrame.html +++ b/docs/jdocs/neureka/framing/fluent/AxisFrame.html @@ -1,205 +1,328 @@ - + + - -AxisFrame (neureka 1.0.0 API) - - - - + +AxisFrame (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class AxisFrame<G,V>

    -
    -
    java.lang.Object -
    neureka.framing.fluent.AxisFrame<G,V>
    +
    neureka.framing.fluent
    +

    Class AxisFrame<G,V>

    -
    -
    -
    Type Parameters:
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.framing.fluent.AxisFrame<G,V>
      • +
      +
    • +
    +
    +
      +
    • +
      +
      Type Parameters:
      G - The get type which represents the aliases for this axis.
      -
      V - The value type which is the value type of the Tensor with this AxisFrame.
      +
      V - The value type which is the value type of the Tensor with this AxisFrame.

      -
      public final class AxisFrame<G,V> -extends Object
      -
      This class represents the labeled axis of an NDFrame.
      -
    -
    -
      - -
    • -
      -

      Nested Class Summary

      -
      Nested Classes
      -
      -
      Modifier and Type
      -
      Class
      -
      Description
      -
      static class 
      - -
       
      -
      static interface 
      - -
       
      +
      +
      public final class AxisFrame<G,V>
      +extends java.lang.Object
      +
      This class represents the labeled axis of an NDFrame.
      +
    • +
    - +
    +
    +
    +
      +
    • -
    • -
      -

      Method Details

      -
        -
      • -
        -

        builder

        -
        public static <SetType, -GetType, -ValueType> -AxisFrame.Builder<SetType,GetType,ValueType> builder()
        -
        +
          +
        • + + +

          Method Detail

          + + + +
            +
          • +

            builder

            +
            public static <SetType,GetType,ValueType> AxisFrame.Builder<SetType,GetType,ValueType> builder()
          • -
          • -
            -

            getIndexAtAlias

            -
            public G getIndexAtAlias(Object aliasKey)
            -
            +
          + + + +
            +
          • +

            getIndexAtAlias

            +
            public G getIndexAtAlias(java.lang.Object aliasKey)
          • -
          • -
            -

            atIndexAlias

            -
            public AxisFrame.Set<V> atIndexAlias(Object aliasKey)
            -
            +
          + + + + + + + +
            +
          • +

            replace

            +
            public With<java.lang.Object,NDFrame<V>> replace(java.lang.Object indexAlias)
          • -
          • -
            -

            getAllAliases

            -
            public List<Object> getAllAliases()
            -
            +
          + + + +
            +
          • +

            getAllAliases

            +
            public java.util.List<java.lang.Object> getAllAliases()
          • -
          • -
            -

            getAllAliasesForIndex

            -
            public List<Object> getAllAliasesForIndex(int index)
            -
            +
          + + + +
            +
          • +

            getAllAliasesForIndex

            +
            public java.util.List<java.lang.Object> getAllAliasesForIndex(int index)
          -
    - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/framing/fluent/Get.html b/docs/jdocs/neureka/framing/fluent/Get.html index 3511506e2..813f9dd5a 100644 --- a/docs/jdocs/neureka/framing/fluent/Get.html +++ b/docs/jdocs/neureka/framing/fluent/Get.html @@ -1,124 +1,218 @@ - + + - -Get (neureka 1.0.0 API) - - - - + +Get (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Interface Get<ValueType>

    +
    neureka.framing.fluent
    +

    Interface Get<ValueType>

    -
    +
    +
    +
      +

    • -
      public interface Get<ValueType>
      -
    -
    -
      - -
    • -
      -

      Method Summary

      -
      -
      -
      -
      -
      Modifier and Type
      -
      Method
      -
      Description
      - -
      get()
      -
       
      -
      -
      +
      +
      public interface Get<ValueType>
      +
    • +
    - +
    + - -
    -
      +
    +
    + - +
    +
    - + + + + diff --git a/docs/jdocs/neureka/framing/fluent/Replace.html b/docs/jdocs/neureka/framing/fluent/Replace.html index 365b8d704..fc59bbaf6 100644 --- a/docs/jdocs/neureka/framing/fluent/Replace.html +++ b/docs/jdocs/neureka/framing/fluent/Replace.html @@ -1,124 +1,220 @@ - + + - -Replace (neureka 1.0.0 API) - - - - + +Replace (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Interface Replace<ValueType,ReplacementType,ReturnType>

    +
    neureka.framing.fluent
    +

    Interface Replace<ValueType,ReplacementType,ReturnType>

    -
    +
    +
    +
      +

    • -
      public interface Replace<ValueType,ReplacementType,ReturnType>
      -
    -
    -
      - -
    • -
      -

      Method Summary

      -
      -
      -
      -
      -
      Modifier and Type
      -
      Method
      -
      Description
      - - -
       
      -
      -
      +
      +
      public interface Replace<ValueType,ReplacementType,ReturnType>
      +
    • +
    - +
    + - -
    -
      +
    +
    + - +
    +
    - + + + + diff --git a/docs/jdocs/neureka/framing/fluent/Set.html b/docs/jdocs/neureka/framing/fluent/Set.html index 6b8ed0da6..1aaaa061f 100644 --- a/docs/jdocs/neureka/framing/fluent/Set.html +++ b/docs/jdocs/neureka/framing/fluent/Set.html @@ -1,124 +1,218 @@ - + + - -Set (neureka 1.0.0 API) - - - - + +Set (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Interface Set<V>

    +
    neureka.framing.fluent
    +

    Interface Set<V>

    -
    +
    +
    +
      +

    • -
      public interface Set<V>
      -
    -
    -
      - -
    • -
      -

      Method Summary

      -
      -
      -
      -
      -
      Modifier and Type
      -
      Method
      -
      Description
      - -
      set(int value)
      -
       
      -
      -
      +
      +
      public interface Set<V>
      +
    • +
    - +
    + - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Method Details

      -
    - +
    +
    - + + + + diff --git a/docs/jdocs/neureka/framing/fluent/With.html b/docs/jdocs/neureka/framing/fluent/With.html index 2b41747b7..cc07c027b 100644 --- a/docs/jdocs/neureka/framing/fluent/With.html +++ b/docs/jdocs/neureka/framing/fluent/With.html @@ -1,124 +1,220 @@ - + + - -With (neureka 1.0.0 API) - - - - + +With (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Interface With<ValueType,TargetType>

    +
    neureka.framing.fluent
    +

    Interface With<ValueType,TargetType>

    -
    +
    +
    +
      +

    • -
      public interface With<ValueType,TargetType>
      -
    -
    -
      - -
    • -
      -

      Method Summary

      -
      -
      -
      -
      -
      Modifier and Type
      -
      Method
      -
      Description
      - -
      with(ValueType value)
      -
       
      -
      -
      +
      +
      public interface With<ValueType,TargetType>
      +
    • +
    - +
    + - -
    -
      +
    +
    + - +
    +
    - + + + + diff --git a/docs/jdocs/neureka/framing/fluent/package-frame.html b/docs/jdocs/neureka/framing/fluent/package-frame.html new file mode 100644 index 000000000..afaddb33c --- /dev/null +++ b/docs/jdocs/neureka/framing/fluent/package-frame.html @@ -0,0 +1,29 @@ + + + + + +neureka.framing.fluent (neureka 1.0.1 API) + + + + +

    neureka.framing.fluent

    +
    +

    Interfaces

    + +

    Classes

    + +
    + + diff --git a/docs/jdocs/neureka/framing/fluent/package-summary.html b/docs/jdocs/neureka/framing/fluent/package-summary.html index cdd1db91b..97bf4504c 100644 --- a/docs/jdocs/neureka/framing/fluent/package-summary.html +++ b/docs/jdocs/neureka/framing/fluent/package-summary.html @@ -1,115 +1,180 @@ - + + - -neureka.framing.fluent (neureka 1.0.0 API) - - - - + +neureka.framing.fluent (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    -

    Package neureka.framing.fluent

    -
    -
    -
    package neureka.framing.fluent
    -
    -
      -
    • - +
      +
        +
      • + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        Interface Summary 
        InterfaceDescription
        At<K,R> 
        AxisFrame.Set<V> 
        Get<ValueType> 
        Replace<ValueType,ReplacementType,ReturnType> 
        Set<V> 
        With<ValueType,TargetType> 
      • -
      • -
        -
        -
        -
        -
        Class
        -
        Description
        -
        At<K,R>
        -
         
        - -
        -
        This class represents the labeled axis of an NDFrame.
        -
        -
        AxisFrame.Builder<SetType,GetType,ValueType>
        -
         
        - -
         
        -
        Get<ValueType>
        -
         
        -
        Replace<ValueType,ReplacementType,ReturnType>
        -
         
        -
        Set<V>
        -
         
        -
        With<ValueType,TargetType>
        -
         
        -
        -
        -
        +
      • + + + + + + + + + + + + + + + + +
        Class Summary 
        ClassDescription
        AxisFrame<G,V> +
        This class represents the labeled axis of an NDFrame.
        +
        AxisFrame.Builder<SetType,GetType,ValueType> 
      -
    -
    + + + + diff --git a/docs/jdocs/neureka/framing/fluent/package-tree.html b/docs/jdocs/neureka/framing/fluent/package-tree.html index e243468f5..72e9236dd 100644 --- a/docs/jdocs/neureka/framing/fluent/package-tree.html +++ b/docs/jdocs/neureka/framing/fluent/package-tree.html @@ -1,83 +1,144 @@ - + + - -neureka.framing.fluent Class Hierarchy (neureka 1.0.0 API) - - - - + +neureka.framing.fluent Class Hierarchy (neureka 1.0.1 API) - - - - - - + + -
    - -
    -

    Hierarchy For Package neureka.framing.fluent

    -Package Hierarchies: +Package Hierarchies:
    -
    +

    Class Hierarchy

    -
    -

    Interface Hierarchy

      -
    • neureka.framing.fluent.At<K,R>
    • -
    • neureka.framing.fluent.AxisFrame.Set<V>
    • -
    • neureka.framing.fluent.Get<ValueType>
    • -
    • neureka.framing.fluent.Replace<ValueType,ReplacementType,ReturnType>
    • -
    • neureka.framing.fluent.Set<V>
    • -
    • neureka.framing.fluent.With<ValueType,TargetType>
    • +
    • neureka.framing.fluent.At<K,R>
    • +
    • neureka.framing.fluent.AxisFrame.Set<V>
    • +
    • neureka.framing.fluent.Get<ValueType>
    • +
    • neureka.framing.fluent.Replace<ValueType,ReplacementType,ReturnType>
    • +
    • neureka.framing.fluent.Set<V>
    • +
    • neureka.framing.fluent.With<ValueType,TargetType>
    -
    -
    + + + + diff --git a/docs/jdocs/neureka/framing/package-frame.html b/docs/jdocs/neureka/framing/package-frame.html new file mode 100644 index 000000000..0d3f48438 --- /dev/null +++ b/docs/jdocs/neureka/framing/package-frame.html @@ -0,0 +1,20 @@ + + + + + +neureka.framing (neureka 1.0.1 API) + + + + +

    neureka.framing

    +
    +

    Classes

    + +
    + + diff --git a/docs/jdocs/neureka/framing/package-summary.html b/docs/jdocs/neureka/framing/package-summary.html index 609551ca4..0cdba5961 100644 --- a/docs/jdocs/neureka/framing/package-summary.html +++ b/docs/jdocs/neureka/framing/package-summary.html @@ -1,103 +1,149 @@ - + + - -neureka.framing (neureka 1.0.0 API) - - - - + +neureka.framing (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    -

    Package neureka.framing

    +

    Package neureka.framing

    -
    -
    package neureka.framing
    -
    -
      -
    • - -
    • -
    • -
      -
      Classes
      -
      -
      Class
      -
      Description
      - -
      +
      +
        +
      • + + + + + + + + + + + + + + + + +
        Class Summary 
        ClassDescription
        NDFrame<V>
        Instances of this class are components of tensors, which store aliases for the indices of the tensor.
        - - -
        +
        Relation<V>
        This class is an important tensor component responsible for managing the relationships between slices and the tensors from which they have been derived.
        - - - +
      -
    -
    + + + + diff --git a/docs/jdocs/neureka/framing/package-tree.html b/docs/jdocs/neureka/framing/package-tree.html index 1a71d0385..2a790affe 100644 --- a/docs/jdocs/neureka/framing/package-tree.html +++ b/docs/jdocs/neureka/framing/package-tree.html @@ -1,72 +1,135 @@ - + + - -neureka.framing Class Hierarchy (neureka 1.0.0 API) - - - - + +neureka.framing Class Hierarchy (neureka 1.0.1 API) - - - - - - + + -
    - -
    -

    Hierarchy For Package neureka.framing

    -Package Hierarchies: +Package Hierarchies:
    -
    +

    Class Hierarchy

      -
    • java.lang.Object +
    • java.lang.Object
    -
    -
    + + + + diff --git a/docs/jdocs/neureka/math/Function.Callable.html b/docs/jdocs/neureka/math/Function.Callable.html index 019f2baad..6a9b6e21c 100644 --- a/docs/jdocs/neureka/math/Function.Callable.html +++ b/docs/jdocs/neureka/math/Function.Callable.html @@ -1,179 +1,281 @@ - + + - -Function.Callable (neureka 1.0.0 API) - - - - + +Function.Callable (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    -
    Package neureka.math
    -

    Interface Function.Callable

    +
    neureka.math
    +

    Interface Function.Callable

    -
    -
    +
    +
    +
    -
    -
      + +
    +
    +
    +
      +
    • -
    • -
      -

      Method Summary

      -
      -
      -
      -
      -
      Modifier and Type
      -
      Method
      -
      Description
      -
      default <T> Tensor<T>
      -
      call(Tensor<T>... inputs)
      -
       
      - -
      execute(Tensor<?>... inputs)
      -
      +
        +
      • + + +

        Method Summary

        + + + + + + + + + + + + + + + + + + +
        All Methods Instance Methods Abstract Methods Default Methods 
        Modifier and TypeMethod and Description
        default <T> Tensor<T>call(Tensor<T>... inputs) 
        Tensor<?>execute(Tensor<?>... inputs)
        Warning: Tensors returned by this method are eligible for deletion when consumed by other functions.
        - -
        default <T> Tensor<T>
        -
        invoke(Tensor<T>... inputs)
        -
        -
        This method is functionally identical to call(Tensor[]); however, it is best used in Kotlin, where one can omit the function name entirely and call this Function directly!
        -
        - - - - +
        default <T> Tensor<T>invoke(Tensor<T>... inputs) +
        This method is functionally identical to call(Tensor[]); however, it is best used in Kotlin, where one can omit the function name entirely and call this Function directly!
        +
        +
      • +
    - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Method Details

      -
        -
      • -
        -

        invoke

        -
        default <T> Tensor<T> invoke(Tensor<T>... inputs)
        -
        This method is functionally identical to call(Tensor[]); however, it is best used in Kotlin, where one can omit the function name entirely and call this Function directly!
        -
        -
        Type Parameters:
        +
          +
        • + + +

          Method Detail

          + + + +
            +
          • +

            invoke

            +
            default <T> Tensor<T> invoke(Tensor<T>... inputs)
            +
            This method is functionally identical to call(Tensor[]); however, it is best used in Kotlin, where one can omit the function name entirely and call this Function directly!
            +
            +
            Type Parameters:
            T - The type parameter of the tensors passed to and returned by this function.
            -
            Parameters:
            -
            inputs - The tensors which should be sent through the owner function of this Function.Callable.
            -
            Returns:
            +
            Parameters:
            +
            inputs - The tensors which should be sent through the owner function of this Function.Callable.
            +
            Returns:
            The resulting tensor produced by this function.
            -
      • -
      • -
        -

        call

        -
        default <T> Tensor<T> call(Tensor<T>... inputs)
        -
        -
        Type Parameters:
        +
      + + + +
        +
      • +

        call

        +
        default <T> Tensor<T> call(Tensor<T>... inputs)
        +
        +
        Type Parameters:
        T - The type parameter of the tensors passed to and returned by this function.
        -
        Parameters:
        -
        inputs - The tensors which should be sent through the owner function of this Function.Callable.
        -
        Returns:
        +
        Parameters:
        +
        inputs - The tensors which should be sent through the owner function of this Function.Callable.
        +
        Returns:
        The resulting tensor produced by this function.
        -
    • -
    • -
      -

      execute

      -
      Tensor<?> execute(Tensor<?>... inputs)
      +
    + + + +
      +
    • +

      execute

      +
      Tensor<?> execute(Tensor<?>... inputs)
      Warning: Tensors returned by this method are eligible for deletion when consumed by other functions.
      -
      -
      Parameters:
      +
      +
      Parameters:
      inputs - The tensors which should be sent through this function.
      -
      Returns:
      +
      Returns:
      The result from the execution of the provided tensors.
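      Putting these Callable methods together, a hedged sketch of the intended flow (the Arg.DerivIdx argument type and the scalar Tensor.of(2d) factory are assumptions used purely for illustration; with(...), call(...) and execute(...) are the members documented here and on the Function page):

          Function mul = Function.of("I[0] * I[1]");
          Tensor<Double> a = Tensor.of(2d);
          Tensor<Double> b = Tensor.of(3d);
          Function.Callable callable = mul.with(Arg.DerivIdx.of(-1)); // meta-arguments for the underlying Operation
          Tensor<Double> product = callable.call(a, b);
          // callable.execute(a, b) would also work, but its result is eligible for
          // deletion as soon as another function consumes it (see the warning above).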
      -
    - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/math/Function.html b/docs/jdocs/neureka/math/Function.html index 3df8f96a1..8324b42cd 100644 --- a/docs/jdocs/neureka/math/Function.html +++ b/docs/jdocs/neureka/math/Function.html @@ -1,87 +1,113 @@ - + + - -Function (neureka 1.0.0 API) - - - - + +Function (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    -
    Package neureka.math
    -

    Interface Function

    +
    neureka.math
    +

    Interface Function

    -
    -
    +
    +
    +
      +
    • +
      All Known Implementing Classes:
      -
      FunctionConstant, FunctionInput, FunctionNode, FunctionVariable
      +
      FunctionConstant, FunctionInput, FunctionNode, FunctionVariable

      -
      public interface Function
      -
      Besides the Tensor class, which is the core class of Neureka, this interface and its implementations +
      +
      public interface Function
      +
      Besides the Tensor class, which is the core class of Neureka, this interface and its implementations represent the second most important feature of this library. Instances of Function implementations form an abstract syntax tree which is being built from a provided expression String containing function syntax.

      Just like functions in the mathematical sense, implementations of this interface receive a fixed number of inputs.
@@ -89,958 +115,1196 @@

      Interface Function

      recognized by 'I[j]', 'Ij' or 'ij', where j is the input index. Functions accept arrays as their inputs, which is why variables must be targeted in such a way.
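      A small example of this expression syntax (the of factory and the call overloads are documented further down on this page; the numeric results are only what one would expect from the stated semantics):

          // 'I[0]' and 'I[1]' address the first and second input respectively.
          Function f = Function.of("I[0] * I[1] + 3");
          double y = f.call(new double[]{ 2.0, 5.0 });            // expected: 2 * 5 + 3 = 13
          // 'I[j]' enables input dependent indexing over all inputs:
          Function halfSum = Function.of("sum( I[j] / 2 )");
          double s = halfSum.call(new double[]{ 2.0, 4.0, 6.0 }); // expected: (2 + 4 + 6) / 2 = 6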
      -
    -
    -
      + +
    +
    +
    +
      +
    • -
    • -
      -

      Nested Class Summary

      -
      Nested Classes
      -
      -
      Modifier and Type
      -
      Interface
      -
      Description
      -
      static interface 
      - -
      -
      An API for calling a Function after having specified - a set of Arg instances through the with(Args) +
        +
      • + + +

        Nested Class Summary

        + + + + + + + + + + +
        Nested Classes 
        Modifier and TypeInterface and Description
        static interface Function.Callable +
        An API for calling a Function after having specified + a set of Arg instances through the with(Args) method.
        - - - +
      • +
      -
    • -
      -

      Method Summary

      -
      -
      -
      -
      -
      Modifier and Type
      -
      Method
      -
      Description
      -
      default double
      -
      call(double input)
      -
      -
      Invokes this Function with the provided scalar as a single input and returns the scalar result.
      -
      -
      default double
      -
      call(double... inputs)
      -
      -
      Invokes this Function with the provided array of inputs.
      -
      -
      double
      -
      call(double[] inputs, - int j)
      -
      -
      Invokes this Function with the provided array of inputs ad an index for input dependent indexing.
      -
      -
      default <T> Tensor<T>
      -
      call(List<Tensor<T>> inputs)
      -
       
      -
      default <T, -D extends Device<T>>
      Tensor<T>
      -
      call(Call.Builder<T,D> call)
      -
      +
        +
      • + + +

        Method Summary

        + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        All Methods Static Methods Instance Methods Abstract Methods Default Methods 
        Modifier and TypeMethod and Description
        default <T> Tensor<T>call(Args arguments, + Tensor<T>... tensors) +
        Use this to call this Function alongside with some additional meta-arguments + which will be passed to the underlying Operation(s).
        +
        default <T,D extends Device<T>>
        Tensor<T>
        call(Call.Builder<T,D> call)
        Use this for more control over the execution, which is especially useful when interfacing with more complex types of operations, requiring more context information.
        - -
        default <T, -D extends Device<T>>
        Tensor<T>
        -
        call(Call<D> call)
        -
        +
        default <T,D extends Device<T>>
        Tensor<T>
        call(Call<D> call)
        Use this for more control over the execution, which is very helpful when interfacing with more complex types of operations, requiring more context information.
        - -
        default <T> Tensor<T>
        -
        call(Args arguments, - Tensor<T>... tensors)
        -
        -
        Use this to call this Function alongside with some additional meta-arguments - which will be passed to the underlying Operation(s).
        -
        -
        default <T> Tensor<T>
        -
        call(Tensor<T> input)
        -
         
        -
        default <T> Tensor<T>
        -
        call(Tensor<T>... inputs)
        -
         
        -
        default <T> Tensor<T>
        -
        call(Tensor<T>[] inputs, - int j)
        -
         
        -
        boolean
        -
        dependsOn(int index)
        -
        +
        default doublecall(double... inputs) +
        Invokes this Function with the provided array of inputs.
        +
        default doublecall(double input) +
        Invokes this Function with the provided scalar as a single input and returns the scalar result.
        +
        doublecall(double[] inputs, + int j) +
        Invokes this Function with the provided array of inputs ad an index for input dependent indexing.
        +
        default <T> Tensor<T>call(java.util.List<Tensor<T>> inputs) 
        default <T> Tensor<T>call(Tensor<T>... inputs) 
        default <T> Tensor<T>call(Tensor<T> input) 
        default <T> Tensor<T>call(Tensor<T>[] inputs, + int j) 
        booleandependsOn(int index)
        Use this to determine if this function directly or indirectly references an input with the provided index.
        - -
        double
        -
        derive(double[] inputs, - int index)
        -
        -
        Calculates the derivative of a particular input with respect to the output of this Function +
        doublederive(double[] inputs, + int index) +
        Calculates the derivative of a particular input with respect to the output of this Function based on the provided array of inputs and an index targeting the input to be derived.
        - -
        double
        -
        derive(double[] inputs, - int index, - int j)
        -
        -
        Calculates the derivative of a particular input with respect to the output of this Function +
        doublederive(double[] inputs, + int index, + int j) +
        Calculates the derivative of a particular input with respect to the output of this Function based on the provided array of inputs, an index targeting the input to be derived and an index for input dependent indexing.
        - -
        default <T> Tensor<T>
        -
        derive(List<Tensor<T>> inputs, - int index)
        -
         
        -
        default <T> Tensor<T>
        -
        derive(List<Tensor<T>> inputs, - int index, - int j)
        -
         
        -
        default <T> Tensor<T>
        -
        derive(Tensor<T>[] inputs, - int index)
        -
         
        -
        default <T> Tensor<T>
        -
        derive(Tensor<T>[] inputs, - int index, - int j)
        -
         
        -
        default Tensor<?>
        -
        execute(Call<?> call)
        -
        -
        Warning: Tensors returned by this method are eligible for deletion when consumed by other functions. - Use this to pass more context information for execution of input tensors.
        -
        - -
        execute(Args arguments, - Tensor<?>... inputs)
        -
        +
        default <T> Tensor<T>derive(java.util.List<Tensor<T>> inputs, + int index) 
        default <T> Tensor<T>derive(java.util.List<Tensor<T>> inputs, + int index, + int j) 
        default <T> Tensor<T>derive(Tensor<T>[] inputs, + int index) 
        default <T> Tensor<T>derive(Tensor<T>[] inputs, + int index, + int j) 
        Tensor<?>execute(Args arguments, + Tensor<?>... inputs)
        Warning: Tensors returned by this method are eligible for deletion when consumed by other functions.
        - Use this to call this Function alongside with some additional meta-arguments - which will be passed to the underlying Operation(s).
        - -
        default Tensor<?>
        -
        execute(Tensor<?>... inputs)
        -
        + Use this to call this Function alongside with some additional meta-arguments + which will be passed to the underlying Operation(s).
        +
        default Tensor<?>execute(Call<?> call) +
        Warning: Tensors returned by this method are eligible for deletion when consumed by other functions. + Use this to pass more context information for execution of input tensors.
        +
        default Tensor<?>execute(Tensor<?>... inputs)
        Warning: Tensors returned by this method are eligible for deletion when consumed by other functions.
        - -
        default Tensor<?>
        -
        execute(Tensor<?>[] inputs, - int j)
        -
        +
        default Tensor<?>execute(Tensor<?>[] inputs, + int j)
        Warning: Tensors returned by this method are eligible for deletion when consumed by other functions.
        - -
        default Tensor<?>
        -
        executeDerive(Tensor<?>[] inputs, - int index)
        -
        +
        default Tensor<?>executeDerive(Tensor<?>[] inputs, + int index)
        Warning: Tensors returned by this method are eligible for deletion when consumed by other functions.
        - -
        default Tensor<?>
        -
        executeDerive(Tensor<?>[] inputs, - int index, - int j)
        -
        +
        default Tensor<?>executeDerive(Tensor<?>[] inputs, + int index, + int j)
        Warning: Tensors returned by this method are eligible for deletion when consumed by other functions.
        - -
        default List<Function>
        - -
         
        - -
        getDerivative(int index)
        -
        -
        This method builds a new Function which is the derivative of this Function with respect to the provided input index.
        -
        - - -
         
        - - -
         
        -
        default double
        -
        invoke(double input)
        -
        -
        Invokes this Function with the provided scalar as a single input and returns the scalar result.
        -
        -
        default double
        -
        invoke(double... inputs)
        -
        -
        Invokes this Function with the provided array of inputs.
        -
        -
        default double
        -
        invoke(double[] inputs, - int j)
        -
        -
        Invokes this Function with the provided array of inputs ad an index for input dependent indexing.
        -
        -
        default <T> Tensor<T>
        -
        invoke(List<Tensor<T>> input)
        -
        -
        This method is functionally identically to call(List), however it is best used - in Kotlin, where one can omit the function name entirely and call this Function directly!
        -
        -
        default <T, -D extends Device<T>>
        Tensor<T>
        -
        invoke(Call.Builder<T,D> call)
        -
        +
        default java.util.List<Function>getAllFunctions() 
        FunctiongetDerivative(int index) +
        This method builds a new Function which is the derivative of this Function with respect to the provided input index.
        +
        OperationgetOperation() 
        java.util.List<Function>getSubFunctions() 
        default <T> Tensor<T>invoke(Args arguments, + Tensor<T>... inputs) +
        Use this to call this Function alongside with some additional meta-arguments + which will be passed to the underlying Operation(s).
        +
        default <T,D extends Device<T>>
        Tensor<T>
        invoke(Call.Builder<T,D> call)
        Use this to pass more context information for execution of input tensors.
        - -
        default <T> Tensor<T>
        -
        invoke(Args arguments, - Tensor<T>... inputs)
        -
        -
        Use this to call this Function alongside with some additional meta-arguments - which will be passed to the underlying Operation(s).
        -
        -
        default <T> Tensor<T>
        -
        invoke(Tensor<T> input)
        -
        -
        This method is functionally identically to call(Tensor), however it is best used - in Kotlin, where one can omit the function name entirely and call this Function directly!
        -
        -
        default <T> Tensor<T>
        -
        invoke(Tensor<T>... inputs)
        -
        -
        This method is functionally identically to call(Tensor[]), however it is best used - in Kotlin, where one can omit the function name entirely and call this Function directly!
        -
        -
        default <T> Tensor<T>
        -
        invoke(Tensor<T>[] inputs, - int j)
        -
        -
        This method is functionally identically to call(Tensor[], int), however it is best used - in Kotlin, where one can omit the function name entirely and call this Function directly!
        -
        -
        boolean
        - -
        -
        Only branch Functions can do autograd / 'Auto-Differentiation', meaning functions - whose isFlat() flag is set to false!
        -
        -
        boolean
        - -
         
        -
        default int
        - -
         
        -
        static Function
        -
        of(String expression)
        -
        -
        This static factory method will return Function instances - based on a provided mathematical String expression describing the function +
        default doubleinvoke(double... inputs) +
        Invokes this Function with the provided array of inputs.
        +
        default doubleinvoke(double input) +
        Invokes this Function with the provided scalar as a single input and returns the scalar result.
        +
        default doubleinvoke(double[] inputs, + int j) +
        Invokes this Function with the provided array of inputs ad an index for input dependent indexing.
        +
        default <T> Tensor<T>invoke(java.util.List<Tensor<T>> input) +
        This method is functionally identically to call(List), however it is best used + in Kotlin, where one can omit the function name entirely and call this Function directly!
        +
        default <T> Tensor<T>invoke(Tensor<T>... inputs) +
        This method is functionally identically to call(Tensor[]), however it is best used + in Kotlin, where one can omit the function name entirely and call this Function directly!
        +
        default <T> Tensor<T>invoke(Tensor<T> input) +
        This method is functionally identically to call(Tensor), however it is best used + in Kotlin, where one can omit the function name entirely and call this Function directly!
        +
        default <T> Tensor<T>invoke(Tensor<T>[] inputs, + int j) +
        This method is functionally identically to call(Tensor[], int), however it is best used + in Kotlin, where one can omit the function name entirely and call this Function directly!
        +
        booleanisDoingAD() +
        Only branch Functions can do autograd / 'Auto-Differentiation', meaning functions + whose isFlat() flag is set to false!
        +
        booleanisFlat() 
        default intnumberOfArgs() 
        static Functionof(java.lang.String expression) +
        This static factory method will return Function instances + based on a provided mathematical String expression describing the function using 'I[0]', 'I[1]', 'I[2]'...
        - -
        static Function
        -
        of(String expression, - boolean doAD)
        -
        -
        This static factory method will return Function instances - based on a provided mathematical String expression describing the function +
        static Functionof(java.lang.String expression, + boolean doAD) +
        This static factory method will return Function instances + based on a provided mathematical String expression describing the function using 'I[0]', 'I[1]', 'I[2]'...
        - - - -
        +
        java.lang.StringtoString()
        Turns this function into a string representation which can be used to reconstruct this function or combine it with other function strings to parse entirely new functions...
        - - -
        with(Arg<?>... arguments)
        -
        -
        Use this to call this Function alongside with some additional meta-arguments - which will be passed to the underlying Operation(s).
        -
        - -
        with(Args arguments)
        -
        -
        Use this to call this Function alongside with some additional meta-arguments - which will be passed to the underlying Operation(s).
        -
        - - - - +
        default Function.Callablewith(Arg<?>... arguments) +
        Use this to call this Function alongside with some additional meta-arguments + which will be passed to the underlying Operation(s).
        +
        default Function.Callablewith(Args arguments) +
        Use this to call this Function alongside with some additional meta-arguments + which will be passed to the underlying Operation(s).
        +
        +
      • +
    - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Method Details

      -
        -
      • -
        -

        of

        -
        static Function of(String expression)
        -
        This static factory method will return Function instances - based on a provided mathematical String expression describing the function +
          +
        • + + +

          Method Detail

          + + + +
            +
          • +

            of

            +
            static Function of(java.lang.String expression)
            +
            This static factory method will return Function instances based on a provided mathematical String expression describing the function using 'I[0]', 'I[1]', 'I[2]'... as input variables or 'I[j]' to enable input dependent indexing like for example "sum( I[j] / 2 )". The Function instances returned by this method will by default perform autograd if any involved input Tensor requires gradients (see Tensor.rqsGradient()). If one wishes to disable this behavior one might consider the use of the of(String, boolean) factory method.
            -
            -
            Parameters:
            +
            +
            Parameters:
            expression - The right part of a function equation where inputs are denoted by 'I[0]', 'I[1]', 'I[2]'...
            -
            Returns:
            -
            A Function instance created based on the provided String, ready to receive inputs and execute on them.
            +
            Returns:
            +
            A Function instance created based on the provided String, ready to receive inputs and execute on them.
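            For example (using only members documented on this page; the printed values are expectations, not verified output):

                Function f = Function.of("I[0] * I[1] + 3");
                System.out.println(f.toString());     // a parsable representation of the abstract syntax tree
                System.out.println(f.numberOfArgs()); // expected: 2, since the AST depends on inputs 0 and 1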
            -
      • -
      • -
        -

        of

        -
        static Function of(String expression, - boolean doAD)
        -
        This static factory method will return Function instances - based on a provided mathematical String expression describing the function +
      + + + +
        +
      • +

        of

        +
        static Function of(java.lang.String expression,
        +                   boolean doAD)
        +
        This static factory method will return Function instances based on a provided mathematical String expression describing the function using 'I[0]', 'I[1]', 'I[2]'... as input variables or 'I[j]' to enable input dependent indexing like for example "sum( I[j] / 2 )", as well as a flag determining if the resulting Function ought to be able to perform autograd or not.
        -
        -
        Parameters:
        +
        +
        Parameters:
        expression - The right part of a function equation where inputs are denoted by 'I[0]', 'I[1]', 'I[2]'...
        -
        doAD - A flag determining if the produced Function should be able to perform autograd (aka. auto-differentiation)
        -
        Returns:
        -
        A Function instance created based on the provided String, ready to receive inputs and execute on them.
        +
        doAD - A flag determining if the produced Function should be able to perform autograd (aka. auto-differentiation)
        +
        Returns:
        +
        A Function instance created based on the provided String, ready to receive inputs and execute on them.
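        A hedged sketch of the doAD flag (the setRqsGradient setter name and the fluent factory chain are assumptions; rqsGradient() is referenced above):

            Function withAd = Function.of("I[0] * I[1]", true);  // may participate in autograd
            Function noAd   = Function.of("I[0] * I[1]", false); // never records gradients
            Tensor<Double> w = Tensor.of(Double.class).withShape(2).all(0.5d).setRqsGradient(true);
            Tensor<Double> x = Tensor.of(Double.class).withShape(2).all(1.0d);
            Tensor<Double> y = withAd.call(w, x); // tracked, because 'w' requires gradients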
        -
    • -
    • -
      -

      isDoingAD

      -
      boolean isDoingAD()
      -
      Only branch Functions can do autograd / 'Auto-Differentiation', meaning functions - whose isFlat() flag is set to false!
      -
      -
      Returns:
      -
      The truth value determining if this Function can perform autograd/auto-differentiation on the input tensors it receives.
      +
    + + + +
      +
    • +

      isDoingAD

      +
      boolean isDoingAD()
      +
      Only branch Functions can do autograd / 'Auto-Differentiation', meaning functions + whose isFlat() flag is set to false!
      +
      +
      Returns:
      +
      The truth value determining if this Function can perform autograd/auto-differentiation on the input tensors it receives.
      -
    • -
    • -
      -

      isFlat

      -
      boolean isFlat()
      -
      -
      Returns:
      -
      The truth value determining if the sub-functions of this Function do not themselves reference Functions.
      +
    + + + +
      +
    • +

      isFlat

      +
      boolean isFlat()
      +
      +
      Returns:
      +
      The truth value determining if the sub-functions of this Function do not themselves reference Functions.
      -
    • -
    • -
      -

      getOperation

      -
      Operation getOperation()
      -
      -
      Returns:
      -
      The Operation implementation instance responsible for executing any inputs received by this Function or null if this isFlat().
      +
    + + + +
      +
    • +

      getOperation

      +
      Operation getOperation()
      +
      +
      Returns:
      +
      The Operation implementation instance responsible for executing any inputs received by this Function or null if this isFlat().
      -
    • -
    • -
      -

      dependsOn

      -
      boolean dependsOn(int index)
      +
    + + + +
      +
    • +

      dependsOn

      +
      boolean dependsOn(int index)
      Use this to determine if this function directly or indirectly references an input with the provided index.
      -
      -
      Parameters:
      -
      index - The index which ought to match the input index of a potentially referenced FunctionInput.
      -
      Returns:
      -
      The truth value determining if this Function (or any sub-functions) reference a FunctionInput with the provided index.
      +
      +
      Parameters:
      +
      index - The index which ought to match the input index of a potentially referenced FunctionInput.
      +
      Returns:
      +
      The truth value determining if this Function (or any sub-functions) reference a FunctionInput with the provided index.
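      For example:

          Function f = Function.of("I[0] + I[2]");
          boolean usesFirst  = f.dependsOn(0); // expected: true
          boolean usesSecond = f.dependsOn(1); // expected: false, 'I[1]' never occurs in the expression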
      -
    • -
    • -
      -

      getDerivative

      -
      Function getDerivative(int index)
      -
      This method builds a new Function which is the derivative of this Function with respect to the provided input index.
      -
      -
      Parameters:
      +
    + + + +
      +
    • +

      getDerivative

      +
      Function getDerivative(int index)
      +
      This method builds a new Function which is the derivative of this Function with respect to the provided input index.
      +
      +
      Parameters:
      index - The index of the input which ought to serve as the variable which ought to be derived.
      -
      Returns:
      -
      The derivative of this Function.
      +
      Returns:
      +
      The derivative of this Function.
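      A small usage sketch (the numeric expectation simply assumes standard symbolic differentiation):

          Function f  = Function.of("I[0] * I[0]");    // f(x) = x^2
          Function df = f.getDerivative(0);            // symbolically built derivative, 2 * x
          double slope = df.call(new double[]{ 3.0 }); // expected: 6.0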
      -
    • -
    • -
      -

      getSubFunctions

      -
      List<Function> getSubFunctions()
      -
      -
      Returns:
      -
      The referenced child Function nodes of this Function AST node.
      +
    + + + +
      +
    • +

      getSubFunctions

      +
      java.util.List<Function> getSubFunctions()
      +
      +
      Returns:
      +
      The referenced child Function nodes of this Function AST node.
      -
    • -
    • -
      -

      getAllFunctions

      -
      default List<Function> getAllFunctions()
      -
      -
      Returns:
      -
      A list of all Function nodes within the abstract syntax tree defined by this.
      +
    + + + +
      +
    • +

      getAllFunctions

      +
      default java.util.List<Function> getAllFunctions()
      +
      +
      Returns:
      +
      A list of all Function nodes within the abstract syntax tree defined by this.
      -
    • -
    • -
      -

      numberOfArgs

      -
      default int numberOfArgs()
      -
      -
      Returns:
      -
      The number of inputs that this Function AST depends on.
      +
    + + + +
      +
    • +

      numberOfArgs

      +
      default int numberOfArgs()
      +
      +
      Returns:
      +
      The number of inputs that this Function AST depends on.
      -
    • -
    • -
      -

      call

      -
      default double call(double input)
      -
      Invokes this Function with the provided scalar as a single input and returns the scalar result. - This method is functionally equivalent to invoke(double) or - calling call(double...) or invoke(double...) with a single element array.
      -
      -
      Parameters:
      +
    + + + +
      +
    • +

      call

      +
      default double call(double input)
      +
      Invokes this Function with the provided scalar as a single input and returns the scalar result. + This method is functionally equivalent to invoke(double) or + calling call(double...) or invoke(double...) with a single element array.
      +
      +
      Parameters:
      input - The scalar input, a single double value.
      -
      Returns:
      +
      Returns:
      The scalar result, a single double value.
      -
    • -
    • -
      -

      invoke

      -
      default double invoke(double input)
      -
      Invokes this Function with the provided scalar as a single input and returns the scalar result. - This method is functionally equivalent to call(double) or - calling invoke(double...) or call(double...) with a single element array.
      -
      -
      Parameters:
      +
    + + + +
      +
    • +

      invoke

      +
      default double invoke(double input)
      +
      Invokes this Function with the provided scalar as a single input and returns the scalar result. + This method is functionally equivalent to call(double) or + calling invoke(double...) or call(double...) with a single element array.
      +
      +
      Parameters:
      input - The scalar input, a single double value.
      -
      Returns:
      +
      Returns:
      The scalar result, a single double value.
      -
    • -
    • -
      -

      call

      -
      double call(double[] inputs, - int j)
      -
      Invokes this Function with the provided array of inputs ad an index for input dependent indexing. - This method is functionally equivalent to invoke(double[], int).
      -
      -
      Parameters:
      +
    + + + +
      +
    • +

      call

      +
      double call(double[] inputs,
      +            int j)
      +
      Invokes this Function with the provided array of inputs and an index for input dependent indexing. This method is functionally equivalent to invoke(double[], int).
      +
      +
      Parameters:
      inputs - The array of inputs.
      j - The index for input dependent indexing.
      -
      Returns:
      +
      Returns:
      The scalar result, a single double value.
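      A hedged example of the index parameter j for input dependent expressions:

          Function pick = Function.of("I[j] * 2");
          double r = pick.call(new double[]{ 1.0, 2.0, 3.0 }, 1); // expected: I[1] * 2 = 4.0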
      -
    • -
    • -
      -

      invoke

      -
      default double invoke(double[] inputs, - int j)
      -
      Invokes this Function with the provided array of inputs ad an index for input dependent indexing. - This method is functionally equivalent to call(double[], int).
      -
      -
      Parameters:
      +
    + + + +
      +
    • +

      invoke

      +
      default double invoke(double[] inputs,
      +                      int j)
      +
      Invokes this Function with the provided array of inputs and an index for input dependent indexing. This method is functionally equivalent to call(double[], int).
      +
      +
      Parameters:
      inputs - The array of inputs.
      j - The index for input dependent indexing.
      -
      Returns:
      +
      Returns:
      The scalar result, a single double value.
      -
    • -
    • -
      -

      call

      -
      default double call(double... inputs)
      -
      Invokes this Function with the provided array of inputs. - This method is functionally equivalent to invoke(double[]).
      -
      -
      Parameters:
      +
    + + + +
      +
    • +

      call

      +
      default double call(double... inputs)
      +
      Invokes this Function with the provided array of inputs. + This method is functionally equivalent to invoke(double[]).
      +
      +
      Parameters:
      inputs - A double array of inputs.
      -
      Returns:
      +
      Returns:
      The scalar result, a single double value.
      -
    • -
    • -
      -

      invoke

      -
      default double invoke(double... inputs)
      -
      Invokes this Function with the provided array of inputs. - This method is functionally equivalent to call(double[]).
      -
      -
      Parameters:
      +
    + + + +
      +
    • +

      invoke

      +
      default double invoke(double... inputs)
      +
      Invokes this Function with the provided array of inputs. + This method is functionally equivalent to call(double[]).
      +
      +
      Parameters:
      inputs - The double array of inputs.
      -
      Returns:
      +
      Returns:
      The scalar double result, a single double value.
      -
    • -
    • -
      -

      derive

      -
      double derive(double[] inputs, - int index, - int j)
      -
      Calculates the derivative of a particular input with respect to the output of this Function +
    + + + +
      +
    • +

      derive

      +
      double derive(double[] inputs,
      +              int index,
      +              int j)
      +
      Calculates the derivative of a particular input with respect to the output of this Function based on the provided array of inputs, an index targeting the input to be derived, and an index for input-dependent indexing.
      -
      -
      Parameters:
      +
      +
      Parameters:
      inputs - The double array of inputs.
      index - The index of the input to be derived.
      j - The index for input dependent indexing.
      -
      Returns:
      +
      Returns:
      The scalar double result, a single double value.
      -
    • -
    • -
      -

      derive

      -
      double derive(double[] inputs, - int index)
      -
      Calculates the derivative of a particular input with respect to the output of this Function +
    + + + +
      +
    • +

      derive

      +
      double derive(double[] inputs,
      +              int index)
      +
      Calculates the derivative of a particular input with respect to the output of this Function based on the provided array of inputs and an index targeting the input to be derived.
      -
      -
      Parameters:
      +
      +
      Parameters:
      inputs - The double array of inputs.
      index - The index of the input to be derived.
      -
      Returns:
      +
      Returns:
      The scalar double result, a single double value.
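      A hedged sketch of the scalar derivative overloads; the Function.of parser entry point and the expression syntax are assumptions made only for illustration and are not part of the signatures documented above:

          Function f = Function.of( "i0 * i1" );     // assumed parser entry point and syntax
          double[] inputs = { 3.0, 4.0 };
          double value = f.call( inputs );           // 3 * 4 = 12
          double dWrt0 = f.derive( inputs, 0 );      // d(i0*i1)/di0 = i1 = 4
          double dWrt1 = f.derive( inputs, 1 );      // d(i0*i1)/di1 = i0 = 3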
      -
    • -
    • -
      -

      call

      -
      default <T, -D extends Device<T>> Tensor<T> call(Call.Builder<T,D> call)
      +
    + + + +
      +
    • +

      call

      +
      default <T,D extends Device<T>> Tensor<T> call(Call.Builder<T,D> call)
      Use this for more control over the execution, which is especially useful when interfacing with more complex types of operations, requiring more context information.
      -
      -
      Type Parameters:
      +
      +
      Type Parameters:
      T - The type parameter of the tensors wrapped by the provided call.
      D - The type parameter of the device targeted by the provided call.
      -
      Parameters:
      +
      Parameters:
      call - A wrapper for input tensors, a target device and additional meta-arguments.
      -
      Returns:
      +
      Returns:
      The resulting tensor produced by this function executing the provided call.
      -
    • -
    • -
      -

      call

      -
      default <T, -D extends Device<T>> Tensor<T> call(Call<D> call)
      +
    + + + +
      +
    • +

      call

      +
      default <T,D extends Device<T>> Tensor<T> call(Call<D> call)
      Use this for more control over the execution, which is very helpful when interfacing with more complex types of operations, requiring more context information.
      -
      -
      Type Parameters:
      +
      +
      Type Parameters:
      T - The type parameter of the tensors wrapped by the provided call.
      D - The type parameter of the device targeted by the provided call.
      -
      Parameters:
      +
      Parameters:
      call - A wrapper for input tensors, a target device and additional meta-arguments.
      -
      Returns:
      +
      Returns:
      The resulting tensor produced by this function executing the provided call.
      -
    • -
    • -
      -

      invoke

      -
      default <T, -D extends Device<T>> Tensor<T> invoke(Call.Builder<T,D> call)
      +
    + + + +
      +
    • +

      invoke

      +
      default <T,D extends Device<T>> Tensor<T> invoke(Call.Builder<T,D> call)
      Use this to pass more context information for execution of input tensors. This is important when interfacing with more complex types of operations, requiring more fine-grained control over the execution. This method is functionally identical to call(Call.Builder); however, it is best used in Kotlin, where one can omit the function name entirely and call this Function directly!
      -
      -
      Type Parameters:
    + This method is functionally identical to call(Call.Builder); however, it is best used in Kotlin, where one can omit the function name entirely and call this Function directly!
    +
    +
    Type Parameters:
    T - The type parameter of the tensors wrapped by the provided call.
    D - The type parameter of the device targeted by the provided call.
    -
    Parameters:
    +
    Parameters:
    call - A wrapper for input tensors, a target device and additional meta-arguments.
    -
    Returns:
    +
    Returns:
    The resulting tensor produced by this function executing the provided call.
    - -
  • -
    -

    execute

    -
    default Tensor<?> execute(Call<?> call)
    + + + + +
      +
    • +

      execute

      +
      default Tensor<?> execute(Call<?> call)
      Warning: Tensors returned by this method are eligible for deletion when consumed by other functions. Use this to pass more context information for execution of input tensors. This is important when interfacing with more complex types of operations, requiring more fine-grained control over the execution.
      -
  • -
  • -
    -

    with

    -
    default Function.Callable with(Arg<?>... arguments)
    -
    Use this to call this Function along with some additional meta-arguments which will be passed to the underlying Operation(s).
    -
    -
    Parameters:
    + + + + +
      +
    • +

      with

      +
      default Function.Callable with(Arg<?>... arguments)
      +
      Use this to call this Function along with some additional meta-arguments which will be passed to the underlying Operation(s).
      +
      +
      Parameters:
      arguments - A set of arguments you want to supply to this function for further control over the execution.
      -
      Returns:
      -
      A simple API for passing the Tensor arguments and calling this Function.
      +
      Returns:
      +
      A simple API for passing the Tensor arguments and calling this Function.
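      A hedged sketch of the meta-argument workflow; the Arg.DerivIdx.of(int) factory and the Callable's call(Tensor...) signature are assumptions used only for illustration, and a, b are assumed pre-existing input tensors:

          // bind a meta-argument first, then pass the tensors through the returned Callable:
          Tensor<Double> dOutDa = f.with( Arg.DerivIdx.of( 0 ) )   // assumed factory on Arg.DerivIdx
                                   .call( a, b );                  // assumed Callable#call(Tensor...)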
      -
  • -
  • -
    -

    with

    -
    default Function.Callable with(Args arguments)
    -
    Use this to call this Function along with some additional meta-arguments which will be passed to the underlying Operation(s).
    -
    -
    Parameters:
    + + + + +
      +
    • +

      with

      +
      default Function.Callable with(Args arguments)
      +
      Use this to call this Function along with some additional meta-arguments which will be passed to the underlying Operation(s).
      +
      +
      Parameters:
      arguments - A set of arguments you want to supply to this function for further control over the execution.
      -
      Returns:
      -
      A simple API for passing the Tensor arguments and calling this Function.
      +
      Returns:
      +
      A simple API for passing the Tensor arguments and calling this Function.
      -
  • -
  • -
    -

    call

    -
    default <T> Tensor<T> call(Args arguments, - Tensor<T>... tensors)
    -
    Use this to call this Function along with some additional meta-arguments which will be passed to the underlying Operation(s).
    -
    -
    Type Parameters:
    + + + + +
      +
    • +

      call

      +
      default <T> Tensor<T> call(Args arguments,
      +                           Tensor<T>... tensors)
      +
      Use this to call this Function along with some additional meta-arguments which will be passed to the underlying Operation(s).
      +
      +
      Type Parameters:
      T - The type parameter of the tensors passed to and returned by this function.
      -
      Parameters:
      +
      Parameters:
      arguments - A set of arguments you want to supply to this function for further control over the execution.
      tensors - The tensors which should be sent through this function.
      -
      Returns:
      +
      Returns:
      The resulting tensor produced by this function.
      -
  • -
  • -
    -

    invoke

    -
    default <T> Tensor<T> invoke(Args arguments, - Tensor<T>... inputs)
    -
    Use this to call this Function along with some additional meta-arguments which will be passed to the underlying Operation(s).
    -
    -
    Type Parameters:
    + + + + +
      +
    • +

      invoke

      +
      default <T> Tensor<T> invoke(Args arguments,
      +                             Tensor<T>... inputs)
      +
      Use this to call this Function along with some additional meta-arguments which will be passed to the underlying Operation(s).
      +
      +
      Type Parameters:
      T - The type parameter of the tensors passed to and returned by this function.
      -
      Parameters:
      +
      Parameters:
      arguments - A set of arguments you want to supply to this function for further control over the execution.
      inputs - The tensors which should be sent through this function.
      -
      Returns:
      +
      Returns:
      The resulting tensor produced by this function.
      -
  • -
  • -
    -

    execute

    -
    Tensor<?> execute(Args arguments, - Tensor<?>... inputs)
    + + + + +
      +
    • +

      execute

      +
      Tensor<?> execute(Args arguments,
      +                  Tensor<?>... inputs)
      Warning: Tensors returned by this method are eligible for deletion when consumed by other functions.
      - Use this to call this Function along with some additional meta-arguments which will be passed to the underlying Operation(s).
      -
      -
      Parameters:
      + Use this to call this Function along with some additional meta-arguments which will be passed to the underlying Operation(s).
  • +
    +
    Parameters:
    arguments - A set of arguments you want to supply to this function for further control over the execution.
    inputs - The tensors which should be sent through this function.
    -
    Returns:
    +
    Returns:
    The resulting tensor produced by this function.
    - -
  • -
    -

    execute

    -
    default Tensor<?> execute(Tensor<?>... inputs)
    + + + + +
      +
    • +

      execute

      +
      default Tensor<?> execute(Tensor<?>... inputs)
      Warning: Tensors returned by this method are eligible for deletion when consumed by other functions.
      -
      -
      Parameters:
      +
      +
      Parameters:
      inputs - The tensors which should be sent through this function.
      -
      Returns:
      +
      Returns:
      The resulting tensor produced by this function.
      -
  • -
  • -
    -

    execute

    -
    default Tensor<?> execute(Tensor<?>[] inputs, - int j)
    + + + + +
      +
    • +

      execute

      +
      default Tensor<?> execute(Tensor<?>[] inputs,
      +                          int j)
      Warning: Tensors returned by this method are eligible for deletion when consumed by other functions.
      -
      -
      Parameters:
      +
      +
      Parameters:
      inputs - The tensors which should be sent through this function.
      j - The input index used by indexer operations to target a particular input.
      -
      Returns:
      +
      Returns:
      The resulting tensor produced by this function.
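      Because tensors returned by the execute variants are eligible for deletion once another function consumes them, they are best treated as short-lived intermediates. A hedged sketch; f, otherFun and the input tensors a, b are assumed to exist:

          Tensor<?> tmp = f.execute( a, b );           // intermediate result, eligible for deletion
          Tensor<?> result = otherFun.execute( tmp );  // 'tmp' may be deleted after being consumed here
          // if a value must outlive further calls, prefer the call(..)/invoke(..) variants instead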
      -
  • -
  • -
    -

    executeDerive

    -
    default Tensor<?> executeDerive(Tensor<?>[] inputs, - int index, - int j)
    + + + + +
      +
    • +

      executeDerive

      +
      default Tensor<?> executeDerive(Tensor<?>[] inputs,
      +                                int index,
      +                                int j)
      Warning: Tensors returned by this method are eligible for deletion when consumed by other functions.
      -
  • -
  • -
    -

    executeDerive

    -
    default Tensor<?> executeDerive(Tensor<?>[] inputs, - int index)
    + + + + +
      +
    • +

      executeDerive

      +
      default Tensor<?> executeDerive(Tensor<?>[] inputs,
      +                                int index)
      Warning: Tensors returned by this method are eligible for deletion when consumed by other functions.
      -
  • -
  • -
    -

    call

    -
    default <T> Tensor<T> call(Tensor<T> input)
    -
    -
    Type Parameters:
    + + + + +
      +
    • +

      call

      +
      default <T> Tensor<T> call(Tensor<T> input)
      +
      +
      Type Parameters:
      T - The type parameter of the tensor passed to and returned by this function.
      -
      Parameters:
      +
      Parameters:
      input - The tensor which should be sent through this function.
      -
      Returns:
      +
      Returns:
      The resulting tensor produced by this function.
      -
  • -
  • -
    -

    invoke

    -
    default <T> Tensor<T> invoke(Tensor<T> input)
    -
    This method is functionally identical to call(Tensor); however, it is best used in Kotlin, where one can omit the function name entirely and call this Function directly!
    -
    -
    Type Parameters:
    + + + + +
      +
    • +

      invoke

      +
      default <T> Tensor<T> invoke(Tensor<T> input)
      +
      This method is functionally identical to call(Tensor); however, it is best used in Kotlin, where one can omit the function name entirely and call this Function directly!
      +
      +
      Type Parameters:
      T - The type parameter of the tensor passed to and returned by this function.
      -
      Parameters:
      +
      Parameters:
      input - The tensor which should be sent through this function.
      -
      Returns:
      +
      Returns:
      The resulting tensor produced by this function.
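      The single-tensor overloads behave identically; invoke mainly exists so that Kotlin callers can use the function like an operator. A minimal Java sketch, where the Tensor.of scalar factory is an assumption:

          Tensor<Float> x  = Tensor.of( 0.5f );   // assumed scalar-tensor factory, for illustration only
          Tensor<Float> y1 = f.call( x );
          Tensor<Float> y2 = f.invoke( x );       // same result as y1
          // in Kotlin the same call can be written by invoking the function reference directly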
      -
  • -
  • -
    -

    call

    -
    default <T> Tensor<T> call(List<Tensor<T>> inputs)
    -
    -
    Type Parameters:
    + + + + +
      +
    • +

      call

      +
      default <T> Tensor<T> call(java.util.List<Tensor<T>> inputs)
      +
      +
      Type Parameters:
      T - The type parameter of the tensors passed to and returned by this function.
      -
      Parameters:
      +
      Parameters:
      inputs - The list of tensors which should be sent through this function.
      -
      Returns:
      +
      Returns:
      The resulting tensor produced by this function.
      -
  • -
  • -
    -

    invoke

    -
    default <T> Tensor<T> invoke(List<Tensor<T>> input)
    -
    This method is functionally identical to call(List); however, it is best used in Kotlin, where one can omit the function name entirely and call this Function directly!
    -
    -
    Type Parameters:
    + + + + +
      +
    • +

      invoke

      +
      default <T> Tensor<T> invoke(java.util.List<Tensor<T>> input)
      +
      This method is functionally identical to call(List); however, it is best used in Kotlin, where one can omit the function name entirely and call this Function directly!
      +
      +
      Type Parameters:
      T - The type parameter of the tensor passed to and returned by this function.
      -
      Parameters:
      +
      Parameters:
      input - The tensor which should be sent through this function.
      -
      Returns:
      +
      Returns:
      The resulting tensor produced by this function.
      -
  • -
  • -
    -

    call

    -
    default <T> Tensor<T> call(Tensor<T>[] inputs, - int j)
    -
    -
    Type Parameters:
    + + + + +
      +
    • +

      call

      +
      default <T> Tensor<T> call(Tensor<T>[] inputs,
      +                           int j)
      +
      +
      Type Parameters:
      T - The type parameter of the tensors passed to and returned by this function.
      -
      Parameters:
      +
      Parameters:
      inputs - The tensors which should be sent through this function.
      j - The input index used by indexer operations to target a particular input.
      -
      Returns:
      +
      Returns:
      The resulting tensor produced by this function.
      -
  • -
  • -
    -

    invoke

    -
    default <T> Tensor<T> invoke(Tensor<T>[] inputs, - int j)
    -
    This method is functionally identical to call(Tensor[], int); however, it is best used in Kotlin, where one can omit the function name entirely and call this Function directly!
    -
    -
    Type Parameters:
    + + + + +
      +
    • +

      invoke

      +
      default <T> Tensor<T> invoke(Tensor<T>[] inputs,
      +                             int j)
      +
      This method is functionally identical to call(Tensor[], int); however, it is best used in Kotlin, where one can omit the function name entirely and call this Function directly!
      +
      +
      Type Parameters:
      T - The type parameter of the tensors passed to and returned by this function.
      -
      Parameters:
      +
      Parameters:
      inputs - The tensors which should be sent through this function.
      j - The input index used by indexer operations to target a particular input.
      -
      Returns:
      +
      Returns:
      The resulting tensor produced by this function.
      -
  • -
  • -
    -

    call

    -
    default <T> Tensor<T> call(Tensor<T>... inputs)
    -
    -
    Type Parameters:
    + + + + +
      +
    • +

      call

      +
      default <T> Tensor<T> call(Tensor<T>... inputs)
      +
      +
      Type Parameters:
      T - The type parameter of the tensors passed to and returned by this function.
      -
      Parameters:
      +
      Parameters:
      inputs - The tensors which should be sent through this function.
      -
      Returns:
      +
      Returns:
      The resulting tensor produced by this function.
      -
  • -
  • -
    -

    invoke

    -
    default <T> Tensor<T> invoke(Tensor<T>... inputs)
    -
    This method is functionally identical to call(Tensor[]); however, it is best used in Kotlin, where one can omit the function name entirely and call this Function directly!
    -
    -
    Type Parameters:
    + + + + +
      +
    • +

      invoke

      +
      default <T> Tensor<T> invoke(Tensor<T>... inputs)
      +
      This method is functionally identical to call(Tensor[]); however, it is best used in Kotlin, where one can omit the function name entirely and call this Function directly!
      +
      +
      Type Parameters:
      T - The type parameter of the tensors passed to and returned by this function.
      -
      Parameters:
      +
      Parameters:
      inputs - The tensors which should be sent through this function.
      -
      Returns:
      +
      Returns:
      The resulting tensor produced by this function.
      -
  • -
  • -
    -

    derive

    -
    default <T> Tensor<T> derive(Tensor<T>[] inputs, - int index, - int j)
    -
    -
    Type Parameters:
    + + + + +
      +
    • +

      derive

      +
      default <T> Tensor<T> derive(Tensor<T>[] inputs,
      +                             int index,
      +                             int j)
      +
      +
      Type Parameters:
      T - The type parameter of the tensors passed to and returned by this function.
      -
      Parameters:
      +
      Parameters:
      inputs - The tensors which should be sent through this function.
      index - The index of the input tensor which should be derived.
      j - The input index used by indexer operations to target a particular input.
      -
      Returns:
      +
      Returns:
      The resulting tensor produced by this function.
      -
  • -
  • -
    -

    derive

    -
    default <T> Tensor<T> derive(Tensor<T>[] inputs, - int index)
    -
    -
    Type Parameters:
    + + + + +
      +
    • +

      derive

      +
      default <T> Tensor<T> derive(Tensor<T>[] inputs,
      +                             int index)
      +
      +
      Type Parameters:
      T - The type parameter of the tensors passed to and returned by this function.
      -
      Parameters:
      +
      Parameters:
      inputs - The tensors which should be sent through this function.
      index - The index of the input tensor which should be derived.
      -
      Returns:
      +
      Returns:
      The resulting tensor produced by this function.
      -
  • -
  • -
    -

    derive

    -
    default <T> Tensor<T> derive(List<Tensor<T>> inputs, - int index, - int j)
    -
    -
    Type Parameters:
    + + + + +
      +
    • +

      derive

      +
      default <T> Tensor<T> derive(java.util.List<Tensor<T>> inputs,
      +                             int index,
      +                             int j)
      +
      +
      Type Parameters:
      T - The type parameter of the tensors passed to and returned by this function.
      -
      Parameters:
      +
      Parameters:
      inputs - The list of tensors which should be sent through this function.
      index - The index of the input tensor which should be derived.
      j - The input index used by indexer operations to target a particular input.
      -
      Returns:
      +
      Returns:
      The resulting tensor produced by this function.
      -
  • -
  • -
    -

    derive

    -
    default <T> Tensor<T> derive(List<Tensor<T>> inputs, - int index)
    -
    -
    Type Parameters:
    + + + + +
      +
    • +

      derive

      +
      default <T> Tensor<T> derive(java.util.List<Tensor<T>> inputs,
      +                             int index)
      +
      +
      Type Parameters:
      T - The type parameter of the tensors passed to and returned by this function.
      -
      Parameters:
      +
      Parameters:
      inputs - The list of tensors which should be sent through this function.
      index - The index of the input tensor which should be derived.
      -
      Returns:
      +
      Returns:
      The resulting tensor produced by this function.
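      A hedged sketch of the tensor-based derive overloads; a and b are assumed pre-existing Tensor<Double> inputs and f an assumed two-input Function:

          java.util.List<Tensor<Double>> inputs = java.util.Arrays.asList( a, b );
          Tensor<Double> dOutDa = f.derive( inputs, 0 );  // derivative of the output w.r.t. inputs.get(0)
          Tensor<Double> dOutDb = f.derive( inputs, 1 );  // derivative of the output w.r.t. inputs.get(1)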
      -
  • -
  • -
    -

    toString

    -
    String toString()
    + + + + +
      +
    • +

      toString

      +
      java.lang.String toString()
      Turns this function into a string representation which can be used to reconstruct this function or combine it with other function strings to parse entirely new functions...
      -
      -
      Overrides:
      -
      toString in class Object
      -
      Returns:
      +
      +
      Overrides:
      +
      toString in class java.lang.Object
      +
      Returns:
      The string representation of this function.
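      Since the string form can be fed back into the parser, a round trip like the following should be possible; the Function.of entry point is an assumption and not part of the signatures documented here:

          String expression = f.toString();             // e.g. something like "(I[0] * I[1])"
          Function rebuilt = Function.of( expression ); // assumed parser entry point
          // 'rebuilt' is expected to behave like 'f' for the same inputs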
      -
  • - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/math/FunctionCache.html b/docs/jdocs/neureka/math/FunctionCache.html index 1961dd636..770a2df74 100644 --- a/docs/jdocs/neureka/math/FunctionCache.html +++ b/docs/jdocs/neureka/math/FunctionCache.html @@ -1,197 +1,319 @@ - + + - -FunctionCache (neureka 1.0.0 API) - - - - + +FunctionCache (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    -
    Package neureka.math
    -

    Class FunctionCache

    -
    -
    java.lang.Object -
    neureka.math.FunctionCache
    +
    neureka.math
    +

    Class FunctionCache

    -
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.math.FunctionCache
      • +
      +
    • +
    +
    +
      +

    • -
      public final class FunctionCache -extends Object
      -
      This class is part of a given BackendContext instance - responsible for caching Function references based on - their String representation generated by Object.toString() +
      +
      public final class FunctionCache
      +extends java.lang.Object
      +
      This class is part of a given BackendContext instance + responsible for caching Function references based on + their String representation generated by Object.toString() as well as caching of results for active functions.
      -
    -
    -
      - -
    • -
      -

      Constructor Summary

      -
      Constructors
      -
      -
      Constructor
      -
      Description
      - -
       
      +
    • +
    - +
    +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        FunctionCache

        -
        public FunctionCache()
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            FunctionCache

            +
            public FunctionCache()
          -
    • +
    -
  • -
    -

    Method Details

    -
      -
    • -
      -

      put

      -
      public void put(Function function)
      -
      +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          put

          +
          public void put(Function function)
        • -
        • -
          -

          get

          -
          public Function get(String expression, - boolean doesAD)
          -
          +
        + + + +
          +
        • +

          get

          +
          public Function get(java.lang.String expression,
          +                    boolean doesAD)
        • -
        • -
          -

          has

          -
          public boolean has(String expression, - boolean doesAD)
          -
          +
        + + + +
          +
        • +

          has

          +
          public boolean has(java.lang.String expression,
          +                   boolean doesAD)
        • -
        • -
          -

          toString

          -
          public String toString()
          -
          -
          Overrides:
          -
          toString in class Object
          +
        + + + +
          +
        • +

          toString

          +
          public java.lang.String toString()
          +
          +
          Overrides:
          +
          toString in class java.lang.Object
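          A hedged sketch of how the cache methods fit together; obtaining the cache instance from its BackendContext is not shown, the expression syntax is only an example, and someFunction is an assumed pre-existing Function:

              String expr = "i0 + i1";                      // example expression, syntax is an assumption
              boolean withAutograd = true;
              if ( cache.has( expr, withAutograd ) ) {
                  Function cached = cache.get( expr, withAutograd ); // look up the previously stored Function
              } else {
                  cache.put( someFunction ); // register a Function, keyed by its String representation
              }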
          -
  • - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/math/Functions.html b/docs/jdocs/neureka/math/Functions.html index cc236af45..e818185e1 100644 --- a/docs/jdocs/neureka/math/Functions.html +++ b/docs/jdocs/neureka/math/Functions.html @@ -1,1252 +1,1752 @@ - + + - -Functions (neureka 1.0.0 API) - - - - + +Functions (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    -
    Package neureka.math
    -

    Class Functions

    +
    neureka.math
    +

    Class Functions

    -
    java.lang.Object -
    neureka.math.Functions
    -
    -
    -
    -
    public class Functions -extends Object
    -
    -
    -
      - +
      +
        +
      • java.lang.Object
      • -
        -

        Constructor Summary

        -
        Constructors
        -
        -
        Constructor
        -
        Description
        -
        Functions(boolean doingAD)
        -
         
        +
          +
        • neureka.math.Functions
        • +
        +
      • +
      +
      +
        +
      • +
        +
        +
        public class Functions
        +extends java.lang.Object
        +
      • +
      -
    +
    + - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        Functions

        -
        public Functions(boolean doingAD)
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            Functions

            +
            public Functions(boolean doingAD)
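            Based on the constructor above, a Functions instance can be created directly; whether direct construction (rather than obtaining it through a backend context) is the intended usage is an assumption:

                Functions functions = new Functions( true );   // true -> the provided functions support autograd
                Function plus   = functions.plus();            // element-wise addition
                Function matMul = functions.matMul();          // matrix multiplication
                Function sig    = functions.sigmoid();         // 1 / ( 1 + Math.exp( -x ) )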
          -
    • +
    -
  • -
    -

    Method Details

    -
      -
    • -
      -

      getReshape

      -
      public final Function getReshape()
      -
      +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          getReshape

          +
          public final Function getReshape()
        • -
        • -
          -

          reshape

          -
          public final Function reshape()
          -
          +
        + + + +
          +
        • +

          reshape

          +
          public final Function reshape()
        • -
        • -
          -

          getRelayout

          -
          public final Function getRelayout()
          -
          +
        + + + +
          +
        • +

          getRelayout

          +
          public final Function getRelayout()
        • -
        • -
          -

          relayout

          -
          public final Function relayout()
          -
          +
        + + + +
          +
        • +

          relayout

          +
          public final Function relayout()
        • -
        • -
          -

          getPermute

          -
          public final Function getPermute()
          -
          +
        + + + +
          +
        • +

          getPermute

          +
          public final Function getPermute()
        • -
        • -
          -

          permute

          -
          public final Function permute()
          -
          +
        + + + +
          +
        • +

          permute

          +
          public final Function permute()
        • -
        • -
          -

          getDimTrim

          -
          public final Function getDimTrim()
          -
          +
        + + + +
          +
        • +

          getDimTrim

          +
          public final Function getDimTrim()
        • -
        • -
          -

          dimTrim

          -
          public final Function dimTrim()
          -
          +
        + + + +
          +
        • +

          dimTrim

          +
          public final Function dimTrim()
        • -
        • -
          -

          getIdy

          -
          public final Function getIdy()
          -
          +
        + + + +
          +
        • +

          getIdy

          +
          public final Function getIdy()
        • -
        • -
          -

          idy

          -
          public final Function idy()
          -
          +
        + + + +
          +
        • +

          idy

          +
          public final Function idy()
        • -
        • -
          -

          getConv

          -
          public final Function getConv()
          -
          -
          Returns:
          -
          A Function which represents the standard convolution operation without 0 padding.
          +
        + + + +
          +
        • +

          getConv

          +
          public final Function getConv()
          +
          +
          Returns:
          +
          A Function which represents the standard convolution operation without 0 padding.
          -
  • -
  • -
    -

    conv

    -
    public final Function conv()
    -
    -
    Returns:
    -
    A Function which represents the standard convolution operation without 0 padding.
    + + + + +
      +
    • +

      conv

      +
      public final Function conv()
      +
      +
      Returns:
      +
      A Function which represents the standard convolution operation without 0 padding.
      -
  • -
  • -
    -

    getPlus

    -
    public final Function getPlus()
    -
    + + + + +
      +
    • +

      getPlus

      +
      public final Function getPlus()
    • -
    • -
      -

      plus

      -
      public final Function plus()
      -
      +
    + + + +
      +
    • +

      plus

      +
      public final Function plus()
    • -
    • -
      -

      getPlusAssign

      -
      public final Function getPlusAssign()
      -
      +
    + + + +
      +
    • +

      getPlusAssign

      +
      public final Function getPlusAssign()
    • -
    • -
      -

      plusAssign

      -
      public final Function plusAssign()
      -
      +
    + + + +
      +
    • +

      plusAssign

      +
      public final Function plusAssign()
    • -
    • -
      -

      getMinus

      -
      public final Function getMinus()
      -
      +
    + + + +
      +
    • +

      getMinus

      +
      public final Function getMinus()
    • -
    • -
      -

      minus

      -
      public final Function minus()
      -
      +
    + + + +
      +
    • +

      minus

      +
      public final Function minus()
    • -
    • -
      -

      getMinusAssign

      -
      public final Function getMinusAssign()
      -
      +
    + + + +
      +
    • +

      getMinusAssign

      +
      public final Function getMinusAssign()
    • -
    • -
      -

      minusAssign

      -
      public final Function minusAssign()
      -
      +
    + + + +
      +
    • +

      minusAssign

      +
      public final Function minusAssign()
    • -
    • -
      -

      getDiv

      -
      public final Function getDiv()
      -
      +
    + + + +
      +
    • +

      getDiv

      +
      public final Function getDiv()
    • -
    • -
      -

      div

      -
      public final Function div()
      -
      +
    + + + +
      +
    • +

      div

      +
      public final Function div()
    • -
    • -
      -

      getDivAssign

      -
      public final Function getDivAssign()
      -
      +
    + + + +
      +
    • +

      getDivAssign

      +
      public final Function getDivAssign()
    • -
    • -
      -

      divAssign

      -
      public final Function divAssign()
      -
      +
    + + + +
      +
    • +

      divAssign

      +
      public final Function divAssign()
    • -
    • -
      -

      getPow

      -
      public final Function getPow()
      -
      +
    + + + +
      +
    • +

      getPow

      +
      public final Function getPow()
    • -
    • -
      -

      pow

      -
      public final Function pow()
      -
      +
    + + + +
      +
    • +

      pow

      +
      public final Function pow()
    • -
    • -
      -

      getPowAssign

      -
      public final Function getPowAssign()
      -
      +
    + + + +
      +
    • +

      getPowAssign

      +
      public final Function getPowAssign()
    • -
    • -
      -

      powAssign

      -
      public final Function powAssign()
      -
      +
    + + + +
      +
    • +

      powAssign

      +
      public final Function powAssign()
    • -
    • -
      -

      getMul

      -
      public final Function getMul()
      -
      +
    + + + +
      +
    • +

      getMul

      +
      public final Function getMul()
    • -
    • -
      -

      mul

      -
      public final Function mul()
      -
      +
    + + + +
      +
    • +

      mul

      +
      public final Function mul()
    • -
    • -
      -

      getMulAssign

      -
      public final Function getMulAssign()
      -
      +
    + + + +
      +
    • +

      getMulAssign

      +
      public final Function getMulAssign()
    • -
    • -
      -

      mulAssign

      -
      public final Function mulAssign()
      -
      +
    + + + +
      +
    • +

      mulAssign

      +
      public final Function mulAssign()
    • -
    • -
      -

      getAdd

      -
      public final Function getAdd()
      -
      +
    + + + +
      +
    • +

      getAdd

      +
      public final Function getAdd()
    • -
    • -
      -

      add

      -
      public final Function add()
      -
      +
    + + + +
      +
    • +

      add

      +
      public final Function add()
    • -
    • -
      -

      getAddAssign

      -
      public final Function getAddAssign()
      -
      +
    + + + +
      +
    • +

      getAddAssign

      +
      public final Function getAddAssign()
    • -
    • -
      -

      addAssign

      -
      public final Function addAssign()
      -
      +
    + + + +
      +
    • +

      addAssign

      +
      public final Function addAssign()
    • -
    • -
      -

      getMod

      -
      public final Function getMod()
      -
      +
    + + + +
      +
    • +

      getMod

      +
      public final Function getMod()
    • -
    • -
      -

      mod

      -
      public final Function mod()
      -
      +
    + + + +
      +
    • +

      mod

      +
      public final Function mod()
    • -
    • -
      -

      getModAssign

      -
      public final Function getModAssign()
      -
      +
    + + + +
      +
    • +

      getModAssign

      +
      public final Function getModAssign()
    • -
    • -
      -

      modAssign

      -
      public final Function modAssign()
      -
      +
    + + + +
      +
    • +

      modAssign

      +
      public final Function modAssign()
    • -
    • -
      -

      getNeg

      -
      public final Function getNeg()
      -
      +
    + + + +
      +
    • +

      getNeg

      +
      public final Function getNeg()
    • -
    • -
      -

      neg

      -
      public final Function neg()
      -
      +
    + + + +
      +
    • +

      neg

      +
      public final Function neg()
    • -
    • -
      -

      getMatMul

      -
      public final Function getMatMul()
      -
      +
    + + + +
      +
    • +

      getMatMul

      +
      public final Function getMatMul()
    • -
    • -
      -

      matMul

      -
      public final Function matMul()
      -
      +
    + + + +
      +
    • +

      matMul

      +
      public final Function matMul()
    • -
    • -
      -

      getDot

      -
      public final Function getDot()
      -
      +
    + + + +
      +
    • +

      getDot

      +
      public final Function getDot()
    • -
    • -
      -

      dot

      -
      public final Function dot()
      -
      +
    + + + +
      +
    • +

      dot

      +
      public final Function dot()
    • -
    • -
      -

      getTranspose2D

      -
      public final Function getTranspose2D()
      -
      +
    + + + +
      +
    • +

      getTranspose2D

      +
      public final Function getTranspose2D()
    • -
    • -
      -

      transpose2D

      -
      public final Function transpose2D()
      -
      +
    + + + +
      +
    • +

      transpose2D

      +
      public final Function transpose2D()
    • -
    • -
      -

      getRandom

      -
      public final Function getRandom()
      -
      +
    + + + +
      +
    • +

      getRandom

      +
      public final Function getRandom()
    • -
    • -
      -

      random

      -
      public final Function random()
      -
      +
    + + + +
      +
    • +

      random

      +
      public final Function random()
    • -
    • -
      -

      getTanh

      -
      public final Function getTanh()
      -
      -
      Returns:
      -
      A tanh Function based on: 2 / ( 1 + Math.exp( -x * 2 ) ) - 1.
      +
    + + + +
      +
    • +

      getTanh

      +
      public final Function getTanh()
      +
      +
      Returns:
      +
      A tanh Function based on: 2 / ( 1 + Math.exp( -x * 2 ) ) - 1.
      -
    • -
    • -
      -

      tanh

      -
      public final Function tanh()
      -
      -
      Returns:
      -
      A tanh Function based on: 2 / ( 1 + Math.exp( -x * 2 ) ) - 1.
      +
    + + + +
      +
    • +

      tanh

      +
      public final Function tanh()
      +
      +
      Returns:
      +
      A tanh Function based on: 2 / ( 1 + Math.exp( -x * 2 ) ) - 1.
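      As a quick sanity check of the documented formula, this sketch evaluates the tanh Function on scalars (constructing Functions directly is assumed to be acceptable here):

          Function tanh = new Functions( false ).tanh(); // no autograd needed for a scalar check
          double y0 = tanh.call( 0.0 );  // 2 / (1 + exp(0)) - 1  =  0.0
          double y1 = tanh.call( 1.0 );  // about 0.7616, i.e. Math.tanh(1.0)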
      -
    • -
    • -
      -

      getFastTanh

      -
      public final Function getFastTanh()
      -
      -
      Returns:
      -
      A fast quasi tanh Function based on: x * FastFun.invSqrt( 1 + x * x ).
      +
    + + + +
      +
    • +

      getFastTanh

      +
      public final Function getFastTanh()
      +
      +
      Returns:
      +
      A fast quasi tanh Function based on: x * FastFun.invSqrt( 1 + x * x ).
      -
    • -
    • -
      -

      fastTanh

      -
      public final Function fastTanh()
      -
      -
      Returns:
      -
      A fast quasi tanh Function based on: x * FastFun.invSqrt( 1 + x * x ).
      +
    + + + +
      +
    • +

      fastTanh

      +
      public final Function fastTanh()
      +
      +
      Returns:
      +
      A fast quasi tanh Function based on: x * FastFun.invSqrt( 1 + x * x ).
      -
    • -
    • -
      -

      getSoftsign

      -
      public final Function getSoftsign()
      +
    + + + +
      +
    • +

      getSoftsign

      +
      public final Function getSoftsign()
      The softsign function, defined as x / ( 1 + Math.abs( x ) ), is a computationally cheap, 0-centered activation function which rescales the inputs between -1 and 1, very much like the Tanh function. The softsign function converges polynomially and is computationally cheaper than the tanh function, which converges exponentially.
      -
      -
      Returns:
      -
      A very fast quasi tanh Function based on: x / ( 1 + Math.abs( x ) ).
      +
      +
      Returns:
      +
      A very fast quasi tanh Function based on: x / ( 1 + Math.abs( x ) ).
      -
    • -
    • -
      -

      softsign

      -
      public final Function softsign()
      +
    + + + +
      +
    • +

      softsign

      +
      public final Function softsign()
      The softsign function, defined as x / ( 1 + Math.abs( x ) ), is a computationally cheap, 0-centered activation function which rescales the inputs between -1 and 1, very much like the Tanh function. The softsign function converges polynomially and is computationally cheaper than the tanh function, which converges exponentially.
      -
      -
      Returns:
      -
      A very fast quasi tanh Function based on: x / ( 1 + Math.abs( x ) ).
      +
      +
      Returns:
      +
      A very fast quasi tanh Function based on: x / ( 1 + Math.abs( x ) ).
      -
    • -
    • -
      -

      getSigmoid

      -
      public final Function getSigmoid()
      -
      -
      Returns:
      -
      A sigmoid Function based on: 1 / ( 1 + Math.exp( -x ) ).
      +
    + + + +
      +
    • +

      getSigmoid

      +
      public final Function getSigmoid()
      +
      +
      Returns:
      +
      A sigmoid Function based on: 1 / ( 1 + Math.exp( -x ) ).
      -
    • -
    • -
      -

      sigmoid

      -
      public final Function sigmoid()
      -
      -
      Returns:
      -
      A sigmoid Function based on: 1 / ( 1 + Math.exp( -x ) ).
      +
    + + + +
      +
    • +

      sigmoid

      +
      public final Function sigmoid()
      +
      +
      Returns:
      +
      A sigmoid Function based on: 1 / ( 1 + Math.exp( -x ) ).
      -
    • -
    • -
      -

      getGaus

      -
      public final Function getGaus()
      -
      -
      Returns:
      -
      A gaussian Function based on: Math.exp( -( x * x ) ).
      +
    + + + +
      +
    • +

      getGaus

      +
      public final Function getGaus()
      +
      +
      Returns:
      +
      A gaussian Function based on: Math.exp( -( x * x ) ).
      -
    • -
    • -
      -

      gaus

      -
      public final Function gaus()
      -
      -
      Returns:
      -
      A gaussian Function based on: Math.exp( -( x * x ) ).
      +
    + + + +
      +
    • +

      gaus

      +
      public final Function gaus()
      +
      +
      Returns:
      +
      A gaussian Function based on: Math.exp( -( x * x ) ).
      -
    • -
    • -
      -

      getFastGaus

      -
      public final Function getFastGaus()
      -
      -
      Returns:
      -
      A quasi gaussian Function based on: 1 / ( 1 + x * x ).
      +
    + + + +
      +
    • +

      getFastGaus

      +
      public final Function getFastGaus()
      +
      +
      Returns:
      +
      A quasi gaussian Function based on: 1 / ( 1 + x * x ).
      -
    • -
    • -
      -

      fastGaus

      -
      public final Function fastGaus()
      -
      -
      Returns:
      -
      A quasi gaussian Function based on: 1 / ( 1 + x * x ).
      +
    + + + +
      +
    • +

      fastGaus

      +
      public final Function fastGaus()
      +
      +
      Returns:
      +
      A quasi gaussian Function based on: 1 / ( 1 + x * x ).
      -
    • -
    • -
      -

      getLn

      -
      public final Function getLn()
      -
      -
      Returns:
      -
      A natural log Function based on: Math.log( x ).
      +
    + + + +
      +
    • +

      getLn

      +
      public final Function getLn()
      +
      +
      Returns:
      +
      A natural log Function based on: Math.log( x ).
      -
    • -
    • -
      -

      ln

      -
      public final Function ln()
      -
      -
      Returns:
      -
      A natural log Function based on: Math.log( x ).
      +
    + + + +
      +
    • +

      ln

      +
      public final Function ln()
      +
      +
      Returns:
      +
      A natural log Function based on: Math.log( x ).
      -
    • -
    • -
      -

      getQuad

      -
      public final Function getQuad()
      -
      -
      Returns:
      -
      A quadratic Function based on: x * x.
      +
    + + + +
      +
    • +

      getQuad

      +
      public final Function getQuad()
      +
      +
      Returns:
      +
      A quadratic Function based on: x * x.
      -
    • -
    • -
      -

      quad

      -
      public final Function quad()
      -
      -
      Returns:
      -
      A quadratic Function based on: x * x.
      +
    + + + +
      +
    • +

      quad

      +
      public final Function quad()
      +
      +
      Returns:
      +
      A quadratic Function based on: x * x.
      -
    • -
    • -
      -

      getRelu

      -
      public final Function getRelu()
      -
      -
      Returns:
      -
      A rectified linear unit Function based on: ( x >= 0 ? x : x * .01 ).
      +
    + + + +
      +
    • +

      getRelu

      +
      public final Function getRelu()
      +
      +
      Returns:
      +
      A rectified linear unit Function based on: ( x >= 0 ? x : x * .01 ).
      -
    • -
    • -
      -

      relu

      -
      public final Function relu()
      -
      -
      Returns:
      -
      A rectified linear unit Function based on: ( x >= 0 ? x : x * .01 ).
      +
    + + + +
      +
    • +

      relu

      +
      public final Function relu()
      +
      +
      Returns:
      +
      A rectified linear unit Function based on: ( x >= 0 ? x : x * .01 ).
      -
    • -
    • -
      -

      getAbs

      -
      public final Function getAbs()
      -
      -
      Returns:
      -
      An absolute Function based on: Math.abs(x).
      +
    + + + +
      +
    • +

      getAbs

      +
      public final Function getAbs()
      +
      +
      Returns:
      +
      An absolute Function based on: Math.abs(x).
      -
    • -
    • -
      -

      abs

      -
      public final Function abs()
      -
      -
      Returns:
      -
      An absolute Function based on: Math.abs(x).
      +
    + + + +
      +
    • +

      abs

      +
      public final Function abs()
      +
      +
      Returns:
      +
      An absolute Function based on: Math.abs(x).
      -
    • -
    • -
      -

      getSin

      -
      public final Function getSin()
      -
      -
      Returns:
      -
      A sine Function based on: Math.sin(x).
      +
    + + + +
      +
    • +

      getSin

      +
      public final Function getSin()
      +
      +
      Returns:
      +
      A sine Function based on: Math.sin(x).
      -
    • -
    • -
      -

      sin

      -
      public final Function sin()
      -
      -
      Returns:
      -
      A sine Function based on: Math.sin(x).
      +
    + + + +
      +
    • +

      sin

      +
      public final Function sin()
      +
      +
      Returns:
      +
      A sine Function based on: Math.sin(x).
      -
    • -
    • -
      -

      getCos

      -
      public final Function getCos()
      -
      -
      Returns:
      -
      A cosine Function based on: Math.cos(x).
      +
    + + + +
      +
    • +

      getCos

      +
      public final Function getCos()
      +
      +
      Returns:
      +
      A cosine Function based on: Math.cos(x).
      -
    • -
    • -
      -

      cos

      -
      public final Function cos()
      -
      -
      Returns:
      -
      A cosine Function based on: Math.cos(x).
      +
    + + + +
      +
    • +

      cos

      +
      public final Function cos()
      +
      +
      Returns:
      +
      A cosine Function based on: Math.cos(x).
      -
    • -
    • -
      -

      getSoftplus

      -
      public final Function getSoftplus()
      +
    + + + +
      +
    • +

      getSoftplus

      +
      public final Function getSoftplus()
      SoftPlus is a smooth approximation to the ReLU function and can be used to constrain the output of a machine to always be positive.
      -
      -
      Returns:
      -
      A softplus Function based on: Math.log( 1 + Math.exp( x ) ).
      +
      +
      Returns:
      +
      A softplus Function based on: Math.log( 1 + Math.exp( x ) ).
      -
    • -
    • -
      -

      softplus

      -
      public final Function softplus()
      +
    + + + +
      +
    • +

      softplus

      +
      public final Function softplus()
      SoftPlus is a smooth approximation to the ReLU function and can be used to constrain the output of a machine to always be positive.
      -
      -
      Returns:
      -
      A softplus Function based on: Math.log( 1 + Math.exp( x ) ).
      +
      +
      Returns:
      +
      A softplus Function based on: Math.log( 1 + Math.exp( x ) ).
      -
    • -
    • -
      -

      getSilu

      -
      public final Function getSilu()
      +
    + + + +
      +
    • +

      getSilu

      +
      public final Function getSilu()
      The SiLu activation function, also known as the swish function, is defined as x * sigmoid(x). It is a smooth, non-monotonic function that consistently matches or outperforms ReLU on deep networks; it is unbounded above and bounded below.
      -
      -
      Returns:
      -
      A SiLu Function (also known as swish) based on: x / ( 1 + Math.exp( -x ) ).
      +
      +
      Returns:
      +
      A SiLu Function (also known as swish) based on: x / ( 1 + Math.exp( -x ) ).
      -
    • -
    • -
      -

      silu

      -
      public final Function silu()
      +
    + + + +
      +
    • +

      silu

      +
      public final Function silu()
      The SiLu activation function, also known as the swish function, is defined as x * sigmoid(x). It is a smooth, non-monotonic function that consistently matches or outperforms ReLU on deep networks; it is unbounded above and bounded below.
      -
      -
      Returns:
      -
      A SiLu Function (also known as swish) based on: x / ( 1 + Math.exp( -x ) ).
      +
      +
      Returns:
      +
      A SiLu Function (also known as swish) based on: x / ( 1 + Math.exp( -x ) ).
      -
    • -
    • -
      -

      getGelu

      -
      public final Function getGelu()
      -
      -
      Returns:
      -
      A GeLU Function based on: x / ( 1 + Math.exp( -x * 1.702 ) ).
      +
    + + + +
      +
    • +

      getGelu

      +
      public final Function getGelu()
      +
      +
      Returns:
      +
      A GeLU Function based on: x / ( 1 + Math.exp( -x * 1.702 ) ).
      -
    • -
    • -
      -

      gelu

      -
      public final Function gelu()
      -
      -
      Returns:
      -
      A GeLU Function based on: x / ( 1 + Math.exp( -x * 1.702 ) ).
      +
    + + + +
      +
    • +

      gelu

      +
      public final Function gelu()
      +
      +
      Returns:
      +
      A GeLU Function based on: x / ( 1 + Math.exp( -x * 1.702 ) ).
      -
    • -
    • -
      -

      getSelu

      -
      public final Function getSelu()
      +
    + + + +
      +
    • +

      getSelu

      +
      public final Function getSelu()
      The Scaled Exponential Linear Unit, or SELU, is an activation function that induces self-normalizing properties. The SELU activation function is implemented as: if ( x > 0 ) return SCALE * x; else if ( x <= 0 ) return SCALE * ALPHA * (Math.exp(x) - 1); else return Float.NaN;
      ...where ALPHA == 1.6733 and SCALE == 1.0507.
      -
      -
      Returns:
      -
      A SeLU Function.
      +
      +
      Returns:
      +
      A SeLU Function.
      -
    • -
    • -
      -

      selu

      -
      public final Function selu()
      +
    + + + +
      +
    • +

      selu

      +
      public final Function selu()
      The Scaled Exponential Linear Unit, or SELU, is an activation function that induces self-normalizing properties. The SELU activation function is implemented as: if ( x > 0 ) return SCALE * x; else if ( x <= 0 ) return SCALE * ALPHA * Math.exp(x); else return Double.NaN;
      ...where ALPHA == 1.6733 and SCALE == 1.0507.
      -
      -
      Returns:
      -
      A SeLU Function.
      +
      +
      Returns:
      +
      A SeLU Function.
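      The documented SELU branches can be reproduced in plain Java for comparison; this is only an illustrative sketch of the formula above, not neureka code:

          double ALPHA = 1.6733, SCALE = 1.0507;
          double x = -0.5;
          double selu = x > 0
              ? SCALE * x
              : SCALE * ALPHA * ( Math.exp( x ) - 1 );  // follows the getSelu() variant of the formula
          // calling selu().call(x) on a Functions instance is expected to yield approximately the same value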
      -
    • -
    • -
      -

      getMin

      -
      public final Function getMin()
      -
      +
    + + + +
      +
    • +

      getMin

      +
      public final Function getMin()
    • -
    • -
      -

      min

      -
      public final Function min()
      -
      +
    + + + +
      +
    • +

      min

      +
      public final Function min()
    • -
    • -
      -

      getMax

      -
      public final Function getMax()
      -
      +
    + + + +
      +
    • +

      getMax

      +
      public final Function getMax()
    • -
    • -
      -

      max

      -
      public final Function max()
      -
      +
    + + + +
      +
    • +

      max

      +
      public final Function max()
    • -
    • -
      -

      getSum

      -
      public final Function getSum()
      -
      +
    + + + +
      +
    • +

      getSum

      +
      public final Function getSum()
    • -
    • -
      -

      sum

      -
      public final Function sum()
      -
      +
    + + + +
      +
    • +

      sum

      +
      public final Function sum()
    • -
    • -
      -

      getExp

      -
      public final Function getExp()
      -
      +
    + + + +
      +
    • +

      getExp

      +
      public final Function getExp()
    • -
    • -
      -

      exp

      -
      public final Function exp()
      -
      +
    + + + +
      +
    • +

      exp

      +
      public final Function exp()
    • -
    • -
      -

      getLog10

      -
      public final Function getLog10()
      -
      +
    + + + +
      +
    • +

      getLog10

      +
      public final Function getLog10()
    • -
    • -
      -

      log10

      -
      public final Function log10()
      -
      +
    + + + +
      +
    • +

      log10

      +
      public final Function log10()
    • -
    • -
      -

      getSqrt

      -
      public final Function getSqrt()
      -
      +
    + + + +
      +
    • +

      getSqrt

      +
      public final Function getSqrt()
    • -
    • -
      -

      sqrt

      -
      public final Function sqrt()
      -
      +
    + + + +
      +
    • +

      sqrt

      +
      public final Function sqrt()
    • -
    • -
      -

      getCbrt

      -
      public final Function getCbrt()
      -
      +
    + + + +
      +
    • +

      getCbrt

      +
      public final Function getCbrt()
    • -
    • -
      -

      cbrt

      -
      public final Function cbrt()
      -
      +
    + + + +
      +
    • +

      cbrt

      +
      public final Function cbrt()
    • -
    • -
      -

      getConcat

      -
      public final Function getConcat()
      -
      -
      Returns:
      -
      The "concat" Function that concatenates two Tensors alongside a specific axis.
      +
    + + + +
      +
    • +

      getConcat

      +
      public final Function getConcat()
      +
      +
      Returns:
      +
      The "concat" Function that concatenates two Tensors alongside a specific axis.
      -
    • -
    • -
      -

      concat

      -
      public final Function concat()
      -
      -
      Returns:
      -
      The "concat" Function that concatenates two Tensors alongside a specific axis.
      +
    + + + +
      +
    • +

      concat

      +
      public final Function concat()
      +
      +
      Returns:
      +
      The "concat" Function that concatenates two Tensors alongside a specific axis.
      -
    • -
    • -
      -

      toString

      -
      public final String toString()
      -
      -
      Overrides:
      -
      toString in class Object
      +
    + + + +
      +
    • +

      toString

      +
      public final java.lang.String toString()
      +
      +
      Overrides:
      +
      toString in class java.lang.Object
      -
    -
  • - + + +
    +
diff --git a/docs/jdocs/neureka/math/args/Arg.Axis.html b/docs/jdocs/neureka/math/args/Arg.Axis.html
index 1f50be05f..d62035c83 100644
--- a/docs/jdocs/neureka/math/args/Arg.Axis.html
+++ b/docs/jdocs/neureka/math/args/Arg.Axis.html
@@ -1,156 +1,276 @@
[Class Arg.Axis page regenerated for the 1.0.1 API: the title changes from "Arg.Axis (neureka 1.0.0 API)" to "Arg.Axis (neureka 1.0.1 API)", the hierarchy reads java.lang.Object > neureka.math.args.Arg<java.lang.Integer> > neureka.math.args.Arg.Axis, the "Method Details" heading becomes "Method Detail", and the only documented member remains public static Arg.Axis of(int index).]
diff --git a/docs/jdocs/neureka/math/args/Arg.DerivIdx.html b/docs/jdocs/neureka/math/args/Arg.DerivIdx.html
index 1a87f0830..4d059bc7f 100644
--- a/docs/jdocs/neureka/math/args/Arg.DerivIdx.html
+++ b/docs/jdocs/neureka/math/args/Arg.DerivIdx.html
@@ -1,93 +1,127 @@
Class Arg.DerivIdx
[Page regenerated for the 1.0.1 API. Hierarchy: java.lang.Object > neureka.math.args.Arg<java.lang.Integer> > neureka.math.args.Arg.DerivIdx. All Implemented Interfaces: Component<Args>. Enclosing class: Arg<T>.]
public static class Arg.DerivIdx
extends Arg<java.lang.Integer>
This is an important argument whose role might not be clear at first: an operation can have multiple inputs, but a derivative is always taken with respect to just one of them, and the "derivative index" targets said input. This property is -1 when no derivative should be calculated, and 0, 1, 2, ... when targeting the corresponding input for derivation.
[Method detail section regenerated; no methods are documented for Arg.DerivIdx.]
diff --git a/docs/jdocs/neureka/math/args/Arg.Derivative.html b/docs/jdocs/neureka/math/args/Arg.Derivative.html
index 81909c170..c101eadcc 100644
--- a/docs/jdocs/neureka/math/args/Arg.Derivative.html
+++ b/docs/jdocs/neureka/math/args/Arg.Derivative.html
@@ -1,156 +1,276 @@
[The remaining Arg subclass pages are regenerated for the 1.0.1 API in the same way (title bump from 1.0.0 to 1.0.1, fully qualified java.lang types, "Method Details" renamed "Method Detail"); their documented content is otherwise unchanged:
- Arg.Derivative<V> extends Arg<Tensor<V>>
- Arg.Ends extends Arg<int[]>, factory: public static Arg.Ends of(int[] arg)
- Arg.Indices extends Arg<int[]>
- Arg.Layout extends Arg<NDConfiguration.Layout>
- Arg.MinRank extends Arg<java.lang.Integer>
- Arg.Offset extends Arg<int[]>, factory: public static Arg.Offset of(int... arg)
- Arg.Seed extends Arg<java.lang.Long>, factories: of(java.lang.String arg) and of(long arg)
- Arg.Shape extends Arg<int[]>, factory: public static Arg.Shape of(int... arg)
- Arg.Stride extends Arg<int[]>, factory: public static Arg.Stride of(int... arg)
- Arg.TargetDevice extends Arg<Device<?>>
(docs/jdocs/neureka/math/args/Arg.Derivative.html, Arg.Ends.html, Arg.Indices.html, Arg.Layout.html, Arg.MinRank.html, Arg.Offset.html, Arg.Seed.html, Arg.Shape.html, Arg.Stride.html, Arg.TargetDevice.html)]
diff --git a/docs/jdocs/neureka/math/args/Arg.VarIdx.html b/docs/jdocs/neureka/math/args/Arg.VarIdx.html
index 74c8a498a..be7aabaa0 100644
--- a/docs/jdocs/neureka/math/args/Arg.VarIdx.html
+++ b/docs/jdocs/neureka/math/args/Arg.VarIdx.html
@@ -1,160 +1,280 @@
Class Arg.VarIdx
[Page regenerated for the 1.0.1 API. Hierarchy: java.lang.Object > neureka.math.args.Arg<java.lang.Integer> > neureka.math.args.Arg.VarIdx. All Implemented Interfaces: Component<Args>. Enclosing class: Arg<T>.]
public static class Arg.VarIdx
extends Arg<java.lang.Integer>
The following argument is relevant for a particular type of operation, namely an "indexer". An indexer automatically applies an operation to all inputs of a given function: the (indexer) function executes its sub-functions (of the AST) once for every input index. If no particular index is targeted, this variable simply defaults to -1.
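A hedged sketch of an "indexer" built from the "I[j]" syntax quoted in these docs; the reading that an index of -1 means "no particular index targeted, apply the operation across all inputs" follows the description above and is an assumption, not verified output:

    import neureka.math.Function; // package assumed from the docs/jdocs/neureka/math/... paths

    class VarIdxSketch {
        public static void main(String[] args) {
            // "I[j]" makes this an indexer: the sum is applied over every input index.
            Function sum = Function.of("sum( I[j] * 2 )", false);
            double[] in = { 1.0, 2.0, 3.0 };
            double result = sum.call(in, -1); // -1: no particular index targeted (see above)
            System.out.println(result);       // expected 12.0 under that reading
        }
    }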
[Method detail section regenerated; no methods are documented for Arg.VarIdx.]
diff --git a/docs/jdocs/neureka/math/args/Arg.html b/docs/jdocs/neureka/math/args/Arg.html
index 8320848aa..127bdbd43 100644
--- a/docs/jdocs/neureka/math/args/Arg.html
+++ b/docs/jdocs/neureka/math/args/Arg.html
@@ -1,243 +1,321 @@
Class Arg<T>
[Page regenerated for the 1.0.1 API. Hierarchy: java.lang.Object > neureka.math.args.Arg<T>. Constructor: public Arg(T arg). Methods:
- public T get()
- public boolean update(Component.OwnerChangeRequest<Args> changeRequest) - description copied from interface Component: components are not the slaves of their owners; if the owner registers any state change related to a given component, the component is informed about it, for example when it is being added to, or removed from, its current owner. If components hold references to their owners, this method gives them the ability to update said reference when a new owner takes over the components of an old one. The Component.OwnerChangeRequest instance passed to this method informs the component about the current state change and its type (Component.OwnerChangeRequest.type()). If this method returns false, the component rejects the proposed update and the owner aborts the change.
  Specified by: update in interface Component<Args>. Parameters: changeRequest - a Component.OwnerChangeRequest implementation used to communicate the type of change, context information and the ability to execute the change directly. Returns: the truth value determining if the state change should be aborted or not.]
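A small sketch of the plain value-wrapper behaviour of Arg<T>, combining the Arg.Axis.of(int) factory shown earlier with the get() accessor listed above; purely illustrative:

    import neureka.math.args.Arg;

    class ArgSketch {
        public static void main(String[] args) {
            Arg.Axis axis = Arg.Axis.of(2); // an Arg<java.lang.Integer> wrapping the axis index
            int value = axis.get();         // get() returns the wrapped value -> 2
            System.out.println(value);
        }
    }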
[toString() entry regenerated: public java.lang.String toString(), overrides toString in class java.lang.Object.]
diff --git a/docs/jdocs/neureka/math/args/Args.html b/docs/jdocs/neureka/math/args/Args.html
index ea7dbdd2c..5e803fa04 100644
--- a/docs/jdocs/neureka/math/args/Args.html
+++ b/docs/jdocs/neureka/math/args/Args.html
@@ -1,205 +1,276 @@
Class Args
[Page regenerated for the 1.0.1 API. Hierarchy: java.lang.Object > ... > neureka.math.args.Args. Constructor: public Args(). Methods:
- public static Args of(Arg<?>... arguments)
- public <V, T extends Arg<V>> V valOf(java.lang.Class<T> argumentClass)
- public <V, T extends Arg<V>> V valOfOr(java.lang.Class<T> argumentClass, V fallback)
- protected <T extends Component<Args>> T _setOrReject(T newComponent) - description copied from class AbstractComponentOwner: this abstract method ought to be implemented further down the inheritance hierarchy, where its responsibility makes more sense, namely deciding whether the passed component should be added to the component collection of this class or be "rejected"; rejection simply means that null is returned instead of the passed component. Specified by: _setOrReject in class AbstractComponentOwner<Args>. Parameters: newComponent - the component which should be added to the components list. Returns: the same component, or null if it has been rejected.
- protected <T extends Component<Args>> T _removeOrReject(T newComponent) - description copied from class AbstractComponentOwner: an implementation of this method checks if the passed component should be removed from the component collection of this class or its removal should be "rejected"; rejection simply means that null is returned instead of the passed component. Specified by: _removeOrReject in class AbstractComponentOwner<Args>. Parameters: newComponent - the component which should be removed from the components list. Returns: the same component, or null if its removal has been rejected.]
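A hedged sketch combining the factories and lookups listed above (Args.of(Arg<?>...), valOf(Class), valOfOr(Class, V)) with the Arg.Axis.of(int) and Arg.Seed.of(long) factories from the earlier pages; the chosen values are illustrative assumptions:

    import neureka.math.args.Arg;
    import neureka.math.args.Args;

    class ArgsSketch {
        public static void main(String[] args) {
            Args meta = Args.of( Arg.Axis.of(1), Arg.Seed.of(42L) );
            Integer axis = meta.valOf( Arg.Axis.class );  // -> 1
            Long    seed = meta.valOf( Arg.Seed.class );  // -> 42
            // No Arg.Shape was supplied, so the fallback is returned:
            int[] shape = meta.valOfOr( Arg.Shape.class, new int[]{ 2, 2 } );
            System.out.println(axis + " " + seed + " " + shape.length);
        }
    }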
      -
  • - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/math/args/package-frame.html b/docs/jdocs/neureka/math/args/package-frame.html new file mode 100644 index 000000000..f9ff2d945 --- /dev/null +++ b/docs/jdocs/neureka/math/args/package-frame.html @@ -0,0 +1,33 @@ + + + + + +neureka.math.args (neureka 1.0.1 API) + + + + +

neureka.math.args
[New file docs/jdocs/neureka/math/args/package-frame.html: a frame page listing the classes of package neureka.math.args for the 1.0.1 API.]
diff --git a/docs/jdocs/neureka/math/args/package-summary.html b/docs/jdocs/neureka/math/args/package-summary.html
index edacca943..6f80b7837 100644
[Package neureka.math.args summary page regenerated for the 1.0.1 API; no content changes beyond the version bump and layout.]
diff --git a/docs/jdocs/neureka/math/args/package-tree.html b/docs/jdocs/neureka/math/args/package-tree.html
index d799a8f2f..fa47a874b 100644
[Hierarchy For Package neureka.math.args regenerated for the 1.0.1 API; no content changes beyond the version bump and layout.]
    + + + + diff --git a/docs/jdocs/neureka/math/implementations/FunctionConstant.html b/docs/jdocs/neureka/math/implementations/FunctionConstant.html index 33692557b..6b634001c 100644 --- a/docs/jdocs/neureka/math/implementations/FunctionConstant.html +++ b/docs/jdocs/neureka/math/implementations/FunctionConstant.html @@ -1,428 +1,592 @@ - + + - -FunctionConstant (neureka 1.0.0 API) - - - - + +FunctionConstant (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class FunctionConstant

    -
    -
    java.lang.Object -
    neureka.math.implementations.FunctionConstant
    +
    neureka.math.implementations
    +

    Class FunctionConstant

    -
    -
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.math.implementations.FunctionConstant
      • +
      +
    • +
    +
    +
      +
    • +
      All Implemented Interfaces:
      -
      Function
      +
      Function

      -
      public final class FunctionConstant -extends Object -implements Function
      -
      Instances of this implementation of the Function interface +
      +
      public final class FunctionConstant
      +extends java.lang.Object
      +implements Function
      +
Instances of this implementation of the Function interface are leaf nodes within the abstract syntax tree of a function, representing constant numeric values of a function. When parsing an expression into a function, these constants are recognized as a series of digit characters, optionally separated by '.' to represent decimal digits.
So for example, when creating a function by calling the following factory method...

Function.of(java.lang.String)( "I[1] + (4 * I[0]) / 2.1" )

...the substrings "4" and "2.1" will be parsed into instances of this class!
    -
    -
      + +
    +
    +
    +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        FunctionConstant

        -
        public FunctionConstant(String expression)
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            FunctionConstant

            +
            public FunctionConstant(java.lang.String expression)
          -
    • +
    -
  • -
    -

    Method Details

    -
      -
    • -
      -

      value

      -
      public double value()
      -
      +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          value

          +
          public double value()
        • -
        • -
          -

          isFlat

          -
          public boolean isFlat()
          -
          -
          Specified by:
          -
          isFlat in interface Function
          -
          Returns:
          -
          The truth value determining if the sub-functions of this Function do not themselves reference Functions.
          +
        + + + +
          +
        • +

          isFlat

          +
          public boolean isFlat()
          +
          +
          Specified by:
          +
          isFlat in interface Function
          +
          Returns:
          +
          The truth value determining if the sub-functions of this Function do not themselves reference Functions.
          -
  • -
  • -
    -

    isDoingAD

    -
    public boolean isDoingAD()
    -
    Description copied from interface: Function
    -
    Only branch Functions can do autograd / 'Auto-Differentiation', meaning functions - whose Function.isFlat() flag is set to false!
    -
    -
    Specified by:
    -
    isDoingAD in interface Function
    -
    Returns:
    -
    The truth value determining if this Function can perform autograd/auto-differentiation on the input tensors it receives.
    + + + + +
      +
    • +

      isDoingAD

      +
      public boolean isDoingAD()
      +
      Description copied from interface: Function
      +
      Only branch Functions can do autograd / 'Auto-Differentiation', meaning functions + whose Function.isFlat() flag is set to false!
      +
      +
      Specified by:
      +
      isDoingAD in interface Function
      +
      Returns:
      +
      The truth value determining if this Function can perform autograd/auto-differentiation on the input tensors it receives.
      -
  • -
  • -
    -

    getOperation

    -
    public AbstractOperation getOperation()
    -
    -
    Specified by:
    -
    getOperation in interface Function
    -
    Returns:
    -
    The Operation implementation instance responsible for executing any inputs received by this Function or null if this Function.isFlat().
    + + + + +
  • -
  • -
    -

    dependsOn

    -
    public boolean dependsOn(int index)
    -
    Description copied from interface: Function
    + + + + +
      +
    • +

      dependsOn

      +
      public boolean dependsOn(int index)
      +
      Description copied from interface: Function
      Use this to determine if this function directly or indirectly references an input with the provided index.
      -
      -
      Specified by:
      -
      dependsOn in interface Function
      -
      Parameters:
      -
      index - The index which ought to match the input index of a potentially referenced FunctionInput.
      -
      Returns:
      -
      The truth value determining if this Function (or any sub-functions) reference a FunctionInput with the provided index.
      +
      +
      Specified by:
      +
      dependsOn in interface Function
      +
      Parameters:
      +
      index - The index which ought to match the input index of a potentially referenced FunctionInput.
      +
      Returns:
      +
      The truth value determining if this Function (or any sub-functions) reference a FunctionInput with the provided index.
      -
  • -
  • -
    -

    getDerivative

    -
    public Function getDerivative(int index)
    -
    Description copied from interface: Function
    -
    This method builds a new Function which is the derivative of this Function with respect to the provided input index.
    -
    -
    Specified by:
    -
    getDerivative in interface Function
    -
    Parameters:
    + + + + +
      +
    • +

      getDerivative

      +
      public Function getDerivative(int index)
      +
      Description copied from interface: Function
      +
      This method builds a new Function which is the derivative of this Function with respect to the provided input index.
      +
      +
      Specified by:
      +
      getDerivative in interface Function
      +
      Parameters:
      index - The index of the input which ought to serve as the variable which ought to be derived.
      -
      Returns:
      -
      The derivative of this Function.
      +
      Returns:
      +
      The derivative of this Function.
      -
  • -
  • -
    -

    getSubFunctions

    -
    public List<Function> getSubFunctions()
    -
    -
    Specified by:
    -
    getSubFunctions in interface Function
    -
    Returns:
    -
    The referenced child Function nodes of this Function AST node.
    + + + + +
  • -
  • -
    -

    call

    -
    public double call(double[] inputs, - int j)
    -
    Description copied from interface: Function
    -
    Invokes this Function with the provided array of inputs ad an index for input dependent indexing. - This method is functionally equivalent to Function.invoke(double[], int).
    -
    -
    Specified by:
    -
    call in interface Function
    -
    Parameters:
    + + + + +
      +
    • +

      call

      +
      public double call(double[] inputs,
      +                   int j)
      +
      Description copied from interface: Function
      +
Invokes this Function with the provided array of inputs and an index for input-dependent indexing. This method is functionally equivalent to Function.invoke(double[], int).
      +
      +
      Specified by:
      +
      call in interface Function
      +
      Parameters:
      inputs - The array of inputs.
      j - The index for input dependent indexing.
      -
      Returns:
      +
      Returns:
      The scalar result, a single double value.
      -
  • -
  • -
    -

    derive

    -
    public double derive(double[] inputs, - int index)
    -
    Description copied from interface: Function
    -
    Calculates the derivative of a particular input with respect to the output of this Function + + + + +
      +
    • +

      derive

      +
      public double derive(double[] inputs,
      +                     int index)
      +
      Description copied from interface: Function
      +
      Calculates the derivative of a particular input with respect to the output of this Function based on the provided array of inputs and an index targeting the input to be derived.
      -
      -
      Specified by:
      -
      derive in interface Function
      -
      Parameters:
      +
      +
      Specified by:
      +
      derive in interface Function
      +
      Parameters:
      inputs - The double array of inputs.
      index - The index of the input to be derived.
      -
      Returns:
      +
      Returns:
      The scalar double result, a single double value.
      -
  • -
  • -
    -

    derive

    -
    public double derive(double[] inputs, - int index, - int j)
    -
    Description copied from interface: Function
    -
    Calculates the derivative of a particular input with respect to the output of this Function + + + + +
      +
    • +

      derive

      +
      public double derive(double[] inputs,
      +                     int index,
      +                     int j)
      +
      Description copied from interface: Function
      +
      Calculates the derivative of a particular input with respect to the output of this Function based on the provided array of inputs, an index targeting the input to be derived and an index for input dependent indexing.
      -
      -
      Specified by:
      -
      derive in interface Function
      -
      Parameters:
      +
      +
      Specified by:
      +
      derive in interface Function
      +
      Parameters:
      inputs - The double array of inputs.
      index - The index of the input to be derived.
      j - The index for input dependent indexing.
      -
      Returns:
      +
      Returns:
      The scalar double result, a single double value.
      -
  • -
  • -
    -

    execute

    -
    public Tensor<?> execute(Args arguments, - Tensor<?>... inputs)
    -
    Description copied from interface: Function
    + + + + +
      +
    • +

      execute

      +
      public Tensor<?> execute(Args arguments,
      +                         Tensor<?>... inputs)
      +
      Description copied from interface: Function
      Warning: Tensors returned by this method are eligible for deletion when consumed by other functions.
      - Use this to call this Function alongside with some additional meta-arguments - which will be passed to the underlying Operation(s).
      -
      -
      Specified by:
      -
      execute in interface Function
      -
      Parameters:
      + Use this to call this Function alongside with some additional meta-arguments + which will be passed to the underlying Operation(s).
  • +
    +
    Specified by:
    +
    execute in interface Function
    +
    Parameters:
    arguments - A set of arguments you want to supply to this function for further control over the execution.
    inputs - The tensors which should be sent through this function.
    -
    Returns:
    +
    Returns:
    The resulting tensor produced by this function.
    - -
  • -
    -

    toString

    -
    public String toString()
    -
    Description copied from interface: Function
    + + + + +
      +
    • +

      toString

      +
      public java.lang.String toString()
      +
      Description copied from interface: Function
      Turns this function into a string representation which can be used to reconstruct this function or combine it with other function strings to parse entirely new functions...
      -
      -
      Specified by:
      -
      toString in interface Function
      -
      Overrides:
      -
      toString in class Object
      -
      Returns:
      +
      +
      Specified by:
      +
      toString in interface Function
      +
      Overrides:
      +
      toString in class java.lang.Object
      +
      Returns:
      The string representation of this function.
      -
  • - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/math/implementations/FunctionInput.html b/docs/jdocs/neureka/math/implementations/FunctionInput.html index 44d6bacc8..d5ed6a7e8 100644 --- a/docs/jdocs/neureka/math/implementations/FunctionInput.html +++ b/docs/jdocs/neureka/math/implementations/FunctionInput.html @@ -1,442 +1,613 @@ - + + - -FunctionInput (neureka 1.0.0 API) - - - - + +FunctionInput (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class FunctionInput

    -
    -
    java.lang.Object -
    neureka.math.implementations.FunctionInput
    +
    neureka.math.implementations
    +

    Class FunctionInput

    -
    -
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.math.implementations.FunctionInput
      • +
      +
    • +
    +
    +
      +
    • +
      All Implemented Interfaces:
      -
      Function
      +
      Function

      -
      public class FunctionInput -extends Object -implements Function
      -
      Instances of this implementation of the Function interface +
      +
      public class FunctionInput
      +extends java.lang.Object
      +implements Function
      +
Instances of this implementation of the Function interface are leaf nodes within the abstract syntax tree of a function, representing inputs to a function. When parsing an expression into a function, these inputs are recognized by the character 'i' or 'I', followed by a whole number starting at zero (optionally wrapped by '[' and ']'), which is the index of the argument within the list/array of arguments passed to a concrete Function instance.
So for example, when creating a function by calling the following factory method...

Function.of(java.lang.String)( "I[1] + (4 * I[0]) / 2" )

...the substrings "I[1]" and "I[0]" will be parsed into instances of this class!
When calling this function by passing two arguments, let's say (first, second), the FunctionInput "I[0]" will pick the first argument, whereas "I[1]" will pick the second argument when evaluating the array of arguments.
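A minimal sketch of the argument picking described above, again with illustrative numbers and the index argument of call(double[], int) set to 0 since no "I[j]" indexing is involved:

    import neureka.math.Function; // package assumed from the docs/jdocs/neureka/math/... paths

    class InputSketch {
        public static void main(String[] args) {
            Function f = Function.of("I[1] + (4 * I[0]) / 2", true);
            // "I[0]" picks the first argument (3.0), "I[1]" the second (1.0).
            double y = f.call(new double[]{ 3.0, 1.0 }, 0);
            System.out.println(y); // expected 1 + (4 * 3) / 2 = 7.0
        }
    }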
      -
    -
    -
      + +
    +
    +
    +
    +
    +
      +
    • -
    • -
      -

      Method Details

      -
        -
      • -
        -

        of

        -
        public static Function of(String equation, - boolean doAD)
        -
        Description copied from interface: Function
        -
        This static factory method will return Function instances - based on a provided mathematical String expression describing the function +
          +
        • + + +

          Method Detail

          + + + +
            +
          • +

            of

            +
            public static Function of(java.lang.String equation,
            +                          boolean doAD)
            +
            Description copied from interface: Function
            +
            This static factory method will return Function instances + based on a provided mathematical String expression describing the function using 'I[0]', 'I[1]', 'I[2]'... as input variables or 'I[j]' to enable input dependent indexing - like for example "sum( I[j] / 2 )" as well as a flag determining if the resulting Function + like for example "sum( I[j] / 2 )" as well as a flag determining if the resulting Function ought to be able to perform autograd or not.
            -
            -
            Parameters:
            +
            +
            Specified by:
            +
            of in interface Function
            +
            Parameters:
            equation - The right part of a function equation where inputs are denoted by 'I[0]', 'I[1]', 'I[2]'...
            -
            doAD - A flag determining if the produced Function should be able to perform autograd (aka. auto-differentiation)
            -
            Returns:
            -
            A Function instance created based on the provided String, ready to receive inputs and execute on them.
            +
            doAD - A flag determining if the produced Function should be able to perform autograd (aka. auto-differentiation)
            +
            Returns:
            +
            A Function instance created based on the provided String, ready to receive inputs and execute on them.
            -
      • -
      • -
        -

        index

        -
        public int index()
        -
        +
      + + + +
        +
      • +

        index

        +
        public int index()
      • -
      • -
        -

        providesGradient

        -
        public boolean providesGradient()
        -
        +
      + + + +
        +
      • +

        providesGradient

        +
        public boolean providesGradient()
        +
        +
        Returns:
        +
        The answer of the question : "does this variable/input node return a gradient?"
        +
      • -
      • -
        -

        isFlat

        -
        public boolean isFlat()
        -
        -
        Specified by:
        -
        isFlat in interface Function
        -
        Returns:
        -
        The truth value determining if the sub-functions of this Function do not themselves reference Functions.
        +
      + + + +
        +
      • +

        isFlat

        +
        public boolean isFlat()
        +
        +
        Specified by:
        +
        isFlat in interface Function
        +
        Returns:
        +
        The truth value determining if the sub-functions of this Function do not themselves reference Functions.
        -
    • -
    • -
      -

      isDoingAD

      -
      public boolean isDoingAD()
      -
      Description copied from interface: Function
      -
      Only branch Functions can do autograd / 'Auto-Differentiation', meaning functions - whose Function.isFlat() flag is set to false!
      -
      -
      Specified by:
      -
      isDoingAD in interface Function
      -
      Returns:
      -
      The truth value determining if this Function can perform autograd/auto-differentiation on the input tensors it receives.
      +
    + + + +
      +
    • +

      isDoingAD

      +
      public boolean isDoingAD()
      +
      Description copied from interface: Function
      +
      Only branch Functions can do autograd / 'Auto-Differentiation', meaning functions + whose Function.isFlat() flag is set to false!
      +
      +
      Specified by:
      +
      isDoingAD in interface Function
      +
      Returns:
      +
      The truth value determining if this Function can perform autograd/auto-differentiation on the input tensors it receives.
      -
    • -
    • -
      -

      getOperation

      -
      public AbstractOperation getOperation()
      -
      -
      Specified by:
      -
      getOperation in interface Function
      -
      Returns:
      -
      The Operation implementation instance responsible for executing any inputs received by this Function or null if this Function.isFlat().
      +
    + + + + + + + +
      +
    • +

      dependsOn

      +
      public boolean dependsOn(int index)
      +
      Description copied from interface: Function
      Use this to determine if this function directly or indirectly references an input with the provided index.
      -
      -
      Specified by:
      -
      dependsOn in interface Function
      -
      Parameters:
      -
      index - The index which ought to match the input index of a potentially referenced FunctionInput.
      -
      Returns:
      -
      The truth value determining if this Function (or any sub-functions) reference a FunctionInput with the provided index.
      +
      +
      Specified by:
      +
      dependsOn in interface Function
      +
      Parameters:
      +
      index - The index which ought to match the input index of a potentially referenced FunctionInput.
      +
      Returns:
      +
      The truth value determining if this Function (or any sub-functions) reference a FunctionInput with the provided index.
      -
    • -
    • -
      -

      getDerivative

      -
      public Function getDerivative(int index)
      -
      Description copied from interface: Function
      -
      This method builds a new Function which is the derivative of this Function with respect to the provided input index.
      -
      -
      Specified by:
      -
      getDerivative in interface Function
      -
      Parameters:
      +
    + + + +
      +
    • +

      getDerivative

      +
      public Function getDerivative(int index)
      +
      Description copied from interface: Function
      +
      This method builds a new Function which is the derivative of this Function with respect to the provided input index.
      +
      +
      Specified by:
      +
      getDerivative in interface Function
      +
      Parameters:
      index - The index of the input which ought to serve as the variable which ought to be derived.
      -
      Returns:
      -
      The derivative of this Function.
      +
      Returns:
      +
      The derivative of this Function.
      -
    • -
    • -
      -

      getSubFunctions

      -
      public List<Function> getSubFunctions()
      -
      -
      Specified by:
      -
      getSubFunctions in interface Function
      -
      Returns:
      -
      The referenced child Function nodes of this Function AST node.
      +
    + + + +
      +
    • +

      getSubFunctions

      +
      public java.util.List<Function> getSubFunctions()
      +
      +
      Specified by:
      +
      getSubFunctions in interface Function
      +
      Returns:
      +
      The referenced child Function nodes of this Function AST node.
      -
    • -
    • -
      -

      call

      -
      public double call(double[] inputs, - int j)
      -
      Description copied from interface: Function
      -
      Invokes this Function with the provided array of inputs ad an index for input dependent indexing. - This method is functionally equivalent to Function.invoke(double[], int).
      -
      -
      Specified by:
      -
      call in interface Function
      -
      Parameters:
      +
    + + + +
      +
    • +

      call

      +
      public double call(double[] inputs,
      +                   int j)
      +
      Description copied from interface: Function
      +
Invokes this Function with the provided array of inputs and an index for input-dependent indexing. This method is functionally equivalent to Function.invoke(double[], int).
      +
      +
      Specified by:
      +
      call in interface Function
      +
      Parameters:
      inputs - The array of inputs.
      j - The index for input dependent indexing.
      -
      Returns:
      +
      Returns:
      The scalar result, a single double value.
      -
    • -
    • -
      -

      derive

      -
      public double derive(double[] inputs, - int index)
      -
      Description copied from interface: Function
      -
      Calculates the derivative of a particular input with respect to the output of this Function +
    + + + +
      +
    • +

      derive

      +
      public double derive(double[] inputs,
      +                     int index)
      +
      Description copied from interface: Function
      +
      Calculates the derivative of a particular input with respect to the output of this Function based on the provided array of inputs and an index targeting the input to be derived.
      -
      -
      Specified by:
      -
      derive in interface Function
      -
      Parameters:
      +
      +
      Specified by:
      +
      derive in interface Function
      +
      Parameters:
      inputs - The double array of inputs.
      index - The index of the input to be derived.
      -
      Returns:
      +
      Returns:
      The scalar double result, a single double value.
      -
    • -
    • -
      -

      derive

      -
      public double derive(double[] inputs, - int index, - int j)
      -
      Description copied from interface: Function
      -
      Calculates the derivative of a particular input with respect to the output of this Function +
    + + + +
      +
    • +

      derive

      +
      public double derive(double[] inputs,
      +                     int index,
      +                     int j)
      +
      Description copied from interface: Function
      +
      Calculates the derivative of a particular input with respect to the output of this Function based on the provided array of inputs, an index targeting the input to be derived and an index for input dependent indexing.
      -
      -
      Specified by:
      -
      derive in interface Function
      -
      Parameters:
      +
      +
      Specified by:
      +
      derive in interface Function
      +
      Parameters:
      inputs - The double array of inputs.
      index - The index of the input to be derived.
      j - The index for input dependent indexing.
      -
      Returns:
      +
      Returns:
      The scalar double result, a single double value.
      -
    • -
    • -
      -

      execute

      -
      public Tensor<?> execute(Args arguments, - Tensor<?>... inputs)
      -
      Description copied from interface: Function
      +
    + + + +
      +
    • +

      execute

      +
      public Tensor<?> execute(Args arguments,
      +                         Tensor<?>... inputs)
      +
      Description copied from interface: Function
      Warning: Tensors returned by this method are eligible for deletion when consumed by other functions.
      - Use this to call this Function alongside with some additional meta-arguments - which will be passed to the underlying Operation(s).
      -
      -
      Specified by:
      -
      execute in interface Function
      -
      Parameters:
      + Use this to call this Function alongside with some additional meta-arguments + which will be passed to the underlying Operation(s).
    +
    +
    Specified by:
    +
    execute in interface Function
    +
    Parameters:
    arguments - A set of arguments you want to supply to this function for further control over the execution.
    inputs - The tensors which should be sent through this function.
    -
    Returns:
    +
    Returns:
    The resulting tensor produced by this function.
    - -
  • -
    -

    toString

    -
    public String toString()
    -
    Description copied from interface: Function
    + + + + +
      +
    • +

      toString

      +
      public java.lang.String toString()
      +
      Description copied from interface: Function
      Turns this function into a string representation which can be used to reconstruct this function or combine it with other function strings to parse entirely new functions...
      -
      -
      Specified by:
      -
      toString in interface Function
      -
      Overrides:
      -
      toString in class Object
      -
      Returns:
      +
      +
      Specified by:
      +
      toString in interface Function
      +
      Overrides:
      +
      toString in class java.lang.Object
      +
      Returns:
      The string representation of this function.
      -
  • - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/math/implementations/FunctionNode.html b/docs/jdocs/neureka/math/implementations/FunctionNode.html index 36ded1d0a..cbf26af8e 100644 --- a/docs/jdocs/neureka/math/implementations/FunctionNode.html +++ b/docs/jdocs/neureka/math/implementations/FunctionNode.html @@ -1,422 +1,582 @@ - + + - -FunctionNode (neureka 1.0.0 API) - - - - + +FunctionNode (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class FunctionNode

    -
    -
    java.lang.Object -
    neureka.math.implementations.FunctionNode
    +
    neureka.math.implementations
    +

    Class FunctionNode

    -
    -
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.math.implementations.FunctionNode
      • +
      +
    • +
    +
    +
      +
    • +
      All Implemented Interfaces:
      -
      Function
      +
      Function

      -
      public final class FunctionNode -extends Object -implements Function
      -
      The most common type of Function which references other Functions to +
      +
      public final class FunctionNode
      +extends java.lang.Object
      +implements Function
      +
      The most common type of Function which references other Functions to form an abstract syntax tree.
      -
    -
    -
      + +
    +
    +
    + - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        FunctionNode

        -
        public FunctionNode(Operation type, - List<Function> sources, - boolean doAD)
        -
        -
        Parameters:
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            FunctionNode

            +
            public FunctionNode(Operation type,
            +                    java.util.List<Function> sources,
            +                    boolean doAD)
            +
            +
            Parameters:
            type - The operation which ought to be represented.
            sources - The child function nodes of this node.
            doAD - A flag determining if this function should perform autograd.
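In practice a FunctionNode is usually produced by the expression parser rather than constructed directly; a hedged sketch inspecting the resulting AST through the getSubFunctions() method documented further down this page (the expression is an arbitrary example):

    import java.util.List;
    import neureka.math.Function; // package assumed from the docs/jdocs/neureka/math/... paths

    class NodeSketch {
        public static void main(String[] args) {
            Function f = Function.of("(I[0] + I[1]) * I[0]", true); // root of the AST
            List<Function> children = f.getSubFunctions();          // direct child nodes
            System.out.println(f + " has " + children.size() + " direct sub-function(s).");
        }
    }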
            -
      -
    • +
    -
  • -
    -

    Method Details

    -
      -
    • -
      -

      toString

      -
      public String toString()
      -
      Description copied from interface: Function
      +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          toString

          +
          public java.lang.String toString()
          +
          Description copied from interface: Function
          Turns this function into a string representation which can be used to reconstruct this function or combine it with other function strings to parse entirely new functions...
          -
          -
          Specified by:
          -
          toString in interface Function
          -
          Overrides:
          -
          toString in class Object
          -
          Returns:
          +
          +
          Specified by:
          +
          toString in interface Function
          +
          Overrides:
          +
          toString in class java.lang.Object
          +
          Returns:
          The string representation of this function.
          -
    • -
    • -
      -

      dependsOn

      -
      public boolean dependsOn(int index)
      -
      Description copied from interface: Function
      +
    + + + +
      +
    • +

      dependsOn

      +
      public boolean dependsOn(int index)
      +
      Description copied from interface: Function
      Use this to determine if this function directly or indirectly references an input with the provided index.
      -
      -
      Specified by:
      -
      dependsOn in interface Function
      -
      Parameters:
      -
      index - The index which ought to match the input index of a potentially referenced FunctionInput.
      -
      Returns:
      -
      The truth value determining if this Function (or any sub-functions) reference a FunctionInput with the provided index.
      +
      +
      Specified by:
      +
      dependsOn in interface Function
      +
      Parameters:
      +
      index - The index which ought to match the input index of a potentially referenced FunctionInput.
      +
      Returns:
      +
      The truth value determining if this Function (or any sub-functions) reference a FunctionInput with the provided index.
      -
  • -
  • -
    -

    getDerivative

    -
    public Function getDerivative(int index)
    -
    Description copied from interface: Function
    -
    This method builds a new Function which is the derivative of this Function with respect to the provided input index.
    -
    -
    Specified by:
    -
    getDerivative in interface Function
    -
    Parameters:
    + + + + +
      +
    • +

      getDerivative

      +
      public Function getDerivative(int index)
      +
      Description copied from interface: Function
      +
      This method builds a new Function which is the derivative of this Function with respect to the provided input index.
      +
      +
      Specified by:
      +
      getDerivative in interface Function
      +
      Parameters:
      index - The index of the input which ought to serve as the variable which ought to be derived.
      -
      Returns:
      -
      The derivative of this Function.
      +
      Returns:
      +
      The derivative of this Function.
      -
  • -
  • -
    -

    getSubFunctions

    -
    public List<Function> getSubFunctions()
    -
    -
    Specified by:
    -
    getSubFunctions in interface Function
    -
    Returns:
    -
    The referenced child Function nodes of this Function AST node.
    + + + + +
  • -
  • -
    -

    execute

    -
    public Tensor<?> execute(Args arguments, - Tensor<?>... inputs)
    -
    Description copied from interface: Function
    + + + + +
      +
    • +

      execute

      +
      public Tensor<?> execute(Args arguments,
      +                         Tensor<?>... inputs)
      +
      Description copied from interface: Function
      Warning: Tensors returned by this method are eligible for deletion when consumed by other functions.
- Use this to call this Function along with some additional meta-arguments - which will be passed to the underlying Operation(s).

      -
      -
      Specified by:
      -
      execute in interface Function
      -
      Parameters:
+ Use this to call this Function along with some additional meta-arguments + which will be passed to the underlying Operation(s).
  • +
    +
    Specified by:
    +
    execute in interface Function
    +
    Parameters:
    arguments - A set of arguments you want to supply to this function for further control over the execution.
    inputs - The tensors which should be sent through this function.
    -
    Returns:
    +
    Returns:
    The resulting tensor produced by this function.
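A hedged sketch of such a call; Args.of(...) and Arg.DerivIdx are assumptions about the meta-argument API (neureka.math.args) and the Tensor.of factory overloads are assumed as well:

    Tensor<Double> x = Tensor.of(2.0);                 // factory overload assumed
    Tensor<Double> y = Tensor.of(4.0);
    Function mul = Function.of("I[0] * I[1]");
    // Requesting the derivative w.r.t. input 0 as a meta-argument (assumed usage):
    Tensor<?> dx = mul.execute(Args.of(Arg.DerivIdx.of(0)), x, y);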
    - -
  • -
    -

    call

    -
    public double call(double[] inputs, - int j)
    -
    Description copied from interface: Function
    -
Invokes this Function with the provided array of inputs and an index for input dependent indexing. - This method is functionally equivalent to Function.invoke(double[], int).
    -
    -
    Specified by:
    -
    call in interface Function
    -
    Parameters:
    + + + + +
      +
    • +

      call

      +
      public double call(double[] inputs,
      +                   int j)
      +
      Description copied from interface: Function
      +
Invokes this Function with the provided array of inputs and an index for input dependent indexing. + This method is functionally equivalent to Function.invoke(double[], int).
      +
      +
      Specified by:
      +
      call in interface Function
      +
      Parameters:
      inputs - The array of inputs.
      j - The index for input dependent indexing.
      -
      Returns:
      +
      Returns:
      The scalar result, a single double value.
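A sketch of how the extra index might be used; the assumption here is that 'j' resolves the placeholder I[j] to the j-th input:

    Function f = Function.of("I[j] + I[0]");
    double r0 = f.call(new double[]{ 1.0, 5.0 }, 0); // I[j] -> inputs[0], so 1.0 + 1.0 = 2.0 (assumed semantics)
    double r1 = f.call(new double[]{ 1.0, 5.0 }, 1); // I[j] -> inputs[1], so 5.0 + 1.0 = 6.0 (assumed semantics)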
      -
  • -
  • -
    -

    derive

    -
    public double derive(double[] inputs, - int d, - int j)
    -
    Description copied from interface: Function
    -
    Calculates the derivative of a particular input with respect to the output of this Function + + + + +
      +
    • +

      derive

      +
      public double derive(double[] inputs,
      +                     int d,
      +                     int j)
      +
      Description copied from interface: Function
      +
      Calculates the derivative of a particular input with respect to the output of this Function based on the provided array of inputs, an index targeting the input to be derived and an index for input dependent indexing.
      -
      -
      Specified by:
      -
      derive in interface Function
      -
      Parameters:
      +
      +
      Specified by:
      +
      derive in interface Function
      +
      Parameters:
      inputs - The double array of inputs.
      d - The index of the input to be derived.
      j - The index for input dependent indexing.
      -
      Returns:
      +
      Returns:
      The scalar double result, a single double value.
      -
  • -
  • -
    -

    derive

    -
    public double derive(double[] inputs, - int d)
    -
    Description copied from interface: Function
    -
    Calculates the derivative of a particular input with respect to the output of this Function + + + + +
      +
    • +

      derive

      +
      public double derive(double[] inputs,
      +                     int d)
      +
      Description copied from interface: Function
      +
      Calculates the derivative of a particular input with respect to the output of this Function based on the provided array of inputs and an index targeting the input to be derived.
      -
      -
      Specified by:
      -
      derive in interface Function
      -
      Parameters:
      +
      +
      Specified by:
      +
      derive in interface Function
      +
      Parameters:
      inputs - The double array of inputs.
      d - The index of the input to be derived.
      -
      Returns:
      +
      Returns:
      The scalar double result, a single double value.
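For example, a small sketch using only the two-argument overload documented here (the values are what the product rule predicts):

    Function f = Function.of("I[0] * I[1]");
    double d0 = f.derive(new double[]{ 2.0, 4.0 }, 0); // d f / d I[0] at (2, 4) -> 4.0
    double d1 = f.derive(new double[]{ 2.0, 4.0 }, 1); // d f / d I[1] at (2, 4) -> 2.0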
      -
  • -
  • -
    -

    getOperation

    -
    public Operation getOperation()
    -
    -
    Specified by:
    -
    getOperation in interface Function
    -
    Returns:
    -
    The Operation implementation instance responsible for executing any inputs received by this Function or null if this Function.isFlat().
    + + + + +
  • -
  • -
    -

    isFlat

    -
    public boolean isFlat()
    -
    -
    Specified by:
    -
    isFlat in interface Function
    -
    Returns:
    -
    The truth value determining if the sub-functions of this Function do not themselves reference Functions.
    + + + + +
      +
    • +

      isFlat

      +
      public boolean isFlat()
      +
      +
      Specified by:
      +
      isFlat in interface Function
      +
      Returns:
      +
      The truth value determining if the sub-functions of this Function do not themselves reference Functions.
      -
  • -
  • -
    -

    isDoingAD

    -
    public boolean isDoingAD()
    -
    Description copied from interface: Function
    -
    Only branch Functions can do autograd / 'Auto-Differentiation', meaning functions - whose Function.isFlat() flag is set to false!
    -
    -
    Specified by:
    -
    isDoingAD in interface Function
    -
    Returns:
    -
    The truth value determining if this Function can perform autograd/auto-differentiation on the input tensors it receives.
    + + + + +
      +
    • +

      isDoingAD

      +
      public boolean isDoingAD()
      +
      Description copied from interface: Function
      +
      Only branch Functions can do autograd / 'Auto-Differentiation', meaning functions + whose Function.isFlat() flag is set to false!
      +
      +
      Specified by:
      +
      isDoingAD in interface Function
      +
      Returns:
      +
      The truth value determining if this Function can perform autograd/auto-differentiation on the input tensors it receives.
      -
  • - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/math/implementations/FunctionVariable.html b/docs/jdocs/neureka/math/implementations/FunctionVariable.html index 21ae76f58..2172e06c5 100644 --- a/docs/jdocs/neureka/math/implementations/FunctionVariable.html +++ b/docs/jdocs/neureka/math/implementations/FunctionVariable.html @@ -1,430 +1,598 @@ - + + - -FunctionVariable (neureka 1.0.0 API) - - - - + +FunctionVariable (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class FunctionVariable

    -
    -
    java.lang.Object -
    neureka.math.implementations.FunctionVariable
    +
    neureka.math.implementations
    +

    Class FunctionVariable

    -
    -
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.math.implementations.FunctionVariable
      • +
      +
    • +
    +
    +
      +
    • +
      All Implemented Interfaces:
      -
      Function
      +
      Function

      -
      public final class FunctionVariable -extends Object -implements Function
      -
      Instances of this implementation of the Function interface +
      +
      public final class FunctionVariable
      +extends java.lang.Object
      +implements Function
      +
      Instances of this implementation of the Function interface are leave nodes within the abstract syntax tree of a function, representing indexed inputs to a function. When parsing an expression into a function then these inputs are recognized by the character 'i' or 'I', followed by the character 'j' or 'J' (optionally wrapped by '[' and ']'), which is a placeholder for the index - of the argument within the list/array of arguments passed to a concrete Function instance.
      + of the argument within the list/array of arguments passed to a concrete Function instance.
      So for example, when creating a function by calling the following factory method...

      - Function.of(java.lang.String)( "3 * sum( (I[j] + 4) * I[0] )" )
      + Function.of(java.lang.String)( "3 * sum( (I[j] + 4) * I[0] )" )

      ...then the substrings "I[j]" will be parsed into instances of this class!
      The substring "I[0]" on the other hand will not be parsed into an instance of this class!
      -
    -
    -
      + +
    +
    +
    + - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        FunctionVariable

        -
        public FunctionVariable(String equation)
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            FunctionVariable

            +
            public FunctionVariable(java.lang.String equation)
          -
    • +
    -
  • -
    -

    Method Details

    -
      -
    • -
      -

      providesGradient

      -
      public boolean providesGradient()
      -
      +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          providesGradient

          +
          public boolean providesGradient()
          +
          +
          Returns:
          +
The answer to the question: "does this variable/input node return a gradient?"
          +
        • -
        • -
          -

          isFlat

          -
          public boolean isFlat()
          -
          -
          Specified by:
          -
          isFlat in interface Function
          -
          Returns:
          -
          The truth value determining if the sub-functions of this Function do not themselves reference Functions.
          +
        + + + +
          +
        • +

          isFlat

          +
          public boolean isFlat()
          +
          +
          Specified by:
          +
          isFlat in interface Function
          +
          Returns:
          +
          The truth value determining if the sub-functions of this Function do not themselves reference Functions.
          -
  • -
  • -
    -

    isDoingAD

    -
    public boolean isDoingAD()
    -
    Description copied from interface: Function
    -
    Only branch Functions can do autograd / 'Auto-Differentiation', meaning functions - whose Function.isFlat() flag is set to false!
    -
    -
    Specified by:
    -
    isDoingAD in interface Function
    -
    Returns:
    -
    The truth value determining if this Function can perform autograd/auto-differentiation on the input tensors it receives.
    + + + + +
      +
    • +

      isDoingAD

      +
      public boolean isDoingAD()
      +
      Description copied from interface: Function
      +
      Only branch Functions can do autograd / 'Auto-Differentiation', meaning functions + whose Function.isFlat() flag is set to false!
      +
      +
      Specified by:
      +
      isDoingAD in interface Function
      +
      Returns:
      +
      The truth value determining if this Function can perform autograd/auto-differentiation on the input tensors it receives.
      -
  • -
  • -
    -

    getOperation

    -
    public AbstractOperation getOperation()
    -
    -
    Specified by:
    -
    getOperation in interface Function
    -
    Returns:
    -
    The Operation implementation instance responsible for executing any inputs received by this Function or null if this Function.isFlat().
    + + + + +
  • -
  • -
    -

    dependsOn

    -
    public boolean dependsOn(int index)
    -
    Description copied from interface: Function
    + + + + +
      +
    • +

      dependsOn

      +
      public boolean dependsOn(int index)
      +
      Description copied from interface: Function
      Use this to determine if this function directly or indirectly references an input with the provided index.
      -
      -
      Specified by:
      -
      dependsOn in interface Function
      -
      Parameters:
      -
      index - The index which ought to match the input index of a potentially referenced FunctionInput.
      -
      Returns:
      -
      The truth value determining if this Function (or any sub-functions) reference a FunctionInput with the provided index.
      +
      +
      Specified by:
      +
      dependsOn in interface Function
      +
      Parameters:
      +
      index - The index which ought to match the input index of a potentially referenced FunctionInput.
      +
      Returns:
      +
      The truth value determining if this Function (or any sub-functions) reference a FunctionInput with the provided index.
      -
  • -
  • -
    -

    getDerivative

    -
    public Function getDerivative(int index)
    -
    Description copied from interface: Function
    -
    This method builds a new Function which is the derivative of this Function with respect to the provided input index.
    -
    -
    Specified by:
    -
    getDerivative in interface Function
    -
    Parameters:
    + + + + +
      +
    • +

      getDerivative

      +
      public Function getDerivative(int index)
      +
      Description copied from interface: Function
      +
      This method builds a new Function which is the derivative of this Function with respect to the provided input index.
      +
      +
      Specified by:
      +
      getDerivative in interface Function
      +
      Parameters:
      index - The index of the input which ought to serve as the variable which ought to be derived.
      -
      Returns:
      -
      The derivative of this Function.
      +
      Returns:
      +
      The derivative of this Function.
      -
  • -
  • -
    -

    getSubFunctions

    -
    public List<Function> getSubFunctions()
    -
    -
    Specified by:
    -
    getSubFunctions in interface Function
    -
    Returns:
    -
    The referenced child Function nodes of this Function AST node.
    + + + + +
  • -
  • -
    -

    call

    -
    public double call(double[] inputs, - int j)
    -
    Description copied from interface: Function
    -
Invokes this Function with the provided array of inputs and an index for input dependent indexing. - This method is functionally equivalent to Function.invoke(double[], int).
    -
    -
    Specified by:
    -
    call in interface Function
    -
    Parameters:
    + + + + +
      +
    • +

      call

      +
      public double call(double[] inputs,
      +                   int j)
      +
      Description copied from interface: Function
      +
Invokes this Function with the provided array of inputs and an index for input dependent indexing. + This method is functionally equivalent to Function.invoke(double[], int).
      +
      +
      Specified by:
      +
      call in interface Function
      +
      Parameters:
      inputs - The array of inputs.
      j - The index for input dependent indexing.
      -
      Returns:
      +
      Returns:
      The scalar result, a single double value.
      -
  • -
  • -
    -

    derive

    -
    public double derive(double[] inputs, - int index)
    -
    Description copied from interface: Function
    -
    Calculates the derivative of a particular input with respect to the output of this Function + + + + +
      +
    • +

      derive

      +
      public double derive(double[] inputs,
      +                     int index)
      +
      Description copied from interface: Function
      +
      Calculates the derivative of a particular input with respect to the output of this Function based on the provided array of inputs and an index targeting the input to be derived.
      -
      -
      Specified by:
      -
      derive in interface Function
      -
      Parameters:
      +
      +
      Specified by:
      +
      derive in interface Function
      +
      Parameters:
      inputs - The double array of inputs.
      index - The index of the input to be derived.
      -
      Returns:
      +
      Returns:
      The scalar double result, a single double value.
      -
  • -
  • -
    -

    derive

    -
    public double derive(double[] inputs, - int index, - int j)
    -
    Description copied from interface: Function
    -
    Calculates the derivative of a particular input with respect to the output of this Function + + + + +
      +
    • +

      derive

      +
      public double derive(double[] inputs,
      +                     int index,
      +                     int j)
      +
      Description copied from interface: Function
      +
      Calculates the derivative of a particular input with respect to the output of this Function based on the provided array of inputs, an index targeting the input to be derived and an index for input dependent indexing.
      -
      -
      Specified by:
      -
      derive in interface Function
      -
      Parameters:
      +
      +
      Specified by:
      +
      derive in interface Function
      +
      Parameters:
      inputs - The double array of inputs.
      index - The index of the input to be derived.
      j - The index for input dependent indexing.
      -
      Returns:
      +
      Returns:
      The scalar double result, a single double value.
      -
  • -
  • -
    -

    execute

    -
    public Tensor<?> execute(Args arguments, - Tensor<?>... inputs)
    -
    Description copied from interface: Function
    + + + + +
      +
    • +

      execute

      +
      public Tensor<?> execute(Args arguments,
      +                         Tensor<?>... inputs)
      +
      Description copied from interface: Function
      Warning: Tensors returned by this method are eligible for deletion when consumed by other functions.
- Use this to call this Function along with some additional meta-arguments - which will be passed to the underlying Operation(s).
      -
      -
      Specified by:
      -
      execute in interface Function
      -
      Parameters:
+ Use this to call this Function along with some additional meta-arguments + which will be passed to the underlying Operation(s).
  • +
    +
    Specified by:
    +
    execute in interface Function
    +
    Parameters:
    arguments - A set of arguments you want to supply to this function for further control over the execution.
    inputs - The tensors which should be sent through this function.
    -
    Returns:
    +
    Returns:
    The resulting tensor produced by this function.
    - -
  • -
    -

    toString

    -
    public String toString()
    -
    Description copied from interface: Function
    + + + + +
      +
    • +

      toString

      +
      public java.lang.String toString()
      +
      Description copied from interface: Function
      Turns this function into a string representation which can be used to reconstruct this function or combine it with other function strings to parse entirely new functions...
      -
      -
      Specified by:
      -
      toString in interface Function
      -
      Overrides:
      -
      toString in class Object
      -
      Returns:
      +
      +
      Specified by:
      +
      toString in interface Function
      +
      Overrides:
      +
      toString in class java.lang.Object
      +
      Returns:
      The string representation of this function.
      -
  • - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/math/implementations/package-frame.html b/docs/jdocs/neureka/math/implementations/package-frame.html new file mode 100644 index 000000000..f51c64fea --- /dev/null +++ b/docs/jdocs/neureka/math/implementations/package-frame.html @@ -0,0 +1,22 @@ + + + + + +neureka.math.implementations (neureka 1.0.1 API) + + + + +

    neureka.math.implementations

    + + + diff --git a/docs/jdocs/neureka/math/implementations/package-summary.html b/docs/jdocs/neureka/math/implementations/package-summary.html index c29d6d81b..6cc916d23 100644 --- a/docs/jdocs/neureka/math/implementations/package-summary.html +++ b/docs/jdocs/neureka/math/implementations/package-summary.html @@ -1,124 +1,176 @@ - + + - -neureka.math.implementations (neureka 1.0.0 API) - - - - + +neureka.math.implementations (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    -

    Package neureka.math.implementations

    -
    -
    -
    package neureka.math.implementations
    -
    +

    Package neureka.math.implementations

    +
    Everything in this package should be considered library-private! DO NOT DEPEND ON CLASSES INSIDE THIS PACKAGE! Code inside this package or any sub-packages might change frequently...
    -
    -
    -
      -
    • - -
    • -
    • -
      -
      Classes
      -
      -
      Class
      -
      Description
      - -
      -
      Instances of this implementation of the Function interface +

      See: Description

      +
      +
      +
        +
      • + + + + + + + + + + + + + + + + + + + + + + + + +
        Class Summary 
        ClassDescription
        FunctionConstant +
        Instances of this implementation of the Function interface are leave nodes within the abstract syntax tree of a function, representing constant numeric values to a function.
        - - -
        -
        Instances of this implementation of the Function interface +
        FunctionInput +
        Instances of this implementation of the Function interface are leave nodes within the abstract syntax tree of a function, representing inputs to a function.
        - - -
        -
        The most common type of Function which references other Functions to +
        FunctionNode +
        The most common type of Function which references other Functions to form an abstract syntax tree.
        - - -
        -
        Instances of this implementation of the Function interface +
        FunctionVariable +
        Instances of this implementation of the Function interface are leave nodes within the abstract syntax tree of a function, representing indexed inputs to a function.
        - - - +
      -
    -
    + + + +

    Package neureka.math.implementations Description

    +
    Everything in this package should be considered library-private! + DO NOT DEPEND ON CLASSES INSIDE THIS PACKAGE! + Code inside this package or any sub-packages might change frequently...
    + + + + diff --git a/docs/jdocs/neureka/math/implementations/package-tree.html b/docs/jdocs/neureka/math/implementations/package-tree.html index b657f9fa8..c194c2e72 100644 --- a/docs/jdocs/neureka/math/implementations/package-tree.html +++ b/docs/jdocs/neureka/math/implementations/package-tree.html @@ -1,74 +1,137 @@ - + + - -neureka.math.implementations Class Hierarchy (neureka 1.0.0 API) - - - - + +neureka.math.implementations Class Hierarchy (neureka 1.0.1 API) - - - - - - + + -
    - -
    -

    Hierarchy For Package neureka.math.implementations

    -Package Hierarchies: +Package Hierarchies:
    -
    +

    Class Hierarchy

    -
    -
    + + + + diff --git a/docs/jdocs/neureka/math/package-frame.html b/docs/jdocs/neureka/math/package-frame.html new file mode 100644 index 000000000..50f7067b4 --- /dev/null +++ b/docs/jdocs/neureka/math/package-frame.html @@ -0,0 +1,25 @@ + + + + + +neureka.math (neureka 1.0.1 API) + + + + +

    neureka.math

    +
    +

    Interfaces

    + +

    Classes

    + +
    + + diff --git a/docs/jdocs/neureka/math/package-summary.html b/docs/jdocs/neureka/math/package-summary.html index 51ead3b0e..98aed8602 100644 --- a/docs/jdocs/neureka/math/package-summary.html +++ b/docs/jdocs/neureka/math/package-summary.html @@ -1,131 +1,174 @@ - + + - -neureka.math (neureka 1.0.0 API) - - - - + +neureka.math (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    -

    Package neureka.math

    -
    -
    -
    package neureka.math
    -
    -
      -
    • - -
    • -
    • -
      -
      -
      -
      -
      Class
      -
      Description
      - -
      -
      Besides the Tensor class, which is the core class of Neureka, this interface and its implementations +

      Package neureka.math

      +
      +
      + -
    -
    + + + + diff --git a/docs/jdocs/neureka/math/package-tree.html b/docs/jdocs/neureka/math/package-tree.html index 1d0ea1676..fbce5f61f 100644 --- a/docs/jdocs/neureka/math/package-tree.html +++ b/docs/jdocs/neureka/math/package-tree.html @@ -1,79 +1,140 @@ - + + - -neureka.math Class Hierarchy (neureka 1.0.0 API) - - - - + +neureka.math Class Hierarchy (neureka 1.0.1 API) - - - - - - + + -
    - -
    -

    Hierarchy For Package neureka.math

    -Package Hierarchies: +Package Hierarchies:
    -
    +

    Class Hierarchy

    -
    -

    Interface Hierarchy

    -
    -
    + + + + diff --git a/docs/jdocs/neureka/math/parsing/FunctionParser.html b/docs/jdocs/neureka/math/parsing/FunctionParser.html index aed578716..40eae5123 100644 --- a/docs/jdocs/neureka/math/parsing/FunctionParser.html +++ b/docs/jdocs/neureka/math/parsing/FunctionParser.html @@ -1,197 +1,311 @@ - + + - -FunctionParser (neureka 1.0.0 API) - - - - + +FunctionParser (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class FunctionParser

    +
    neureka.math.parsing
    +

    Class FunctionParser

    -
    java.lang.Object -
    neureka.math.parsing.FunctionParser
    -
    -
    -
    -
    public class FunctionParser -extends Object
    -
The FunctionParser takes a BackendContext instance based on which - it builds Function implementation instances, usually by parsing Strings. - The information needed for parsing is provided by the Operations within the - aforementioned BackendContext...
    -
    -
    -
      - +
      +
        +
      • java.lang.Object
      • -
        -

        Constructor Summary

        -
        Constructors
        -
        -
        Constructor
        -
        Description
        - -
         
        +
          +
        • neureka.math.parsing.FunctionParser
        • +
        +
      • +
      +
      +
        +
      • +
        +
        +
        public class FunctionParser
        +extends java.lang.Object
        +
The FunctionParser takes a BackendContext instance based on which + it builds Function implementation instances, usually by parsing Strings. + The information needed for parsing is provided by the Operations within the + aforementioned BackendContext...
        +
      • +
      -
    +
    + - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        FunctionParser

        -
        public FunctionParser(BackendContext context)
        -
        -
        Parameters:
        -
context - The BackendContext which will be used as a basis to parse new Function - implementation instances from provided String expressions.
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            FunctionParser

            +
            public FunctionParser(BackendContext context)
            +
            +
            Parameters:
            +
context - The BackendContext which will be used as a basis to parse new Function + implementation instances from provided String expressions.
            -
      -
    • +
    -
  • -
    -

    Method Details

    -
      -
    • -
      -

      parse

      -
      public Function parse(Operation operation, - int numberOfArgs, - boolean doAD)
      -
      -
      Parameters:
      -
      operation - The Operation based on which the Function ought to be created.
      -
      numberOfArgs - The number of arguments the produced Function ought to have.
      -
      doAD - The flag determining if the Function built by this method should perform autograd or not.
      -
      Returns:
      -
A Function implementation instance which satisfies the supplied parameters.
      +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          parse

          +
          public Function parse(Operation operation,
          +                      int numberOfArgs,
          +                      boolean doAD)
          +
          +
          Parameters:
          +
          operation - The Operation based on which the Function ought to be created.
          +
          numberOfArgs - The number of arguments the produced Function ought to have.
          +
          doAD - The flag determining if the Function built by this method should perform autograd or not.
          +
          Returns:
          +
A Function implementation instance which satisfies the supplied parameters.
          -
    • -
    • -
      -

      parse

      -
      public Function parse(String expression, - boolean doAD)
      -
      -
      Parameters:
      +
    + + + +
      +
    • +

      parse

      +
      public Function parse(java.lang.String expression,
      +                      boolean doAD)
      +
      +
      Parameters:
      expression - contains the function as String provided by the user
      doAD - is used to turn autograd on or off for this function
      -
      Returns:
      +
      Returns:
      the function which has been built from the expression
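A usage sketch; Neureka.get().backend() is assumed to expose the current BackendContext:

    FunctionParser parser = new FunctionParser( Neureka.get().backend() ); // backend() access is an assumption
    Function f = parser.parse( "i0 * i1 + 3", true ); // 'true' builds the function with autograd enabled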
      -
  • - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/math/parsing/ParseUtil.html b/docs/jdocs/neureka/math/parsing/ParseUtil.html index 4a91366c0..8aa362015 100644 --- a/docs/jdocs/neureka/math/parsing/ParseUtil.html +++ b/docs/jdocs/neureka/math/parsing/ParseUtil.html @@ -1,246 +1,385 @@ - + + - -ParseUtil (neureka 1.0.0 API) - - - - + +ParseUtil (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class ParseUtil

    +
    neureka.math.parsing
    +

    Class ParseUtil

    -
    java.lang.Object -
    neureka.math.parsing.ParseUtil
    -
    -
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.math.parsing.ParseUtil
      • +
      +
    • +
    +
    +
      +

    • -
      public final class ParseUtil -extends Object
      +
      +
      public final class ParseUtil
      +extends java.lang.Object
      Utility for parsing function expressions.
      -
    -
    -
    -
    -

    Methods inherited from class java.lang.Object

    -clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
    - +
    +
      +
    • + +
        +
      • + + +

        Method Summary

        + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        All Methods Static Methods Concrete Methods 
Modifier and Type                          Method and Description
static java.lang.String                    assumptionBasedOn(java.lang.String expression)
                                           This method tries to find the next best operation String the user might have meant.
static java.lang.String                    cleanedHeadAndTail(java.lang.String exp)
static java.lang.String                    findComponentIn(java.lang.String exp, int index)
static java.util.List<java.lang.String>    findParametersIn(java.lang.String exp, int index)
static java.lang.String                    groupBy(java.lang.String operation, java.lang.String currentChain, java.lang.String currentComponent, java.lang.String currentOperation)
static boolean                             isAnOperation(java.lang.String operationName)
static int                                 numberOfOperationsWithin(java.util.List<java.lang.String> operations)
static java.lang.String                    parsedOperation(java.lang.String exp, int index)
static double                              similarity(java.lang.String s1, java.lang.String s2)
                                           This method estimates the similarity between 2 provided String instances.
static java.lang.String                    unpackAndCorrect(java.lang.String exp)
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
        • +
      - -
      -
        + +
      +
    +
    +
      +
    • -
    • -
      -

      Method Details

      -
        -
      • -
        -

        numberOfOperationsWithin

        -
        public static int numberOfOperationsWithin(List<String> operations)
        -
        +
          +
        • + + +

          Method Detail

          + + + +
            +
          • +

            numberOfOperationsWithin

            +
            public static int numberOfOperationsWithin(java.util.List<java.lang.String> operations)
          • -
          • -
            -

            parsedOperation

            -
            public static String parsedOperation(String exp, - int index)
            -
            +
          + + + +
            +
          • +

            parsedOperation

            +
            public static java.lang.String parsedOperation(java.lang.String exp,
            +                                               int index)
          • -
          • -
            -

            findComponentIn

            -
            public static String findComponentIn(String exp, - int index)
            -
            +
          + + + +
            +
          • +

            findComponentIn

            +
            public static java.lang.String findComponentIn(java.lang.String exp,
            +                                               int index)
          • -
          • -
            -

            findParametersIn

            -
            public static List<String> findParametersIn(String exp, - int index)
            -
            +
          + + + +
            +
          • +

            findParametersIn

            +
            public static java.util.List<java.lang.String> findParametersIn(java.lang.String exp,
            +                                                                int index)
          • -
          • -
            -

            isAnOperation

            -
            public static boolean isAnOperation(String operationName)
            -
            +
          + + + +
            +
          • +

            isAnOperation

            +
            public static boolean isAnOperation(java.lang.String operationName)
          • -
          • -
            -

            groupBy

            -
            public static String groupBy(String operation, - String currentChain, - String currentComponent, - String currentOperation)
            -
            +
          + + + +
            +
          • +

            groupBy

            +
            public static java.lang.String groupBy(java.lang.String operation,
            +                                       java.lang.String currentChain,
            +                                       java.lang.String currentComponent,
            +                                       java.lang.String currentOperation)
          • -
          • -
            -

            cleanedHeadAndTail

            -
            public static String cleanedHeadAndTail(String exp)
            -
            +
          + + + +
            +
          • +

            cleanedHeadAndTail

            +
            public static java.lang.String cleanedHeadAndTail(java.lang.String exp)
          • -
          • -
            -

            unpackAndCorrect

            -
            public static String unpackAndCorrect(String exp)
            -
            +
          + + + +
            +
          • +

            unpackAndCorrect

            +
            public static java.lang.String unpackAndCorrect(java.lang.String exp)
          • -
          • -
            -

            assumptionBasedOn

            -
            public static String assumptionBasedOn(String expression)
            -
            This method tries to find the next best operation String the user might have meant.
            -
            -
            Parameters:
            +
          + + + +
            +
          • +

            assumptionBasedOn

            +
            public static java.lang.String assumptionBasedOn(java.lang.String expression)
            +
            This method tries to find the next best operation String the user might have meant.
            +
            +
            Parameters:
            expression - The expression which should be interpreted as something similar.
            -
            Returns:
            +
            Returns:
            Something similar or null if the expression is not similar enough.
            -
    • -
    • -
      -

      similarity

      -
      public static double similarity(String s1, - String s2)
      -
      This method estimates the similarity between 2 provided String instances.
      -
      -
      Parameters:
      +
    + + + +
      +
    • +

      similarity

      +
      public static double similarity(java.lang.String s1,
      +                                java.lang.String s2)
      +
      This method estimates the similarity between 2 provided String instances.
      +
      +
      Parameters:
      s1 - The first string which should be compared to the second string.
      s2 - The second string which should be compared to the first string.
      -
      Returns:
      +
      Returns:
      A similarity score between 0 and 1 where 1 would be 100% similar (equal).
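For illustration (the concrete scores are assumptions, only their ordering follows from the contract above):

    double same  = ParseUtil.similarity( "softplus", "softplus" ); // 1.0 - identical strings
    double close = ParseUtil.similarity( "softplus", "sofplus"  ); // high, but below 1.0
    String guess = ParseUtil.assumptionBasedOn( "sofplus" );       // likely "softplus", or null if nothing is similar enough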
      -
    - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/math/parsing/package-frame.html b/docs/jdocs/neureka/math/parsing/package-frame.html new file mode 100644 index 000000000..edd6283f6 --- /dev/null +++ b/docs/jdocs/neureka/math/parsing/package-frame.html @@ -0,0 +1,20 @@ + + + + + +neureka.math.parsing (neureka 1.0.1 API) + + + + +

    neureka.math.parsing

    +
    +

    Classes

    + +
    + + diff --git a/docs/jdocs/neureka/math/parsing/package-summary.html b/docs/jdocs/neureka/math/parsing/package-summary.html index 1592de7e9..2fcb32b6c 100644 --- a/docs/jdocs/neureka/math/parsing/package-summary.html +++ b/docs/jdocs/neureka/math/parsing/package-summary.html @@ -1,113 +1,161 @@ - + + - -neureka.math.parsing (neureka 1.0.0 API) - - - - + +neureka.math.parsing (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    -

    Package neureka.math.parsing

    -
    -
    -
    package neureka.math.parsing
    -
    -
    Everything in this package should be considered library-private! - DO NOT DEPEND ON CLASSES INSIDE THIS PACKAGE! - Code inside this package or any sub-packages might change frequently...
    -
    -
    -
    -
    + + + +

    Package neureka.math.parsing Description

    +
    Everything in this package should be considered library-private! + DO NOT DEPEND ON CLASSES INSIDE THIS PACKAGE! + Code inside this package or any sub-packages might change frequently...
    +
    + + + + diff --git a/docs/jdocs/neureka/math/parsing/package-tree.html b/docs/jdocs/neureka/math/parsing/package-tree.html index 9e74e54f5..85527ad29 100644 --- a/docs/jdocs/neureka/math/parsing/package-tree.html +++ b/docs/jdocs/neureka/math/parsing/package-tree.html @@ -1,72 +1,135 @@ - + + - -neureka.math.parsing Class Hierarchy (neureka 1.0.0 API) - - - - + +neureka.math.parsing Class Hierarchy (neureka 1.0.1 API) - - - - - - + + -
    - -
    -

    Hierarchy For Package neureka.math.parsing

    -Package Hierarchies: +Package Hierarchies:
    -
    +

    Class Hierarchy

    -
    -
    + + + + diff --git a/docs/jdocs/neureka/ndim/Filler.html b/docs/jdocs/neureka/ndim/Filler.html index ca1a6e048..30131e096 100644 --- a/docs/jdocs/neureka/ndim/Filler.html +++ b/docs/jdocs/neureka/ndim/Filler.html @@ -1,137 +1,231 @@ - + + - -Filler (neureka 1.0.0 API) - - - - + +Filler (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    -
    Package neureka.ndim
    -

    Interface Filler<T>

    +
    neureka.ndim
    +

    Interface Filler<T>

    -
    -
    -
    Type Parameters:
    +
    +
    +
      +
    • +
      +
      Type Parameters:
      T - The type parameter determining the type of the supplied values.
      -
      +
      Functional Interface:
      This is a functional interface and can therefore be used as the assignment target for a lambda expression or method reference.

      -
      @FunctionalInterface -public interface Filler<T>
      +
      +
      @FunctionalInterface
      +public interface Filler<T>
      Implementations of this ought to map the index of a tensor entry to a value which should be placed at that entry position.
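A sketch of a Filler in use; the Tensor.of(Class, Shape, Filler) factory overload is an assumption about how such a lambda is typically consumed:

    Filler<Double> filler = ( i, indices ) -> (double) ( indices[0] * 10 + indices[1] );
    Tensor<Double> t = Tensor.of( Double.class, Shape.of(2, 3), filler ); // [[0, 1, 2], [10, 11, 12]]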
      -
    -
    -
      - -
    • -
      -

      Method Summary

      -
      -
      -
      -
      -
      Modifier and Type
      -
      Method
      -
      Description
      - -
      init(int i, - int[] index)
      -
       
      -
      -
      +
    • +
    - +
    + - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Method Details

      -
        -
      • -
        -

        init

        -
        T init(int i, - int[] index)
        -
        +
          +
        • + + +

          Method Detail

          + + + +
            +
          • +

            init

            +
            T init(int i,
            +       int[] index)
            +
          • +
        -
    - +
    +
    - + + + + diff --git a/docs/jdocs/neureka/ndim/NDConstructor.html b/docs/jdocs/neureka/ndim/NDConstructor.html index f6321f4e8..fbda35dec 100644 --- a/docs/jdocs/neureka/ndim/NDConstructor.html +++ b/docs/jdocs/neureka/ndim/NDConstructor.html @@ -1,186 +1,304 @@ - + + - -NDConstructor (neureka 1.0.0 API) - - - - + +NDConstructor (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    -
    Package neureka.ndim
    -

    Interface NDConstructor

    +
    neureka.ndim
    +

    Interface NDConstructor

    -
    +
    +
    +
      +

    • -
      public interface NDConstructor
      -
    -
    -
    - +
    + - -
    -
      +
    +
    + - +
    +
    - + + + + diff --git a/docs/jdocs/neureka/ndim/NDUtil.html b/docs/jdocs/neureka/ndim/NDUtil.html index 49b981e7b..fcef5e7aa 100644 --- a/docs/jdocs/neureka/ndim/NDUtil.html +++ b/docs/jdocs/neureka/ndim/NDUtil.html @@ -1,159 +1,269 @@ - + + - -NDUtil (neureka 1.0.0 API) - - - - + +NDUtil (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    -
    Package neureka.ndim
    -

    Class NDUtil

    +
    neureka.ndim
    +

    Class NDUtil

    -
    java.lang.Object -
    neureka.ndim.NDUtil
    -
    -
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.ndim.NDUtil
      • +
      +
    • +
    +
    +
      +

    • -
      public class NDUtil -extends Object
      +
      +
      public class NDUtil
      +extends java.lang.Object
      Static utility methods for the NDArray.
      -
    -
    -
      - -
    • -
      -

      Constructor Summary

      -
      Constructors
      -
      -
      Constructor
      -
      Description
      - -
       
      +
    • +
    - +
    + - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        NDUtil

        -
        public NDUtil()
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            NDUtil

            +
            public NDUtil()
          -
    • +
    -
  • -
    -

    Method Details

    -
      -
    • -
      -

      shapeString

      -
      public static String shapeString(int[] conf)
      -
      +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          shapeString

          +
          public static java.lang.String shapeString(int[] conf)
        -
  • - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/ndim/NDimensional.html b/docs/jdocs/neureka/ndim/NDimensional.html index 67cafe077..17c40787b 100644 --- a/docs/jdocs/neureka/ndim/NDimensional.html +++ b/docs/jdocs/neureka/ndim/NDimensional.html @@ -1,296 +1,381 @@ - + + - -NDimensional (neureka 1.0.0 API) - - - - + +NDimensional (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    -
    Package neureka.ndim
    -

    Interface NDimensional

    +
    neureka.ndim
    +

    Interface NDimensional

    -
    -
    +
    +
    +
      +
    • +
      All Known Subinterfaces:
      -
      Nda<V>, Tensor<V>
      +
      Nda<V>, Tensor<V>

      -
      public interface NDimensional
      +
      +
      public interface NDimensional
      This interface defines the most essential methods of the nd-array/tensor API, which describe them with respect to their dimensionality.
      - How many dimensions does a tensor/nd-array have? (rank())
      - How many elements does a tensor/nd-array have? (size())
      - What are the sizes of the individual dimensions? (shape(int i))
      + How many dimensions does a tensor/nd-array have? (rank())
      + How many elements does a tensor/nd-array have? (size())
      + What are the sizes of the individual dimensions? (shape(int i))
      ...
      -
    -
    -
      + +
    +
    +
    + - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Method Details

      -
        -
      • -
        -

        rank

        -
        default int rank()
        -
        -
        Returns:
        +
          +
        • + + +

          Method Detail

          + + + +
            +
          • +

            rank

            +
            default int rank()
            +
            +
            Returns:
            The number of dimensions of this tensor / nd-array.
            -
      • -
      • -
        -

        getRank

        -
        default int getRank()
        -
        -
        Returns:
        +
      + + + +
        +
      • +

        getRank

        +
        default int getRank()
        +
        +
        Returns:
        The number of dimensions of this tensor / nd-array.
        -
    • -
    • -
      -

      shape

      -
      default Shape shape()
      -
      -
      Returns:
      +
    + + + +
      +
    • +

      shape

      +
      default Shape shape()
      +
      +
      Returns:
      A list of the dimensions of this tensor / array.
      -
    • -
    • -
      -

      getShape

      -
      default Shape getShape()
      -
      -
      Returns:
      +
    + + + +
      +
    • +

      getShape

      +
      default Shape getShape()
      +
      +
      Returns:
      A list of the dimensions of this tensor / array.
      -
    • -
    • -
      -

      indicesMap

      -
      default List<Integer> indicesMap()
      -
      +
    + + + +
      +
    • +

      indicesMap

      +
      default java.util.List<java.lang.Integer> indicesMap()
    • -
    • -
      -

      strides

      -
      default List<Integer> strides()
      -
      +
    + + + +
      +
    • +

      strides

      +
      default java.util.List<java.lang.Integer> strides()
    • -
    • -
      -

      spread

      -
      default List<Integer> spread()
      -
      +
    + + + +
      +
    • +

      spread

      +
      default java.util.List<java.lang.Integer> spread()
    • -
    • -
      -

      offset

      -
      default List<Integer> offset()
      +
    + + + +
      +
    • +

      offset

      +
      default java.util.List<java.lang.Integer> offset()
      The offset is the position of a slice within the n-dimensional data array of its parent array. Use this to get the offsets of all slice dimension.
      -
      -
      Returns:
      +
      +
      Returns:
      The offset position of the slice tensor inside the n-dimensional data array of the parent array.
      -
    • -
    • -
      -

      getNDConf

      -
      NDConfiguration getNDConf()
      -
      -
      Returns:
      -
      The NDConfiguration implementation instance of this Tensor storing dimensionality information.
      +
    + + + +
      +
    • +

      getNDConf

      +
      NDConfiguration getNDConf()
      +
      +
      Returns:
      +
      The NDConfiguration implementation instance of this Tensor storing dimensionality information.
      -
    • -
    • -
      -

      shape

      -
      default int shape(int i)
      +
    + + + +
      +
    • +

      shape

      +
      default int shape(int i)
      This method receives an axis index and return the size of the targeted axis / dimension. It enables readable access to the shape of this tensor.
      -
      -
      Parameters:
      +
      +
      Parameters:
      i - The index of the shape dimension size which should be returned.
      -
      Returns:
      +
      Returns:
      The dimension size targeted by the provided dimension index.
      -
    • -
    • -
      -

      size

      -
      default int size()
      -
      -
      Returns:
      +
    + + + +
      +
    • +

      size

      +
      default int size()
      +
      +
      Returns:
      The number of elements stored inside the nd-array.
      -
    • -
    • -
      -

      getSize

      -
      default int getSize()
      -
      -
      Returns:
      +
    + + + +
      +
    • +

      getSize

      +
      default int getSize()
      +
      +
      Returns:
      The number of elements stored inside the nd-array.
      -
    • -
    • -
      -

      indexOfIndex

      -
      default int indexOfIndex(int index)
      +
    + + + +
      +
    • +

      indexOfIndex

      +
      default int indexOfIndex(int index)
      This is a convenience method identical to ndArray.getNDConf().indexOfIndex(i). Use this to calculate the true index for an element in the data array (data array index) based on a provided "user index", or "user array index". @@ -298,55 +383,120 @@

      indexOfIndex

      like for example if the nd-array is a slice of another larger nd-array, or if it is in fact a permuted version of another nd-array. The basis for performing this translation is expressed by individual implementations of - this NDConfiguration interface, which contain everything + this NDConfiguration interface, which contain everything needed to treat a given block of data as a nd-array.
      -
      -
      Parameters:
      +
      +
      Parameters:
      index - The virtual index of the tensor having this configuration.
      -
      Returns:
      +
      Returns:
      The true index which targets the actual data within the underlying data array of an nd-array / tensor.
      -
    • -
    • -
      -

      indicesOfIndex

      -
      default int[] indicesOfIndex(int index)
      +
    + + + +
      +
    • +

      indicesOfIndex

      +
      default int[] indicesOfIndex(int index)
This is a convenience method identical to ndArray.getNDConf().indicesOfIndex(i). Use this to calculate the axis indices for an element in the nd-array based on a provided "virtual index". The resulting index defines the position of the element for every axis.
      -
      -
      Parameters:
      +
      +
      Parameters:
      index - The virtual index of the tensor having this configuration.
      -
      Returns:
      +
      Returns:
      The position of the (virtually) targeted element represented as an array of axis indices.
      -
    • -
    • -
      -

      indexOfIndices

      -
      default int indexOfIndices(int[] indices)
      +
    + + + +
      +
    • +

      indexOfIndices

      +
      default int indexOfIndices(int[] indices)
This is a convenience method identical to ndArray.getNDConf().indexOfIndices(indices). Use this to calculate the true index for an element in the data array based on a provided index array.
      -
      -
      Parameters:
      +
      +
      Parameters:
      indices - The indices for every axis of a given nd-array.
      -
      Returns:
      +
      Returns:
      The true index targeting the underlying data array of a given nd-array.
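A short sketch tying the different index views together for a dense 2x3 nd-array (the factory overload and the row-major strides [3, 1] are assumptions):

    NDimensional nd = Tensor.of( Double.class, Shape.of(2, 3), 0d ); // factory overload assumed
    int flat  = nd.indexOfIndices( new int[]{ 1, 2 } ); // 1 * 3 + 2 = 5 for a dense row-major layout
    int[] idx = nd.indicesOfIndex( 5 );                 // [1, 2]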
      -
    - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/ndim/config/AbstractNDC.html b/docs/jdocs/neureka/ndim/config/AbstractNDC.html index 83b4eecc2..2a06afec1 100644 --- a/docs/jdocs/neureka/ndim/config/AbstractNDC.html +++ b/docs/jdocs/neureka/ndim/config/AbstractNDC.html @@ -1,300 +1,450 @@ - + + - -AbstractNDC (neureka 1.0.0 API) - - - - + +AbstractNDC (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class AbstractNDC

    +
    neureka.ndim.config
    +

    Class AbstractNDC

    -
    java.lang.Object -
    neureka.ndim.config.AbstractNDC
    -
    -
    -
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.ndim.config.AbstractNDC
      • +
      +
    • +
    +
    +
    -
    -
      + In these cases tensors can simply share their NDConfiguration instances for memory efficiency.
    + + +
    +
    +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        AbstractNDC

        -
        public AbstractNDC()
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            AbstractNDC

            +
            public AbstractNDC()
          -
    • +
    -
  • -
    -

    Method Details

    -
      -
    • -
      -

      _cacheArray

      -
      protected static int[] _cacheArray(int[] data)
      +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          _cacheArray

          +
          protected static int[] _cacheArray(int[] data)
          This method receives an int array and returns an int array which can either be the one provided or an array found in the global int array cache residing inside this class. Integer array based configurations are not very large, that is why their state can uniquely be encoded in long keys.
          -
          -
          Parameters:
          +
          +
          Parameters:
          data - The integer array which ought to be cached.
          -
          Returns:
          +
          Returns:
          The provided array or an already present array found in the int array cache.
          -
    • -
    • -
      -

      _cached

      -
      protected static <T extends NDConfiguration> T _cached(T ndc)
      -
      +
    + + + + + + + + + +
      +
    • +

      toString

      +
      public final java.lang.String toString()
      +
      +
      Overrides:
      +
      toString in class java.lang.Object
      -
  • -
  • -
    -

    _simpleReshape

    -
    protected static NDConfiguration _simpleReshape(int[] newForm, - NDConfiguration ndc)
    -
    + + + + + + + + +
  • +
    +
    Specified by:
    +
    newReshaped in interface NDConfiguration
    +
    Parameters:
    newForm - An array of indices which define how the axis ought to be rearranged.
    -
    Returns:
    -
    A new NDConfiguration which carries the needed information for the permuted view.
    +
    Returns:
    +
    A new NDConfiguration which carries the needed information for the permuted view.
    - -
  • -
    -

    hashCode

    -
    public int hashCode()
    -
    -
    Specified by:
    -
    hashCode in interface NDConfiguration
    -
    Overrides:
    -
    hashCode in class Object
    + + + + +
      +
    • +

      hashCode

      +
      public int hashCode()
      +
      +
      Specified by:
      +
      hashCode in interface NDConfiguration
      +
      Overrides:
      +
      hashCode in class java.lang.Object
      -
  • -
  • -
    -

    equals

    -
    public final boolean equals(Object other)
    -
    -
    Overrides:
    -
    equals in class Object
    + + + + +
      +
    • +

      equals

      +
      public final boolean equals(java.lang.Object other)
      +
      +
      Overrides:
      +
      equals in class java.lang.Object
      -
  • -
  • -
    -

    equals

    -
    public final boolean equals(NDConfiguration ndc)
    -
    -
    Specified by:
    -
    equals in interface NDConfiguration
    + + + + +
  • - - + + +
    +
diff --git a/docs/jdocs/neureka/ndim/config/NDConfiguration.IndexToIndexFunction.html b/docs/jdocs/neureka/ndim/config/NDConfiguration.IndexToIndexFunction.html
index 919202fc6..25eb40ef6 100644
Regenerated Javadoc page; title updated from "NDConfiguration.IndexToIndexFunction (neureka 1.0.0 API)" to "NDConfiguration.IndexToIndexFunction (neureka 1.0.1 API)".

Interface NDConfiguration.IndexToIndexFunction
Package neureka.ndim.config

Method Detail

map

int map(int i)
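Since the interface declares the single method int map(int i), it behaves like a functional interface in practice. A hedged sketch of how such a mapping could be obtained and applied; getIndexToIndexAccessPattern() and size() are documented on the NDConfiguration page below, the demo class itself is illustrative only:

    import neureka.ndim.config.NDConfiguration;

    class IndexMappingDemo {
        // Prints the data-array index behind every virtual index of the given configuration.
        static void printAccessPattern(NDConfiguration ndc) {
            NDConfiguration.IndexToIndexFunction toDataIndex = ndc.getIndexToIndexAccessPattern();
            for (int i = 0; i < ndc.size(); i++) {
                System.out.println(i + " -> " + toDataIndex.map(i));
            }
        }
    }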
diff --git a/docs/jdocs/neureka/ndim/config/NDConfiguration.Layout.html b/docs/jdocs/neureka/ndim/config/NDConfiguration.Layout.html
index 39f717098..2f0123653 100644
Regenerated Javadoc page; title updated from "NDConfiguration.Layout (neureka 1.0.0 API)" to "NDConfiguration.Layout (neureka 1.0.1 API)".

Enum NDConfiguration.Layout
Package neureka.ndim.config
java.lang.Object > java.lang.Enum<NDConfiguration.Layout> > neureka.ndim.config.NDConfiguration.Layout

Method Detail

values

public static NDConfiguration.Layout[] values()
Returns an array containing the constants of this enum type, in the order they are declared. This method may be used to iterate over the constants as follows:

    for (NDConfiguration.Layout c : NDConfiguration.Layout.values())
        System.out.println(c);

Returns: an array containing the constants of this enum type, in the order they are declared

valueOf

public static NDConfiguration.Layout valueOf(java.lang.String name)
Returns the enum constant of this type with the specified name. The string must match exactly an identifier used to declare an enum constant in this type. (Extraneous whitespace characters are not permitted.)
Parameters: name - the name of the enum constant to be returned.
Returns: the enum constant with the specified name
Throws: java.lang.IllegalArgumentException - if this enum type has no constant with the specified name
        java.lang.NullPointerException - if the argument is null

isCompatible

public boolean isCompatible(NDConfiguration.Layout other)

newStridesFor

public int[] newStridesFor(int[] shape)

rearrange

public int[] rearrange(int[] tln, int[] shape, int[] newForm)
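The values() / valueOf(String) pair works as on any Java enum. A small usage sketch; the constant names are not listed in this diff, so the lookup below reuses a name obtained from values() rather than assuming a particular constant, and the stride values printed by newStridesFor depend on the layout in question:

    import java.util.Arrays;
    import neureka.ndim.config.NDConfiguration;

    class LayoutDemo {
        public static void main(String[] args) {
            // Iterate over all declared layout constants, as suggested by the values() Javadoc.
            for (NDConfiguration.Layout layout : NDConfiguration.Layout.values()) {
                System.out.println(layout + " strides for shape [2, 3]: "
                        + Arrays.toString(layout.newStridesFor(new int[]{2, 3})));
            }
            // Look a constant up by name; throws IllegalArgumentException for unknown names.
            String name = NDConfiguration.Layout.values()[0].name();
            System.out.println("Parsed: " + NDConfiguration.Layout.valueOf(name));
        }
    }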
diff --git a/docs/jdocs/neureka/ndim/config/NDConfiguration.Utility.html b/docs/jdocs/neureka/ndim/config/NDConfiguration.Utility.html
index 8771f42b0..3f1236f65 100644
Regenerated Javadoc page; title updated from "NDConfiguration.Utility (neureka 1.0.0 API)" to "NDConfiguration.Utility (neureka 1.0.1 API)".

Class NDConfiguration.Utility
Package neureka.ndim.config
java.lang.Object > neureka.ndim.config.NDConfiguration.Utility
Enclosing interface: NDConfiguration

public static class NDConfiguration.Utility extends java.lang.Object
This utility class provides static methods which are helpful for nd-configuration related operations like reshaping, incrementing or decrementing index arrays...

Constructor Detail

Utility

public Utility()

Method Detail

rearrange

public static int[] rearrange(int[] array, int[] pointers)

increment

public static void increment(int[] indices, int[] shape)

decrement

public static void decrement(int[] indices, int[] shape)

sizeOfShape

public static int sizeOfShape(int[] shape)

Methods inherited from class java.lang.Object: clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
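The utilities above are the usual shape arithmetic helpers. The diff does not spell out their contracts, so the following sketch re-implements the common interpretation of sizeOfShape (product of all axis sizes) and increment (odometer-style advance of an index array) for illustration only, rather than calling the library:

    import java.util.Arrays;

    class ShapeMathSketch {
        // Product of all axis sizes, i.e. the number of elements the shape describes.
        static int sizeOfShape(int[] shape) {
            int size = 1;
            for (int dim : shape) size *= dim;
            return size;
        }

        // Odometer-style increment of an index array within the bounds of the given shape.
        static void increment(int[] indices, int[] shape) {
            for (int axis = shape.length - 1; axis >= 0; axis--) {
                indices[axis]++;
                if (indices[axis] < shape[axis]) return; // no carry needed
                indices[axis] = 0;                       // wrap and carry to the next axis
            }
        }

        public static void main(String[] args) {
            int[] shape = {2, 3};
            int[] indices = new int[shape.length];
            for (int i = 0; i < sizeOfShape(shape); i++) {
                System.out.println(Arrays.toString(indices));
                increment(indices, shape);
            }
        }
    }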
diff --git a/docs/jdocs/neureka/ndim/config/NDConfiguration.html b/docs/jdocs/neureka/ndim/config/NDConfiguration.html
index 0d55a733e..ff95f925f 100644
Regenerated Javadoc page; title updated from "NDConfiguration (neureka 1.0.0 API)" to "NDConfiguration (neureka 1.0.1 API)".

Interface NDConfiguration
Package neureka.ndim.config

Nested Class Summary
static interface NDConfiguration.IndexToIndexFunction
    Implementations of this are produced and returned by the getIndexToIndexAccessPattern() method.

Method Summary (descriptions are repeated in the Method Detail below)
default int[] asInlineArray()
boolean equals(NDConfiguration ndc)
default NDConfiguration.IndexToIndexFunction getIndexToIndexAccessPattern()
default NDConfiguration.Layout getLayout()
default java.util.List<NDTrait> getTraits()
default boolean has(NDTrait trait)
int hashCode()
int indexOfIndex(int index)
int indexOfIndices(int[] indices)
int[] indicesMap()
int indicesMap(int i)
int[] indicesOfIndex(int index)
default boolean isCompact()
default boolean isSimple()
default boolean isVirtual()
NDConfiguration newReshaped(int[] newForm)
static NDConfiguration none()
static NDConfiguration of(int[] shape, int[] strides, int[] indicesMap, int[] spread, int[] offset)
int[] offset()
int offset(int i)
int rank()
int[] shape()
int shape(int i)
default int size()
int[] spread()
int spread(int i)
int[] strides()
int strides(int i)

Method Detail

of

static NDConfiguration of(int[] shape, int[] strides, int[] indicesMap, int[] spread, int[] offset)

getLayout

default NDConfiguration.Layout getLayout()
The layout of most tensors is either row major or column major. Row major means that row elements are right next to one another in the underlying data array of a tensor. A tensor can also be symmetric, meaning it supports both column major and row major (scalar tensors have this property). Other than that there are also tensors which are unspecific, meaning they are not row major or column major. This is the case for tensors which are slices of other tensors or tensors which have been permuted.
Returns: The layout of the underlying data array of a tensor.
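In concrete numbers: for a tensor of shape [2, 3], a row major layout stores the elements of a row next to one another, while a column major layout stores the elements of a column next to one another. The stride values below are the standard textbook ones for this shape and are shown only to illustrate the distinction; they are not taken from this diff:

    class LayoutStridesDemo {
        public static void main(String[] args) {
            // Shape [2, 3]: 2 rows, 3 columns, 6 elements in the underlying data array.
            int[] rowMajorStrides = {3, 1}; // element (r, c) sits at data[r * 3 + c * 1]
            int[] colMajorStrides = {1, 2}; // element (r, c) sits at data[r * 1 + c * 2]
            int r = 0, c = 2;
            System.out.println(r * rowMajorStrides[0] + c * rowMajorStrides[1]); // 2
            System.out.println(r * colMajorStrides[0] + c * colMajorStrides[1]); // 4
        }
    }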
getTraits

default java.util.List<NDTrait> getTraits()

has

default boolean has(NDTrait trait)

rank

int rank()
This method returns the number of axis of a nd-array / Tensor which is equal to the length of the shape of an nd-array / Tensor.
Returns: The number of axis of an nd-array.

size

default int size()

shape

int[] shape()
This method returns an array of axis sizes.
Returns: An array of axis sizes.

shape

int shape(int i)
This method receives an axis index and returns the size of the axis. It enables readable access to the shape of this configuration.
Parameters: i - The index of the axis whose size ought to be returned.
Returns: The axis size targeted by the provided index.

indicesMap

int[] indicesMap()
If one wants to for example access the fourth last item of all items within a tensor based on a scalar index x, then the indicesMap() is needed as a basis for translating said scalar index x to an array of indices for every axis of the tensor represented by this NDConfiguration.
Returns: An array of values which are used to map an index to an indices array.

indicesMap

int indicesMap(int i)
This method receives an axis index and returns the indices mapping value of said axis to enable readable access to the indices map of this configuration. If one wants to for example access the fourth last item of all items within a tensor based on a scalar index x, then the indicesMap() is needed as a basis for translating said scalar index x to an array of indices for every axis of the tensor represented by this NDConfiguration.
Parameters: i - The index of the axis whose indices map value ought to be returned.
Returns: The indices map value targeted by the provided index.

strides

int[] strides()
The array returned by this method is used to translate an array of axis indices to a single data array index.
Returns: An array of values used to translate the axes indices to a data array index.

strides

int strides(int i)
This method receives an axis index and returns the translation value for the targeted axis. It enables readable and fast access to the translation of this configuration.
Parameters: i - The index of the axis whose translation ought to be returned.
Returns: The axis translation targeted by the provided index.

spread

int[] spread()
The spread is the access step size of a slice within the n-dimensional data array of its parent tensor.
Returns: An array of index step sizes for each tensor dimension / axis.

spread

int spread(int i)
The spread is the access step size of a slice within the n-dimensional data array of its parent tensor. Use this to look up the spread in a particular dimension / axis.
Parameters: i - The dimension / axis index of the dimension / axis whose spread should be returned.
Returns: The spread of the targeted dimension.

offset

int[] offset()
The offset is the position of a slice within the n-dimensional data array of its parent tensor. Use this to get the offsets of all slice dimensions.
Returns: The offset position of the slice tensor inside the n-dimensional data array of the parent tensor.

offset

int offset(int i)
The offset is the position of a slice within the n-dimensional data array of its parent tensor. Use this to look up the offset in a particular dimension / axis.
Parameters: i - The dimension / axis index of the dimension / axis whose offset should be returned.
Returns: The offset of the targeted dimension.

indexOfIndex

int indexOfIndex(int index)
Use this to calculate the true index for an element in the data array (data array index) based on a provided "virtual index", or "value array index". This virtual index may be different from the true index depending on the type of nd-array, ... This virtual index ought to be turned into an index array which defines the position for every axis. Then this indices array will be converted into the final and true index targeting an underlying item. The information needed for performing this translation is expressed by individual implementations of this NDConfiguration interface, which contain everything needed to treat a given block of data as a nd-array!
Parameters: index - The virtual index of the tensor having this configuration.
Returns: The true index which targets the actual data within the underlying data array of an nd-array / tensor.

indicesOfIndex

int[] indicesOfIndex(int index)
The following method calculates the axis indices for an element in the nd-array based on a provided "virtual index". The resulting index defines the position of the element for every axis.
Parameters: index - The virtual index of the tensor having this configuration.
Returns: The position of the (virtually) targeted element represented as an array of axis indices.

indexOfIndices

int indexOfIndices(int[] indices)
The following method calculates the true index for an element in the data array based on a provided index array.
Parameters: indices - The indices for every axis of a given nd-array.
Returns: The true index targeting the underlying data array of a given nd-array.
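For a simple row major configuration (no offset, unit spread, indices map equal to the strides) the three translation methods above reduce to plain stride arithmetic. The sketch below is a hedged illustration of that general scheme, not a transcript of any particular implementation:

    import java.util.Arrays;

    class IndexTranslationSketch {
        public static void main(String[] args) {
            int[] shape   = {2, 3, 4};
            int[] strides = {12, 4, 1};      // row major strides for this shape

            int virtualIndex = 17;

            // indicesOfIndex: virtual index -> axis indices
            int[] indices = new int[shape.length];
            int remainder = virtualIndex;
            for (int axis = 0; axis < shape.length; axis++) {
                indices[axis] = remainder / strides[axis];
                remainder     = remainder % strides[axis];
            }
            System.out.println(Arrays.toString(indices)); // [1, 1, 1]

            // indexOfIndices: axis indices -> true data array index
            int trueIndex = 0;
            for (int axis = 0; axis < shape.length; axis++) {
                trueIndex += indices[axis] * strides[axis];
            }
            System.out.println(trueIndex); // 17 again, since this configuration is "simple"
        }
    }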
asInlineArray

default int[] asInlineArray()
This method returns an array of flattened arrays which define this nd-configuration in a compact manner. The array consists of the following arrays joined in the following order: [ shape | translation | indicesMap | offsets | spreads ]
Returns: An array of flattened arrays which define this nd-configuration in a compact manner.
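Given the documented ordering [ shape | translation | indicesMap | offsets | spreads ], the inline array can be split back into its sections. The sketch below assumes each of the five sections has rank() entries and sits back to back in that order; that layout is an assumption drawn from the wording above, not a verified contract:

    import java.util.Arrays;

    class InlineArraySketch {
        // Splits an inline configuration array back into its five rank-sized sections.
        static int[][] split(int[] inline, int rank) {
            int[][] sections = new int[5][];
            for (int s = 0; s < 5; s++) {
                sections[s] = Arrays.copyOfRange(inline, s * rank, (s + 1) * rank);
            }
            return sections; // [shape, translation, indicesMap, offsets, spreads]
        }
    }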
hashCode

int hashCode()
Overrides: hashCode in class java.lang.Object

equals

boolean equals(NDConfiguration ndc)

newReshaped

NDConfiguration newReshaped(int[] newForm)
This method enables reshaping for NDConfiguration implementation instances. Because NDConfigurations are in essence things which define the access relationship from shape indices to the actual underlying data, the creation of permuted NDConfigurations is up to a specific implementation.
Parameters: newForm - An array of indices which define how the axis ought to be rearranged.
Returns: A new NDConfiguration which carries the needed information for the permuted view.
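A hedged usage sketch of newReshaped: the argument names the desired order of the existing axes and the returned configuration provides the permuted view. The axis-order interpretation follows the Javadoc wording above; the helper itself is illustrative only:

    import neureka.ndim.config.NDConfiguration;

    class ReshapeSketch {
        // Produces a configuration that views the axes of 'ndc' in reversed order.
        static NDConfiguration transposedView(NDConfiguration ndc) {
            int rank = ndc.rank();
            int[] newForm = new int[rank];
            for (int i = 0; i < rank; i++) {
                newForm[i] = rank - 1 - i;   // e.g. [2, 1, 0] for a rank-3 configuration
            }
            return ndc.newReshaped(newForm);
        }
    }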
isSimple

default boolean isSimple()
The boolean returned by this method simply reports if this configuration is the most basic form of configuration possible for the given shape represented by this instance. This type of configuration is typical for freshly created tensors which are neither slices nor permuted variants of an original tensor... Therefore, such "simple tensors" do not need a fancy NDIterator in order to perform operations on them. One can simply iterate over their underlying data array. (This does not mean that the tensor owning this NDConfiguration is not a slice!)
Returns: The truth value determining if this configuration is not modeling more complex indices like permuted views or slices...

isCompact

default boolean isCompact()
NDConfiguration instances where this flag is true will most likely not be slices because they have no offset (all 0) and a compact spread / step array (all 1).
Returns: The truth value determining if this configuration has no offset and no spread/steps larger than 1.

isVirtual

default boolean isVirtual()
Returns: The truth value determining if this NDConfiguration represents virtual tensors (see Tensor.isVirtual()).

getIndexToIndexAccessPattern

default NDConfiguration.IndexToIndexFunction getIndexToIndexAccessPattern()
Returns: A function which can map tensor indices to the indices of its data array.
diff --git a/docs/jdocs/neureka/ndim/config/NDTrait.html b/docs/jdocs/neureka/ndim/config/NDTrait.html
index bddea8623..a524468bf 100644
Regenerated Javadoc page; title updated from "NDTrait (neureka 1.0.0 API)" to "NDTrait (neureka 1.0.1 API)".

Enum NDTrait
Package neureka.ndim.config
java.lang.Object > java.lang.Enum<NDTrait> > neureka.ndim.config.NDTrait

Methods inherited from class java.lang.Enum: clone, compareTo, equals, finalize, getDeclaringClass, hashCode, name, ordinal, toString, valueOf
Methods inherited from class java.lang.Object: getClass, notify, notifyAll, wait, wait, wait

Enum Constant Detail

COMPACT
public static final NDTrait COMPACT

SIMPLE
public static final NDTrait SIMPLE

ROW_MAJOR
public static final NDTrait ROW_MAJOR

COL_MAJOR
public static final NDTrait COL_MAJOR

CONTINUOUS_MATRIX
public static final NDTrait CONTINUOUS_MATRIX

OFFSET_MATRIX
public static final NDTrait OFFSET_MATRIX

Method Detail

values

public static NDTrait[] values()
Returns an array containing the constants of this enum type, in the order they are declared. This method may be used to iterate over the constants as follows:

    for (NDTrait c : NDTrait.values())
        System.out.println(c);

Returns: an array containing the constants of this enum type, in the order they are declared

valueOf

public static NDTrait valueOf(java.lang.String name)
Returns the enum constant of this type with the specified name. The string must match exactly an identifier used to declare an enum constant in this type. (Extraneous whitespace characters are not permitted.)
Parameters: name - the name of the enum constant to be returned.
Returns: the enum constant with the specified name
Throws: java.lang.IllegalArgumentException - if this enum type has no constant with the specified name
        java.lang.NullPointerException - if the argument is null
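NDTrait constants are meant to be queried through NDConfiguration.has(NDTrait), which is documented on the NDConfiguration page above. A short hedged sketch of that combination; the helper class is illustrative only:

    import neureka.ndim.config.NDConfiguration;
    import neureka.ndim.config.NDTrait;

    class TraitCheckSketch {
        // Lists the traits declared above that the given configuration reports for itself.
        static String describe(NDConfiguration ndc) {
            StringBuilder sb = new StringBuilder();
            for (NDTrait trait : NDTrait.values()) {
                if (ndc.has(trait)) sb.append(trait.name()).append(' ');
            }
            return sb.toString().trim();
        }
    }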
diff --git a/docs/jdocs/neureka/ndim/config/package-frame.html b/docs/jdocs/neureka/ndim/config/package-frame.html
new file mode 100644
index 000000000..4662390f7
New frame page listing the contents of package neureka.ndim.config (neureka 1.0.1 API).

diff --git a/docs/jdocs/neureka/ndim/config/package-summary.html b/docs/jdocs/neureka/ndim/config/package-summary.html
index 18e35c0fd..a521bcf76 100644
Regenerated summary page for package neureka.ndim.config; title updated from the 1.0.0 API to the 1.0.1 API.

diff --git a/docs/jdocs/neureka/ndim/config/package-tree.html b/docs/jdocs/neureka/ndim/config/package-tree.html
index fd4bb5447..8fea3f3f3 100644
Regenerated "Hierarchy For Package neureka.ndim.config" page (Class Hierarchy, Interface Hierarchy, and the enum section, now titled "Enum Hierarchy" instead of "Enum Class Hierarchy").

diff --git a/docs/jdocs/neureka/ndim/config/types/D1C.html b/docs/jdocs/neureka/ndim/config/types/D1C.html
index 11ea3859f..4f573b500 100644
Regenerated Javadoc page; title updated from "D1C (neureka 1.0.0 API)" to "D1C (neureka 1.0.1 API)".
Class D1C
Package neureka.ndim.config.types
java.lang.Object > neureka.ndim.config.AbstractNDC > neureka.ndim.config.types.D1C

Constructor Detail

D1C
public D1C()

Method Detail

indexOfIndices
public abstract int indexOfIndices(int d1)

diff --git a/docs/jdocs/neureka/ndim/config/types/D2C.html b/docs/jdocs/neureka/ndim/config/types/D2C.html
index 55113374d..77b5f9271 100644
Regenerated Javadoc page; title updated from "D2C (neureka 1.0.0 API)" to "D2C (neureka 1.0.1 API)".
Class D2C
Package neureka.ndim.config.types
java.lang.Object > neureka.ndim.config.AbstractNDC > neureka.ndim.config.types.D2C

Constructor Detail

D2C
public D2C()

Method Detail

indexOfIndices
public abstract int indexOfIndices(int d1, int d2)
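D1C, D2C and D3C each only add a fixed-arity indexOfIndices hook for 1, 2 and 3 dimensions respectively. A hedged sketch of what a 2D implementation of that hook could look like; the field names and the row major stride handling are assumptions for illustration, and the class is left abstract because the remaining NDConfiguration methods are omitted here:

    import neureka.ndim.config.types.D2C;

    // Illustrative only: a 2D row major access pattern expressed through the D2C hook.
    abstract class RowMajor2D extends D2C {
        private final int stride1;
        private final int stride2;

        RowMajor2D(int cols) {
            this.stride1 = cols; // row major: stepping one row skips 'cols' elements
            this.stride2 = 1;
        }

        @Override
        public int indexOfIndices(int d1, int d2) {
            return d1 * stride1 + d2 * stride2;
        }
    }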
diff --git a/docs/jdocs/neureka/ndim/config/types/D3C.html b/docs/jdocs/neureka/ndim/config/types/D3C.html
index bdaa592e7..d287f8349 100644
Regenerated Javadoc page; title updated from "D3C (neureka 1.0.0 API)" to "D3C (neureka 1.0.1 API)".
Class D3C
Package neureka.ndim.config.types
java.lang.Object > neureka.ndim.config.AbstractNDC > neureka.ndim.config.types.D3C

Constructor Detail

D3C
public D3C()

Method Detail

indexOfIndices
public abstract int indexOfIndices(int d1, int d2, int d3)

diff --git a/docs/jdocs/neureka/ndim/config/types/package-frame.html b/docs/jdocs/neureka/ndim/config/types/package-frame.html
new file mode 100644
index 000000000..dd973bd4d
New frame page listing the classes of package neureka.ndim.config.types (neureka 1.0.1 API).

diff --git a/docs/jdocs/neureka/ndim/config/types/package-summary.html b/docs/jdocs/neureka/ndim/config/types/package-summary.html
index a0715d6b2..ddd573ddf 100644
Regenerated summary page for package neureka.ndim.config.types; title updated from the 1.0.0 API to the 1.0.1 API.

diff --git a/docs/jdocs/neureka/ndim/config/types/package-tree.html b/docs/jdocs/neureka/ndim/config/types/package-tree.html
index 091d1d4f2..67a02ac5a 100644
Regenerated "Hierarchy For Package neureka.ndim.config.types" page. Class Hierarchy:
java.lang.Object
  neureka.ndim.config.AbstractNDC (implements neureka.ndim.config.NDConfiguration)
    neureka.ndim.config.types.D1C
    neureka.ndim.config.types.D2C
    neureka.ndim.config.types.D3C

diff --git a/docs/jdocs/neureka/ndim/config/types/permuted/Permuted1DConfiguration.html b/docs/jdocs/neureka/ndim/config/types/permuted/Permuted1DConfiguration.html
index be9f4596a..66a85693c 100644
Regenerated Javadoc page; title updated from "Permuted1DConfiguration (neureka 1.0.0 API)" to "Permuted1DConfiguration (neureka 1.0.1 API)".
Class Permuted1DConfiguration
Package neureka.ndim.config.types.permuted
java.lang.Object > neureka.ndim.config.AbstractNDC > neureka.ndim.config.types.D1C > neureka.ndim.config.types.permuted.Permuted1DConfiguration

public class Permuted1DConfiguration extends D1C

Method Detail

construct
public static Permuted1DConfiguration construct(int[] shape, int[] strides, int[] indicesMap)
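A hedged sketch of how the construct factory might be called; the argument values are made up for illustration, and whether construct hands back a cached instance is an implementation detail this diff does not show:

    import java.util.Arrays;
    import neureka.ndim.config.types.permuted.Permuted1DConfiguration;

    class PermutedConfigDemo {
        public static void main(String[] args) {
            // shape, strides and indices map all have rank 1 here (illustrative values only).
            Permuted1DConfiguration cfg =
                    Permuted1DConfiguration.construct(new int[]{8}, new int[]{1}, new int[]{1});
            System.out.println(cfg.rank() + " axis, shape " + Arrays.toString(cfg.shape()));
        }
    }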
          +                                                int[] strides,
          +                                                int[] indicesMap)
        • -
        • -
          -

          rank

          -
          public final int rank()
          +
        + + + +
          +
        • +

          rank

          +
          public final int rank()
          This method returns the number of axis of - a nd-array / Tensor which is equal to the - length of the shape of an nd-array / Tensor.
          -
          -
          Returns:
          + a nd-array / Tensor which is equal to the + length of the shape of an nd-array / Tensor.
  • +
    +
    Returns:
    The number of axis of an nd-array.
    - -
  • -
    -

    shape

    -
    public final int[] shape()
    + + + + +
      +
    • +

      shape

      +
      public final int[] shape()
      This method returns an array of axis sizes.
      -
      -
      Returns:
      +
      +
      Returns:
      An array of axis sizes.
      -
  • -
  • -
    -

    shape

    -
    public final int shape(int i)
    + + + + +
      +
    • +

      shape

      +
      public final int shape(int i)
      This method receives an axis index and return the size of the axis. It enables readable access to the shape of this configuration.
      -
      -
      Parameters:
      +
      +
      Parameters:
      i - The index of the axis whose size ought to be returned.
      -
      Returns:
      +
      Returns:
      The axis size targeted by the provided index.
      -
  • -
  • -
    -

    indicesMap

    -
    public final int[] indicesMap()
    + + + + +
      +
    • +

      indicesMap

      +
      public final int[] indicesMap()
      If one wants to for example access the fourth last item of all items - within a tensor based on a scalar index x then the NDConfiguration.indicesMap() + within a tensor based on a scalar index x then the NDConfiguration.indicesMap() is needed as a basis for translating said scalar index x to an array of indices - for every axis of the tensor represented by this NDConfiguration.
      -
      -
      Returns:
      + for every axis of the tensor represented by this NDConfiguration.
  • +
    +
    Returns:
    An array of values which are used to map an index to an indices array.
    - -
  • -
    -

    indicesMap

    -
    public final int indicesMap(int i)
    + + + + +
      +
    • +

      indicesMap

      +
      public final int indicesMap(int i)
      This method receives an axis index and return the indices mapping value of said axis to enable readable access to the indices map of this configuration. If one wants to for example access the fourth last item of all items - within a tensor based on a scalar index x then the NDConfiguration.indicesMap() + within a tensor based on a scalar index x then the NDConfiguration.indicesMap() is needed as a basis for translating said scalar index x to an array of indices - for every axis of the tensor represented by this NDConfiguration.
      -
      -
      Parameters:
      + for every axis of the tensor represented by this NDConfiguration.
  • +
    +
    Parameters:
    i - The index of the axis whose indices map value ought to be returned.
    -
    Returns:
    +
    Returns:
    The indices map value targeted by the provided index.
    - -
  • -
    -

    strides

    -
    public final int[] strides()
    + + + + +
  • +
    +
    Returns:
    An array of values used to translate the axes indices to a data array index.
    - -
  • -
    -

    strides

    -
    public final int strides(int i)
    + + + + +
      +
    • +

      strides

      +
      public final int strides(int i)
      This method receives an axis index and returns the translation value for the targeted axis. It enables readable and fast access to the translation of this configuration.
      -
      -
      Parameters:
      +
      +
      Parameters:
      i - The index of the axis whose translation ought to be returned.
      -
      Returns:
      +
      Returns:
      The axis translation targeted by the provided index.
      -
  • -
  • -
    -

    spread

    -
    public final int[] spread()
    + + + + +
      +
    • +

      spread

      +
      public final int[] spread()
      The spread is the access step size of a slice within the n-dimensional data array of its parent tensor.
      -
      -
      Returns:
      +
      +
      Returns:
      An array of index step sizes for each tensor dimension / axis.
      -
  • -
  • -
    -

    spread

    -
    public final int spread(int i)
    + + + + +
      +
    • +

      spread

      +
      public final int spread(int i)
      The spread is the access step size of a slice within the n-dimensional data array of its parent tensor. Use this to look up the spread in a particular dimension / axis.
      -
      -
      Parameters:
      +
      +
      Parameters:
      i - The dimension / axis index of the dimension / axis whose spread should be returned.
      -
      Returns:
      +
      Returns:
      The spread of the targeted dimension.
      -
  • -
  • -
    -

    offset

    -
    public final int[] offset()
    + + + + +
      +
    • +

      offset

      +
      public final int[] offset()
      The offset is the position of a slice within the n-dimensional data array of its parent tensor. Use this to get the offsets of all slice dimension.
      -
      -
      Returns:
      +
      +
      Returns:
      The offset position of the slice tensor inside the n-dimensional data array of the parent tensor.

    offset

    public final int offset(int i)
      The offset is the position of a slice within the n-dimensional data array of its parent tensor. Use this to look up the offset in a particular dimension / axis.
      Parameters:
      i - The dimension / axis index of the dimension / axis whose offset should be returned.
      Returns:
      The offset of the targeted dimension.

    indexOfIndex

    public final int indexOfIndex(int index)
      Use this to calculate the true index for an element in the data array (data array index) based on a provided "virtual index", or "value array index". This virtual index may be different from the true index depending on the type of nd-array. The virtual index is first turned into an index array which defines the position for every axis; this indices array is then converted into the final and true index targeting an underlying item. The information needed for performing this translation is expressed by individual implementations of this NDConfiguration interface, which contain everything needed to treat a given block of data as an nd-array.
      Parameters:
      index - The virtual index of the tensor having this configuration.
      Returns:
      The true index which targets the actual data within the underlying data array of an nd-array / tensor.

    indicesOfIndex

    public final int[] indicesOfIndex(int index)
      This method calculates the axis indices for an element in the nd-array based on a provided "virtual index". The resulting indices define the position of the element for every axis.
      Parameters:
      index - The virtual index of the tensor having this configuration.
      Returns:
      The position of the (virtually) targeted element represented as an array of axis indices.

    indexOfIndices

    public final int indexOfIndices(int[] indices)
      This method calculates the true index for an element in the data array based on a provided index array.
      Parameters:
      indices - The indices for every axis of a given nd-array.
      Returns:
      The true index targeting the underlying data array of a given nd-array.

    indexOfIndices

    public final int indexOfIndices(int d1)
      Specified by:
      indexOfIndices in class D1C
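    The three translation methods above compose: indicesOfIndex(...) turns a flat "virtual" index into per-axis indices, and indexOfIndices(...) turns those indices back into a true data-array index, which is exactly what indexOfIndex(...) does in one step according to its description. A minimal sketch of that round trip follows, assuming these methods are declared on the neureka.ndim.config.NDConfiguration interface as the documentation suggests; the helper class, variable names and the idea of printing the results are illustrative, not taken from the source.

    // Sketch: round-tripping a virtual index through an NDConfiguration.
    // `cfg` may be any implementation, e.g. one of the Permuted*Configuration
    // classes documented in this diff. Assumes the methods live on the interface.
    import neureka.ndim.config.NDConfiguration;

    public class IndexTranslationSketch {
        static void describe(NDConfiguration cfg, int virtualIndex) {
            int[] axisIndices = cfg.indicesOfIndex(virtualIndex); // per-axis position
            int trueIndex     = cfg.indexOfIndices(axisIndices);  // position in the backing data array
            System.out.println("virtual " + virtualIndex
                    + " -> axes " + java.util.Arrays.toString(axisIndices)
                    + " -> data index " + trueIndex
                    + " (indexOfIndex gives " + cfg.indexOfIndex(virtualIndex) + ")");
        }
    }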
diff --git a/docs/jdocs/neureka/ndim/config/types/permuted/Permuted2DConfiguration.html b/docs/jdocs/neureka/ndim/config/types/permuted/Permuted2DConfiguration.html
index dd1d7be61..7cbaebd6b 100644
--- a/docs/jdocs/neureka/ndim/config/types/permuted/Permuted2DConfiguration.html
+++ b/docs/jdocs/neureka/ndim/config/types/permuted/Permuted2DConfiguration.html
@@ -1,481 +1,614 @@
-Permuted2DConfiguration (neureka 1.0.0 API)
+Permuted2DConfiguration (neureka 1.0.1 API)
Class Permuted2DConfiguration

neureka.ndim.config.types.permuted

java.lang.Object
  neureka.ndim.config.AbstractNDC
    neureka.ndim.config.types.D2C
      neureka.ndim.config.types.permuted.Permuted2DConfiguration

public class Permuted2DConfiguration
extends D2C

Nested classes/interfaces inherited from interface neureka.ndim.config.NDConfiguration:
NDConfiguration.IndexToIndexFunction, NDConfiguration.Layout, NDConfiguration.Utility

Field Summary
  protected final int _shape1 - The shape of the NDArray.
  protected final int _shape2

Constructor Summary
  protected Permuted2DConfiguration(int[] shape, int[] strides, int[] indicesMap)

Method Summary
  static Permuted2DConfiguration construct(int[] shape, int[] strides, int[] indicesMap)
  final int rank() - The number of axes, equal to the length of the shape.
  final int[] shape(), final int shape(int i) - The axis sizes.
  final int[] indicesMap(), final int indicesMap(int i) - Values used to map a scalar index to an array of indices for every axis.
  final int[] strides(), final int strides(int i) - Values used to translate an array of axis indices to a single data array index.
  final int[] spread(), final int spread(int i) - The access step size of a slice within the n-dimensional data array of its parent tensor.
  final int[] offset(), final int offset(int i) - The position of a slice within the n-dimensional data array of its parent tensor.
  final int indexOfIndex(int index) - Calculates the true data array index for a provided "virtual index".
  final int[] indicesOfIndex(int index) - Calculates the axis indices for a provided "virtual index".
  final int indexOfIndices(int[] indices), final int indexOfIndices(int d1, int d2) - Calculate the true data array index from axis indices; the two-argument overload is specified by class D2C.

Methods inherited from class neureka.ndim.config.AbstractNDC:
_cacheArray, _cached, _simpleReshape, equals, equals, hashCode, newReshaped, toString

Methods inherited from class java.lang.Object:
clone, finalize, getClass, notify, notifyAll, wait, wait, wait

Methods inherited from interface neureka.ndim.config.NDConfiguration:
asInlineArray, getIndexToIndexAccessPattern, getLayout, getTraits, has, isCompact, isSimple, isVirtual, size

The Field Detail, Constructor Detail and Method Detail sections repeat, for each of these members, the same NDConfiguration descriptions given above.
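Since the protected constructor is not directly usable, instances of this class come from the static construct factory. A short usage sketch follows; only the construct(...) signature and the query methods are taken from the documentation above, while the concrete shape, strides and indicesMap values are made-up illustrations.

    // Hedged sketch: building a Permuted2DConfiguration and querying it.
    import neureka.ndim.config.types.permuted.Permuted2DConfiguration;

    public class Permuted2DConfigurationSketch {
        public static void main(String[] args) {
            int[] shape      = {3, 4};  // axis sizes (illustrative)
            int[] strides    = {1, 3};  // axis-to-data-index translation values (illustrative)
            int[] indicesMap = {4, 1};  // scalar-index-to-axis-indices mapping values (illustrative)

            Permuted2DConfiguration cfg = Permuted2DConfiguration.construct(shape, strides, indicesMap);

            System.out.println(cfg.rank());                                // number of axes, i.e. shape.length, so 2 here
            System.out.println(java.util.Arrays.toString(cfg.shape()));    // the axis sizes
            System.out.println(java.util.Arrays.toString(cfg.strides()));  // the translation values
            System.out.println(cfg.indexOfIndices(2, 1));                  // data index for axis position (2, 1)
        }
    }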
diff --git a/docs/jdocs/neureka/ndim/config/types/permuted/Permuted3DConfiguration.html b/docs/jdocs/neureka/ndim/config/types/permuted/Permuted3DConfiguration.html
index 0d21edf4b..2ee528f41 100644
--- a/docs/jdocs/neureka/ndim/config/types/permuted/Permuted3DConfiguration.html
+++ b/docs/jdocs/neureka/ndim/config/types/permuted/Permuted3DConfiguration.html
@@ -1,491 +1,628 @@
-Permuted3DConfiguration (neureka 1.0.0 API)
+Permuted3DConfiguration (neureka 1.0.1 API)
Class Permuted3DConfiguration

neureka.ndim.config.types.permuted

java.lang.Object
  neureka.ndim.config.AbstractNDC
    neureka.ndim.config.types.D3C
      neureka.ndim.config.types.permuted.Permuted3DConfiguration

public class Permuted3DConfiguration
extends D3C

Field Summary
  protected final int _shape1 - The shape of the NDArray.
  protected final int _shape2
  protected final int _shape3

Constructor Summary
  protected Permuted3DConfiguration(int[] shape, int[] strides, int[] indicesMap)

Method Summary
  static Permuted3DConfiguration construct(int[] shape, int[] strides, int[] indicesMap)
  The remaining members match Permuted2DConfiguration above: rank(), shape(), shape(int i), indicesMap(), indicesMap(int i), strides(), strides(int i), spread(), spread(int i), offset(), offset(int i), indexOfIndex(int index), indicesOfIndex(int index) and indexOfIndices(int[] indices), plus the axis-wise overload indexOfIndices(int d1, int d2, int d3) specified by class D3C. The inherited-member lists and the detail descriptions are identical to those of Permuted2DConfiguration.
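The offset, spread and strides arrays described above jointly locate a slice element inside the parent tensor's flat data array. One plausible reading of how they combine is sketched below; this only illustrates the documented meaning of the three arrays and is an assumption, not neureka's actual implementation.

    // Hypothetical helper: how offset, spread and strides could combine axis
    // indices into a flat data-array index. Assumption based on the descriptions
    // above (offset = slice position, spread = slice step size, strides =
    // axis-to-flat-index translation), not taken from neureka source.
    final class FlatIndexSketch {
        static int flatIndex(int[] indices, int[] offset, int[] spread, int[] strides) {
            int flat = 0;
            for (int axis = 0; axis < indices.length; axis++) {
                // move to the slice start, step within the slice, then translate
                // the resulting axis position into the flat data array
                flat += (offset[axis] + indices[axis] * spread[axis]) * strides[axis];
            }
            return flat;
        }
    }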
diff --git a/docs/jdocs/neureka/ndim/config/types/permuted/PermutedNDConfiguration.html b/docs/jdocs/neureka/ndim/config/types/permuted/PermutedNDConfiguration.html
index 6b08f445a..01950fa15 100644
--- a/docs/jdocs/neureka/ndim/config/types/permuted/PermutedNDConfiguration.html
+++ b/docs/jdocs/neureka/ndim/config/types/permuted/PermutedNDConfiguration.html
@@ -1,430 +1,548 @@
-PermutedNDConfiguration (neureka 1.0.0 API)
+PermutedNDConfiguration (neureka 1.0.1 API)
Class PermutedNDConfiguration

neureka.ndim.config.types.permuted

java.lang.Object
  neureka.ndim.config.AbstractNDC
    neureka.ndim.config.types.permuted.PermutedNDConfiguration

public class PermutedNDConfiguration
extends AbstractNDC

Constructor Detail
  protected PermutedNDConfiguration(int[] shape, int[] strides, int[] indicesMap)

Method Detail
  static PermutedNDConfiguration construct(int[] shape, int[] strides, int[] indicesMap)
  The remaining members match the fixed-rank permuted configurations above: rank(), shape(), shape(int i), indicesMap(), indicesMap(int i), strides(), strides(int i), spread(), spread(int i), offset(), offset(int i), indexOfIndex(int index), indicesOfIndex(int index) and indexOfIndices(int[] indices), with the same descriptions.
diff --git a/docs/jdocs/neureka/ndim/config/types/permuted/package-frame.html b/docs/jdocs/neureka/ndim/config/types/permuted/package-frame.html
new file mode 100644
index 000000000..bdbdea368
--- /dev/null
+++ b/docs/jdocs/neureka/ndim/config/types/permuted/package-frame.html
@@ -0,0 +1,22 @@
+neureka.ndim.config.types.permuted (neureka 1.0.1 API)
+
+neureka.ndim.config.types.permuted
diff --git a/docs/jdocs/neureka/ndim/config/types/permuted/package-summary.html b/docs/jdocs/neureka/ndim/config/types/permuted/package-summary.html
index d19ba1498..930ea9d39 100644
--- a/docs/jdocs/neureka/ndim/config/types/permuted/package-summary.html
+++ b/docs/jdocs/neureka/ndim/config/types/permuted/package-summary.html
@@ -1,105 +1,151 @@
-neureka.ndim.config.types.permuted (neureka 1.0.0 API)
+neureka.ndim.config.types.permuted (neureka 1.0.1 API)
Package neureka.ndim.config.types.permuted

package neureka.ndim.config.types.permuted
diff --git a/docs/jdocs/neureka/ndim/config/types/permuted/package-tree.html b/docs/jdocs/neureka/ndim/config/types/permuted/package-tree.html
index 2008bdf92..81dc773bd 100644
--- a/docs/jdocs/neureka/ndim/config/types/permuted/package-tree.html
+++ b/docs/jdocs/neureka/ndim/config/types/permuted/package-tree.html
@@ -1,90 +1,153 @@
-neureka.ndim.config.types.permuted Class Hierarchy (neureka 1.0.0 API)
+neureka.ndim.config.types.permuted Class Hierarchy (neureka 1.0.1 API)
Hierarchy For Package neureka.ndim.config.types.permuted

Package Hierarchies:

Class Hierarchy
diff --git a/docs/jdocs/neureka/ndim/config/types/simple/Simple0DConfiguration.html b/docs/jdocs/neureka/ndim/config/types/simple/Simple0DConfiguration.html
index 550111a22..dba3af994 100644
--- a/docs/jdocs/neureka/ndim/config/types/simple/Simple0DConfiguration.html
+++ b/docs/jdocs/neureka/ndim/config/types/simple/Simple0DConfiguration.html
@@ -1,393 +1,504 @@
-Simple0DConfiguration (neureka 1.0.0 API)
+Simple0DConfiguration (neureka 1.0.1 API)
    - -
    -
    - -

    Class Simple0DConfiguration

    +
    neureka.ndim.config.types.simple
    +

    Class Simple0DConfiguration

    -
    java.lang.Object -
    neureka.ndim.config.AbstractNDC -
    neureka.ndim.config.types.simple.Simple0DConfiguration
    -
    -
    -
    -
    +
    + +
    +
    -
    -
      +
      +
      public final class Simple0DConfiguration
      +extends AbstractNDC
      + +
    +
    +
    + - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Method Details

      -
        -
      • -
        -

        construct

        -
        public static Simple0DConfiguration construct()
        -
        +
          +
        • + + +

          Method Detail

          + + + + + + + +
            +
          • +

            rank

            +
            public int rank()
            This method returns the number of axis of - a nd-array / Tensor which is equal to the - length of the shape of an nd-array / Tensor.
            -
            -
            Returns:
            + a nd-array / Tensor which is equal to the + length of the shape of an nd-array / Tensor.
    +
    +
    Returns:
    The number of axis of an nd-array.
    - -
  • -
    -

    shape

    -
    public int[] shape()
    + + + + +
      +
    • +

      shape

      +
      public int[] shape()
      This method returns an array of axis sizes.
      -
      -
      Returns:
      +
      +
      Returns:
      An array of axis sizes.
      -
  • -
  • -
    -

    shape

    -
    public int shape(int i)
    + + + + +
      +
    • +

      shape

      +
      public int shape(int i)
      This method receives an axis index and return the size of the axis. It enables readable access to the shape of this configuration.
      -
      -
      Parameters:
      +
      +
      Parameters:
      i - The index of the axis whose size ought to be returned.
      -
      Returns:
      +
      Returns:
      The axis size targeted by the provided index.
      -
  • -
  • -
    -

    indicesMap

    -
    public int[] indicesMap()
    + + + + +
      +
    • +

      indicesMap

      +
      public int[] indicesMap()
      If one wants to for example access the fourth last item of all items - within a tensor based on a scalar index x then the NDConfiguration.indicesMap() + within a tensor based on a scalar index x then the NDConfiguration.indicesMap() is needed as a basis for translating said scalar index x to an array of indices - for every axis of the tensor represented by this NDConfiguration.
      -
      -
      Returns:
      + for every axis of the tensor represented by this NDConfiguration.
  • +
    +
    Returns:
    An array of values which are used to map an index to an indices array.
    - -
  • -
    -

    indicesMap

    -
    public int indicesMap(int i)
    + + + + +
      +
    • +

      indicesMap

      +
      public int indicesMap(int i)
indicesMap

public int indicesMap(int i)
This method receives an axis index and returns the indices-mapping value of said axis,
enabling readable access to the indices map of this configuration.
If one wants to, for example, access the fourth last item of all items
within a tensor based on a scalar index x, then the NDConfiguration.indicesMap()
is needed as a basis for translating said scalar index x to an array of indices
for every axis of the tensor represented by this NDConfiguration.
Parameters:
i - The index of the axis whose indices map value ought to be returned.
Returns:
The indices map value targeted by the provided index.

strides

public int[] strides()
Returns:
An array of values used to translate the axes indices to a data array index.

strides

public int strides(int i)
This method receives an axis index and returns the translation value for the targeted axis.
It enables readable and fast access to the translation of this configuration.
Parameters:
i - The index of the axis whose translation ought to be returned.
Returns:
The axis translation targeted by the provided index.

spread

public int[] spread()
The spread is the access step size of a slice within the n-dimensional data array of its parent tensor.
Returns:
An array of index step sizes for each tensor dimension / axis.

spread

public int spread(int i)
The spread is the access step size of a slice within the n-dimensional data array of its parent tensor.
Use this to look up the spread in a particular dimension / axis.
Parameters:
i - The dimension / axis index of the dimension / axis whose spread should be returned.
Returns:
The spread of the targeted dimension.

offset

public int[] offset()
The offset is the position of a slice within the n-dimensional data array of its parent tensor.
Use this to get the offsets of all slice dimensions.
Returns:
The offset position of the slice tensor inside the n-dimensional data array of the parent tensor.

offset

public int offset(int i)
The offset is the position of a slice within the n-dimensional data array of its parent tensor.
Use this to look up the offset in a particular dimension / axis.
Parameters:
i - The dimension / axis index of the dimension / axis whose offset should be returned.
Returns:
The offset of the targeted dimension.

indexOfIndex

public int indexOfIndex(int index)
Use this to calculate the true index for an element in the data array (data array index)
based on a provided "virtual index", or "value array index".
This virtual index may be different from the true index depending on the type of nd-array.
The virtual index ought to be turned into an index array which defines the position for every axis.
Then this indices array will be converted into the final and true index targeting an underlying item.
The information needed for performing this translation is expressed by individual implementations of
the NDConfiguration interface, which contain everything needed to treat a given block of data as an nd-array.
Parameters:
index - The virtual index of the tensor having this configuration.
Returns:
The true index which targets the actual data within the underlying data array of an nd-array / tensor.

indicesOfIndex

public int[] indicesOfIndex(int index)
This method calculates the axis indices for an element in the nd-array based on a provided "virtual index".
The resulting index array defines the position of the element for every axis.
Parameters:
index - The virtual index of the tensor having this configuration.
Returns:
The position of the (virtually) targeted element represented as an array of axis indices.

indexOfIndices

public int indexOfIndices(int[] indices)
This method calculates the true index for an element in the data array based on a provided index array.
Parameters:
indices - The indices for every axis of a given nd-array.
Returns:
The true index targeting the underlying data array of a given nd-array.
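To make the index translations described above concrete, here is a minimal, self-contained sketch of how a dense row-major configuration could map between a scalar "virtual" index and per-axis indices. It is an illustration only: the shape, stride and indicesMap values are assumptions chosen for the example, it does not call neureka, and it may differ from the library's actual implementation.

// Illustrative sketch only; not neureka code.
final class IndexMathSketch {
    // True data index from per-axis indices (cf. indexOfIndices(int[])).
    static int indexOfIndices(int[] indices, int[] strides) {
        int index = 0;
        for (int i = 0; i < indices.length; i++) index += indices[i] * strides[i];
        return index;
    }
    // Per-axis indices from a scalar "virtual" index (cf. indicesOfIndex(int)),
    // assuming the indices map holds the step size of each axis.
    static int[] indicesOfIndex(int index, int[] indicesMap) {
        int[] indices = new int[indicesMap.length];
        for (int i = 0; i < indicesMap.length; i++) {
            indices[i] = index / indicesMap[i];
            index     %= indicesMap[i];
        }
        return indices;
    }
    public static void main(String[] args) {
        int[] strides    = { 3, 1 };  // assumed row-major strides of a 2x3 tensor
        int[] indicesMap = { 3, 1 };  // assumed: equal to the strides for a dense layout
        int[] idx = indicesOfIndex(4, indicesMap);          // -> {1, 1}
        System.out.println(indexOfIndices(idx, strides));    // -> 4
    }
}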
diff --git a/docs/jdocs/neureka/ndim/config/types/simple/Simple1DConfiguration.html b/docs/jdocs/neureka/ndim/config/types/simple/Simple1DConfiguration.html
index b78e1f224..2cfa94595 100644
--- a/docs/jdocs/neureka/ndim/config/types/simple/Simple1DConfiguration.html
+++ b/docs/jdocs/neureka/ndim/config/types/simple/Simple1DConfiguration.html
@@ -1,467 +1,596 @@
-Simple1DConfiguration (neureka 1.0.0 API)
+Simple1DConfiguration (neureka 1.0.1 API)
neureka.ndim.config.types.simple

Class Simple1DConfiguration

java.lang.Object
  neureka.ndim.config.AbstractNDC
    neureka.ndim.config.types.D1C
      neureka.ndim.config.types.simple.Simple1DConfiguration

public class Simple1DConfiguration
extends D1C

Method Detail

construct
public static Simple1DConfiguration construct(int[] shape, int[] strides)

rank
public final int rank()
This method returns the number of axes of an nd-array / Tensor,
which is equal to the length of the shape of an nd-array / Tensor.
Returns:
The number of axes of an nd-array.

shape
public final int[] shape()
This method returns an array of axis sizes.
Returns:
An array of axis sizes.

shape
public final int shape(int i)
This method receives an axis index and returns the size of the axis.
It enables readable access to the shape of this configuration.
Parameters:
i - The index of the axis whose size ought to be returned.
Returns:
The axis size targeted by the provided index.

indicesMap
public final int[] indicesMap()
If one wants to, for example, access the fourth last item of all items
within a tensor based on a scalar index x, then the NDConfiguration.indicesMap()
is needed as a basis for translating said scalar index x to an array of indices
for every axis of the tensor represented by this NDConfiguration.
Returns:
An array of values which are used to map an index to an indices array.

indicesMap
public final int indicesMap(int i)
This method receives an axis index and returns the indices-mapping value of said axis
to enable readable access to the indices map of this configuration.
Parameters:
i - The index of the axis whose indices map value ought to be returned.
Returns:
The indices map value targeted by the provided index.

strides
public final int[] strides()
Returns:
An array of values used to translate the axes indices to a data array index.

strides
public final int strides(int i)
This method receives an axis index and returns the translation value for the targeted axis.
Parameters:
i - The index of the axis whose translation ought to be returned.
Returns:
The axis translation targeted by the provided index.

spread
public final int[] spread()
The spread is the access step size of a slice within the n-dimensional data array of its parent tensor.
Returns:
An array of index step sizes for each tensor dimension / axis.

spread
public final int spread(int i)
Use this to look up the spread in a particular dimension / axis.
Parameters:
i - The dimension / axis index of the dimension / axis whose spread should be returned.
Returns:
The spread of the targeted dimension.

offset
public final int[] offset()
The offset is the position of a slice within the n-dimensional data array of its parent tensor.
Returns:
The offset position of the slice tensor inside the n-dimensional data array of the parent tensor.

offset
public final int offset(int i)
Use this to look up the offset in a particular dimension / axis.
Parameters:
i - The dimension / axis index of the dimension / axis whose offset should be returned.
Returns:
The offset of the targeted dimension.

indexOfIndex
public final int indexOfIndex(int index)
Use this to calculate the true index for an element in the data array (data array index)
based on a provided "virtual index", or "value array index".
Parameters:
index - The virtual index of the tensor having this configuration.
Returns:
The true index which targets the actual data within the underlying data array of an nd-array / tensor.

indicesOfIndex
public final int[] indicesOfIndex(int index)
This method calculates the axis indices for an element in the nd-array based on a provided "virtual index".
Parameters:
index - The virtual index of the tensor having this configuration.
Returns:
The position of the (virtually) targeted element represented as an array of axis indices.

indexOfIndices
public final int indexOfIndices(int[] indices)
This method calculates the true index for an element in the data array based on a provided index array.
Parameters:
indices - The indices for every axis of a given nd-array.
Returns:
The true index targeting the underlying data array of a given nd-array.

indexOfIndices
public final int indexOfIndices(int d1)
Specified by:
indexOfIndices in class D1C
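As a quick illustration of the 1-D case, the sketch below builds a Simple1DConfiguration via its documented construct(int[] shape, int[] strides) factory and translates an index. The concrete shape and stride values, as well as the expected outputs, are assumptions chosen for the example (a dense layout), not values taken from the documentation above.

import neureka.ndim.config.types.simple.Simple1DConfiguration;

public class Simple1DExample {
    public static void main(String[] args) {
        // Assumed values: a dense vector of length 5 with a unit stride.
        Simple1DConfiguration cfg = Simple1DConfiguration.construct(
                new int[]{ 5 },   // shape
                new int[]{ 1 }    // strides (assumed)
        );
        System.out.println(cfg.rank());                                        // 1
        System.out.println(cfg.indexOfIndex(3));                               // expected 3 for this dense layout
        System.out.println(java.util.Arrays.toString(cfg.indicesOfIndex(3)));  // expected [3]
    }
}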
diff --git a/docs/jdocs/neureka/ndim/config/types/simple/Simple2DConfiguration.html b/docs/jdocs/neureka/ndim/config/types/simple/Simple2DConfiguration.html
index a5b64ea0f..b3c8eb4e7 100644
--- a/docs/jdocs/neureka/ndim/config/types/simple/Simple2DConfiguration.html
+++ b/docs/jdocs/neureka/ndim/config/types/simple/Simple2DConfiguration.html
@@ -1,477 +1,610 @@
-Simple2DConfiguration (neureka 1.0.0 API)
+Simple2DConfiguration (neureka 1.0.1 API)
neureka.ndim.config.types.simple

Class Simple2DConfiguration

java.lang.Object
  neureka.ndim.config.AbstractNDC
    neureka.ndim.config.types.D2C
      neureka.ndim.config.types.simple.Simple2DConfiguration

public class Simple2DConfiguration
extends D2C

Nested classes/interfaces inherited from interface neureka.ndim.config.NDConfiguration:
NDConfiguration.IndexToIndexFunction, NDConfiguration.Layout, NDConfiguration.Utility

Field Summary
protected final int _shape1 - The shape of the NDArray.
protected final int _shape2

Constructor Summary
protected Simple2DConfiguration(int[] shape, int[] strides)

Method Summary
static Simple2DConfiguration construct(int[] shape, int[] strides)
final int    indexOfIndex(int index)
final int    indexOfIndices(int[] indices)
final int    indexOfIndices(int d1, int d2)
final int[]  indicesMap()
final int    indicesMap(int i)
final int[]  indicesOfIndex(int index)
final int[]  offset()
final int    offset(int i)
final int    rank()
final int[]  shape()
final int    shape(int i)
final int[]  spread()
final int    spread(int i)
final int[]  strides()
final int    strides(int i)

Methods inherited from class neureka.ndim.config.AbstractNDC:
_cacheArray, _cached, _simpleReshape, equals, equals, hashCode, newReshaped, toString
Methods inherited from class java.lang.Object:
clone, finalize, getClass, notify, notifyAll, wait, wait, wait
Methods inherited from interface neureka.ndim.config.NDConfiguration:
asInlineArray, getIndexToIndexAccessPattern, getLayout, getTraits, has, isCompact, isSimple, isVirtual, size

Field Detail
protected final int _shape1 - The shape of the NDArray.
protected final int _shape2

Method Detail
The per-method documentation matches that of Simple1DConfiguration above, with one addition:

indexOfIndices
public final int indexOfIndices(int d1, int d2)
Specified by:
indexOfIndices in class D2C
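A minimal sketch of the 2-D case follows, assuming a row-major 2x3 layout; the stride values {3, 1} and the expected result are assumptions made for the example, not values prescribed by the documentation above.

import neureka.ndim.config.types.simple.Simple2DConfiguration;

public class Simple2DExample {
    public static void main(String[] args) {
        Simple2DConfiguration cfg = Simple2DConfiguration.construct(
                new int[]{ 2, 3 },   // shape: 2 rows, 3 columns
                new int[]{ 3, 1 }    // assumed row-major strides
        );
        // For a dense row-major layout we would expect (1, 2) -> 1*3 + 2*1 = 5.
        System.out.println(cfg.indexOfIndices(1, 2));
        System.out.println(cfg.indexOfIndices(new int[]{ 1, 2 }));
    }
}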
diff --git a/docs/jdocs/neureka/ndim/config/types/simple/Simple3DConfiguration.html b/docs/jdocs/neureka/ndim/config/types/simple/Simple3DConfiguration.html
index 0abe5f906..df78d37fb 100644
--- a/docs/jdocs/neureka/ndim/config/types/simple/Simple3DConfiguration.html
+++ b/docs/jdocs/neureka/ndim/config/types/simple/Simple3DConfiguration.html
@@ -1,487 +1,624 @@
-Simple3DConfiguration (neureka 1.0.0 API)
+Simple3DConfiguration (neureka 1.0.1 API)
neureka.ndim.config.types.simple

Class Simple3DConfiguration

java.lang.Object
  neureka.ndim.config.AbstractNDC
    neureka.ndim.config.types.D3C
      neureka.ndim.config.types.simple.Simple3DConfiguration

public class Simple3DConfiguration
extends D3C

Nested classes/interfaces inherited from interface neureka.ndim.config.NDConfiguration:
NDConfiguration.IndexToIndexFunction, NDConfiguration.Layout, NDConfiguration.Utility

Field Summary
protected final int _shape1 - The shape of the NDArray.
protected final int _shape2
protected final int _shape3

Constructor Summary
protected Simple3DConfiguration(int[] shape, int[] strides)

Method Summary
static Simple3DConfiguration construct(int[] shape, int[] strides)
final int    indexOfIndex(int index)
final int    indexOfIndices(int[] indices)
final int    indexOfIndices(int d1, int d2, int d3)
final int[]  indicesMap()
final int    indicesMap(int i)
final int[]  indicesOfIndex(int index)
final int[]  offset()
final int    offset(int i)
final int    rank()
final int[]  shape()
final int    shape(int i)
final int[]  spread()
final int    spread(int i)
final int[]  strides()
final int    strides(int i)

Methods inherited from class neureka.ndim.config.AbstractNDC:
_cacheArray, _cached, _simpleReshape, equals, equals, hashCode, newReshaped, toString
Methods inherited from class java.lang.Object:
clone, finalize, getClass, notify, notifyAll, wait, wait, wait
Methods inherited from interface neureka.ndim.config.NDConfiguration:
asInlineArray, getIndexToIndexAccessPattern, getLayout, getTraits, has, isCompact, isSimple, isVirtual, size

Field Detail
protected final int _shape1 - The shape of the NDArray.
protected final int _shape2
protected final int _shape3

Method Detail
The per-method documentation matches that of Simple1DConfiguration above, with one addition:

indexOfIndices
public final int indexOfIndices(int d1, int d2, int d3)
Specified by:
indexOfIndices in class D3C
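For the 3-D variant, the sketch below first derives dense row-major strides from an assumed shape and then passes both to the documented construct factory. The stride convention and the helper method are assumptions for illustration, not part of neureka's API or documentation.

import neureka.ndim.config.types.simple.Simple3DConfiguration;

public class Simple3DExample {
    // Illustrative helper (not part of neureka): dense row-major strides for a given shape.
    static int[] rowMajorStrides(int[] shape) {
        int[] strides = new int[shape.length];
        int step = 1;
        for (int i = shape.length - 1; i >= 0; i--) {
            strides[i] = step;
            step *= shape[i];
        }
        return strides;
    }

    public static void main(String[] args) {
        int[] shape = { 2, 3, 4 };  // assumed example shape
        Simple3DConfiguration cfg =
                Simple3DConfiguration.construct(shape, rowMajorStrides(shape)); // strides {12, 4, 1}
        // For a dense row-major layout we would expect (1, 2, 3) -> 1*12 + 2*4 + 3*1 = 23.
        System.out.println(cfg.indexOfIndices(1, 2, 3));
    }
}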
diff --git a/docs/jdocs/neureka/ndim/config/types/simple/SimpleNDConfiguration.html b/docs/jdocs/neureka/ndim/config/types/simple/SimpleNDConfiguration.html
index 30f32a8c9..8d1403c64 100644
--- a/docs/jdocs/neureka/ndim/config/types/simple/SimpleNDConfiguration.html
+++ b/docs/jdocs/neureka/ndim/config/types/simple/SimpleNDConfiguration.html
@@ -1,426 +1,544 @@
-SimpleNDConfiguration (neureka 1.0.0 API)
+SimpleNDConfiguration (neureka 1.0.1 API)
    - -
    -
    - -

    Class SimpleNDConfiguration

    +
    neureka.ndim.config.types.simple
    +

    Class SimpleNDConfiguration

    -
    java.lang.Object -
    neureka.ndim.config.AbstractNDC -
    neureka.ndim.config.types.simple.SimpleNDConfiguration
    -
    -
    -
    -
    +
    + +
    +
    -
    -
      +
      +
      public final class SimpleNDConfiguration
      +extends AbstractNDC
      + +
    +
    +
    +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        SimpleNDConfiguration

        -
        protected SimpleNDConfiguration(int[] shape, - int[] strides)
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            SimpleNDConfiguration

            +
            protected SimpleNDConfiguration(int[] shape,
            +                                int[] strides)
          -
    • +
    -
  • -
    -

    Method Details

    -
      -
    • -
      -

      construct

      -
      public static SimpleNDConfiguration construct(int[] shape, - int[] strides)
      -
      +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          construct

          +
          public static SimpleNDConfiguration construct(int[] shape,
          +                                              int[] strides)
        • -
        • -
          -

          rank

          -
          public final int rank()
          +
        + + + +
          +
        • +

          rank

          +
          public final int rank()
          This method returns the number of axis of - a nd-array / Tensor which is equal to the - length of the shape of an nd-array / Tensor.
          -
          -
          Returns:
          + a nd-array / Tensor which is equal to the + length of the shape of an nd-array / Tensor.
  • +
    +
    Returns:
    The number of axis of an nd-array.
    - -
  • -
    -

    shape

    -
    public final int[] shape()
    + + + + +
      +
    • +

      shape

      +
      public final int[] shape()
      This method returns an array of axis sizes.
      -
      -
      Returns:
      +
      +
      Returns:
      An array of axis sizes.
      -
  • -
  • -
    -

    shape

    -
    public final int shape(int i)
    + + + + +
      +
    • +

      shape

      +
      public final int shape(int i)
      This method receives an axis index and return the size of the axis. It enables readable access to the shape of this configuration.
      -
      -
      Parameters:
      +
      +
      Parameters:
      i - The index of the axis whose size ought to be returned.
      -
      Returns:
      +
      Returns:
      The axis size targeted by the provided index.
      -
  • -
  • -
    -

    indicesMap

    -
    public final int[] indicesMap()
    + + + + +
      +
    • +

      indicesMap

      +
      public final int[] indicesMap()
      If one wants to for example access the fourth last item of all items - within a tensor based on a scalar index x then the NDConfiguration.indicesMap() + within a tensor based on a scalar index x then the NDConfiguration.indicesMap() is needed as a basis for translating said scalar index x to an array of indices - for every axis of the tensor represented by this NDConfiguration.
      -
      -
      Returns:
      + for every axis of the tensor represented by this NDConfiguration.
  • +
    +
    Returns:
    An array of values which are used to map an index to an indices array.
    - -
  • -
    -

    indicesMap

    -
    public final int indicesMap(int i)
    + + + + +
      +
    • +

      indicesMap

      +
      public final int indicesMap(int i)
      This method receives an axis index and return the indices mapping value of said axis to enable readable access to the indices map of this configuration. If one wants to for example access the fourth last item of all items - within a tensor based on a scalar index x then the NDConfiguration.indicesMap() + within a tensor based on a scalar index x then the NDConfiguration.indicesMap() is needed as a basis for translating said scalar index x to an array of indices - for every axis of the tensor represented by this NDConfiguration.
      -
      -
      Parameters:
      + for every axis of the tensor represented by this NDConfiguration. +
      +
      Parameters:
      i - The index of the axis whose indices map value ought to be returned.
      -
      Returns:
      +
      Returns:
      The indices map value targeted by the provided index.
      -
  • -
  • -
    -

    strides

    -
    public final int[] strides()
    + + + + +
  • -
  • -
    -

    strides

    -
    public final int strides(int i)
    + + + + +
      +
    • +

      strides

      +
      public final int strides(int i)
      This method receives an axis index and returns the translation value for the targeted axis. It enables readable and fast access to the translation of this configuration.
      -
      -
      Parameters:
      +
      +
      Parameters:
      i - The index of the axis whose translation ought to be returned.
      -
      Returns:
      +
      Returns:
      The axis translation targeted by the provided index.
      -
  • -
  • -
    -

    spread

    -
    public final int[] spread()
    + + + + +
      +
    • +

      spread

      +
      public final int[] spread()
      The spread is the access step size of a slice within the n-dimensional data array of its parent tensor.
      -
      -
      Returns:
      +
      +
      Returns:
      An array of index step sizes for each tensor dimension / axis.
      -
  • -
  • -
    -

    spread

    -
    public final int spread(int i)
    + + + + +
      +
    • +

      spread

      +
      public final int spread(int i)
      The spread is the access step size of a slice within the n-dimensional data array of its parent tensor. Use this to look up the spread in a particular dimension / axis.
      -
      -
      Parameters:
      +
      +
      Parameters:
      i - The dimension / axis index of the dimension / axis whose spread should be returned.
      -
      Returns:
      +
      Returns:
      The spread of the targeted dimension.
offset

public final int[] offset()
The offset is the position of a slice within the n-dimensional data array of its parent tensor. Use this to get the offsets of all slice dimensions.

Returns:
The offset position of the slice tensor inside the n-dimensional data array of the parent tensor.

offset

public final int offset(int i)
The offset is the position of a slice within the n-dimensional data array of its parent tensor. Use this to look up the offset in a particular dimension / axis.

Parameters:
i - The dimension / axis index of the dimension / axis whose offset should be returned.
Returns:
The offset of the targeted dimension.
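Taken together, offset, spread and strides describe how a slice addresses its parent's data. A plausible reading of the documentation above is that each slice index is first scaled by the spread, shifted by the offset, and only then translated through the parent's strides. The sketch below illustrates that reading; the formula and values are assumptions for illustration, not Neureka's actual code.

```java
// Illustrative sketch of slice addressing (assumed formula, not Neureka's code):
// parentIndex = sum over axes of (offset[a] + sliceIndex[a] * spread[a]) * strides[a]
public final class SliceAddressingSketch {

    static int parentIndexOf(int[] sliceIndices, int[] offset, int[] spread, int[] strides) {
        int index = 0;
        for (int a = 0; a < sliceIndices.length; a++)
            index += (offset[a] + sliceIndices[a] * spread[a]) * strides[a];
        return index;
    }

    public static void main(String[] args) {
        // A slice of a row-major 6x6 matrix starting at (1, 2), taking every 2nd column.
        int[] strides = {6, 1}; // parent layout
        int[] offset  = {1, 2}; // slice start position per axis
        int[] spread  = {1, 2}; // slice step size per axis
        // Slice position (0, 1) maps to parent element (1, 4), i.e. flat index 10.
        System.out.println(parentIndexOf(new int[]{0, 1}, offset, spread, strides)); // prints 10
    }
}
```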
indexOfIndex

public final int indexOfIndex(int index)
Use this to calculate the true index for an element in the data array (data array index) based on a provided "virtual index", or "value array index". This virtual index may be different from the true index depending on the type of nd-array. The virtual index ought to be turned into an index array which defines the position for every axis. That indices array is then converted into the final and true index targeting an underlying item. The information needed for performing this translation is expressed by individual implementations of this NDConfiguration interface, which contain everything needed to treat a given block of data as an nd-array!

Parameters:
index - The virtual index of the tensor having this configuration.
Returns:
The true index which targets the actual data within the underlying data array of an nd-array / tensor.

indicesOfIndex

public final int[] indicesOfIndex(int index)
The following method calculates the axis indices for an element in the nd-array based on a provided "virtual index". The resulting index defines the position of the element for every axis.

Parameters:
index - The virtual index of the tensor having this configuration.
Returns:
The position of the (virtually) targeted element represented as an array of axis indices.
indexOfIndices

public int indexOfIndices(int[] indices)
The following method calculates the true index for an element in the data array based on a provided index array.

Parameters:
indices - The indices for every axis of a given nd-array.
Returns:
The true index targeting the underlying data array of a given nd-array.
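The three methods above compose: translating a virtual index first into axis indices and then into a data array index should, by the descriptions given here, agree with calling indexOfIndex directly. The sketch below only relies on the methods documented above; how the NDConfiguration instance is obtained is deliberately left open, and the class name is an assumption.

```java
import neureka.ndim.config.NDConfiguration;

// Sketch: the documented translation methods compose into a round trip.
public final class IndexRoundTrip {

    static void check(NDConfiguration ndc, int virtualIndex) {
        int[] indices = ndc.indicesOfIndex(virtualIndex); // axis positions of the element
        int direct    = ndc.indexOfIndex(virtualIndex);   // one-step translation
        int composed  = ndc.indexOfIndices(indices);      // two-step translation
        if (direct != composed)
            throw new IllegalStateException("Translations disagree for index " + virtualIndex);
    }
}
```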
diff --git a/docs/jdocs/neureka/ndim/config/types/simple/package-frame.html b/docs/jdocs/neureka/ndim/config/types/simple/package-frame.html
new file mode 100644
index 000000000..477a77dc6
New package frame page for neureka.ndim.config.types.simple (neureka 1.0.1 API).

diff --git a/docs/jdocs/neureka/ndim/config/types/simple/package-summary.html b/docs/jdocs/neureka/ndim/config/types/simple/package-summary.html
index 83c4e53d6..89359a276 100644
Package neureka.ndim.config.types.simple
Summary page regenerated; the title changes from "neureka 1.0.0 API" to "neureka 1.0.1 API".

diff --git a/docs/jdocs/neureka/ndim/config/types/simple/package-tree.html b/docs/jdocs/neureka/ndim/config/types/simple/package-tree.html
index 4657bfa50..2a432d410 100644
Hierarchy For Package neureka.ndim.config.types.simple
Class-hierarchy page regenerated; the title changes from "neureka 1.0.0 API" to "neureka 1.0.1 API".

diff --git a/docs/jdocs/neureka/ndim/config/types/sliced/Sliced0DConfiguration.html b/docs/jdocs/neureka/ndim/config/types/sliced/Sliced0DConfiguration.html
index 4f7be58f8..f0fb1fc84 100644
Class page regenerated for neureka 1.0.1.

Class Sliced0DConfiguration

Package neureka.ndim.config.types.sliced

java.lang.Object
    neureka.ndim.config.AbstractNDC
        neureka.ndim.config.types.sliced.Sliced0DConfiguration

public final class Sliced0DConfiguration
extends AbstractNDC
Constructor Detail

Sliced0DConfiguration

protected Sliced0DConfiguration(int shape,
                                int offset)

Method Detail

construct

public static Sliced0DConfiguration construct(int[] shape,
                                              int[] offset)

rank

public int rank()
This method returns the number of axes of an nd-array / Tensor, which is equal to the length of the shape of an nd-array / Tensor.

Returns:
The number of axes of an nd-array.
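A short, hedged usage sketch of the static factory documented above follows. The argument values and the class name are illustrative assumptions, not taken from the Neureka sources; only construct, rank and shape as documented on this page are used.

```java
import neureka.ndim.config.types.sliced.Sliced0DConfiguration;

// Hedged sketch: exercising the factory and query methods documented above.
public final class Sliced0DSketch {
    public static void main(String[] args) {
        Sliced0DConfiguration cfg = Sliced0DConfiguration.construct(
                new int[]{ 1 },  // shape of a single-element view (assumed value)
                new int[]{ 7 }   // offset of that element inside the parent data array (assumed value)
        );
        System.out.println(cfg.rank());                               // number of axes
        System.out.println(java.util.Arrays.toString(cfg.shape()));   // axis sizes
    }
}
```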
shape

public int[] shape()
This method returns an array of axis sizes.

Returns:
An array of axis sizes.

shape

public int shape(int i)
This method receives an axis index and returns the size of that axis. It enables readable access to the shape of this configuration.

Parameters:
i - The index of the axis whose size ought to be returned.
Returns:
The axis size targeted by the provided index.

indicesMap

public int[] indicesMap()
If one wants, for example, to access the fourth-last of all items within a tensor based on a scalar index x, then NDConfiguration.indicesMap() is needed as a basis for translating that scalar index x into an array of indices, one for every axis of the tensor represented by this NDConfiguration.

Returns:
An array of values which are used to map an index to an indices array.

indicesMap

public int indicesMap(int i)
Description copied from interface: NDConfiguration
This method receives an axis index and returns the indices-map value of that axis, enabling readable access to the indices map of this configuration. If one wants, for example, to access the fourth-last of all items within a tensor based on a scalar index x, then NDConfiguration.indicesMap() is needed as a basis for translating that scalar index x into an array of indices, one for every axis of the tensor represented by this NDConfiguration.

Parameters:
i - The index of the axis whose indices map value ought to be returned.
Returns:
The indices map value targeted by the provided index.
strides

public int[] strides()

strides

public int strides(int i)

spread

public int[] spread()

spread

public int spread(int i)

offset

public int[] offset()

offset

public int offset(int i)

indexOfIndex

public int indexOfIndex(int index)

indicesOfIndex

public int[] indicesOfIndex(int index)

indexOfIndices

public int indexOfIndices(int[] indices)

The descriptions, parameters and return values of these methods match the NDConfiguration documentation reproduced earlier in this diff.
diff --git a/docs/jdocs/neureka/ndim/config/types/sliced/Sliced1DConfiguration.html b/docs/jdocs/neureka/ndim/config/types/sliced/Sliced1DConfiguration.html
index fde9d1461..0dd738b1b 100644
Class page regenerated for neureka 1.0.1.

Class Sliced1DConfiguration

Package neureka.ndim.config.types.sliced

java.lang.Object
    neureka.ndim.config.AbstractNDC
        neureka.ndim.config.types.D1C
            neureka.ndim.config.types.sliced.Sliced1DConfiguration

public class Sliced1DConfiguration
extends D1C
Method Detail

construct

public static Sliced1DConfiguration construct(int[] shape,
                                              int[] strides,
                                              int[] indicesMap,
                                              int[] spread,
                                              int[] offset)

rank

public final int rank()
This method returns the number of axes of an nd-array / Tensor, which is equal to the length of the shape of an nd-array / Tensor.

Returns:
The number of axes of an nd-array.
shape

public final int[] shape()

shape

public final int shape(int i)

indicesMap

public final int[] indicesMap()

indicesMap

public final int indicesMap(int i)

strides

public final int[] strides()

strides

public final int strides(int i)

spread

public final int[] spread()

spread

public final int spread(int i)

offset

public final int[] offset()

offset

public final int offset(int i)

indexOfIndex

public final int indexOfIndex(int index)

indicesOfIndex

public final int[] indicesOfIndex(int index)

indexOfIndices

public final int indexOfIndices(int[] indices)

The descriptions, parameters and return values of these methods match the NDConfiguration documentation reproduced earlier in this diff.

indexOfIndices

public final int indexOfIndices(int d1)

Specified by:
indexOfIndices in class D1C
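The single-argument overload above is the D1C specialization: it lets callers translate a one-dimensional position without allocating an index array. The sketch below shows a plausible reading of what such a sliced one-dimensional translation computes, assuming the offset/spread/strides interpretation used earlier in this diff; it is an illustration, not the library's actual implementation.

```java
// Illustrative 1D sliced translation (assumed formula, not Neureka's code):
// dataIndex = (offset + d1 * spread) * stride
final class Sliced1DSketch {
    private final int stride;  // translation value of the single axis
    private final int spread;  // step size of the slice within the parent
    private final int offset;  // start position of the slice within the parent

    Sliced1DSketch(int stride, int spread, int offset) {
        this.stride = stride;
        this.spread = spread;
        this.offset = offset;
    }

    int indexOfIndices(int d1) {
        return (offset + d1 * spread) * stride;
    }
}
```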
diff --git a/docs/jdocs/neureka/ndim/config/types/sliced/Sliced2DConfiguration.html b/docs/jdocs/neureka/ndim/config/types/sliced/Sliced2DConfiguration.html
index 8b4964ebf..2fabf447f 100644
Class page regenerated for neureka 1.0.1.

Class Sliced2DConfiguration

Package neureka.ndim.config.types.sliced

java.lang.Object
    neureka.ndim.config.AbstractNDC
        neureka.ndim.config.types.D2C
            neureka.ndim.config.types.sliced.Sliced2DConfiguration

public class Sliced2DConfiguration
extends D2C
Nested Class Summary

Nested classes/interfaces inherited from interface neureka.ndim.config.NDConfiguration:
NDConfiguration.IndexToIndexFunction, NDConfiguration.Layout, NDConfiguration.Utility

Field Summary

Modifier and Type      Field      Description
protected final int    _shape1    The shape of the NDArray.
protected final int    _shape2

Constructor Summary

Modifier     Constructor
protected    Sliced2DConfiguration(int[] shape, int[] strides, int[] indicesMap, int[] spread, int[] offset)

Method Summary

Modifier and Type               Method and Description
static Sliced2DConfiguration    construct(int[] shape, int[] strides, int[] indicesMap, int[] spread, int[] offset)
int                             indexOfIndex(int index)
                                Use this to calculate the true index for an element in the data array (data array index) based on a provided "virtual index", or "value array index".
int                             indexOfIndices(int[] indices)
                                The following method calculates the true index for an element in the data array based on a provided index array.
int                             indexOfIndices(int d1, int d2)
int[]                           indicesMap()
                                If one wants, for example, to access the fourth-last of all items within a tensor based on a scalar index x, then NDConfiguration.indicesMap() is needed as a basis for translating that scalar index x into an array of indices, one for every axis of the tensor represented by this NDConfiguration.
int                             indicesMap(int i)
                                This method receives an axis index and returns the indices-map value of that axis, enabling readable access to the indices map of this configuration.
int[]                           indicesOfIndex(int index)
                                The following method calculates the axis indices for an element in the nd-array based on a provided "virtual index".
int[]                           offset()
                                The offset is the position of a slice within the n-dimensional data array of its parent tensor.
int                             offset(int i)
                                The offset is the position of a slice within the n-dimensional data array of its parent tensor.
int                             rank()
                                This method returns the number of axes of an nd-array / Tensor, which is equal to the length of the shape of an nd-array / Tensor.
int[]                           shape()
                                This method returns an array of axis sizes.
int                             shape(int i)
                                This method receives an axis index and returns the size of that axis.
int[]                           spread()
                                The spread is the access step size of a slice within the n-dimensional data array of its parent tensor.
int                             spread(int i)
                                The spread is the access step size of a slice within the n-dimensional data array of its parent tensor.
int[]                           strides()
                                The array returned by this method is used to translate an array of axis indices into a single data array index.
int                             strides(int i)
                                This method receives an axis index and returns the translation value for the targeted axis.

Methods inherited from class neureka.ndim.config.AbstractNDC:
_cacheArray, _cached, _simpleReshape, equals, equals, hashCode, newReshaped, toString

Methods inherited from class java.lang.Object:
clone, finalize, getClass, notify, notifyAll, wait, wait, wait

Methods inherited from interface neureka.ndim.config.NDConfiguration:
asInlineArray, getIndexToIndexAccessPattern, getLayout, getTraits, has, isCompact, isSimple, isVirtual, size

Field Detail

_shape1

protected final int _shape1
The shape of the NDArray.

_shape2

protected final int _shape2

Constructor Detail

Sliced2DConfiguration

protected Sliced2DConfiguration(int[] shape,
                                int[] strides,
                                int[] indicesMap,
                                int[] spread,
                                int[] offset)

Method Detail

construct

public static Sliced2DConfiguration construct(int[] shape,
                                              int[] strides,
                                              int[] indicesMap,
                                              int[] spread,
                                              int[] offset)

rank

public final int rank()

shape

public final int[] shape()

shape

public final int shape(int i)

indicesMap

public final int[] indicesMap()

indicesMap

public final int indicesMap(int i)

strides

public final int[] strides()

strides

public final int strides(int i)

spread

public final int[] spread()

spread

public final int spread(int i)

offset

public final int[] offset()

offset

public final int offset(int i)

indexOfIndex

public final int indexOfIndex(int index)

indicesOfIndex

public final int[] indicesOfIndex(int index)

indexOfIndices

public final int indexOfIndices(int[] indices)

The descriptions, parameters and return values of these methods match the NDConfiguration documentation reproduced earlier in this diff.

indexOfIndices

public final int indexOfIndices(int d1,
                                int d2)

Specified by:
indexOfIndices in class D2C
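As a hedged usage sketch of the two-dimensional variant: the class name and all argument values below are illustrative assumptions rather than values taken from the Neureka sources, and only the construct factory plus the translation methods documented above are used.

```java
import neureka.ndim.config.types.sliced.Sliced2DConfiguration;

// Hedged sketch: building a 2D sliced configuration and checking that the
// documented translation methods agree with each other.
public final class Sliced2DSketch {
    public static void main(String[] args) {
        Sliced2DConfiguration cfg = Sliced2DConfiguration.construct(
                new int[]{ 2, 3 },   // shape of the view (assumed)
                new int[]{ 6, 1 },   // strides of the parent data array (assumed)
                new int[]{ 3, 1 },   // indices map of the view (assumed)
                new int[]{ 1, 2 },   // spread, i.e. step size per axis (assumed)
                new int[]{ 1, 2 }    // offset, i.e. start position per axis (assumed)
        );
        int direct   = cfg.indexOfIndex(4);
        int composed = cfg.indexOfIndices(cfg.indicesOfIndex(4));
        System.out.println(direct == composed); // the two translation paths should agree
    }
}
```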
diff --git a/docs/jdocs/neureka/ndim/config/types/sliced/Sliced3DConfiguration.html b/docs/jdocs/neureka/ndim/config/types/sliced/Sliced3DConfiguration.html
index 528d02046..68a7df850 100644
Class page regenerated for neureka 1.0.1.

Class Sliced3DConfiguration

Package neureka.ndim.config.types.sliced

java.lang.Object
    neureka.ndim.config.AbstractNDC
        neureka.ndim.config.types.D3C
            neureka.ndim.config.types.sliced.Sliced3DConfiguration

public class Sliced3DConfiguration
extends D3C
Nested Class Summary

Nested classes/interfaces inherited from interface neureka.ndim.config.NDConfiguration:
NDConfiguration.IndexToIndexFunction, NDConfiguration.Layout, NDConfiguration.Utility

Field Summary

Modifier and Type      Field      Description
protected final int    _shape1    The shape of the NDArray.
protected final int    _shape2
protected final int    _shape3

Constructor Summary

Modifier     Constructor
protected    Sliced3DConfiguration(int[] shape, int[] strides, int[] indicesMap, int[] spread, int[] offset)
Method Summary

Modifier and Type               Method and Description
static Sliced3DConfiguration    construct(int[] shape, int[] strides, int[] indicesMap, int[] spread, int[] offset)
int                             indexOfIndex(int index)
                                Use this to calculate the true index for an element in the data array (data array index) based on a provided "virtual index", or "value array index".
int                             indexOfIndices(int[] indices)
                                The following method calculates the true index for an element in the data array based on a provided index array.
int                             indexOfIndices(int d1, int d2, int d3)
int[]                           indicesMap()
                                If one wants, for example, to access the fourth-last of all items within a tensor based on a scalar index x, then NDConfiguration.indicesMap() is needed as a basis for translating that scalar index x into an array of indices, one for every axis of the tensor represented by this NDConfiguration.
int                             indicesMap(int i)
                                This method receives an axis index and returns the indices-map value of that axis, enabling readable access to the indices map of this configuration.
int[]                           indicesOfIndex(int index)
                                The following method calculates the axis indices for an element in the nd-array based on a provided "virtual index".
int[]                           offset()
                                The offset is the position of a slice within the n-dimensional data array of its parent tensor.
int                             offset(int i)
                                The offset is the position of a slice within the n-dimensional data array of its parent tensor.
int                             rank()
                                This method returns the number of axes of an nd-array / Tensor, which is equal to the length of the shape of an nd-array / Tensor.
int[]                           shape()
                                This method returns an array of axis sizes.
int                             shape(int i)
                                This method receives an axis index and returns the size of that axis.
int[]                           spread()
                                The spread is the access step size of a slice within the n-dimensional data array of its parent tensor.
int                             spread(int i)
                                The spread is the access step size of a slice within the n-dimensional data array of its parent tensor.
int[]                           strides()
                                The array returned by this method is used to translate an array of axis indices into a single data array index.
int                             strides(int i)
                                This method receives an axis index and returns the translation value for the targeted axis.

Methods inherited from class neureka.ndim.config.AbstractNDC:
_cacheArray, _cached, _simpleReshape, equals, equals, hashCode, newReshaped, toString

Methods inherited from class java.lang.Object:
clone, finalize, getClass, notify, notifyAll, wait, wait, wait

Methods inherited from interface neureka.ndim.config.NDConfiguration:
asInlineArray, getIndexToIndexAccessPattern, getLayout, getTraits, has, isCompact, isSimple, isVirtual, size

Field Detail

_shape1

protected final int _shape1
The shape of the NDArray.

_shape2

protected final int _shape2

_shape3

protected final int _shape3

Constructor Detail

Sliced3DConfiguration

protected Sliced3DConfiguration(int[] shape,
                                int[] strides,
                                int[] indicesMap,
                                int[] spread,
                                int[] offset)

Method Detail

construct

public static Sliced3DConfiguration construct(int[] shape,
                                              int[] strides,
                                              int[] indicesMap,
                                              int[] spread,
                                              int[] offset)

rank

public final int rank()
This method returns the number of axes of an nd-array / Tensor, which is equal to the length of the shape of an nd-array / Tensor.

Returns:
The number of axes of an nd-array.
    shape

    -
    public final int[] shape()
    + + + + +
      +
    • +

      shape

      +
      public final int[] shape()
      This method returns an array of axis sizes.
      -
      -
      Returns:
      +
      +
      Returns:
      An array of axis sizes.
      -
  • -
  • -
    -

    shape

    -
    public final int shape(int i)
    + + + + +
      +
    • +

      shape

      +
      public final int shape(int i)
      This method receives an axis index and return the size of the axis. It enables readable access to the shape of this configuration.
      -
      -
      Parameters:
      +
      +
      Parameters:
      i - The index of the axis whose size ought to be returned.
      -
      Returns:
      +
      Returns:
      The axis size targeted by the provided index.
      -
  • -
  • -
    -

    indicesMap

    -
    public final int[] indicesMap()
    + + + + +
      +
    • +

      indicesMap

      +
      public final int[] indicesMap()
      If one wants to for example access the fourth last item of all items - within a tensor based on a scalar index x then the NDConfiguration.indicesMap() + within a tensor based on a scalar index x then the NDConfiguration.indicesMap() is needed as a basis for translating said scalar index x to an array of indices - for every axis of the tensor represented by this NDConfiguration.
      -
      -
      Returns:
      + for every axis of the tensor represented by this NDConfiguration. +
      +
      Returns:
      An array of values which are used to map an index to an indices array.
      -
  • -
  • -
    -

    indicesMap

    -
    public final int indicesMap(int i)
    + + + + +
      +
    • +

      indicesMap

      +
      public final int indicesMap(int i)
This method receives an axis index and returns the indices mapping value of said axis to enable readable access to the indices map of this configuration. If one wants, for example, to access the fourth-last item of all items within a tensor based on a scalar index x, then the NDConfiguration.indicesMap() is needed as a basis for translating said scalar index x to an array of indices for every axis of the tensor represented by this NDConfiguration.

Parameters:
      i - The index of the axis whose indices map value ought to be returned.
      -
      Returns:
      +
      Returns:
      The indices map value targeted by the provided index.
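A minimal sketch of the read access described above, reusing the hypothetical config from the earlier example:

    int[] map        = config.indicesMap();   // the whole mapping array
    int   mapOfAxis0 = config.indicesMap(0);  // the same value as map[0]
    // Together with indicesOfIndex(int), this mapping is what turns a scalar
    // index x into one index per axis of the tensor.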
      -
  • -
  • -
    -

    strides

    -
    public final int[] strides()
    + + + + +
  • -
  • -
    -

    strides

    -
    public final int strides(int i)
    + + + + +
      +
    • +

      strides

      +
      public final int strides(int i)
      This method receives an axis index and returns the translation value for the targeted axis. It enables readable and fast access to the translation of this configuration.
      -
      -
      Parameters:
      +
      +
      Parameters:
      i - The index of the axis whose translation ought to be returned.
      -
      Returns:
      +
      Returns:
      The axis translation targeted by the provided index.
      -
  • -
  • -
    -

    spread

    -
    public final int[] spread()
    + + + + +
      +
    • +

      spread

      +
      public final int[] spread()
      The spread is the access step size of a slice within the n-dimensional data array of its parent tensor.
      -
      -
      Returns:
      +
      +
      Returns:
      An array of index step sizes for each tensor dimension / axis.
      -
  • -
  • -
    -

    spread

    -
    public final int spread(int i)
    + + + + +
      +
    • +

      spread

      +
      public final int spread(int i)
      The spread is the access step size of a slice within the n-dimensional data array of its parent tensor. Use this to look up the spread in a particular dimension / axis.
      -
      -
      Parameters:
      +
      +
      Parameters:
      i - The dimension / axis index of the dimension / axis whose spread should be returned.
      -
      Returns:
      +
      Returns:
      The spread of the targeted dimension.
      -
  • -
  • -
    -

    offset

    -
    public final int[] offset()
    + + + + +
      +
    • +

      offset

      +
      public final int[] offset()
      The offset is the position of a slice within the n-dimensional data array of its parent tensor. Use this to get the offsets of all slice dimension.
      -
      -
      Returns:
      +
      +
      Returns:
      The offset position of the slice tensor inside the n-dimensional data array of the parent tensor.
      -
  • -
  • -
    -

    offset

    -
    public final int offset(int i)
    + + + + +
      +
    • +

      offset

      +
      public final int offset(int i)
      The offset is the position of a slice within the n-dimensional data array of its parent tensor. Use this to look up the offset in a particular dimension / axis.
      -
      -
      Parameters:
      +
      +
      Parameters:
      i - The dimension / axis index of the dimension / axis whose offset should be returned.
      -
      Returns:
      +
      Returns:
      The offset of the targeted dimension.
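To make the spread / offset terminology concrete, the following sketch reads both for the hypothetical slice configuration used above:

    int[] steps     = config.spread();   // per-axis step size inside the parent's data array
    int   stepAxis2 = config.spread(2);

    int[] start      = config.offset();  // per-axis position of the slice inside the parent
    int   startAxis0 = config.offset(0);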
      -
  • -
  • -
    -

    indexOfIndex

    -
    public final int indexOfIndex(int index)
    + + + + +
      +
    • +

      indexOfIndex

      +
      public final int indexOfIndex(int index)
Use this to calculate the true index for an element in the data array (data array index) based on a provided "virtual index", or "value array index". This virtual index may be different from the true index depending on the type of nd-array,
@@ -502,65 +639,133 @@

      indexOfIndex

This virtual index ought to be turned into an index array which defines the position for every axis. Then this indices array will be converted into the final and true index targeting an underlying item. The information needed for performing this translation is expressed by individual implementations of this NDConfiguration interface, which contain everything needed to treat a given block of data as an nd-array!
      -
      -
      Parameters:
      +
      +
      Parameters:
      index - The virtual index of the tensor having this configuration.
      -
      Returns:
      +
      Returns:
      The true index which targets the actual data within the underlying data array of an nd-array / tensor.
      -
  • -
  • -
    -

    indicesOfIndex

    -
    public final int[] indicesOfIndex(int index)
    + + + + +
      +
    • +

      indicesOfIndex

      +
      public final int[] indicesOfIndex(int index)
      The following method calculates the axis indices for an element in the nd-array array based on a provided "virtual index". The resulting index defines the position of the element for every axis.
      -
      -
      Parameters:
      +
      +
      Parameters:
      index - The virtual index of the tensor having this configuration.
      -
      Returns:
      +
      Returns:
      The position of the (virtually) targeted element represented as an array of axis indices.
      -
  • -
  • -
    -

    indexOfIndices

    -
    public final int indexOfIndices(int[] indices)
    + + + + +
      +
    • +

      indexOfIndices

      +
      public final int indexOfIndices(int[] indices)
      The following method calculates the true index for an element in the data array based on a provided index array.
      -
      -
      Parameters:
      +
      +
      Parameters:
      indices - The indices for every axis of a given nd-array.
      -
      Returns:
      +
      Returns:
      The true index targeting the underlying data array of a given nd-array.
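The three translation methods above fit together as shown in this sketch; the relationship follows directly from the descriptions in this file, while the index value itself is arbitrary:

    int virtualIndex  = 7;                                      // some "value array index"
    int[] axisIndices = config.indicesOfIndex( virtualIndex );  // position per axis
    int   trueIndex   = config.indexOfIndices( axisIndices );   // data array index

    // indexOfIndex(...) performs both steps in one call:
    assert trueIndex == config.indexOfIndex( virtualIndex );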
      -
  • -
  • -
    -

    indexOfIndices

    -
public final int indexOfIndices(int d1, int d2, int d3)
    -
    -
    Specified by:
    -
    indexOfIndices in class D3C
    + + + + +
      +
    • +

      indexOfIndices

      +
      public final int indexOfIndices(int d1,
      +                                int d2,
      +                                int d3)
      +
      +
      Specified by:
      +
      indexOfIndices in class D3C
      -
  • -
    - + + + + - + + + + diff --git a/docs/jdocs/neureka/ndim/config/types/sliced/SlicedNDConfiguration.html b/docs/jdocs/neureka/ndim/config/types/sliced/SlicedNDConfiguration.html index 9be4bd227..34f3fc356 100644 --- a/docs/jdocs/neureka/ndim/config/types/sliced/SlicedNDConfiguration.html +++ b/docs/jdocs/neureka/ndim/config/types/sliced/SlicedNDConfiguration.html @@ -1,438 +1,556 @@ - + + - -SlicedNDConfiguration (neureka 1.0.0 API) - - - - + +SlicedNDConfiguration (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class SlicedNDConfiguration

    +
    neureka.ndim.config.types.sliced
    +

    Class SlicedNDConfiguration

    -
    java.lang.Object -
    neureka.ndim.config.AbstractNDC -
    neureka.ndim.config.types.sliced.SlicedNDConfiguration
    -
    -
    -
    -
    +
    + +
    +
    -
    -
      +
      +
      public class SlicedNDConfiguration
      +extends AbstractNDC
      + +
    +
    +
    +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        SlicedNDConfiguration

        -
protected SlicedNDConfiguration(int[] shape, int[] strides, int[] indicesMap, int[] spread, int[] offset)
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            SlicedNDConfiguration

            +
            protected SlicedNDConfiguration(int[] shape,
            +                                int[] strides,
            +                                int[] indicesMap,
            +                                int[] spread,
            +                                int[] offset)
          -
    • +
    -
  • -
    -

    Method Details

    -
      -
    • -
      -

      construct

      -
public static SlicedNDConfiguration construct(int[] shape, int[] strides, int[] indicesMap, int[] spread, int[] offset)
      -
      +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          construct

          +
          public static SlicedNDConfiguration construct(int[] shape,
          +                                              int[] strides,
          +                                              int[] indicesMap,
          +                                              int[] spread,
          +                                              int[] offset)
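Judging by the D3C-specific indexOfIndices(d1, d2, d3) overload documented earlier, SlicedNDConfiguration appears to be the rank-agnostic counterpart of Sliced3DConfiguration; its factory takes the same five arrays. A hedged sketch with made-up rank-2 values (package paths inferred from this file):

    import neureka.ndim.config.NDConfiguration;
    import neureka.ndim.config.types.sliced.SlicedNDConfiguration;

    NDConfiguration cfg = SlicedNDConfiguration.construct(
            new int[]{ 4, 5 },  // shape
            new int[]{ 5, 1 },  // strides (assumes a row-major parent)
            new int[]{ 5, 1 },  // indicesMap
            new int[]{ 1, 1 },  // spread
            new int[]{ 0, 0 }   // offset
    );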
        • -
        • -
          -

          rank

          -
          public final int rank()
          +
        + + + +
          +
        • +

          rank

          +
          public final int rank()
This method returns the number of axes of an nd-array / Tensor, which is equal to the length of the shape of an nd-array / Tensor.

Returns:
The number of axes of an nd-array.
    - -
  • -
    -

    shape

    -
    public final int[] shape()
    + + + + +
      +
    • +

      shape

      +
      public final int[] shape()
      This method returns an array of axis sizes.
      -
      -
      Returns:
      +
      +
      Returns:
      An array of axis sizes.
      -
  • -
  • -
    -

    shape

    -
    public final int shape(int i)
    + + + + +
      +
    • +

      shape

      +
      public final int shape(int i)
This method receives an axis index and returns the size of the axis. It enables readable access to the shape of this configuration.
      -
      -
      Parameters:
      +
      +
      Parameters:
      i - The index of the axis whose size ought to be returned.
      -
      Returns:
      +
      Returns:
      The axis size targeted by the provided index.
      -
  • -
  • -
    -

    indicesMap

    -
    public final int[] indicesMap()
    + + + + +
      +
    • +

      indicesMap

      +
      public final int[] indicesMap()
If one wants, for example, to access the fourth-last item of all items within a tensor based on a scalar index x, then the NDConfiguration.indicesMap() is needed as a basis for translating said scalar index x to an array of indices for every axis of the tensor represented by this NDConfiguration.
      -
      -
      Returns:
      + for every axis of the tensor represented by this NDConfiguration.
  • +
    +
    Returns:
    An array of values which are used to map an index to an indices array.
    - -
  • -
    -

    indicesMap

    -
    public final int indicesMap(int i)
    + + + + +
      +
    • +

      indicesMap

      +
      public final int indicesMap(int i)
This method receives an axis index and returns the indices mapping value of said axis to enable readable access to the indices map of this configuration. If one wants, for example, to access the fourth-last item of all items within a tensor based on a scalar index x, then the NDConfiguration.indicesMap() is needed as a basis for translating said scalar index x to an array of indices for every axis of the tensor represented by this NDConfiguration.
      -
      -
      Parameters:
      + for every axis of the tensor represented by this NDConfiguration. +
      +
      Parameters:
      i - The index of the axis whose indices map value ought to be returned.
      -
      Returns:
      +
      Returns:
      The indices map value targeted by the provided index.
      -
  • -
  • -
    -

    strides

    -
    public final int[] strides()
    + + + + +
  • -
  • -
    -

    strides

    -
    public final int strides(int i)
    + + + + +
      +
    • +

      strides

      +
      public final int strides(int i)
      This method receives an axis index and returns the translation value for the targeted axis. It enables readable and fast access to the translation of this configuration.
      -
      -
      Parameters:
      +
      +
      Parameters:
      i - The index of the axis whose translation ought to be returned.
      -
      Returns:
      +
      Returns:
      The axis translation targeted by the provided index.
      -
  • -
  • -
    -

    spread

    -
    public final int[] spread()
    + + + + +
      +
    • +

      spread

      +
      public final int[] spread()
      The spread is the access step size of a slice within the n-dimensional data array of its parent tensor.
      -
      -
      Returns:
      +
      +
      Returns:
      An array of index step sizes for each tensor dimension / axis.
      -
  • -
  • -
    -

    spread

    -
    public final int spread(int i)
    + + + + +
      +
    • +

      spread

      +
      public final int spread(int i)
      The spread is the access step size of a slice within the n-dimensional data array of its parent tensor. Use this to look up the spread in a particular dimension / axis.
      -
      -
      Parameters:
      +
      +
      Parameters:
      i - The dimension / axis index of the dimension / axis whose spread should be returned.
      -
      Returns:
      +
      Returns:
      The spread of the targeted dimension.
      -
  • -
  • -
    -

    offset

    -
    public final int[] offset()
    + + + + +
      +
    • +

      offset

      +
      public final int[] offset()
      The offset is the position of a slice within the n-dimensional data array of its parent tensor. Use this to get the offsets of all slice dimension.
      -
      -
      Returns:
      +
      +
      Returns:
      The offset position of the slice tensor inside the n-dimensional data array of the parent tensor.
      -
  • -
  • -
    -

    offset

    -
    public final int offset(int i)
    + + + + +
      +
    • +

      offset

      +
      public final int offset(int i)
      The offset is the position of a slice within the n-dimensional data array of its parent tensor. Use this to look up the offset in a particular dimension / axis.
      -
      -
      Parameters:
      +
      +
      Parameters:
      i - The dimension / axis index of the dimension / axis whose offset should be returned.
      -
      Returns:
      +
      Returns:
      The offset of the targeted dimension.
      -
  • -
  • -
    -

    indexOfIndex

    -
    public final int indexOfIndex(int index)
    + + + + +
      +
    • +

      indexOfIndex

      +
      public final int indexOfIndex(int index)
Use this to calculate the true index for an element in the data array (data array index) based on a provided "virtual index", or "value array index". This virtual index may be different from the true index depending on the type of nd-array,
@@ -441,53 +559,118 @@

      indexOfIndex

This virtual index ought to be turned into an index array which defines the position for every axis. Then this indices array will be converted into the final and true index targeting an underlying item. The information needed for performing this translation is expressed by individual implementations of this NDConfiguration interface, which contain everything needed to treat a given block of data as an nd-array!
      -
      -
      Parameters:
      +
      +
      Parameters:
      index - The virtual index of the tensor having this configuration.
      -
      Returns:
      +
      Returns:
      The true index which targets the actual data within the underlying data array of an nd-array / tensor.
      -
  • -
  • -
    -

    indicesOfIndex

    -
    public final int[] indicesOfIndex(int index)
    + + + + +
      +
    • +

      indicesOfIndex

      +
      public final int[] indicesOfIndex(int index)
      The following method calculates the axis indices for an element in the nd-array array based on a provided "virtual index". The resulting index defines the position of the element for every axis.
      -
      -
      Parameters:
      +
      +
      Parameters:
      index - The virtual index of the tensor having this configuration.
      -
      Returns:
      +
      Returns:
      The position of the (virtually) targeted element represented as an array of axis indices.
      -
  • -
  • -
    -

    indexOfIndices

    -
    public final int indexOfIndices(int[] indices)
    + + + + +
      +
    • +

      indexOfIndices

      +
      public final int indexOfIndices(int[] indices)
      The following method calculates the true index for an element in the data array based on a provided index array.
      -
      -
      Parameters:
      +
      +
      Parameters:
      indices - The indices for every axis of a given nd-array.
      -
      Returns:
      +
      Returns:
      The true index targeting the underlying data array of a given nd-array.
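Taken together, these translation methods are what any loop over such a configuration has to perform per element. A minimal sketch, reusing the cfg instance from the sketch above and assuming the element count is the product of the shape entries:

    int size = 1;
    for ( int axisSize : cfg.shape() ) size *= axisSize;

    for ( int i = 0; i < size; i++ ) {
        int dataIndex = cfg.indexOfIndex( i ); // where element i actually lives
        // ... read the element at dataIndex from the underlying data array ...
    }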
      -
  • - - + + + + - + + + + diff --git a/docs/jdocs/neureka/ndim/config/types/sliced/package-frame.html b/docs/jdocs/neureka/ndim/config/types/sliced/package-frame.html new file mode 100644 index 000000000..d95d2d588 --- /dev/null +++ b/docs/jdocs/neureka/ndim/config/types/sliced/package-frame.html @@ -0,0 +1,23 @@ + + + + + +neureka.ndim.config.types.sliced (neureka 1.0.1 API) + + + + +

    neureka.ndim.config.types.sliced

    + + + diff --git a/docs/jdocs/neureka/ndim/config/types/sliced/package-summary.html b/docs/jdocs/neureka/ndim/config/types/sliced/package-summary.html index 60f552fbb..ccdfa7583 100644 --- a/docs/jdocs/neureka/ndim/config/types/sliced/package-summary.html +++ b/docs/jdocs/neureka/ndim/config/types/sliced/package-summary.html @@ -1,107 +1,155 @@ - + + - -neureka.ndim.config.types.sliced (neureka 1.0.0 API) - - - - + +neureka.ndim.config.types.sliced (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    -

    Package neureka.ndim.config.types.sliced

    -
    -
    -
    package neureka.ndim.config.types.sliced
    -
    -
    -
    + + diff --git a/docs/jdocs/neureka/ndim/config/types/sliced/package-tree.html b/docs/jdocs/neureka/ndim/config/types/sliced/package-tree.html index aa0ebe2b7..454636f58 100644 --- a/docs/jdocs/neureka/ndim/config/types/sliced/package-tree.html +++ b/docs/jdocs/neureka/ndim/config/types/sliced/package-tree.html @@ -1,91 +1,154 @@ - + + - -neureka.ndim.config.types.sliced Class Hierarchy (neureka 1.0.0 API) - - - - + +neureka.ndim.config.types.sliced Class Hierarchy (neureka 1.0.1 API) - - - - - - + + -
    - -
    -

    Hierarchy For Package neureka.ndim.config.types.sliced

    -Package Hierarchies: +Package Hierarchies:
    -
    +

    Class Hierarchy

    -
    -
    + + + + diff --git a/docs/jdocs/neureka/ndim/config/types/views/SimpleReshapeView.html b/docs/jdocs/neureka/ndim/config/types/views/SimpleReshapeView.html index 616b24543..7a1b77ea3 100644 --- a/docs/jdocs/neureka/ndim/config/types/views/SimpleReshapeView.html +++ b/docs/jdocs/neureka/ndim/config/types/views/SimpleReshapeView.html @@ -1,413 +1,527 @@ - + + - -SimpleReshapeView (neureka 1.0.0 API) - - - - + +SimpleReshapeView (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class SimpleReshapeView

    +
    neureka.ndim.config.types.views
    +

    Class SimpleReshapeView

    -
    java.lang.Object -
    neureka.ndim.config.AbstractNDC -
    neureka.ndim.config.types.views.SimpleReshapeView
    -
    -
    -
    -
    +
    + +
    +
    -
    -
      +
      +
      public class SimpleReshapeView
      +extends AbstractNDC
      + +
    +
    +
    + - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        SimpleReshapeView

        -
public SimpleReshapeView(int[] form, NDConfiguration toBeViewed)
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            SimpleReshapeView

            +
            public SimpleReshapeView(int[] form,
            +                         NDConfiguration toBeViewed)
          -
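The constructor above suggests that a SimpleReshapeView wraps an existing NDConfiguration and presents it under a new shape ("form"). A hedged sketch with made-up dimensions and the imports from the earlier sketches (the reshape semantics are an assumption based on the class and parameter names):

    // 'base' describes a 2x6 layout; the view re-interprets it as 3x4.
    NDConfiguration base = SlicedNDConfiguration.construct(
            new int[]{ 2, 6 }, new int[]{ 6, 1 }, new int[]{ 6, 1 },
            new int[]{ 1, 1 }, new int[]{ 0, 0 }
    );
    SimpleReshapeView reshaped = new SimpleReshapeView( new int[]{ 3, 4 }, base );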
    • +
    -
  • -
    -

    Method Details

    -
      -
    • -
      -

      rank

      -
      public final int rank()
      +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          rank

          +
          public final int rank()
This method returns the number of axes of an nd-array / Tensor, which is equal to the length of the shape of an nd-array / Tensor.
          -
          -
          Returns:
          + a nd-array / Tensor which is equal to the + length of the shape of an nd-array / Tensor.
  • +
    +
    Returns:
    The number of axis of an nd-array.
    - -
  • -
    -

    shape

    -
    public final int[] shape()
    + + + + +
      +
    • +

      shape

      +
      public final int[] shape()
      This method returns an array of axis sizes.
      -
      -
      Returns:
      +
      +
      Returns:
      An array of axis sizes.
      -
  • -
  • -
    -

    shape

    -
    public final int shape(int i)
    + + + + +
      +
    • +

      shape

      +
      public final int shape(int i)
      This method receives an axis index and return the size of the axis. It enables readable access to the shape of this configuration.
      -
      -
      Parameters:
      +
      +
      Parameters:
      i - The index of the axis whose size ought to be returned.
      -
      Returns:
      +
      Returns:
      The axis size targeted by the provided index.
      -
  • -
  • -
    -

    indicesMap

    -
    public final int[] indicesMap()
    + + + + +
      +
    • +

      indicesMap

      +
      public final int[] indicesMap()
If one wants, for example, to access the fourth-last item of all items within a tensor based on a scalar index x, then the NDConfiguration.indicesMap() is needed as a basis for translating said scalar index x to an array of indices for every axis of the tensor represented by this NDConfiguration.
      -
      -
      Returns:
      + for every axis of the tensor represented by this NDConfiguration.
  • +
    +
    Returns:
    An array of values which are used to map an index to an indices array.
    - -
  • -
    -

    indicesMap

    -
    public final int indicesMap(int i)
    + + + + +
      +
    • +

      indicesMap

      +
      public final int indicesMap(int i)
This method receives an axis index and returns the indices mapping value of said axis to enable readable access to the indices map of this configuration. If one wants, for example, to access the fourth-last item of all items within a tensor based on a scalar index x, then the NDConfiguration.indicesMap() is needed as a basis for translating said scalar index x to an array of indices for every axis of the tensor represented by this NDConfiguration.
      -
      -
      Parameters:
      + for every axis of the tensor represented by this NDConfiguration.
  • +
    +
    Parameters:
    i - The index of the axis whose indices map value ought to be returned.
    -
    Returns:
    +
    Returns:
    The indices map value targeted by the provided index.
    - -
  • -
    -

    strides

    -
    public final int[] strides()
    + + + + +
  • +
    +
    Returns:
    An array of values used to translate the axes indices to a data array index.
    - -
  • -
    -

    strides

    -
    public final int strides(int i)
    + + + + +
      +
    • +

      strides

      +
      public final int strides(int i)
      This method receives an axis index and returns the translation value for the targeted axis. It enables readable and fast access to the translation of this configuration.
      -
      -
      Parameters:
      +
      +
      Parameters:
      i - The index of the axis whose translation ought to be returned.
      -
      Returns:
      +
      Returns:
      The axis translation targeted by the provided index.
      -
  • -
  • -
    -

    spread

    -
    public final int[] spread()
    + + + + +
      +
    • +

      spread

      +
      public final int[] spread()
      The spread is the access step size of a slice within the n-dimensional data array of its parent tensor.
      -
      -
      Returns:
      +
      +
      Returns:
      An array of index step sizes for each tensor dimension / axis.
      -
  • -
  • -
    -

    spread

    -
    public final int spread(int i)
    + + + + +
      +
    • +

      spread

      +
      public final int spread(int i)
      The spread is the access step size of a slice within the n-dimensional data array of its parent tensor. Use this to look up the spread in a particular dimension / axis.
      -
      -
      Parameters:
      +
      +
      Parameters:
      i - The dimension / axis index of the dimension / axis whose spread should be returned.
      -
      Returns:
      +
      Returns:
      The spread of the targeted dimension.
      -
  • -
  • -
    -

    offset

    -
    public final int[] offset()
    + + + + +
      +
    • +

      offset

      +
      public final int[] offset()
      The offset is the position of a slice within the n-dimensional data array of its parent tensor. Use this to get the offsets of all slice dimension.
      -
      -
      Returns:
      +
      +
      Returns:
      The offset position of the slice tensor inside the n-dimensional data array of the parent tensor.
      -
  • -
  • -
    -

    offset

    -
    public final int offset(int i)
    + + + + +
      +
    • +

      offset

      +
      public final int offset(int i)
      The offset is the position of a slice within the n-dimensional data array of its parent tensor. Use this to look up the offset in a particular dimension / axis.
      -
      -
      Parameters:
      +
      +
      Parameters:
      i - The dimension / axis index of the dimension / axis whose offset should be returned.
      -
      Returns:
      +
      Returns:
      The offset of the targeted dimension.
      -
  • -
  • -
    -

    indexOfIndex

    -
    public final int indexOfIndex(int index)
    + + + + +
      +
    • +

      indexOfIndex

      +
      public final int indexOfIndex(int index)
Use this to calculate the true index for an element in the data array (data array index) based on a provided "virtual index", or "value array index". This virtual index may be different from the true index depending on the type of nd-array,
@@ -416,53 +530,118 @@

      indexOfIndex

This virtual index ought to be turned into an index array which defines the position for every axis. Then this indices array will be converted into the final and true index targeting an underlying item. The information needed for performing this translation is expressed by individual implementations of this NDConfiguration interface, which contain everything needed to treat a given block of data as an nd-array!
      -
      -
      Parameters:
      +
      +
      Parameters:
      index - The virtual index of the tensor having this configuration.
      -
      Returns:
      +
      Returns:
      The true index which targets the actual data within the underlying data array of an nd-array / tensor.
      -
  • -
  • -
    -

    indicesOfIndex

    -
    public final int[] indicesOfIndex(int index)
    + + + + +
      +
    • +

      indicesOfIndex

      +
      public final int[] indicesOfIndex(int index)
      The following method calculates the axis indices for an element in the nd-array array based on a provided "virtual index". The resulting index defines the position of the element for every axis.
      -
      -
      Parameters:
      +
      +
      Parameters:
      index - The virtual index of the tensor having this configuration.
      -
      Returns:
      +
      Returns:
      The position of the (virtually) targeted element represented as an array of axis indices.
      -
  • -
  • -
    -

    indexOfIndices

    -
    public final int indexOfIndices(int[] indices)
    + + + + +
      +
    • +

      indexOfIndices

      +
      public final int indexOfIndices(int[] indices)
      The following method calculates the true index for an element in the data array based on a provided index array.
      -
      -
      Parameters:
      +
      +
      Parameters:
      indices - The indices for every axis of a given nd-array.
      -
      Returns:
      +
      Returns:
      The true index targeting the underlying data array of a given nd-array.
      -
  • - - + + + + - + + + + diff --git a/docs/jdocs/neureka/ndim/config/types/views/package-frame.html b/docs/jdocs/neureka/ndim/config/types/views/package-frame.html new file mode 100644 index 000000000..2a1eda906 --- /dev/null +++ b/docs/jdocs/neureka/ndim/config/types/views/package-frame.html @@ -0,0 +1,19 @@ + + + + + +neureka.ndim.config.types.views (neureka 1.0.1 API) + + + + +

    neureka.ndim.config.types.views

    +
    +

    Classes

    + +
    + + diff --git a/docs/jdocs/neureka/ndim/config/types/views/package-summary.html b/docs/jdocs/neureka/ndim/config/types/views/package-summary.html index 43e5a798b..f2fa900ae 100644 --- a/docs/jdocs/neureka/ndim/config/types/views/package-summary.html +++ b/docs/jdocs/neureka/ndim/config/types/views/package-summary.html @@ -1,101 +1,139 @@ - + + - -neureka.ndim.config.types.views (neureka 1.0.0 API) - - - - + +neureka.ndim.config.types.views (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    -

    Package neureka.ndim.config.types.views

    -
    -
    -
    package neureka.ndim.config.types.views
    -
    -
    -
    + + diff --git a/docs/jdocs/neureka/ndim/config/types/views/package-tree.html b/docs/jdocs/neureka/ndim/config/types/views/package-tree.html index eeab1f9ac..6b81d167c 100644 --- a/docs/jdocs/neureka/ndim/config/types/views/package-tree.html +++ b/docs/jdocs/neureka/ndim/config/types/views/package-tree.html @@ -1,75 +1,138 @@ - + + - -neureka.ndim.config.types.views Class Hierarchy (neureka 1.0.0 API) - - - - + +neureka.ndim.config.types.views Class Hierarchy (neureka 1.0.1 API) - - - - - - + + -
    - -
    -

    Hierarchy For Package neureka.ndim.config.types.views

    -Package Hierarchies: +Package Hierarchies:
    -
    +

    Class Hierarchy

    -
    -
    + + + + diff --git a/docs/jdocs/neureka/ndim/config/types/views/virtual/VirtualNDConfiguration.html b/docs/jdocs/neureka/ndim/config/types/views/virtual/VirtualNDConfiguration.html index 68e7d0020..2e21c3dc8 100644 --- a/docs/jdocs/neureka/ndim/config/types/views/virtual/VirtualNDConfiguration.html +++ b/docs/jdocs/neureka/ndim/config/types/views/virtual/VirtualNDConfiguration.html @@ -1,401 +1,513 @@ - + + - -VirtualNDConfiguration (neureka 1.0.0 API) - - - - + +VirtualNDConfiguration (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class VirtualNDConfiguration

    +
    neureka.ndim.config.types.views.virtual
    +

    Class VirtualNDConfiguration

    -
    java.lang.Object -
    neureka.ndim.config.AbstractNDC -
    neureka.ndim.config.types.views.virtual.VirtualNDConfiguration
    -
    -
    -
    -
    +
    + +
    +
    -
    -
      + +
    +
    +
    + - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Method Details

      -
        -
      • -
        -

        construct

        -
        public static VirtualNDConfiguration construct(int[] shape)
        -
        +
          +
        • + + +

          Method Detail

          + + + +
            +
          • +

            construct

            +
            public static VirtualNDConfiguration construct(int[] shape)
          • -
          • -
            -

            rank

            -
            public final int rank()
            +
          + + + +
            +
          • +

            rank

            +
            public final int rank()
This method returns the number of axes of an nd-array / Tensor, which is equal to the length of the shape of an nd-array / Tensor.
            -
            -
            Returns:
            + a nd-array / Tensor which is equal to the + length of the shape of an nd-array / Tensor.
    +
    +
    Returns:
    The number of axis of an nd-array.
    - -
  • -
    -

    shape

    -
    public final int[] shape()
    + + + + +
      +
    • +

      shape

      +
      public final int[] shape()
      This method returns an array of axis sizes.
      -
      -
      Returns:
      +
      +
      Returns:
      An array of axis sizes.
      -
  • -
  • -
    -

    shape

    -
    public final int shape(int i)
    + + + + +
      +
    • +

      shape

      +
      public final int shape(int i)
      This method receives an axis index and return the size of the axis. It enables readable access to the shape of this configuration.
      -
      -
      Parameters:
      +
      +
      Parameters:
      i - The index of the axis whose size ought to be returned.
      -
      Returns:
      +
      Returns:
      The axis size targeted by the provided index.
      -
  • -
  • -
    -

    indicesMap

    -
    public final int[] indicesMap()
    + + + + +
      +
    • +

      indicesMap

      +
      public final int[] indicesMap()
If one wants, for example, to access the fourth-last item of all items within a tensor based on a scalar index x, then the NDConfiguration.indicesMap() is needed as a basis for translating said scalar index x to an array of indices for every axis of the tensor represented by this NDConfiguration.
      -
      -
      Returns:
      + for every axis of the tensor represented by this NDConfiguration.
  • +
    +
    Returns:
    An array of values which are used to map an index to an indices array.
    - -
  • -
    -

    indicesMap

    -
    public final int indicesMap(int i)
    + + + + +
      +
    • +

      indicesMap

      +
      public final int indicesMap(int i)
This method receives an axis index and returns the indices mapping value of said axis to enable readable access to the indices map of this configuration. If one wants, for example, to access the fourth-last item of all items within a tensor based on a scalar index x, then the NDConfiguration.indicesMap() is needed as a basis for translating said scalar index x to an array of indices for every axis of the tensor represented by this NDConfiguration.
      -
      -
      Parameters:
      + for every axis of the tensor represented by this NDConfiguration.
  • +
    +
    Parameters:
    i - The index of the axis whose indices map value ought to be returned.
    -
    Returns:
    +
    Returns:
    The indices map value targeted by the provided index.
    - -
  • -
    -

    strides

    -
    public final int[] strides()
    + + + + +
  • +
    +
    Returns:
    An array of values used to translate the axes indices to a data array index.
    - -
  • -
    -

    strides

    -
    public final int strides(int i)
    + + + + +
      +
    • +

      strides

      +
      public final int strides(int i)
      This method receives an axis index and returns the translation value for the targeted axis. It enables readable and fast access to the translation of this configuration.
      -
      -
      Parameters:
      +
      +
      Parameters:
      i - The index of the axis whose translation ought to be returned.
      -
      Returns:
      +
      Returns:
      The axis translation targeted by the provided index.
      -
  • -
  • -
    -

    spread

    -
    public final int[] spread()
    + + + + +
      +
    • +

      spread

      +
      public final int[] spread()
      The spread is the access step size of a slice within the n-dimensional data array of its parent tensor.
      -
      -
      Returns:
      +
      +
      Returns:
      An array of index step sizes for each tensor dimension / axis.
      -
  • -
  • -
    -

    spread

    -
    public final int spread(int i)
    + + + + +
      +
    • +

      spread

      +
      public final int spread(int i)
      The spread is the access step size of a slice within the n-dimensional data array of its parent tensor. Use this to look up the spread in a particular dimension / axis.
      -
      -
      Parameters:
      +
      +
      Parameters:
      i - The dimension / axis index of the dimension / axis whose spread should be returned.
      -
      Returns:
      +
      Returns:
      The spread of the targeted dimension.
      -
  • -
  • -
    -

    offset

    -
    public final int[] offset()
    + + + + +
      +
    • +

      offset

      +
      public final int[] offset()
      The offset is the position of a slice within the n-dimensional data array of its parent tensor. Use this to get the offsets of all slice dimension.
      -
      -
      Returns:
      +
      +
      Returns:
      The offset position of the slice tensor inside the n-dimensional data array of the parent tensor.
      -
  • -
  • -
    -

    offset

    -
    public final int offset(int i)
    + + + + +
      +
    • +

      offset

      +
      public final int offset(int i)
      The offset is the position of a slice within the n-dimensional data array of its parent tensor. Use this to look up the offset in a particular dimension / axis.
      -
      -
      Parameters:
      +
      +
      Parameters:
      i - The dimension / axis index of the dimension / axis whose offset should be returned.
      -
      Returns:
      +
      Returns:
      The offset of the targeted dimension.
      -
  • -
  • -
    -

    indexOfIndex

    -
    public final int indexOfIndex(int index)
    + + + + +
      +
    • +

      indexOfIndex

      +
      public final int indexOfIndex(int index)
Use this to calculate the true index for an element in the data array (data array index) based on a provided "virtual index", or "value array index". This virtual index may be different from the true index depending on the type of nd-array,
@@ -404,64 +516,132 @@

      indexOfIndex

This virtual index ought to be turned into an index array which defines the position for every axis. Then this indices array will be converted into the final and true index targeting an underlying item. The information needed for performing this translation is expressed by individual implementations of this NDConfiguration interface, which contain everything needed to treat a given block of data as an nd-array!
      -
      -
      Parameters:
      +
      +
      Parameters:
      index - The virtual index of the tensor having this configuration.
      -
      Returns:
      +
      Returns:
      The true index which targets the actual data within the underlying data array of an nd-array / tensor.
      -
  • -
  • -
    -

    indicesOfIndex

    -
    public final int[] indicesOfIndex(int index)
    + + + + +
      +
    • +

      indicesOfIndex

      +
      public final int[] indicesOfIndex(int index)
      The following method calculates the axis indices for an element in the nd-array array based on a provided "virtual index". The resulting index defines the position of the element for every axis.
      -
      -
      Parameters:
      +
      +
      Parameters:
      index - The virtual index of the tensor having this configuration.
      -
      Returns:
      +
      Returns:
      The position of the (virtually) targeted element represented as an array of axis indices.
      -
  • -
  • -
    -

    indexOfIndices

    -
    public final int indexOfIndices(int[] indices)
    + + + + +
      +
    • +

      indexOfIndices

      +
      public final int indexOfIndices(int[] indices)
      The following method calculates the true index for an element in the data array based on a provided index array.
      -
      -
      Parameters:
      +
      +
      Parameters:
      indices - The indices for every axis of a given nd-array.
      -
      Returns:
      +
      Returns:
      The true index targeting the underlying data array of a given nd-array.
      -
  • -
  • -
    -

    isVirtual

    -
    public final boolean isVirtual()
    -
    -
    Returns:
    -
The truth value determining if this NDConfiguration represents virtual tensors (see Tensor.isVirtual()).
    + + + + +
      +
    • +

      isVirtual

      +
      public final boolean isVirtual()
      +
      +
      Returns:
      +
The truth value determining if this NDConfiguration represents virtual tensors (see Tensor.isVirtual()).
      -
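Since VirtualNDConfiguration only needs a shape (see construct(int[] shape) above), a minimal usage sketch looks as follows; what "virtual" implies for the underlying data is an interpretation of Tensor.isVirtual() and is not spelled out in this file:

    import neureka.ndim.config.types.views.virtual.VirtualNDConfiguration;

    VirtualNDConfiguration virtual = VirtualNDConfiguration.construct( new int[]{ 2, 3 } );
    boolean v = virtual.isVirtual(); // presumably true for this configuration type
    int     r = virtual.rank();      // 2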
  • - - + + + + - + + + + diff --git a/docs/jdocs/neureka/ndim/config/types/views/virtual/package-frame.html b/docs/jdocs/neureka/ndim/config/types/views/virtual/package-frame.html new file mode 100644 index 000000000..a525ccd9d --- /dev/null +++ b/docs/jdocs/neureka/ndim/config/types/views/virtual/package-frame.html @@ -0,0 +1,19 @@ + + + + + +neureka.ndim.config.types.views.virtual (neureka 1.0.1 API) + + + + +

    neureka.ndim.config.types.views.virtual

    +
    +

    Classes

    + +
    + + diff --git a/docs/jdocs/neureka/ndim/config/types/views/virtual/package-summary.html b/docs/jdocs/neureka/ndim/config/types/views/virtual/package-summary.html index 2e11a09e4..d0a11f0fd 100644 --- a/docs/jdocs/neureka/ndim/config/types/views/virtual/package-summary.html +++ b/docs/jdocs/neureka/ndim/config/types/views/virtual/package-summary.html @@ -1,97 +1,143 @@ - + + - -neureka.ndim.config.types.views.virtual (neureka 1.0.0 API) - - - - + +neureka.ndim.config.types.views.virtual (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    -

    Package neureka.ndim.config.types.views.virtual

    +

    Package neureka.ndim.config.types.views.virtual

    -
    -
    package neureka.ndim.config.types.views.virtual
    -
    -
    -
    + + + + diff --git a/docs/jdocs/neureka/ndim/config/types/views/virtual/package-tree.html b/docs/jdocs/neureka/ndim/config/types/views/virtual/package-tree.html index fc192fc43..880a3a352 100644 --- a/docs/jdocs/neureka/ndim/config/types/views/virtual/package-tree.html +++ b/docs/jdocs/neureka/ndim/config/types/views/virtual/package-tree.html @@ -1,75 +1,138 @@ - + + - -neureka.ndim.config.types.views.virtual Class Hierarchy (neureka 1.0.0 API) - - - - + +neureka.ndim.config.types.views.virtual Class Hierarchy (neureka 1.0.1 API) - - - - - - + + -
    - -
    -

    Hierarchy For Package neureka.ndim.config.types.views.virtual

    -Package Hierarchies: +Package Hierarchies:
    -
    +

    Class Hierarchy

    -
    -
    + + + + diff --git a/docs/jdocs/neureka/ndim/iterator/NDIterator.NonVirtual.html b/docs/jdocs/neureka/ndim/iterator/NDIterator.NonVirtual.html index 633a97aab..564f9fb38 100644 --- a/docs/jdocs/neureka/ndim/iterator/NDIterator.NonVirtual.html +++ b/docs/jdocs/neureka/ndim/iterator/NDIterator.NonVirtual.html @@ -1,222 +1,343 @@ - + + - -NDIterator.NonVirtual (neureka 1.0.0 API) - - - - + +NDIterator.NonVirtual (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Enum Class NDIterator.NonVirtual

    -
    -
    java.lang.Object -
    java.lang.Enum<NDIterator.NonVirtual> -
    neureka.ndim.iterator.NDIterator.NonVirtual
    +
    neureka.ndim.iterator
    +

    Enum NDIterator.NonVirtual

    -
    -
    -
    +
    +
      +
    • java.lang.Object
    • +
    • + +
    • +
    +
    +
    -
    -
    - +
    + - -
    -
      +
    +
    + -
  • -
    -

    Method Details

    -
      -
    • -
      -

      values

      -
      public static NDIterator.NonVirtual[] values()
      -
      Returns an array containing the constants of this enum class, in -the order they are declared.
      -
      -
      Returns:
      -
      an array containing the constants of this enum class, in the order they are declared
      +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          values

          +
          public static NDIterator.NonVirtual[] values()
          +
          Returns an array containing the constants of this enum type, in +the order they are declared. This method may be used to iterate +over the constants as follows: +
          +for (NDIterator.NonVirtual c : NDIterator.NonVirtual.values())
          +    System.out.println(c);
          +
          +
          +
          Returns:
          +
          an array containing the constants of this enum type, in the order they are declared
          -
    • -
    • -
      -

      valueOf

      -
      public static NDIterator.NonVirtual valueOf(String name)
      -
      Returns the enum constant of this class with the specified name. +
    + + + +
      +
    • +

      valueOf

      +
      public static NDIterator.NonVirtual valueOf(java.lang.String name)
      +
      Returns the enum constant of this type with the specified name. The string must match exactly an identifier used to declare an -enum constant in this class. (Extraneous whitespace characters are +enum constant in this type. (Extraneous whitespace characters are not permitted.)
      -
      -
      Parameters:
      +
      +
      Parameters:
      name - the name of the enum constant to be returned.
      -
      Returns:
      +
      Returns:
      the enum constant with the specified name
      -
      Throws:
      -
      IllegalArgumentException - if this enum class has no constant with the specified name
      -
      NullPointerException - if the argument is null
      +
      Throws:
      +
      java.lang.IllegalArgumentException - if this enum type has no constant with the specified name
      +
      java.lang.NullPointerException - if the argument is null
      -
  • - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/ndim/iterator/NDIterator.html b/docs/jdocs/neureka/ndim/iterator/NDIterator.html index 13060c53b..e7fb9a5d3 100644 --- a/docs/jdocs/neureka/ndim/iterator/NDIterator.html +++ b/docs/jdocs/neureka/ndim/iterator/NDIterator.html @@ -1,307 +1,457 @@ - + + - -NDIterator (neureka 1.0.0 API) - - - - + +NDIterator (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Interface NDIterator

    +
    neureka.ndim.iterator
    +

    Interface NDIterator

    -
    -
    +
    +
    +
    -
    -
    - +
    +
    +
    + + + + + + + + +
      +
    • +

      shape

      +
      int shape(int i)
    • -
    • -
      -

      shape

      -
      int[] shape()
      -
      +
    + + + +
      +
    • +

      shape

      +
      int[] shape()
    • -
    • -
      -

      increment

      -
      void increment()
      -
      +
    + + + +
      +
    • +

      increment

      +
      void increment()
      +
    • +
    + + + +
      +
    • +

      getIndexAndIncrement

      +
      default int getIndexAndIncrement()
    • -
    • -
      -

      getIndexAndIncrement

      -
      default int getIndexAndIncrement()
      -
      +
    + + + +
      +
    • +

      decrement

      +
      void decrement()
    • -
    • -
      -

      decrement

      -
      void decrement()
      -
      +
    + + + +
      +
    • +

      i

      +
      int i()
    • -
    • -
      -

      i

      -
      int i()
      -
      +
    + + + +
      +
    • +

      get

      +
      int get(int axis)
    • -
    • -
      -

      get

      -
      int get(int axis)
      -
      +
    + + + +
      +
    • +

      get

      +
      int[] get()
    • -
    • -
      -

      get

      -
      int[] get()
      -
      +
    + + + +
      +
    • +

      set

      +
      void set(int axis,
      +         int position)
    • -
    • -
      -

      set

      -
      void set(int axis, - int position)
      -
      +
    + + + +
      +
    • +

      set

      +
      void set(int[] indices)
    • -
    • -
      -

      set

      -
      void set(int[] indices)
      -
      +
    + + + +
      +
    • +

      rank

      +
      int rank()
    • -
    • -
      -

      rank

      -
      int rank()
      -
      +
    - - +
    +
    - + + + + diff --git a/docs/jdocs/neureka/ndim/iterator/package-frame.html b/docs/jdocs/neureka/ndim/iterator/package-frame.html new file mode 100644 index 000000000..1428227a6 --- /dev/null +++ b/docs/jdocs/neureka/ndim/iterator/package-frame.html @@ -0,0 +1,23 @@ + + + + + +neureka.ndim.iterator (neureka 1.0.1 API) + + + + +

    neureka.ndim.iterator

    +
    +

    Interfaces

    + +

    Enums

    + +
    + + diff --git a/docs/jdocs/neureka/ndim/iterator/package-summary.html b/docs/jdocs/neureka/ndim/iterator/package-summary.html index e032939ce..b001d91c1 100644 --- a/docs/jdocs/neureka/ndim/iterator/package-summary.html +++ b/docs/jdocs/neureka/ndim/iterator/package-summary.html @@ -1,107 +1,158 @@ - + + - -neureka.ndim.iterator (neureka 1.0.0 API) - - - - + +neureka.ndim.iterator (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    -

    Package neureka.ndim.iterator

    -
    -
    -
    package neureka.ndim.iterator
    -
    -
    -
    + + + + diff --git a/docs/jdocs/neureka/ndim/iterator/package-tree.html b/docs/jdocs/neureka/ndim/iterator/package-tree.html index a6be46e28..c9c75da10 100644 --- a/docs/jdocs/neureka/ndim/iterator/package-tree.html +++ b/docs/jdocs/neureka/ndim/iterator/package-tree.html @@ -1,81 +1,142 @@ - + + - -neureka.ndim.iterator Class Hierarchy (neureka 1.0.0 API) - - - - + +neureka.ndim.iterator Class Hierarchy (neureka 1.0.1 API) - - - - - - + + -
    - -
    -

    Hierarchy For Package neureka.ndim.iterator

    -Package Hierarchies: +Package Hierarchies:
    -
    +

    Interface Hierarchy

    -
    -
    -

    Enum Class Hierarchy

    +

    Enum Hierarchy

    -
    -
    + + + + diff --git a/docs/jdocs/neureka/ndim/iterator/types/permuted/Permuted2DCIterator.html b/docs/jdocs/neureka/ndim/iterator/types/permuted/Permuted2DCIterator.html index 0e3403126..6742fbd3c 100644 --- a/docs/jdocs/neureka/ndim/iterator/types/permuted/Permuted2DCIterator.html +++ b/docs/jdocs/neureka/ndim/iterator/types/permuted/Permuted2DCIterator.html @@ -1,286 +1,461 @@ - + + - -Permuted2DCIterator (neureka 1.0.0 API) - - - - + +Permuted2DCIterator (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class Permuted2DCIterator

    -
    -
    java.lang.Object - +
    neureka.ndim.iterator.types.permuted
    +

    Class Permuted2DCIterator

    -
    -
    +
    + +
    +
    -
    - +
    +
    +
    +
    + -
  • -
    -

    Method Details

    -
      -
    • -
      -

      increment

      -
      public final void increment()
      -
      -
      Specified by:
      -
      increment in interface NDIterator
      +
    • -
    • -
      -

      decrement

      -
      public final void decrement()
      -
      -
      Specified by:
      -
      decrement in interface NDIterator
      +
    + + + +
      +
    • +

      decrement

      +
      public final void decrement()
      +
      +
      Specified by:
      +
      decrement in interface NDIterator
      -
  • -
  • -
    -

    i

    -
    public final int i()
    -
    -
    Specified by:
    -
    i in interface NDIterator
    + + + + +
      +
    • +

      i

      +
      public final int i()
      +
      +
      Specified by:
      +
      i in interface NDIterator
      -
  • -
  • -
    -

    get

    -
    public final int get(int axis)
    -
    -
    Specified by:
    -
    get in interface NDIterator
    + + + + +
      +
    • +

      get

      +
      public final int get(int axis)
      +
      +
      Specified by:
      +
      get in interface NDIterator
      -
  • -
  • -
    -

    get

    -
    public final int[] get()
    -
    -
    Specified by:
    -
    get in interface NDIterator
    + + + + +
      +
    • +

      get

      +
      public final int[] get()
      +
      +
      Specified by:
      +
      get in interface NDIterator
      -
  • -
  • -
    -

    set

    -
    public final void set(int axis, - int position)
    -
    -
    Specified by:
    -
    set in interface NDIterator
    + + + + +
      +
    • +

      set

      +
      public final void set(int axis,
      +                      int position)
      +
      +
      Specified by:
      +
      set in interface NDIterator
      -
  • -
  • -
    -

    set

    -
    public final void set(int[] indices)
    -
    -
    Specified by:
    -
    set in interface NDIterator
    + + + + +
      +
    • +

      set

      +
      public final void set(int[] indices)
      +
      +
      Specified by:
      +
      set in interface NDIterator
      -
  • - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/ndim/iterator/types/permuted/Permuted3DCIterator.html b/docs/jdocs/neureka/ndim/iterator/types/permuted/Permuted3DCIterator.html index a718c2424..5a4ce7c59 100644 --- a/docs/jdocs/neureka/ndim/iterator/types/permuted/Permuted3DCIterator.html +++ b/docs/jdocs/neureka/ndim/iterator/types/permuted/Permuted3DCIterator.html @@ -1,286 +1,461 @@ - + + - -Permuted3DCIterator (neureka 1.0.0 API) - - - - + +Permuted3DCIterator (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class Permuted3DCIterator

    -
    -
    java.lang.Object - +
    neureka.ndim.iterator.types.permuted
    +

    Class Permuted3DCIterator

    -
    -
    +
    + +
    +
    -
    - +
    +
    +
    +
    + -
  • -
    -

    Method Details

    -
      -
    • -
      -

      increment

      -
      public final void increment()
      -
      -
      Specified by:
      -
      increment in interface NDIterator
      +
    • -
    • -
      -

      decrement

      -
      public final void decrement()
      -
      -
      Specified by:
      -
      decrement in interface NDIterator
      +
    + + + +
      +
    • +

      decrement

      +
      public final void decrement()
      +
      +
      Specified by:
      +
      decrement in interface NDIterator
      -
  • -
  • -
    -

    i

    -
    public final int i()
    -
    -
    Specified by:
    -
    i in interface NDIterator
    + + + + +
      +
    • +

      i

      +
      public final int i()
      +
      +
      Specified by:
      +
      i in interface NDIterator
      -
  • -
  • -
    -

    get

    -
    public final int get(int axis)
    -
    -
    Specified by:
    -
    get in interface NDIterator
    + + + + +
      +
    • +

      get

      +
      public final int get(int axis)
      +
      +
      Specified by:
      +
      get in interface NDIterator
      -
  • -
  • -
    -

    get

    -
    public final int[] get()
    -
    -
    Specified by:
    -
    get in interface NDIterator
    + + + + +
      +
    • +

      get

      +
      public final int[] get()
      +
      +
      Specified by:
      +
      get in interface NDIterator
      -
  • -
  • -
    -

    set

    -
    public final void set(int axis, - int position)
    -
    -
    Specified by:
    -
    set in interface NDIterator
    + + + + +
      +
    • +

      set

      +
      public final void set(int axis,
      +                      int position)
      +
      +
      Specified by:
      +
      set in interface NDIterator
      -
  • -
  • -
    -

    set

    -
    public final void set(int[] indices)
    -
    -
    Specified by:
    -
    set in interface NDIterator
    + + + + +
      +
    • +

      set

      +
      public final void set(int[] indices)
      +
      +
      Specified by:
      +
      set in interface NDIterator
      -
  • - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/ndim/iterator/types/permuted/package-frame.html b/docs/jdocs/neureka/ndim/iterator/types/permuted/package-frame.html new file mode 100644 index 000000000..5b447e4c8 --- /dev/null +++ b/docs/jdocs/neureka/ndim/iterator/types/permuted/package-frame.html @@ -0,0 +1,20 @@ + + + + + +neureka.ndim.iterator.types.permuted (neureka 1.0.1 API) + + + + +

    neureka.ndim.iterator.types.permuted

    + + + diff --git a/docs/jdocs/neureka/ndim/iterator/types/permuted/package-summary.html b/docs/jdocs/neureka/ndim/iterator/types/permuted/package-summary.html index cb081a131..9ea1d6087 100644 --- a/docs/jdocs/neureka/ndim/iterator/types/permuted/package-summary.html +++ b/docs/jdocs/neureka/ndim/iterator/types/permuted/package-summary.html @@ -1,84 +1,143 @@ - + + - -neureka.ndim.iterator.types.permuted (neureka 1.0.0 API) - - - - + +neureka.ndim.iterator.types.permuted (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    -

    Package neureka.ndim.iterator.types.permuted

    -
    -
    -
    package neureka.ndim.iterator.types.permuted
    -
    -
    -
    + + + + diff --git a/docs/jdocs/neureka/ndim/iterator/types/permuted/package-tree.html b/docs/jdocs/neureka/ndim/iterator/types/permuted/package-tree.html index e23608a70..48ec90250 100644 --- a/docs/jdocs/neureka/ndim/iterator/types/permuted/package-tree.html +++ b/docs/jdocs/neureka/ndim/iterator/types/permuted/package-tree.html @@ -1,80 +1,100 @@ - + + - -neureka.ndim.iterator.types.permuted Class Hierarchy (neureka 1.0.0 API) - - - - + +neureka.ndim.iterator.types.permuted Class Hierarchy (neureka 1.0.1 API) - - - - - - + + -
    - -
    -

    Hierarchy For Package neureka.ndim.iterator.types.permuted

    -Package Hierarchies: +Package Hierarchies:
    -
    +

    Class Hierarchy

    -
    + + + + diff --git a/docs/jdocs/neureka/ndim/iterator/types/simple/Simple1DCIterator.html b/docs/jdocs/neureka/ndim/iterator/types/simple/Simple1DCIterator.html index cddd3f0b2..b3cf5b4c9 100644 --- a/docs/jdocs/neureka/ndim/iterator/types/simple/Simple1DCIterator.html +++ b/docs/jdocs/neureka/ndim/iterator/types/simple/Simple1DCIterator.html @@ -1,286 +1,461 @@ - + + - -Simple1DCIterator (neureka 1.0.0 API) - - - - + +Simple1DCIterator (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class Simple1DCIterator

    -
    -
    java.lang.Object - +
    neureka.ndim.iterator.types.simple
    +

    Class Simple1DCIterator

    -
    -
    +
    + +
    +
    -
    - +
    +
    +
    +
    + -
  • -
    -

    Method Details

    -
      -
    • -
      -

      increment

      -
      public final void increment()
      -
      -
      Specified by:
      -
      increment in interface NDIterator
      +
    • -
    • -
      -

      decrement

      -
      public final void decrement()
      -
      -
      Specified by:
      -
      decrement in interface NDIterator
      +
    + + + +
      +
    • +

      decrement

      +
      public final void decrement()
      +
      +
      Specified by:
      +
      decrement in interface NDIterator
      -
  • -
  • -
    -

    i

    -
    public final int i()
    -
    -
    Specified by:
    -
    i in interface NDIterator
    + + + + +
      +
    • +

      i

      +
      public final int i()
      +
      +
      Specified by:
      +
      i in interface NDIterator
      -
  • -
  • -
    -

    get

    -
    public final int get(int axis)
    -
    -
    Specified by:
    -
    get in interface NDIterator
    + + + + +
      +
    • +

      get

      +
      public final int get(int axis)
      +
      +
      Specified by:
      +
      get in interface NDIterator
      -
  • -
  • -
    -

    get

    -
    public final int[] get()
    -
    -
    Specified by:
    -
    get in interface NDIterator
    + + + + +
      +
    • +

      get

      +
      public final int[] get()
      +
      +
      Specified by:
      +
      get in interface NDIterator
      -
  • -
  • -
    -

    set

    -
    public final void set(int axis, - int position)
    -
    -
    Specified by:
    -
    set in interface NDIterator
    + + + + +
      +
    • +

      set

      +
      public final void set(int axis,
      +                      int position)
      +
      +
      Specified by:
      +
      set in interface NDIterator
      -
  • -
  • -
    -

    set

    -
    public final void set(int[] indices)
    -
    -
    Specified by:
    -
    set in interface NDIterator
    + + + + +
      +
    • +

      set

      +
      public final void set(int[] indices)
      +
      +
      Specified by:
      +
      set in interface NDIterator
      -
  • - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/ndim/iterator/types/simple/Simple2DCIterator.html b/docs/jdocs/neureka/ndim/iterator/types/simple/Simple2DCIterator.html index e6ec3a299..53a576fd4 100644 --- a/docs/jdocs/neureka/ndim/iterator/types/simple/Simple2DCIterator.html +++ b/docs/jdocs/neureka/ndim/iterator/types/simple/Simple2DCIterator.html @@ -1,286 +1,461 @@ - + + - -Simple2DCIterator (neureka 1.0.0 API) - - - - + +Simple2DCIterator (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class Simple2DCIterator

    -
    -
    java.lang.Object - +
    neureka.ndim.iterator.types.simple
    +

    Class Simple2DCIterator

    -
    -
    +
    + +
    +
    -
    - +
    +
    +
    +
    + -
  • -
    -

    Method Details

    -
      -
    • -
      -

      increment

      -
      public final void increment()
      -
      -
      Specified by:
      -
      increment in interface NDIterator
      +
    • -
    • -
      -

      decrement

      -
      public final void decrement()
      -
      -
      Specified by:
      -
      decrement in interface NDIterator
      +
    + + + +
      +
    • +

      decrement

      +
      public final void decrement()
      +
      +
      Specified by:
      +
      decrement in interface NDIterator
      -
  • -
  • -
    -

    i

    -
    public final int i()
    -
    -
    Specified by:
    -
    i in interface NDIterator
    + + + + +
      +
    • +

      i

      +
      public final int i()
      +
      +
      Specified by:
      +
      i in interface NDIterator
      -
  • -
  • -
    -

    get

    -
    public final int get(int axis)
    -
    -
    Specified by:
    -
    get in interface NDIterator
    + + + + +
      +
    • +

      get

      +
      public final int get(int axis)
      +
      +
      Specified by:
      +
      get in interface NDIterator
      -
  • -
  • -
    -

    get

    -
    public final int[] get()
    -
    -
    Specified by:
    -
    get in interface NDIterator
    + + + + +
      +
    • +

      get

      +
      public final int[] get()
      +
      +
      Specified by:
      +
      get in interface NDIterator
      -
  • -
  • -
    -

    set

    -
    public final void set(int axis, - int position)
    -
    -
    Specified by:
    -
    set in interface NDIterator
    + + + + +
      +
    • +

      set

      +
      public final void set(int axis,
      +                      int position)
      +
      +
      Specified by:
      +
      set in interface NDIterator
      -
  • -
  • -
    -

    set

    -
    public final void set(int[] indices)
    -
    -
    Specified by:
    -
    set in interface NDIterator
    + + + + +
      +
    • +

      set

      +
      public final void set(int[] indices)
      +
      +
      Specified by:
      +
      set in interface NDIterator
      -
  • - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/ndim/iterator/types/simple/Simple3DCIterator.html b/docs/jdocs/neureka/ndim/iterator/types/simple/Simple3DCIterator.html index 964be829e..63ab53c19 100644 --- a/docs/jdocs/neureka/ndim/iterator/types/simple/Simple3DCIterator.html +++ b/docs/jdocs/neureka/ndim/iterator/types/simple/Simple3DCIterator.html @@ -1,286 +1,461 @@ - + + - -Simple3DCIterator (neureka 1.0.0 API) - - - - + +Simple3DCIterator (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class Simple3DCIterator

    -
    -
    java.lang.Object - +
    neureka.ndim.iterator.types.simple
    +

    Class Simple3DCIterator

    -
    -
    +
    + +
    +
    -
    - +
    +
    +
    +
    + -
  • -
    -

    Method Details

    -
      -
    • -
      -

      increment

      -
      public final void increment()
      -
      -
      Specified by:
      -
      increment in interface NDIterator
      +
    • -
    • -
      -

      decrement

      -
      public final void decrement()
      -
      -
      Specified by:
      -
      decrement in interface NDIterator
      +
    + + + +
      +
    • +

      decrement

      +
      public final void decrement()
      +
      +
      Specified by:
      +
      decrement in interface NDIterator
      -
  • -
  • -
    -

    i

    -
    public final int i()
    -
    -
    Specified by:
    -
    i in interface NDIterator
    + + + + +
      +
    • +

      i

      +
      public final int i()
      +
      +
      Specified by:
      +
      i in interface NDIterator
      -
  • -
  • -
    -

    get

    -
    public final int get(int axis)
    -
    -
    Specified by:
    -
    get in interface NDIterator
    + + + + +
      +
    • +

      get

      +
      public final int get(int axis)
      +
      +
      Specified by:
      +
      get in interface NDIterator
      -
  • -
  • -
    -

    get

    -
    public final int[] get()
    -
    -
    Specified by:
    -
    get in interface NDIterator
    + + + + +
      +
    • +

      get

      +
      public final int[] get()
      +
      +
      Specified by:
      +
      get in interface NDIterator
      -
  • -
  • -
    -

    set

    -
    public final void set(int axis, - int position)
    -
    -
    Specified by:
    -
    set in interface NDIterator
    + + + + +
      +
    • +

      set

      +
      public final void set(int axis,
      +                      int position)
      +
      +
      Specified by:
      +
      set in interface NDIterator
      -
  • -
  • -
    -

    set

    -
    public final void set(int[] indices)
    -
    -
    Specified by:
    -
    set in interface NDIterator
    + + + + +
      +
    • +

      set

      +
      public final void set(int[] indices)
      +
      +
      Specified by:
      +
      set in interface NDIterator
      -
  • - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/ndim/iterator/types/simple/package-frame.html b/docs/jdocs/neureka/ndim/iterator/types/simple/package-frame.html new file mode 100644 index 000000000..3abfb5958 --- /dev/null +++ b/docs/jdocs/neureka/ndim/iterator/types/simple/package-frame.html @@ -0,0 +1,21 @@ + + + + + +neureka.ndim.iterator.types.simple (neureka 1.0.1 API) + + + + +

    neureka.ndim.iterator.types.simple

    + + + diff --git a/docs/jdocs/neureka/ndim/iterator/types/simple/package-summary.html b/docs/jdocs/neureka/ndim/iterator/types/simple/package-summary.html index 21e036838..d154c99c3 100644 --- a/docs/jdocs/neureka/ndim/iterator/types/simple/package-summary.html +++ b/docs/jdocs/neureka/ndim/iterator/types/simple/package-summary.html @@ -1,86 +1,147 @@ - + + - -neureka.ndim.iterator.types.simple (neureka 1.0.0 API) - - - - + +neureka.ndim.iterator.types.simple (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    -

    Package neureka.ndim.iterator.types.simple

    -
    -
    -
    package neureka.ndim.iterator.types.simple
    -
    -
    -
    + + + + diff --git a/docs/jdocs/neureka/ndim/iterator/types/simple/package-tree.html b/docs/jdocs/neureka/ndim/iterator/types/simple/package-tree.html index 5e5e35a2c..625f4d125 100644 --- a/docs/jdocs/neureka/ndim/iterator/types/simple/package-tree.html +++ b/docs/jdocs/neureka/ndim/iterator/types/simple/package-tree.html @@ -1,89 +1,109 @@ - + + - -neureka.ndim.iterator.types.simple Class Hierarchy (neureka 1.0.0 API) - - - - + +neureka.ndim.iterator.types.simple Class Hierarchy (neureka 1.0.1 API) - - - - - - + + -
    - -
    -

    Hierarchy For Package neureka.ndim.iterator.types.simple

    -Package Hierarchies: +Package Hierarchies:
    -
    +

    Class Hierarchy

    -
    + + + + diff --git a/docs/jdocs/neureka/ndim/iterator/types/sliced/Sliced1DCIterator.html b/docs/jdocs/neureka/ndim/iterator/types/sliced/Sliced1DCIterator.html index a85800f57..c11bec51b 100644 --- a/docs/jdocs/neureka/ndim/iterator/types/sliced/Sliced1DCIterator.html +++ b/docs/jdocs/neureka/ndim/iterator/types/sliced/Sliced1DCIterator.html @@ -1,286 +1,461 @@ - + + - -Sliced1DCIterator (neureka 1.0.0 API) - - - - + +Sliced1DCIterator (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class Sliced1DCIterator

    -
    -
    java.lang.Object - +
    neureka.ndim.iterator.types.sliced
    +

    Class Sliced1DCIterator

    -
    -
    +
    + +
    +
    -
    - +
    +
    +
    +
    + -
  • -
    -

    Method Details

    -
      -
    • -
      -

      increment

      -
      public final void increment()
      -
      -
      Specified by:
      -
      increment in interface NDIterator
      +
    • -
    • -
      -

      decrement

      -
      public final void decrement()
      -
      -
      Specified by:
      -
      decrement in interface NDIterator
      +
    + + + +
      +
    • +

      decrement

      +
      public final void decrement()
      +
      +
      Specified by:
      +
      decrement in interface NDIterator
      -
  • -
  • -
    -

    i

    -
    public final int i()
    -
    -
    Specified by:
    -
    i in interface NDIterator
    + + + + +
      +
    • +

      i

      +
      public final int i()
      +
      +
      Specified by:
      +
      i in interface NDIterator
      -
  • -
  • -
    -

    get

    -
    public final int get(int axis)
    -
    -
    Specified by:
    -
    get in interface NDIterator
    + + + + +
      +
    • +

      get

      +
      public final int get(int axis)
      +
      +
      Specified by:
      +
      get in interface NDIterator
      -
  • -
  • -
    -

    get

    -
    public final int[] get()
    -
    -
    Specified by:
    -
    get in interface NDIterator
    + + + + +
      +
    • +

      get

      +
      public final int[] get()
      +
      +
      Specified by:
      +
      get in interface NDIterator
      -
  • -
  • -
    -

    set

    -
    public final void set(int axis, - int position)
    -
    -
    Specified by:
    -
    set in interface NDIterator
    + + + + +
      +
    • +

      set

      +
      public final void set(int axis,
      +                      int position)
      +
      +
      Specified by:
      +
      set in interface NDIterator
      -
  • -
  • -
    -

    set

    -
    public final void set(int[] indices)
    -
    -
    Specified by:
    -
    set in interface NDIterator
    + + + + +
      +
    • +

      set

      +
      public final void set(int[] indices)
      +
      +
      Specified by:
      +
      set in interface NDIterator
      -
  • - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/ndim/iterator/types/sliced/Sliced2DCIterator.html b/docs/jdocs/neureka/ndim/iterator/types/sliced/Sliced2DCIterator.html index 7803d27e9..05d46f5b2 100644 --- a/docs/jdocs/neureka/ndim/iterator/types/sliced/Sliced2DCIterator.html +++ b/docs/jdocs/neureka/ndim/iterator/types/sliced/Sliced2DCIterator.html @@ -1,286 +1,461 @@ - + + - -Sliced2DCIterator (neureka 1.0.0 API) - - - - + +Sliced2DCIterator (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class Sliced2DCIterator

    -
    -
    java.lang.Object - +
    neureka.ndim.iterator.types.sliced
    +

    Class Sliced2DCIterator

    -
    -
    +
    + +
    +
    -
    - +
    +
    +
    +
    + -
  • -
    -

    Method Details

    -
      -
    • -
      -

      increment

      -
      public final void increment()
      -
      -
      Specified by:
      -
      increment in interface NDIterator
      +
    • -
    • -
      -

      decrement

      -
      public final void decrement()
      -
      -
      Specified by:
      -
      decrement in interface NDIterator
      +
    + + + +
      +
    • +

      decrement

      +
      public final void decrement()
      +
      +
      Specified by:
      +
      decrement in interface NDIterator
      -
  • -
  • -
    -

    i

    -
    public final int i()
    -
    -
    Specified by:
    -
    i in interface NDIterator
    + + + + +
      +
    • +

      i

      +
      public final int i()
      +
      +
      Specified by:
      +
      i in interface NDIterator
      -
  • -
  • -
    -

    get

    -
    public final int get(int axis)
    -
    -
    Specified by:
    -
    get in interface NDIterator
    + + + + +
      +
    • +

      get

      +
      public final int get(int axis)
      +
      +
      Specified by:
      +
      get in interface NDIterator
      -
  • -
  • -
    -

    get

    -
    public final int[] get()
    -
    -
    Specified by:
    -
    get in interface NDIterator
    + + + + +
      +
    • +

      get

      +
      public final int[] get()
      +
      +
      Specified by:
      +
      get in interface NDIterator
      -
  • -
  • -
    -

    set

    -
    public final void set(int axis, - int position)
    -
    -
    Specified by:
    -
    set in interface NDIterator
    + + + + +
      +
    • +

      set

      +
      public final void set(int axis,
      +                      int position)
      +
      +
      Specified by:
      +
      set in interface NDIterator
      -
  • -
  • -
    -

    set

    -
    public final void set(int[] indices)
    -
    -
    Specified by:
    -
    set in interface NDIterator
    + + + + +
      +
    • +

      set

      +
      public final void set(int[] indices)
      +
      +
      Specified by:
      +
      set in interface NDIterator
      -
  • - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/ndim/iterator/types/sliced/Sliced3DCIterator.html b/docs/jdocs/neureka/ndim/iterator/types/sliced/Sliced3DCIterator.html index b68155b2c..adbb54064 100644 --- a/docs/jdocs/neureka/ndim/iterator/types/sliced/Sliced3DCIterator.html +++ b/docs/jdocs/neureka/ndim/iterator/types/sliced/Sliced3DCIterator.html @@ -1,286 +1,461 @@ - + + - -Sliced3DCIterator (neureka 1.0.0 API) - - - - + +Sliced3DCIterator (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class Sliced3DCIterator

    -
    -
    java.lang.Object - +
    neureka.ndim.iterator.types.sliced
    +

    Class Sliced3DCIterator

    -
    -
    +
    + +
    +
    -
    - +
    +
    +
    +
    + -
  • -
    -

    Method Details

    -
      -
    • -
      -

      increment

      -
      public final void increment()
      -
      -
      Specified by:
      -
      increment in interface NDIterator
      +
    • -
    • -
      -

      decrement

      -
      public final void decrement()
      -
      -
      Specified by:
      -
      decrement in interface NDIterator
      +
    + + + +
      +
    • +

      decrement

      +
      public final void decrement()
      +
      +
      Specified by:
      +
      decrement in interface NDIterator
      -
  • -
  • -
    -

    i

    -
    public final int i()
    -
    -
    Specified by:
    -
    i in interface NDIterator
    + + + + +
      +
    • +

      i

      +
      public final int i()
      +
      +
      Specified by:
      +
      i in interface NDIterator
      -
  • -
  • -
    -

    get

    -
    public final int get(int axis)
    -
    -
    Specified by:
    -
    get in interface NDIterator
    + + + + +
      +
    • +

      get

      +
      public final int get(int axis)
      +
      +
      Specified by:
      +
      get in interface NDIterator
      -
  • -
  • -
    -

    get

    -
    public final int[] get()
    -
    -
    Specified by:
    -
    get in interface NDIterator
    + + + + +
      +
    • +

      get

      +
      public final int[] get()
      +
      +
      Specified by:
      +
      get in interface NDIterator
      -
  • -
  • -
    -

    set

    -
    public final void set(int axis, - int position)
    -
    -
    Specified by:
    -
    set in interface NDIterator
    + + + + +
      +
    • +

      set

      +
      public final void set(int axis,
      +                      int position)
      +
      +
      Specified by:
      +
      set in interface NDIterator
      -
  • -
  • -
    -

    set

    -
    public final void set(int[] indices)
    -
    -
    Specified by:
    -
    set in interface NDIterator
    + + + + +
      +
    • +

      set

      +
      public final void set(int[] indices)
      +
      +
      Specified by:
      +
      set in interface NDIterator
      -
  • - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/ndim/iterator/types/sliced/SlicedNDIterator.html b/docs/jdocs/neureka/ndim/iterator/types/sliced/SlicedNDIterator.html index df6311ffe..0458d4311 100644 --- a/docs/jdocs/neureka/ndim/iterator/types/sliced/SlicedNDIterator.html +++ b/docs/jdocs/neureka/ndim/iterator/types/sliced/SlicedNDIterator.html @@ -1,298 +1,454 @@ - + + - -SlicedNDIterator (neureka 1.0.0 API) - - - - + +SlicedNDIterator (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class SlicedNDIterator

    -
    -
    java.lang.Object -
    neureka.ndim.iterator.types.sliced.SlicedNDIterator
    +
    neureka.ndim.iterator.types.sliced
    +

    Class SlicedNDIterator

    -
    -
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.ndim.iterator.types.sliced.SlicedNDIterator
      • +
      +
    • +
    +
    +
    -
    -
      +
      +
      public final class SlicedNDIterator
      +extends java.lang.Object
      +implements NDIterator
      + +
    +
    +
    + - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
    • +
    -
  • -
    -

    Method Details

    -
      -
    • -
      -

      shape

      -
      public final int shape(int i)
      -
      -
      Specified by:
      -
      shape in interface NDIterator
      +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          shape

          +
          public final int shape(int i)
          +
          +
          Specified by:
          +
          shape in interface NDIterator
          -
    • -
    • -
      -

      shape

      -
      public final int[] shape()
      -
      -
      Specified by:
      -
      shape in interface NDIterator
      +
    + + + +
      +
    • +

      shape

      +
      public final int[] shape()
      +
      +
      Specified by:
      +
      shape in interface NDIterator
      -
  • -
  • -
    -

    increment

    -
    public final void increment()
    -
    -
    Specified by:
    -
    increment in interface NDIterator
    + + + + +
      +
    • +

      increment

      +
      public final void increment()
      +
      +
      Specified by:
      +
      increment in interface NDIterator
      -
  • -
  • -
    -

    decrement

    -
    public final void decrement()
    -
    -
    Specified by:
    -
    decrement in interface NDIterator
    + + + + +
      +
    • +

      decrement

      +
      public final void decrement()
      +
      +
      Specified by:
      +
      decrement in interface NDIterator
      -
  • -
  • -
    -

    i

    -
    public final int i()
    -
    -
    Specified by:
    -
    i in interface NDIterator
    + + + + +
      +
    • +

      i

      +
      public final int i()
      +
      +
      Specified by:
      +
      i in interface NDIterator
      -
  • -
  • -
    -

    get

    -
    public final int get(int axis)
    -
    -
    Specified by:
    -
    get in interface NDIterator
    + + + + +
      +
    • +

      get

      +
      public final int get(int axis)
      +
      +
      Specified by:
      +
      get in interface NDIterator
      -
  • -
  • -
    -

    get

    -
    public final int[] get()
    -
    -
    Specified by:
    -
    get in interface NDIterator
    + + + + +
      +
    • +

      get

      +
      public final int[] get()
      +
      +
      Specified by:
      +
      get in interface NDIterator
      -
  • -
  • -
    -

    set

    -
    public final void set(int axis, - int position)
    -
    -
    Specified by:
    -
    set in interface NDIterator
    + + + + +
      +
    • +

      set

      +
      public final void set(int axis,
      +                      int position)
      +
      +
      Specified by:
      +
      set in interface NDIterator
      -
  • -
  • -
    -

    set

    -
    public final void set(int[] indices)
    -
    -
    Specified by:
    -
    set in interface NDIterator
    + + + + +
      +
    • +

      set

      +
      public final void set(int[] indices)
      +
      +
      Specified by:
      +
      set in interface NDIterator
      -
  • -
  • -
    -

    rank

    -
    public final int rank()
    -
    -
    Specified by:
    -
    rank in interface NDIterator
    + + + + +
      +
    • +

      rank

      +
      public final int rank()
      +
      +
      Specified by:
      +
      rank in interface NDIterator
      -
  • - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/ndim/iterator/types/sliced/package-frame.html b/docs/jdocs/neureka/ndim/iterator/types/sliced/package-frame.html new file mode 100644 index 000000000..89d9abd3f --- /dev/null +++ b/docs/jdocs/neureka/ndim/iterator/types/sliced/package-frame.html @@ -0,0 +1,22 @@ + + + + + +neureka.ndim.iterator.types.sliced (neureka 1.0.1 API) + + + + +

    neureka.ndim.iterator.types.sliced

    + + + diff --git a/docs/jdocs/neureka/ndim/iterator/types/sliced/package-summary.html b/docs/jdocs/neureka/ndim/iterator/types/sliced/package-summary.html index 4850a6769..f2914aad6 100644 --- a/docs/jdocs/neureka/ndim/iterator/types/sliced/package-summary.html +++ b/docs/jdocs/neureka/ndim/iterator/types/sliced/package-summary.html @@ -1,88 +1,151 @@ - + + - -neureka.ndim.iterator.types.sliced (neureka 1.0.0 API) - - - - + +neureka.ndim.iterator.types.sliced (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    -

    Package neureka.ndim.iterator.types.sliced

    -
    -
    -
    package neureka.ndim.iterator.types.sliced
    -
    -
    -
    + + + + diff --git a/docs/jdocs/neureka/ndim/iterator/types/sliced/package-tree.html b/docs/jdocs/neureka/ndim/iterator/types/sliced/package-tree.html index da6dadcec..aca377105 100644 --- a/docs/jdocs/neureka/ndim/iterator/types/sliced/package-tree.html +++ b/docs/jdocs/neureka/ndim/iterator/types/sliced/package-tree.html @@ -1,102 +1,165 @@ - + + - -neureka.ndim.iterator.types.sliced Class Hierarchy (neureka 1.0.0 API) - - - - + +neureka.ndim.iterator.types.sliced Class Hierarchy (neureka 1.0.1 API) - - - - - - + + -
    - -
    -

    Hierarchy For Package neureka.ndim.iterator.types.sliced

    -Package Hierarchies: +Package Hierarchies:
    -
    +

    Class Hierarchy

    -
    -
    + + + + diff --git a/docs/jdocs/neureka/ndim/iterator/types/virtual/VirtualNDIterator.html b/docs/jdocs/neureka/ndim/iterator/types/virtual/VirtualNDIterator.html index 09158fafe..6d14ab5a6 100644 --- a/docs/jdocs/neureka/ndim/iterator/types/virtual/VirtualNDIterator.html +++ b/docs/jdocs/neureka/ndim/iterator/types/virtual/VirtualNDIterator.html @@ -1,298 +1,454 @@ - + + - -VirtualNDIterator (neureka 1.0.0 API) - - - - + +VirtualNDIterator (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class VirtualNDIterator

    -
    -
    java.lang.Object -
    neureka.ndim.iterator.types.virtual.VirtualNDIterator
    +
    neureka.ndim.iterator.types.virtual
    +

    Class VirtualNDIterator

    -
    -
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.ndim.iterator.types.virtual.VirtualNDIterator
      • +
      +
    • +
    +
    +
    -
    -
      +
      +
      public final class VirtualNDIterator
      +extends java.lang.Object
      +implements NDIterator
      + +
    +
    +
    + - -
    -
      +
    +
    + -
  • -
    -

    Method Details

    -
      -
    • -
      -

      shape

      -
      public final int shape(int i)
      -
      -
      Specified by:
      -
      shape in interface NDIterator
      +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          shape

          +
          public final int shape(int i)
          +
          +
          Specified by:
          +
          shape in interface NDIterator
          -
    • -
    • -
      -

      shape

      -
      public final int[] shape()
      -
      -
      Specified by:
      -
      shape in interface NDIterator
      +
    + + + +
      +
    • +

      shape

      +
      public final int[] shape()
      +
      +
      Specified by:
      +
      shape in interface NDIterator
      -
  • -
  • -
    -

    increment

    -
    public final void increment()
    -
    -
    Specified by:
    -
    increment in interface NDIterator
    + + + + +
      +
    • +

      increment

      +
      public final void increment()
      +
      +
      Specified by:
      +
      increment in interface NDIterator
      -
  • -
  • -
    -

    decrement

    -
    public final void decrement()
    -
    -
    Specified by:
    -
    decrement in interface NDIterator
    + + + + +
      +
    • +

      decrement

      +
      public final void decrement()
      +
      +
      Specified by:
      +
      decrement in interface NDIterator
      -
  • -
  • -
    -

    i

    -
    public final int i()
    -
    -
    Specified by:
    -
    i in interface NDIterator
    + + + + +
      +
    • +

      i

      +
      public final int i()
      +
      +
      Specified by:
      +
      i in interface NDIterator
      -
  • -
  • -
    -

    get

    -
    public final int get(int axis)
    -
    -
    Specified by:
    -
    get in interface NDIterator
    + + + + +
      +
    • +

      get

      +
      public final int get(int axis)
      +
      +
      Specified by:
      +
      get in interface NDIterator
      -
  • -
  • -
    -

    get

    -
    public final int[] get()
    -
    -
    Specified by:
    -
    get in interface NDIterator
    + + + + +
      +
    • +

      get

      +
      public final int[] get()
      +
      +
      Specified by:
      +
      get in interface NDIterator
      -
  • -
  • -
    -

    set

    -
    public final void set(int axis, - int position)
    -
    -
    Specified by:
    -
    set in interface NDIterator
    + + + + +
      +
    • +

      set

      +
      public final void set(int axis,
      +                      int position)
      +
      +
      Specified by:
      +
      set in interface NDIterator
      -
  • -
  • -
    -

    set

    -
    public final void set(int[] indices)
    -
    -
    Specified by:
    -
    set in interface NDIterator
    + + + + +
      +
    • +

      set

      +
      public final void set(int[] indices)
      +
      +
      Specified by:
      +
      set in interface NDIterator
      -
  • -
  • -
    -

    rank

    -
    public final int rank()
    -
    -
    Specified by:
    -
    rank in interface NDIterator
    + + + + +
      +
    • +

      rank

      +
      public final int rank()
      +
      +
      Specified by:
      +
      rank in interface NDIterator
      -
  • - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/ndim/iterator/types/virtual/package-frame.html b/docs/jdocs/neureka/ndim/iterator/types/virtual/package-frame.html new file mode 100644 index 000000000..85b5c3722 --- /dev/null +++ b/docs/jdocs/neureka/ndim/iterator/types/virtual/package-frame.html @@ -0,0 +1,19 @@ + + + + + +neureka.ndim.iterator.types.virtual (neureka 1.0.1 API) + + + + +

    neureka.ndim.iterator.types.virtual

    +
    +

    Classes

    + +
    + + diff --git a/docs/jdocs/neureka/ndim/iterator/types/virtual/package-summary.html b/docs/jdocs/neureka/ndim/iterator/types/virtual/package-summary.html index 82131dac6..cd2ad1b49 100644 --- a/docs/jdocs/neureka/ndim/iterator/types/virtual/package-summary.html +++ b/docs/jdocs/neureka/ndim/iterator/types/virtual/package-summary.html @@ -1,82 +1,139 @@ - + + - -neureka.ndim.iterator.types.virtual (neureka 1.0.0 API) - - - - + +neureka.ndim.iterator.types.virtual (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    -

    Package neureka.ndim.iterator.types.virtual

    -
    -
    -
    package neureka.ndim.iterator.types.virtual
    -
    -
      -
    • -
      -
      Classes
      -
      -
      Class
      -
      Description
      - -
       
      -
      +

      Package neureka.ndim.iterator.types.virtual

      +
      + -
    -
    + + + + diff --git a/docs/jdocs/neureka/ndim/iterator/types/virtual/package-tree.html b/docs/jdocs/neureka/ndim/iterator/types/virtual/package-tree.html index bacf202aa..fc05d66bd 100644 --- a/docs/jdocs/neureka/ndim/iterator/types/virtual/package-tree.html +++ b/docs/jdocs/neureka/ndim/iterator/types/virtual/package-tree.html @@ -1,71 +1,134 @@ - + + - -neureka.ndim.iterator.types.virtual Class Hierarchy (neureka 1.0.0 API) - - - - + +neureka.ndim.iterator.types.virtual Class Hierarchy (neureka 1.0.1 API) - - - - - - + + -
    - -
    -

    Hierarchy For Package neureka.ndim.iterator.types.virtual

    -Package Hierarchies: +Package Hierarchies:
    -
    +

    Class Hierarchy

    -
    -
    + + + + diff --git a/docs/jdocs/neureka/ndim/package-frame.html b/docs/jdocs/neureka/ndim/package-frame.html new file mode 100644 index 000000000..abc76917d --- /dev/null +++ b/docs/jdocs/neureka/ndim/package-frame.html @@ -0,0 +1,25 @@ + + + + + +neureka.ndim (neureka 1.0.1 API) + + + + +

    neureka.ndim

    +
    +

    Interfaces

    + +

    Classes

    + +
    + + diff --git a/docs/jdocs/neureka/ndim/package-summary.html b/docs/jdocs/neureka/ndim/package-summary.html index e87a39ef9..3d4ca0826 100644 --- a/docs/jdocs/neureka/ndim/package-summary.html +++ b/docs/jdocs/neureka/ndim/package-summary.html @@ -1,118 +1,171 @@ - + + - -neureka.ndim (neureka 1.0.0 API) - - - - + +neureka.ndim (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    -

    Package neureka.ndim

    -
    -
    -
    package neureka.ndim
    -
    -
      -
    • - -
    • -
    • -
      -
      -
      -
      -
      Class
      -
      Description
      - -
      +
      +
        +
      • + + + + + + + + + + + + + + + + + + + + +
        Interface Summary 
        InterfaceDescription
        Filler<T>
        Implementations of this ought to map the index of a tensor entry to a value which should be placed at that entry position.
        - - -
         
        - -
        +
        NDConstructor 
        NDimensional
        This interface defines the most essential methods of the nd-array/tensor API, which describe them with respect to their dimensionality.
        - - -
        +
        +
      • +
      • + + + + + + + + + + + + +
        Class Summary 
        ClassDescription
        NDUtil
        Static utility methods for the NDArray.
        - - - - +
      -
    -
    + + + + diff --git a/docs/jdocs/neureka/ndim/package-tree.html b/docs/jdocs/neureka/ndim/package-tree.html index a8c9ede99..37689a5f9 100644 --- a/docs/jdocs/neureka/ndim/package-tree.html +++ b/docs/jdocs/neureka/ndim/package-tree.html @@ -1,79 +1,140 @@ - + + - -neureka.ndim Class Hierarchy (neureka 1.0.0 API) - - - - + +neureka.ndim Class Hierarchy (neureka 1.0.1 API) - - - - - - + + -
    - -
    -

    Hierarchy For Package neureka.ndim

    -Package Hierarchies: +Package Hierarchies:
    -
    +

    Class Hierarchy

    -
    -

    Interface Hierarchy

    -
    -
    + + + + diff --git a/docs/jdocs/neureka/optimization/Optimization.html b/docs/jdocs/neureka/optimization/Optimization.html index d0f087b60..aa1ce6c49 100644 --- a/docs/jdocs/neureka/optimization/Optimization.html +++ b/docs/jdocs/neureka/optimization/Optimization.html @@ -1,132 +1,226 @@ - + + - -Optimization (neureka 1.0.0 API) - - - - + +Optimization (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Interface Optimization<V>

    +
    neureka.optimization
    +

    Interface Optimization<V>

    -
    -
    +
    +
    +
    -
    -
      - -
    • -
      -

      Method Summary

      -
      -
      -
      -
      -
      Modifier and Type
      -
      Method
      -
      Description
      - - -
       
      -
      -
      +
      +
      public interface Optimization<V>
      +
    • +
    - +
    + - -
    -
      +
    +
    + - +
    +
    - + + + + diff --git a/docs/jdocs/neureka/optimization/Optimizer.html b/docs/jdocs/neureka/optimization/Optimizer.html index 923b67c08..ed7f31294 100644 --- a/docs/jdocs/neureka/optimization/Optimizer.html +++ b/docs/jdocs/neureka/optimization/Optimizer.html @@ -1,89 +1,119 @@ - + + - -Optimizer (neureka 1.0.0 API) - - - - + +Optimizer (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Interface Optimizer<V>

    +
    neureka.optimization
    +

    Interface Optimizer<V>

    -
    -
    -
    Type Parameters:
    +
    +
    +
    -
    -
      + +
    +
    +
    +
    +
    + -
  • -
    -

    Method Details

    - + + + +
      +
    • +

      ofGradient

      +
      static <T> Optimizer<T> ofGradient(Optimization<T> o)
      +
      +
      Type Parameters:
      T - The value type parameter of the tensors processed by this optimizer.
      -
      Parameters:
      -
      o - The Optimization lambda which receives the gradient of a tensor for optimization.
      -
      Returns:
      -
      An Optimizer which will process the gradient of any passed tensor (see of(Optimization) which processes tensors directly).
      +
      Parameters:
      +
      o - The Optimization lambda which receives the gradient of a tensor for optimization.
      +
      Returns:
      +
      An Optimizer which will process the gradient of any passed tensor (see of(Optimization) which processes tensors directly).
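A hedged usage sketch of the factory method documented above: it assumes that Optimization is a functional interface mapping the received tensor to the tensor that should be applied, and that neureka.optimization.Optimizer is the fully qualified name implied by the file paths in this diff. A real lambda would rescale or clip the gradient rather than pass it through unchanged.

import neureka.optimization.Optimizer;

public class OfGradientSketch {
    public static void main(String[] args) {
        // The lambda receives the gradient of whichever tensor the optimizer is later
        // attached to; returning it unchanged makes this a do-nothing placeholder.
        Optimizer<Float> byGradient = Optimizer.ofGradient( gradient -> gradient );
        System.out.println(byGradient != null);
    }
}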
      -
  • - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/optimization/OptimizerFactory.html b/docs/jdocs/neureka/optimization/OptimizerFactory.html index 86a64648f..b19c455a5 100644 --- a/docs/jdocs/neureka/optimization/OptimizerFactory.html +++ b/docs/jdocs/neureka/optimization/OptimizerFactory.html @@ -1,128 +1,222 @@ - + + - -OptimizerFactory (neureka 1.0.0 API) - - - - + +OptimizerFactory (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Interface OptimizerFactory

    +
    neureka.optimization
    +

    Interface OptimizerFactory

    -
    -
    +
    +
    +
    -
    -
      - -
    • -
      -

      Method Summary

      -
      -
      -
      -
      -
      Modifier and Type
      -
      Method
      -
      Description
      -
      <V extends Number>
      Optimizer<V>
      -
      create(Tensor<V> target)
      -
       
      -
      -
      +
      +
      public interface OptimizerFactory
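The extracted summary above leaves only the create(Tensor&lt;V&gt; target) signature visible, so here is a hedged sketch of how such a factory would typically be combined with the ADAMFactory documented further down in this diff. The package names are inferred from the file paths, the no-argument ADAMFactory constructor is listed on its own page below, and the remark about component attachment is an assumption drawn from the "All Implemented Interfaces: Component&lt;Tensor&lt;V&gt;&gt;" lines rather than anything demonstrated here.

import neureka.Tensor;
import neureka.optimization.Optimizer;
import neureka.optimization.OptimizerFactory;
import neureka.optimization.implementations.ADAMFactory;

public class FactorySketch {
    // Builds an ADAM optimizer for a given weight tensor via the factory interface.
    static Optimizer<Float> adamFor(Tensor<Float> weights) {
        OptimizerFactory factory = new ADAMFactory();
        // create(Tensor<V> target) is the single method shown in the summary above.
        Optimizer<Float> adam = factory.create(weights);
        // The returned optimizer also implements Component<Tensor<Float>>, so it is
        // presumably meant to be registered on `weights`; that attachment API is not
        // part of this diff and is therefore not shown here.
        return adam;
    }
}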
      +
    • +
    - +
    + - -
    -
      +
    +
    + - +
    +
    - + + + + diff --git a/docs/jdocs/neureka/optimization/implementations/ADAM.html b/docs/jdocs/neureka/optimization/implementations/ADAM.html index 94b4015ff..3c67231b1 100644 --- a/docs/jdocs/neureka/optimization/implementations/ADAM.html +++ b/docs/jdocs/neureka/optimization/implementations/ADAM.html @@ -1,205 +1,347 @@ - + + - -ADAM (neureka 1.0.0 API) - - - - + +ADAM (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class ADAM<V extends Number>

    -
    -
    java.lang.Object -
    neureka.optimization.implementations.ADAM<V>
    +
    neureka.optimization.implementations
    +

    Class ADAM<V extends java.lang.Number>

    -
    -
    -
    Type Parameters:
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.optimization.implementations.ADAM<V>
      • +
      +
    • +
    +
    +
      +
    • +
      +
      Type Parameters:
      V - The value type parameter of the tensor whose gradients are being optimized.
      -
      +
      All Implemented Interfaces:
      -
      Component<Tensor<V>>, Optimization<V>, Optimizer<V>
      +
      Component<Tensor<V>>, Optimization<V>, Optimizer<V>

      -
      public final class ADAM<V extends Number> -extends Object -implements Optimizer<V>
      +
      +
      public final class ADAM<V extends java.lang.Number>
      +extends java.lang.Object
      +implements Optimizer<V>
      ADAM (short for Adaptive Moment Estimation) is an adaptive learning rate optimization algorithm that utilises both momentum and scaling, combining the benefits of RMSProp and SGD with respect to Momentum. The optimizer is designed to be appropriate for non-stationary objectives and problems with very noisy and/or sparse gradients.
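To make that description concrete, here is a minimal sketch of the textbook ADAM step for a single parameter, with the first-moment ("momentum") and second-moment ("velocity") estimates that the getters below expose. The hyperparameter values are the commonly cited defaults from the ADAM paper and are assumptions here, not values read from this diff; the class's actual defaults may differ.

// Textbook ADAM step for one scalar parameter (illustration of the algorithm
// described above, not neureka's implementation).
public class AdamStepDemo {
    static double w = 1.0;          // parameter
    static double m = 0.0, v = 0.0; // first and second moment estimates ("momentum" and "velocity")
    static long   t = 0;            // time step

    static void adamStep(double gradient) {
        final double lr = 0.001, beta1 = 0.9, beta2 = 0.999, eps = 1e-8;
        t++;
        m = beta1 * m + (1 - beta1) * gradient;            // momentum: decaying mean of gradients
        v = beta2 * v + (1 - beta2) * gradient * gradient; // velocity: decaying mean of squared gradients
        double mHat = m / (1 - Math.pow(beta1, t));        // bias correction for the zero-initialized moments
        double vHat = v / (1 - Math.pow(beta2, t));
        w -= lr * mHat / (Math.sqrt(vHat) + eps);          // momentum-smoothed, per-parameter scaled step
    }

    public static void main(String[] args) {
        for (int i = 0; i < 3; i++) adamStep(2.0 * w);     // gradient of w^2
        System.out.println(w);                              // w shrinks toward the minimum at 0
    }
}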
      -
    -
    -
      + +
    +
    +
    +
    +
    +
      +
    • -
    • -
      -

      Method Details

      - + + + +
        +
      • +

        getMomentum

        +
        public final Tensor<V> getMomentum()
      • -
      • -
        -

        getVelocity

        -
        public final Tensor<V> getVelocity()
        -
        +
      + + + +
        +
      • +

        getVelocity

        +
        public final Tensor<V> getVelocity()
      • -
      • -
        -

        getTime

        -
        public final long getTime()
        -
        +
      + + + +
        +
      • +

        getTime

        +
        public final long getTime()
      • -
      • -
        -

        getLearningRate

        -
        public final double getLearningRate()
        -
        +
      + + + +
        +
      • +

        getLearningRate

        +
        public final double getLearningRate()
        +
      • +
    - - +
    +
    - + + + + diff --git a/docs/jdocs/neureka/optimization/implementations/ADAMFactory.html b/docs/jdocs/neureka/optimization/implementations/ADAMFactory.html index 9436bd645..6e6f7e4cd 100644 --- a/docs/jdocs/neureka/optimization/implementations/ADAMFactory.html +++ b/docs/jdocs/neureka/optimization/implementations/ADAMFactory.html @@ -1,196 +1,318 @@ - + + - -ADAMFactory (neureka 1.0.0 API) - - - - + +ADAMFactory (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class ADAMFactory

    -
    -
    java.lang.Object -
    neureka.optimization.implementations.ADAMFactory
    +
    neureka.optimization.implementations
    +

    Class ADAMFactory

    -
    -
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.optimization.implementations.ADAMFactory
      • +
      +
    • +
    +
    +
    -
    -
      - -
    • -
      -

      Constructor Summary

      -
      Constructors
      -
      -
      Constructor
      -
      Description
      - -
       
      +
      +
      public class ADAMFactory
      +extends java.lang.Object
      +implements OptimizerFactory
      +
    • +
    - +
    +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        ADAMFactory

        -
        public ADAMFactory()
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            ADAMFactory

            +
            public ADAMFactory()
          -
    • +
    -
  • -
    -

    Method Details

    -
  • -
  • -
    -

    create

    -
    public <V extends Number> ADAM<V> create(Tensor<V> momentum, - Tensor<V> velocity)
    -
    + + + + +
      +
    • +

      create

      +
      public <V extends java.lang.Number> ADAM<V> create(Tensor<V> momentum,
      +                                                   Tensor<V> velocity)
    -
  • - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/optimization/implementations/AdaGrad.html b/docs/jdocs/neureka/optimization/implementations/AdaGrad.html index 75ae4c2da..f2b8ebc26 100644 --- a/docs/jdocs/neureka/optimization/implementations/AdaGrad.html +++ b/docs/jdocs/neureka/optimization/implementations/AdaGrad.html @@ -1,168 +1,294 @@ - + + - -AdaGrad (neureka 1.0.0 API) - - - - + +AdaGrad (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class AdaGrad<V extends Number>

    +
    neureka.optimization.implementations
    +

    Class AdaGrad<V extends java.lang.Number>

    -
    java.lang.Object -
    neureka.optimization.implementations.AdaGrad<V>
    -
    -
    -
    -
    Type Parameters:
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.optimization.implementations.AdaGrad<V>
      • +
      +
    • +
    +
    +
      +
    • +
      +
      Type Parameters:
      V - The super type of the value item type for the tensors whose gradients can be optimized by this.
      -
      +
      All Implemented Interfaces:
      -
      Component<Tensor<V>>, Optimization<V>, Optimizer<V>
      +
      Component<Tensor<V>>, Optimization<V>, Optimizer<V>

      -
      public class AdaGrad<V extends Number> -extends Object -implements Optimizer<V>
      +
      +
      public class AdaGrad<V extends java.lang.Number>
      +extends java.lang.Object
      +implements Optimizer<V>
      Adaptive Gradients, or AdaGrad for short, is an extension of the gradient descent optimization algorithm that adjusts the step size for each parameter based on the squared gradients seen over the course of previous optimization steps.
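A concrete rendering of that description: the standard AdaGrad rule accumulates squared gradients and divides each step by their square root, so frequently updated parameters take progressively smaller steps. The sketch below illustrates that rule for one parameter; the learning rate and epsilon are assumed values and this is not the class's source code.

// Standard AdaGrad step for a single parameter (illustration only).
public class AdaGradStepDemo {
    static double w = 1.0;
    static double gradSquaredSum = 0.0; // running sum of squared gradients

    static void adaGradStep(double gradient) {
        final double lr = 0.1, eps = 1e-8;
        gradSquaredSum += gradient * gradient;
        w -= lr * gradient / (Math.sqrt(gradSquaredSum) + eps); // per-parameter shrinking step size
    }

    public static void main(String[] args) {
        for (int i = 0; i < 3; i++) adaGradStep(2.0 * w); // gradient of w^2
        System.out.println(w);
    }
}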
      -
    -
    -
      + +
    +
    +
    +
    +
    + - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/optimization/implementations/AdaGradFactory.html b/docs/jdocs/neureka/optimization/implementations/AdaGradFactory.html index b41cfd35b..e9758a5cf 100644 --- a/docs/jdocs/neureka/optimization/implementations/AdaGradFactory.html +++ b/docs/jdocs/neureka/optimization/implementations/AdaGradFactory.html @@ -1,176 +1,290 @@ - + + - -AdaGradFactory (neureka 1.0.0 API) - - - - + +AdaGradFactory (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class AdaGradFactory

    +
    neureka.optimization.implementations
    +

    Class AdaGradFactory

    -
    java.lang.Object -
    neureka.optimization.implementations.AdaGradFactory
    -
    -
    -
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.optimization.implementations.AdaGradFactory
      • +
      +
    • +
    +
    +
    -
    -
      - -
    • -
      -

      Constructor Summary

      -
      Constructors
      -
      -
      Constructor
      -
      Description
      - -
       
      +
      +
      public class AdaGradFactory
      +extends java.lang.Object
      +implements OptimizerFactory
      +
    • +
    - +
    + - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        AdaGradFactory

        -
        public AdaGradFactory()
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            AdaGradFactory

            +
            public AdaGradFactory()
          -
    • +
    -
  • -
    -

    Method Details

    -
  • - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/optimization/implementations/Momentum.html b/docs/jdocs/neureka/optimization/implementations/Momentum.html index 4fc7902a6..ea8e9836d 100644 --- a/docs/jdocs/neureka/optimization/implementations/Momentum.html +++ b/docs/jdocs/neureka/optimization/implementations/Momentum.html @@ -1,161 +1,287 @@ - + + - -Momentum (neureka 1.0.0 API) - - - - + +Momentum (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class Momentum<V extends Number>

    +
    neureka.optimization.implementations
    +

    Class Momentum<V extends java.lang.Number>

    -
    java.lang.Object -
    neureka.optimization.implementations.Momentum<V>
    -
    -
    -
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.optimization.implementations.Momentum<V>
      • +
      +
    • +
    +
    +
    -
    -
      +
      +
      public class Momentum<V extends java.lang.Number>
      +extends java.lang.Object
      +implements Optimizer<V>
      + +
    +
    +
    +
    +
    + - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/optimization/implementations/MomentumFactory.html b/docs/jdocs/neureka/optimization/implementations/MomentumFactory.html index 0a425644e..ec759d891 100644 --- a/docs/jdocs/neureka/optimization/implementations/MomentumFactory.html +++ b/docs/jdocs/neureka/optimization/implementations/MomentumFactory.html @@ -1,185 +1,303 @@ - + + - -MomentumFactory (neureka 1.0.0 API) - - - - + +MomentumFactory (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class MomentumFactory

    -
    -
    java.lang.Object -
    neureka.optimization.implementations.MomentumFactory
    +
    neureka.optimization.implementations
    +

    Class MomentumFactory

    -
    -
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.optimization.implementations.MomentumFactory
      • +
      +
    • +
    +
    +
    -
    -
      - -
    • -
      -

      Constructor Summary

      -
      Constructors
      -
      -
      Constructor
      -
      Description
      - -
       
      +
      +
      public class MomentumFactory
      +extends java.lang.Object
      +implements OptimizerFactory
      +
    • +
    - +
    + - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        MomentumFactory

        -
        public MomentumFactory()
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            MomentumFactory

            +
            public MomentumFactory()
          -
    • +
    -
  • -
    -

    Method Details

    -
  • - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/optimization/implementations/RMSProp.html b/docs/jdocs/neureka/optimization/implementations/RMSProp.html index 4007aec1d..af593280c 100644 --- a/docs/jdocs/neureka/optimization/implementations/RMSProp.html +++ b/docs/jdocs/neureka/optimization/implementations/RMSProp.html @@ -1,171 +1,297 @@ - + + - -RMSProp (neureka 1.0.0 API) - - - - + +RMSProp (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class RMSProp<V extends Number>

    +
    neureka.optimization.implementations
    +

    Class RMSProp<V extends java.lang.Number>

    -
    java.lang.Object -
    neureka.optimization.implementations.RMSProp<V>
    -
    -
    -
    -
    Type Parameters:
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.optimization.implementations.RMSProp<V>
      • +
      +
    • +
    +
    +
      +
    • +
      +
      Type Parameters:
      V - The super type of the value item type for the tensors whose gradients can be optimized by this.
      -
      +
      All Implemented Interfaces:
      -
      Component<Tensor<V>>, Optimization<V>, Optimizer<V>
      +
      Component<Tensor<V>>, Optimization<V>, Optimizer<V>

      -
      public class RMSProp<V extends Number> -extends Object -implements Optimizer<V>
      +
      +
      public class RMSProp<V extends java.lang.Number>
      +extends java.lang.Object
      +implements Optimizer<V>
      Root Mean Squared Propagation, or RMSProp, is an extension of gradient descent and the AdaGrad version of gradient descent that uses a decaying average of partial gradients in the adaptation of the step size for each parameter. - It is similar to AdaGrad in that it uses a moving average of + It is similar to AdaGrad in that it uses a moving average of the squared gradients to scale the learning rate.
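For comparison with the AdaGrad sketch earlier in this diff, the following shows the standard RMSProp rule for one parameter: the squared-gradient history decays instead of accumulating without bound, so the step size can recover over time. The decay rate, learning rate and epsilon are assumed values, and this is an illustration of the algorithm rather than the class's code.

// Standard RMSProp step for a single parameter (illustration only; hyperparameters assumed).
public class RmsPropStepDemo {
    static double w = 1.0;
    static double avgSquaredGrad = 0.0; // decaying average of squared gradients

    static void rmsPropStep(double gradient) {
        final double lr = 0.01, decay = 0.9, eps = 1e-8;
        avgSquaredGrad = decay * avgSquaredGrad + (1 - decay) * gradient * gradient;
        w -= lr * gradient / (Math.sqrt(avgSquaredGrad) + eps); // step scaled by the moving average
    }

    public static void main(String[] args) {
        for (int i = 0; i < 3; i++) rmsPropStep(2.0 * w); // gradient of w^2
        System.out.println(w);
    }
}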
      -
    -
    -
      + +
    +
    +
    +
    +
    + - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/optimization/implementations/RMSPropFactory.html b/docs/jdocs/neureka/optimization/implementations/RMSPropFactory.html index b79d1ea93..ff6cd7a2f 100644 --- a/docs/jdocs/neureka/optimization/implementations/RMSPropFactory.html +++ b/docs/jdocs/neureka/optimization/implementations/RMSPropFactory.html @@ -1,185 +1,303 @@ - + + - -RMSPropFactory (neureka 1.0.0 API) - - - - + +RMSPropFactory (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class RMSPropFactory

    -
    -
    java.lang.Object -
    neureka.optimization.implementations.RMSPropFactory
    +
    neureka.optimization.implementations
    +

    Class RMSPropFactory

    -
    -
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.optimization.implementations.RMSPropFactory
      • +
      +
    • +
    +
    +
    -
    -
      - -
    • -
      -

      Constructor Summary

      -
      Constructors
      -
      -
      Constructor
      -
      Description
      - -
       
      +
      +
      public class RMSPropFactory
      +extends java.lang.Object
      +implements OptimizerFactory
      +
    • +
    - +
    + - -
    -
      +
    +
    +
      +
    • -
    • -
      -

      Constructor Details

      -
        -
      • -
        -

        RMSPropFactory

        -
        public RMSPropFactory()
        -
        +
          +
        • + + +

          Constructor Detail

          + + + +
            +
          • +

            RMSPropFactory

            +
            public RMSPropFactory()
          -
    • +
    -
  • -
    -

    Method Details

    -
  • - - + + +
    +
    - + + + + diff --git a/docs/jdocs/neureka/optimization/implementations/SGD.html b/docs/jdocs/neureka/optimization/implementations/SGD.html index 9d6b477ba..4ded6dac2 100644 --- a/docs/jdocs/neureka/optimization/implementations/SGD.html +++ b/docs/jdocs/neureka/optimization/implementations/SGD.html @@ -1,92 +1,123 @@ - + + - -SGD (neureka 1.0.0 API) - - - - + +SGD (neureka 1.0.1 API) - - - - - - + + -
    - -
    -
    - -

    Class SGD<V>

    +
    neureka.optimization.implementations
    +

    Class SGD<V>

    -
    java.lang.Object -
    neureka.optimization.implementations.SGD<V>
    -
    -
    -
    -
    Type Parameters:
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • neureka.optimization.implementations.SGD<V>
      • +
      +
    • +
    +
    +
      +
    • +
      +
      Type Parameters:
      V - The value type parameter of the tensor whose gradients are being optimized.
      -
      +
      All Implemented Interfaces:
      -
      Component<Tensor<V>>, Optimization<V>, Optimizer<V>
      +
      Component<Tensor<V>>, Optimization<V>, Optimizer<V>

      -
      public class SGD<V> -extends Object -implements Optimizer<V>
      +
      +
      public class SGD<V>
      +extends java.lang.Object
      +implements Optimizer<V>
      Stochastic Gradient Descent is an iterative optimization technique that uses the gradient of a weight variable to adjust said variable, in order to reduce the error used to calculate said gradient. @@ -94,88 +125,187 @@

      Class SGD<V>

      optimize the gradient based on previous gradients (network forward and backward passes) but simply applies the gradient value based on each example within the training dataset.
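As a concrete counterpart to that description, the sketch below applies plain SGD to one parameter: each example's gradient is applied directly, scaled only by a learning rate, with no moment estimates or history. The gradient values and learning rate are made up for illustration and are not taken from this diff.

// Plain stochastic gradient descent: apply each example's gradient directly (illustration only).
public class SgdStepDemo {
    public static void main(String[] args) {
        double w = 1.0;
        final double lr = 0.1;                            // learning rate (assumed value)
        double[] perExampleGradients = { 2.0, 1.6, 1.3 }; // one gradient per training example
        for (double g : perExampleGradients) {
            w -= lr * g;                                  // no momentum, no history: just the raw gradient
        }
        System.out.println(w);
    }
}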
      -
    -
    -
      + +
    +
    +
    +
    +
    + - - +
    +
docs/jdocs/neureka/optimization/implementations/SGDFactory.html (regenerated: neureka 1.0.0 API -> neureka 1.0.1 API)

    Package neureka.optimization.implementations
    Class SGDFactory

    java.lang.Object
        neureka.optimization.implementations.SGDFactory

    public class SGDFactory extends java.lang.Object implements OptimizerFactory

    Constructor Summary / Detail:
        SGDFactory()

docs/jdocs/neureka/optimization/implementations/package-frame.html (new file)
    Frame-navigation page added by the older Javadoc format; it merely lists the package contents.

docs/jdocs/neureka/optimization/implementations/package-summary.html (regenerated)
    Summary page for package neureka.optimization.implementations. Class Summary:

    AdaGrad<V extends java.lang.Number> - Adaptive Gradients, or AdaGrad for short, is an extension of the
        gradient descent optimization algorithm that adjusts the step size for each parameter based on the
        squared gradients seen over the course of previous optimization steps.
    AdaGradFactory
    ADAM<V extends java.lang.Number> - ADAM (short for Adaptive Moment Estimation) is an adaptive learning
        rate optimization algorithm that utilises both momentum and scaling, combining the benefits of
        RMSProp and SGD with respect to Momentum.
    ADAMFactory
    Momentum<V extends java.lang.Number>
    MomentumFactory
    RMSProp<V extends java.lang.Number> - Root Mean Squared Propagation, or RMSProp, is an extension of
        gradient descent and the AdaGrad version of gradient descent that uses a decaying average of
        partial gradients in the adaptation of the step size for each parameter.
    RMSPropFactory
    SGD<V> - Stochastic Gradient Descent is an iterative optimization technique that uses the gradient of
        a weight variable to adjust said variable, in order to reduce the error used to calculate said
        gradient.
    SGDFactory
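The class summary above separates plain SGD from the adaptive optimizers (AdaGrad, RMSProp, ADAM), which scale the step size per parameter using a running statistic of past squared gradients. The snippet below is a framework-independent sketch of that idea in its RMSProp-style form (decaying average of squared gradients); none of the names are Neureka API.

    // Illustrative RMSProp-style update (not Neureka's implementation):
    //   cache <- decay * cache + (1 - decay) * g^2
    //   w     <- w - learningRate * g / (sqrt(cache) + epsilon)
    final class AdaptiveStepSketch {
        static void step(double[] w, double[] g, double[] cache,
                         double learningRate, double decay, double epsilon) {
            for (int i = 0; i < w.length; i++) {
                cache[i] = decay * cache[i] + (1 - decay) * g[i] * g[i]; // running average of squared gradients
                w[i] -= learningRate * g[i] / (Math.sqrt(cache[i]) + epsilon); // per-parameter scaled step
            }
        }

        public static void main(String[] args) {
            double[] w = {0.5}, g = {0.1}, cache = {0.0};
            step(w, g, cache, 0.01, 0.9, 1e-8);
            System.out.println(w[0]); // a larger effective step than plain SGD would take for a small gradient
        }
    }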
docs/jdocs/neureka/optimization/implementations/package-tree.html (regenerated)
    "Hierarchy For Package neureka.optimization.implementations"; class-hierarchy navigation only.

docs/jdocs/neureka/optimization/package-frame.html (new file)
    Frame-navigation page added by the older Javadoc format.

docs/jdocs/neureka/optimization/package-summary.html (regenerated)
    Summary page for package neureka.optimization; navigation and member listing only.

docs/jdocs/neureka/optimization/package-tree.html (regenerated)
    "Hierarchy For Package neureka.optimization" with its Interface Hierarchy section; navigation only.

docs/jdocs/neureka/package-frame.html (new file)
    Frame-navigation page for the root package neureka.

docs/jdocs/neureka/package-summary.html (regenerated)
    Summary page for package neureka.

    Interface Summary:
        Data<V> - A wrapper type for the raw data array of a tensor/nd-array, which is typically provided
            by implementations of the Device interface.
        MutateNda<T> - Nd-arrays should be used as immutable data structures mostly, however sometimes it
            is important to mutate their state for performance reasons.
        MutateNda.Item<V> - Instances of this are being returned by the Nda.at(int...) method, and they
            allow you to get or set individual nd-array items.
        MutateTensor<T> - Tensors should be considered immutable, however sometimes it is important to
            mutate their state for performance reasons.
        Nda<V> - Nda, which is an abbreviation of 'N-Dimensional-Array', represents a multidimensional,
            homogeneously filled fixed-size array of items.
        Nda.Item<V> - Instances of this are being returned by the Nda.at(int...) method, and they allow
            you to get individual nd-array items.
        Shape - Basically a tuple of integers which is used to describe the shape of an array.
        Tensor<V> - A Tensor is a mathematical concept and type of multidimensional data-structure with
            certain transformation properties.

    Class Summary:
        Neureka - Neureka is the key access point for thread local / global library settings
            (see Neureka.Settings) as well as execution contexts (see BackendContext) and
            pre-instantiated Functions.
        Neureka.Utility

    Enum Summary:
        Tensor.ImageType - Use this enum as argument for the Tensor.asImage(Tensor.ImageType) method to
            specify the type of image that should be returned.

docs/jdocs/neureka/package-tree.html (regenerated)
    "Hierarchy For Package neureka" with Class, Interface and Enum hierarchy sections (the heading
    "Enum Class Hierarchy" becomes "Enum Hierarchy" in the older Javadoc format); navigation only.
docs/jdocs/neureka/view/NDPrintSettings.html (regenerated: neureka 1.0.0 API -> neureka 1.0.1 API)

    Package neureka.view
    Class NDPrintSettings

    java.lang.Object
        neureka.view.NDPrintSettings

    public final class NDPrintSettings extends java.lang.Object
    This is simply a mutable container for configuring how Tensor instances ought to be converted
    to Strings.

    Constructor:
        NDPrintSettings(java.util.function.Supplier<java.lang.Boolean> notModifiable)

    Methods (each setter returns the NDPrintSettings instance):
        getCellSize() / setCellSize(int cellSize)
            A cell size refers to the number of characters reserved to the String representation of a
            single element. This property only becomes relevant when the getIsCellBound() flag is set;
            the width of the cell is then always of the specified size.
        getRowLimit() / setRowLimit(int shortage)
            Very large tensors with a rank larger than 1 might take a lot of vertical space when converted
            to a String. This property is the maximum number of matrix rows printed; it determines at
            which point the number of rows ought to be pruned.
        getHasGradient() / setHasGradient(boolean hasGradient)
            Whether the tensor should also print its gradient.
        getIsScientific() / setIsScientific(boolean isScientific)
            Whether numeric values should be formatted in scientific notation.
        getIsMultiline() / setIsMultiline(boolean isMultiline)
            Whether the tensor should be printed in one line or across multiple lines.
        getHasSlimNumbers() / setHasSlimNumbers(boolean haveSlimNumbers)
            Whether numbers should be formatted more compactly (1.0 to 1).
        getHasValue() / setHasValue(boolean hasValue)
            Whether the values of the tensor should be included in the String representation.
        getHasShape() / setHasShape(boolean hasShape)
            Whether the tensor should have its shape included in the String.
        getHasRecursiveGraph() / setHasRecursiveGraph(boolean hasRecursiveGraph)
            Whether the String representation of the tensor should have its computation graph attached
            (if present).
        getHasDerivatives() / setHasDerivatives(boolean hasDerivatives)
        getIsCellBound() / setIsCellBound(boolean isCellBound)
        getPrefix() / setPrefix(java.lang.String prefix)
            The String which will be prepended at the beginning of a Tensor string representation.
        getPostfix() / setPostfix(java.lang.String postfix)
            The String which will be appended at the end of a Tensor string representation.
        getIndent() / setIndent(java.lang.String indent)
            The indent step for a single level of nesting for String representations where
            getIsMultiline() is set to true.
        getIsLegacy() / setIsLegacy(boolean legacy)
            This flag determines the usage of bracket types, where "[1x3]:(1, 2, 3)" would be the legacy
            version of "(1x3):[1, 2, 3]".
        with(java.lang.String modes)
            modes - A String in which letters will be translated to settings.
            Returns: A NDPrintSettings configuration based on the provided modes.
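Because every setter listed above returns the NDPrintSettings instance itself, a configuration can be expressed as one fluent chain. The sketch below only uses setters documented on this page; how the settings object is obtained (from a tensor or from the global Neureka settings) is not shown in this excerpt, so it is passed in as a parameter.

    // Sketch: fluent configuration of an existing NDPrintSettings instance.
    // Only methods documented above are used; acquiring `settings` is left to the caller.
    import neureka.view.NDPrintSettings;

    class PrintSettingsExample {
        static NDPrintSettings configure(NDPrintSettings settings) {
            return settings
                    .setIsMultiline(true)     // spread the tensor over multiple lines
                    .setIsCellBound(true)     // enforce a fixed cell width ...
                    .setCellSize(8)           // ... of 8 characters per element
                    .setRowLimit(32)          // prune matrices taller than 32 rows
                    .setHasShape(true)        // include the shape in the output
                    .setHasSlimNumbers(true)  // print 1 instead of 1.0
                    .setIsLegacy(false);      // use the "(1x3):[1, 2, 3]" bracket style
        }
    }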
docs/jdocs/neureka/view/NdaAsString.Builder.html (regenerated: neureka 1.0.0 API -> neureka 1.0.1 API)

    Package neureka.view
    Interface NdaAsString.Builder (enclosing class: NdaAsString)

    public static interface NdaAsString.Builder
    A builder interface providing multiple different options for building a NdaAsString instance
    in a fluent way.

    Methods:
        NdaAsString withConfig(NDPrintSettings configMap)
            configMap - The configuration map used as basis for turning the wrapped Tensor to a String.
            Returns: A new NdaAsString based on the provided configuration.
        NdaAsString withConfig(java.lang.String config)
            config - The configuration used as basis for turning the wrapped Tensor to a String.
            Returns: A new NdaAsString based on the provided configuration.
        NdaAsString byDefaults()
            Returns: A new NdaAsString based on the default configuration.
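A small sketch of how these builder methods combine with NDPrintSettings and with the toString() method of the resulting NdaAsString. How a Builder instance is obtained is not shown in this excerpt, so it is taken as a parameter here; only the methods documented above are used.

    // Sketch: turning a builder into a String, either with an explicit configuration
    // or with the defaults. Acquiring the Builder itself is outside this excerpt.
    import neureka.view.NDPrintSettings;
    import neureka.view.NdaAsString;

    class NdaAsStringExample {
        static String render(NdaAsString.Builder builder, NDPrintSettings config) {
            NdaAsString asString = (config != null)
                    ? builder.withConfig(config) // use the provided NDPrintSettings
                    : builder.byDefaults();      // or fall back to the default configuration
            return asString.toString();          // NdaAsString overrides Object.toString()
        }
    }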
docs/jdocs/neureka/view/NdaAsString.Util.html (regenerated: neureka 1.0.0 API -> neureka 1.0.1 API)

    Package neureka.view
    Class NdaAsString.Util (enclosing class: NdaAsString)

    java.lang.Object
        neureka.view.NdaAsString.Util

    public static class NdaAsString.Util extends java.lang.Object
    This class is a simple utility class which contains a collection of static and stateless methods
    containing useful functionalities for tensor stringification.

    Constructor:
        Util()

    Static methods:
        static java.lang.String indent(int n)
        static java.lang.String spaces(int n)
        static java.lang.String pad(int left, java.lang.String s)
        static java.lang.String pad(java.lang.String s, int right)

    Methods inherited from class java.lang.Object:
        clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
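These helpers can also be called on their own. The page above only documents their signatures, so the behaviour comments in the sketch below are assumptions inferred from the method names, not documented behaviour.

    // Sketch: calling the stateless string helpers. Behaviour comments are assumptions
    // inferred from the method names; only the signatures are documented above.
    import neureka.view.NdaAsString;

    class UtilExample {
        public static void main(String[] args) {
            String indent = NdaAsString.Util.indent(2);      // presumably an indent string for nesting level 2
            String gap    = NdaAsString.Util.spaces(4);      // presumably four space characters
            String left   = NdaAsString.Util.pad(8, "3.14"); // presumably pads "3.14" on the left
            String right  = NdaAsString.Util.pad("3.14", 8); // presumably pads "3.14" on the right
            System.out.println("[" + indent + left + gap + right + "]");
        }
    }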
docs/jdocs/neureka/view/NdaAsString.html (regenerated: neureka 1.0.0 API -> neureka 1.0.1 API)

    Package neureka.view
    Class NdaAsString

    java.lang.Object
        neureka.view.NdaAsString

    public final class NdaAsString extends java.lang.Object
    This class is in essence a simple wrapper class for a tensor and a StringBuilder. Methods in this
    class use the builder in order to construct a String representation for said tensor. These methods
    perform the String building based on a set of certain configurations provided by the NDPrintSettings!

    Methods:
        public java.lang.String toString()
            Overrides: toString in class java.lang.Object

docs/jdocs/neureka/view/package-frame.html (new file)
    Frame-navigation page added by the older Javadoc format for package neureka.view.
docs/jdocs/neureka/view/package-summary.html (regenerated)
    Summary page for package neureka.view. It lists the interface NdaAsString.Builder and the classes
    NdaAsString, NdaAsString.Util and NDPrintSettings, each with the same description that appears on
    its individual page above.

docs/jdocs/neureka/view/package-tree.html (regenerated)
    "Hierarchy For Package neureka.view" with Class and Interface hierarchy sections; navigation only.

docs/jdocs/overview-frame.html (new file)
    Overview frame listing all packages of the library; navigation only.

docs/jdocs/overview-summary.html (rewritten)
    The old page was a redirect stub pointing at index.html; the regenerated page is titled
    "neureka 1.0.1 API" and tabulates every package of the library (neureka, neureka.autograd,
    neureka.backend.*, neureka.common.*, neureka.devices.*, neureka.dtype.*, neureka.fluent.*,
    neureka.framing.*, neureka.math.*, neureka.ndim.*, neureka.optimization.*, neureka.view).
    Several internal packages carry the warning "Everything in this package should be considered
    library-private! DO NOT DEPEND ON CLASSES INSIDE THIS PACKAGE! Code inside this package or any
    sub-packages might change frequently..." (the neureka.backend.main.* packages, neureka.dtype.custom,
    neureka.math.implementations, neureka.math.parsing), while
    neureka.backend.main.operations.linear.internal.blas, neureka.devices.host.concurrent and
    neureka.devices.host.machine carry "DO NOT USE CLASSES INSIDE THIS PACKAGE!".
    Hierarchy For All Packages

    -Package Hierarchies: +Package Hierarchies:
    -
    +

    Class Hierarchy

    -
    -
    +
  • neureka.math.args.Arg.Axis
  • +
  • neureka.math.args.Arg.Derivative<V>
  • +
  • neureka.math.args.Arg.DerivIdx
  • +
  • neureka.math.args.Arg.Ends
  • +
  • neureka.math.args.Arg.Indices
  • +
  • neureka.math.args.Arg.Layout
  • +
  • neureka.math.args.Arg.MinRank
  • +
  • neureka.math.args.Arg.Offset
  • +
  • neureka.math.args.Arg.Seed
  • +
  • neureka.math.args.Arg.Shape
  • +
  • neureka.math.args.Arg.Stride
  • +
  • neureka.math.args.Arg.TargetDevice
  • +
  • neureka.math.args.Arg.VarIdx
  • + + +
  • neureka.framing.fluent.AxisFrame<G,V>
  • +
  • neureka.framing.fluent.AxisFrame.Builder<SetType,GetType,ValueType>
  • +
  • neureka.fluent.slicing.AxisSliceBuilder<V> (implements neureka.fluent.slicing.states.AxisOrGetTensor<V>, neureka.fluent.slicing.states.FromOrAtTensor<V>, neureka.fluent.slicing.states.StepsOrAxisOrGetTensor<V>, neureka.fluent.slicing.states.ToForTensor<V>)
  • +
  • neureka.backend.main.operations.linear.internal.blas.AXPY
  • +
  • neureka.backend.api.BackendContext (implements java.lang.Cloneable)
  • +
  • neureka.backend.api.BackendContext.Runner
  • +
  • neureka.backend.api.BackendExtension.DeviceOption
  • +
  • neureka.backend.api.ini.BackendRegistry
  • +
  • neureka.devices.host.machine.BasicMachine + +
  • +
  • neureka.common.utility.Cache<O>
  • +
  • neureka.common.utility.Cache.LazyEntry<K,V>
  • +
  • neureka.backend.api.Call<D> + +
  • +
  • neureka.backend.api.Call.Builder<V,T>
  • +
  • neureka.backend.api.Call.Validator
  • +
  • neureka.backend.api.Call.Validator.Estimator
  • +
  • neureka.backend.ocl.CLBackend (implements neureka.backend.api.BackendExtension)
  • +
  • neureka.backend.main.implementations.linear.CLDot (implements neureka.backend.api.ImplementationFor<D>)
  • +
  • neureka.devices.opencl.utility.CLFunctionCompiler
  • +
  • neureka.backend.main.operations.linear.internal.opencl.CLGEMM (implements neureka.backend.api.ImplementationFor<D>)
  • +
  • neureka.backend.main.implementations.elementwise.CLRandomization (implements neureka.backend.api.ImplementationFor<D>)
  • +
  • neureka.backend.main.operations.linear.internal.opencl.CLReduce (implements neureka.backend.api.ImplementationFor<D>)
  • +
  • neureka.backend.main.implementations.scalar.CLScalarFunction (implements neureka.backend.api.ImplementationFor<D>)
  • +
  • neureka.backend.ocl.CLSettings
  • +
  • neureka.backend.main.operations.linear.internal.opencl.CLSum (implements neureka.backend.api.ImplementationFor<D>)
  • +
  • neureka.backend.main.operations.ConvUtil
  • +
  • neureka.backend.main.operations.linear.internal.blas.COPY
  • +
  • neureka.devices.host.CPU.JVMExecutor
  • +
  • neureka.backend.cpu.CPUBackend (implements neureka.backend.api.BackendExtension)
  • +
  • neureka.backend.main.implementations.elementwise.CPUBiElementWise (implements neureka.backend.api.ImplementationFor<D>) + +
  • +
  • neureka.backend.main.implementations.broadcast.CPUBroadcast (implements neureka.backend.api.ImplementationFor<D>) + +
  • +
  • neureka.backend.main.implementations.linear.CPUDot (implements neureka.backend.api.ImplementationFor<D>)
  • +
  • neureka.backend.main.implementations.elementwise.CPUElementwiseFunction (implements neureka.backend.api.ImplementationFor<D>) + +
  • +
  • neureka.backend.main.implementations.matmul.CPUMatMul (implements neureka.backend.api.ImplementationFor<D>)
  • +
  • neureka.backend.main.implementations.elementwise.CPURandomization (implements neureka.backend.api.ImplementationFor<D>)
  • +
  • neureka.backend.main.operations.other.internal.CPUReduce (implements neureka.backend.api.ImplementationFor<D>)
  • +
  • neureka.backend.main.implementations.broadcast.CPUScalarBroadcast (implements neureka.backend.api.ImplementationFor<D>) + +
  • +
  • neureka.backend.main.implementations.scalar.CPUScalarBroadcastFunction (implements neureka.backend.api.ImplementationFor<D>)
  • +
  • neureka.backend.main.implementations.scalar.CPUScalarFunction (implements neureka.backend.api.ImplementationFor<D>)
  • +
  • neureka.backend.main.operations.other.internal.CPUSum (implements neureka.backend.api.ImplementationFor<D>)
  • +
  • neureka.devices.file.CSVHandle
  • +
  • neureka.common.utility.DataConverter
  • +
  • neureka.common.utility.DataConverter.ForTensor
  • +
  • neureka.common.utility.DataConverter.Utility
  • +
  • neureka.dtype.DataType<T>
  • +
  • neureka.devices.opencl.utility.DeviceQuery
  • +
  • neureka.backend.main.operations.linear.internal.blas.DOT
  • +
  • neureka.backend.main.operations.ElemWiseUtil
  • +
  • neureka.backend.api.ExecutionCall.Builder<D>
  • +
  • neureka.dtype.custom.F32
  • +
  • neureka.dtype.custom.F64
  • +
  • neureka.math.FunctionCache
  • +
  • neureka.math.implementations.FunctionConstant (implements neureka.math.Function)
  • +
  • neureka.math.implementations.FunctionInput (implements neureka.math.Function)
  • +
  • neureka.math.implementations.FunctionNode (implements neureka.math.Function)
  • +
  • neureka.math.parsing.FunctionParser
  • +
  • neureka.math.Functions
  • +
  • neureka.math.implementations.FunctionVariable (implements neureka.math.Function)
  • +
  • neureka.backend.main.operations.linear.internal.blas.GEMM
  • +
  • neureka.autograd.GraphNode<V> (implements neureka.common.composition.Component<O>)
  • +
  • neureka.dtype.custom.I16
  • +
  • neureka.dtype.custom.I32
  • +
  • neureka.dtype.custom.I64
  • +
  • neureka.dtype.custom.I8
  • +
  • neureka.backend.main.operations.linear.internal.blas.IAXPY
  • +
  • neureka.backend.main.operations.linear.internal.blas.IDOT
  • +
  • neureka.devices.file.IDXHandle
  • +
  • neureka.backend.main.operations.linear.internal.blas.IGEMM
  • +
  • neureka.autograd.JITProp<V> (implements neureka.common.composition.Component<O>)
  • +
  • neureka.devices.opencl.KernelCache
  • +
  • neureka.devices.opencl.KernelCaller
  • +
  • neureka.devices.opencl.KernelCode
  • +
  • neureka.backend.api.LazyRef<V>
  • +
  • neureka.common.utility.ListReader
  • +
  • neureka.common.utility.ListReader.Result
  • +
  • neureka.common.utility.LogUtil
  • +
  • neureka.backend.main.memory.MemUtil
  • +
  • neureka.backend.main.memory.MemValidator
  • +
  • neureka.devices.opencl.utility.Messages
  • +
  • neureka.optimization.implementations.Momentum<V> (implements neureka.optimization.Optimizer<V>)
  • +
  • neureka.optimization.implementations.MomentumFactory (implements neureka.optimization.OptimizerFactory)
  • +
  • neureka.view.NdaAsString
  • +
  • neureka.view.NdaAsString.Util
  • +
  • neureka.fluent.building.NdaBuilder<V> (implements neureka.fluent.building.states.IterByOrIterFromOrAllTensor<V>, neureka.fluent.building.states.StepForTensor<V>, neureka.fluent.building.states.ToForTensor<V>, neureka.fluent.building.states.WithShapeOrScalarOrVectorOnDevice<V>)
  • +
  • neureka.ndim.config.NDConfiguration.Utility
  • +
  • neureka.framing.NDFrame<V> (implements neureka.common.composition.Component<O>)
  • +
  • neureka.view.NDPrintSettings
  • +
  • neureka.ndim.NDUtil
  • +
  • neureka.Neureka
  • +
  • neureka.Neureka.Settings
  • +
  • neureka.Neureka.Settings.AutoGrad
  • +
  • neureka.Neureka.Settings.Debug
  • +
  • neureka.Neureka.Settings.DType
  • +
  • neureka.Neureka.Settings.NDim
  • +
  • neureka.Neureka.Settings.View
  • +
  • neureka.Neureka.Utility
  • +
  • neureka.devices.opencl.OpenCLDevice.Query
  • +
  • neureka.devices.opencl.OpenCLPlatform
  • +
  • neureka.backend.api.template.operations.OperationBuilder
  • +
  • neureka.math.parsing.ParseUtil
  • +
  • neureka.devices.ReferenceCounter
  • +
  • neureka.devices.ReferenceCounter.ChangeEvent
  • +
  • neureka.framing.Relation<V> (implements neureka.common.composition.Component<O>)
  • +
  • neureka.backend.api.Result
  • +
  • neureka.optimization.implementations.RMSProp<V> (implements neureka.optimization.Optimizer<V>)
  • +
  • neureka.optimization.implementations.RMSPropFactory (implements neureka.optimization.OptimizerFactory)
  • +
  • neureka.backend.main.implementations.fun.ScalarAbsolute (implements neureka.backend.main.implementations.fun.api.ScalarFun)
  • +
  • neureka.backend.main.implementations.fun.ScalarCbrt (implements neureka.backend.main.implementations.fun.api.ScalarFun)
  • +
  • neureka.backend.main.implementations.fun.ScalarCosinus (implements neureka.backend.main.implementations.fun.api.ScalarFun)
  • +
  • neureka.backend.main.implementations.fun.ScalarExp (implements neureka.backend.main.implementations.fun.api.ScalarFun)
  • +
  • neureka.backend.main.implementations.fun.ScalarGaSU (implements neureka.backend.main.implementations.fun.api.ScalarFun)
  • +
  • neureka.backend.main.implementations.fun.ScalarGaTU (implements neureka.backend.main.implementations.fun.api.ScalarFun)
  • +
  • neureka.backend.main.implementations.fun.ScalarGaussian (implements neureka.backend.main.implementations.fun.api.ScalarFun)
  • +
  • neureka.backend.main.implementations.fun.ScalarGaussianFast (implements neureka.backend.main.implementations.fun.api.ScalarFun)
  • +
  • neureka.backend.main.implementations.fun.ScalarGeLU (implements neureka.backend.main.implementations.fun.api.ScalarFun)
  • +
  • neureka.backend.main.implementations.fun.ScalarIdentity (implements neureka.backend.main.implementations.fun.api.ScalarFun)
  • +
  • neureka.backend.main.implementations.fun.ScalarLog10 (implements neureka.backend.main.implementations.fun.api.ScalarFun)
  • +
  • neureka.backend.main.implementations.fun.ScalarLogarithm (implements neureka.backend.main.implementations.fun.api.ScalarFun)
  • +
  • neureka.backend.main.implementations.fun.ScalarQuadratic (implements neureka.backend.main.implementations.fun.api.ScalarFun)
  • +
  • neureka.backend.main.implementations.fun.ScalarReLU (implements neureka.backend.main.implementations.fun.api.ScalarFun)
  • +
  • neureka.backend.main.implementations.fun.ScalarSeLU (implements neureka.backend.main.implementations.fun.api.ScalarFun)
  • +
  • neureka.backend.main.implementations.fun.ScalarSigmoid (implements neureka.backend.main.implementations.fun.api.ScalarFun)
  • +
  • neureka.backend.main.implementations.fun.ScalarSiLU (implements neureka.backend.main.implementations.fun.api.ScalarFun)
  • +
  • neureka.backend.main.implementations.fun.ScalarSinus (implements neureka.backend.main.implementations.fun.api.ScalarFun)
  • +
  • neureka.backend.main.implementations.fun.ScalarSoftplus (implements neureka.backend.main.implementations.fun.api.ScalarFun)
  • +
  • neureka.backend.main.implementations.fun.ScalarSoftsign (implements neureka.backend.main.implementations.fun.api.ScalarFun)
  • +
  • neureka.backend.main.implementations.fun.ScalarSqrt (implements neureka.backend.main.implementations.fun.api.ScalarFun)
  • +
  • neureka.backend.main.implementations.fun.ScalarTanh (implements neureka.backend.main.implementations.fun.api.ScalarFun)
  • +
  • neureka.backend.main.implementations.fun.ScalarTanhFast (implements neureka.backend.main.implementations.fun.api.ScalarFun)
  • +
  • neureka.common.utility.SettingsLoader
  • +
  • neureka.optimization.implementations.SGD<V> (implements neureka.optimization.Optimizer<V>)
  • +
  • neureka.optimization.implementations.SGDFactory (implements neureka.optimization.OptimizerFactory)
  • +
  • neureka.fluent.slicing.SliceBuilder<V> (implements neureka.fluent.slicing.states.AxisOrGetTensor<V>)
  • +
  • neureka.ndim.iterator.types.sliced.SlicedNDIterator (implements neureka.ndim.iterator.NDIterator)
  • +
  • neureka.fluent.slicing.SmartSlicer
  • +
  • neureka.dtype.custom.UI16
  • +
  • neureka.dtype.custom.UI32
  • +
  • neureka.dtype.custom.UI64
  • +
  • neureka.dtype.custom.UI8
  • +
  • neureka.backend.main.algorithms.Util
  • +
  • neureka.ndim.iterator.types.virtual.VirtualNDIterator (implements neureka.ndim.iterator.NDIterator)
  • +
  • neureka.devices.host.concurrent.WorkScheduler
  • +
  • neureka.devices.host.concurrent.WorkScheduler.Divider
  • + + +

    Interface Hierarchy

    -
    -
    -

    Enum Class Hierarchy

    +

    Enum Hierarchy

    -
    -
    + +
    + + + + + + + +
diff --git a/docs/jdocs/element-list b/docs/jdocs/package-list
similarity index 100%
rename from docs/jdocs/element-list
rename to docs/jdocs/package-list
diff --git a/docs/jdocs/package-search-index.js b/docs/jdocs/package-search-index.js
deleted file mode 100644
index b0c0d9266..000000000
diff --git a/docs/jdocs/package-search-index.zip b/docs/jdocs/package-search-index.zip
deleted file mode 100644
index f6da60379..000000000
Binary files a/docs/jdocs/package-search-index.zip and /dev/null differ
diff --git a/docs/jdocs/resources/glass.png b/docs/jdocs/resources/glass.png
deleted file mode 100644
index a7f591f46..000000000
Binary files a/docs/jdocs/resources/glass.png and /dev/null differ
diff --git a/docs/jdocs/resources/x.png b/docs/jdocs/resources/x.png
deleted file mode 100644
index 30548a756..000000000
Binary files a/docs/jdocs/resources/x.png and /dev/null differ
diff --git a/docs/jdocs/script-dir/images/ui-bg_glass_55_fbf9ee_1x400.png b/docs/jdocs/script-dir/images/ui-bg_glass_55_fbf9ee_1x400.png
deleted file mode 100644
index 34abd18f3..000000000
Binary files a/docs/jdocs/script-dir/images/ui-bg_glass_55_fbf9ee_1x400.png and /dev/null differ
diff --git a/docs/jdocs/script-dir/images/ui-bg_glass_65_dadada_1x400.png b/docs/jdocs/script-dir/images/ui-bg_glass_65_dadada_1x400.png
deleted file mode 100644
index f058a9385..000000000
Binary files a/docs/jdocs/script-dir/images/ui-bg_glass_65_dadada_1x400.png and /dev/null differ
diff --git a/docs/jdocs/script-dir/images/ui-bg_glass_75_dadada_1x400.png b/docs/jdocs/script-dir/images/ui-bg_glass_75_dadada_1x400.png
deleted file mode 100644
index 2ce04c165..000000000
Binary files a/docs/jdocs/script-dir/images/ui-bg_glass_75_dadada_1x400.png and /dev/null differ
diff --git a/docs/jdocs/script-dir/images/ui-bg_glass_75_e6e6e6_1x400.png b/docs/jdocs/script-dir/images/ui-bg_glass_75_e6e6e6_1x400.png
deleted file mode 100644
index a90afb8bf..000000000
Binary files a/docs/jdocs/script-dir/images/ui-bg_glass_75_e6e6e6_1x400.png and /dev/null differ
diff --git a/docs/jdocs/script-dir/images/ui-bg_glass_95_fef1ec_1x400.png b/docs/jdocs/script-dir/images/ui-bg_glass_95_fef1ec_1x400.png
deleted file mode 100644
index dbe091f6d..000000000
Binary files a/docs/jdocs/script-dir/images/ui-bg_glass_95_fef1ec_1x400.png and /dev/null differ
diff --git a/docs/jdocs/script-dir/images/ui-bg_highlight-soft_75_cccccc_1x100.png b/docs/jdocs/script-dir/images/ui-bg_highlight-soft_75_cccccc_1x100.png
deleted file mode 100644
index 5dc3593e4..000000000
Binary files a/docs/jdocs/script-dir/images/ui-bg_highlight-soft_75_cccccc_1x100.png and /dev/null differ
diff --git a/docs/jdocs/script-dir/images/ui-icons_222222_256x240.png b/docs/jdocs/script-dir/images/ui-icons_222222_256x240.png
deleted file mode 100644
index e723e17cb..000000000
Binary files a/docs/jdocs/script-dir/images/ui-icons_222222_256x240.png and /dev/null differ
diff --git a/docs/jdocs/script-dir/images/ui-icons_2e83ff_256x240.png b/docs/jdocs/script-dir/images/ui-icons_2e83ff_256x240.png
deleted file mode 100644
index 1f5f49756..000000000
Binary files a/docs/jdocs/script-dir/images/ui-icons_2e83ff_256x240.png and /dev/null differ
diff --git a/docs/jdocs/script-dir/images/ui-icons_454545_256x240.png b/docs/jdocs/script-dir/images/ui-icons_454545_256x240.png
deleted file mode 100644
index 618f5b0ca..000000000
Binary files a/docs/jdocs/script-dir/images/ui-icons_454545_256x240.png and /dev/null differ
diff --git a/docs/jdocs/script-dir/images/ui-icons_888888_256x240.png b/docs/jdocs/script-dir/images/ui-icons_888888_256x240.png
deleted file mode 100644
index ee5e33f27..000000000
Binary files a/docs/jdocs/script-dir/images/ui-icons_888888_256x240.png and /dev/null differ
diff --git a/docs/jdocs/script-dir/images/ui-icons_cd0a0a_256x240.png b/docs/jdocs/script-dir/images/ui-icons_cd0a0a_256x240.png
deleted file mode 100644
index 7e8ebc180..000000000
Binary files a/docs/jdocs/script-dir/images/ui-icons_cd0a0a_256x240.png and /dev/null differ
diff --git a/docs/jdocs/script-dir/jquery-3.5.1.min.js b/docs/jdocs/script-dir/jquery-3.5.1.min.js
deleted file mode 100644
index b0614034a..000000000
--- a/docs/jdocs/script-dir/jquery-3.5.1.min.js
+++ /dev/null
@@ -1,2 +0,0 @@
-/*! jQuery v3.5.1 | (c) JS Foundation and other contributors | jquery.org/license */
        ").text(i).appendTo(this.liveRegion))}}),t.ui.autocomplete}); \ No newline at end of file diff --git a/docs/jdocs/script-dir/jquery-ui.structure.min.css b/docs/jdocs/script-dir/jquery-ui.structure.min.css deleted file mode 100644 index e8808927f..000000000 --- a/docs/jdocs/script-dir/jquery-ui.structure.min.css +++ /dev/null @@ -1,5 +0,0 @@ -/*! jQuery UI - v1.12.1 - 2018-12-06 -* http://jqueryui.com -* Copyright jQuery Foundation and other contributors; Licensed MIT */ - -.ui-helper-hidden{display:none}.ui-helper-hidden-accessible{border:0;clip:rect(0 0 0 0);height:1px;margin:-1px;overflow:hidden;padding:0;position:absolute;width:1px}.ui-helper-reset{margin:0;padding:0;border:0;outline:0;line-height:1.3;text-decoration:none;font-size:100%;list-style:none}.ui-helper-clearfix:before,.ui-helper-clearfix:after{content:"";display:table;border-collapse:collapse}.ui-helper-clearfix:after{clear:both}.ui-helper-zfix{width:100%;height:100%;top:0;left:0;position:absolute;opacity:0;filter:Alpha(Opacity=0)}.ui-front{z-index:100}.ui-state-disabled{cursor:default!important;pointer-events:none}.ui-icon{display:inline-block;vertical-align:middle;margin-top:-.25em;position:relative;text-indent:-99999px;overflow:hidden;background-repeat:no-repeat}.ui-widget-icon-block{left:50%;margin-left:-8px;display:block}.ui-widget-overlay{position:fixed;top:0;left:0;width:100%;height:100%}.ui-autocomplete{position:absolute;top:0;left:0;cursor:default}.ui-menu{list-style:none;padding:0;margin:0;display:block;outline:0}.ui-menu .ui-menu{position:absolute}.ui-menu .ui-menu-item{margin:0;cursor:pointer;list-style-image:url("data:image/gif;base64,R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7")}.ui-menu .ui-menu-item-wrapper{position:relative;padding:3px 1em 3px .4em}.ui-menu .ui-menu-divider{margin:5px 0;height:0;font-size:0;line-height:0;border-width:1px 0 0 0}.ui-menu .ui-state-focus,.ui-menu .ui-state-active{margin:-1px}.ui-menu-icons{position:relative}.ui-menu-icons .ui-menu-item-wrapper{padding-left:2em}.ui-menu .ui-icon{position:absolute;top:0;bottom:0;left:.2em;margin:auto 0}.ui-menu .ui-menu-icon{left:auto;right:0} \ No newline at end of file diff --git a/docs/jdocs/script.js b/docs/jdocs/script.js index 864989cf4..b34635693 100644 --- a/docs/jdocs/script.js +++ b/docs/jdocs/script.js @@ -1,132 +1,30 @@ -/* - * Copyright (c) 2013, 2020, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. Oracle designates this - * particular file as subject to the "Classpath" exception as provided - * by Oracle in the LICENSE file that accompanied this code. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
- * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -var moduleSearchIndex; -var packageSearchIndex; -var typeSearchIndex; -var memberSearchIndex; -var tagSearchIndex; -function loadScripts(doc, tag) { - createElem(doc, tag, 'search.js'); - - createElem(doc, tag, 'module-search-index.js'); - createElem(doc, tag, 'package-search-index.js'); - createElem(doc, tag, 'type-search-index.js'); - createElem(doc, tag, 'member-search-index.js'); - createElem(doc, tag, 'tag-search-index.js'); -} - -function createElem(doc, tag, path) { - var script = doc.createElement(tag); - var scriptElement = doc.getElementsByTagName(tag)[0]; - script.src = pathtoroot + path; - scriptElement.parentNode.insertBefore(script, scriptElement); -} - -function show(tableId, selected, columns) { - if (tableId !== selected) { - document.querySelectorAll('div.' + tableId + ':not(.' + selected + ')') - .forEach(function(elem) { - elem.style.display = 'none'; - }); - } - document.querySelectorAll('div.' + selected) - .forEach(function(elem, index) { - elem.style.display = ''; - var isEvenRow = index % (columns * 2) < columns; - elem.classList.remove(isEvenRow ? oddRowColor : evenRowColor); - elem.classList.add(isEvenRow ? evenRowColor : oddRowColor); - }); - updateTabs(tableId, selected); -} - -function updateTabs(tableId, selected) { - document.querySelector('div#' + tableId +' .summary-table') - .setAttribute('aria-labelledby', selected); - document.querySelectorAll('button[id^="' + tableId + '"]') - .forEach(function(tab, index) { - if (selected === tab.id || (tableId === selected && index === 0)) { - tab.className = activeTableTab; - tab.setAttribute('aria-selected', true); - tab.setAttribute('tabindex',0); - } else { - tab.className = tableTab; - tab.setAttribute('aria-selected', false); - tab.setAttribute('tabindex',-1); - } - }); -} - -function switchTab(e) { - var selected = document.querySelector('[aria-selected=true]'); - if (selected) { - if ((e.keyCode === 37 || e.keyCode === 38) && selected.previousSibling) { - // left or up arrow key pressed: move focus to previous tab - selected.previousSibling.click(); - selected.previousSibling.focus(); - e.preventDefault(); - } else if ((e.keyCode === 39 || e.keyCode === 40) && selected.nextSibling) { - // right or down arrow key pressed: move focus to next tab - selected.nextSibling.click(); - selected.nextSibling.focus(); - e.preventDefault(); +function show(type) +{ + count = 0; + for (var key in methods) { + var row = document.getElementById(key); + if ((methods[key] & type) != 0) { + row.style.display = ''; + row.className = (count++ % 2) ? 
rowColor : altColor; } + else + row.style.display = 'none'; } + updateTabs(type); } -var updateSearchResults = function() {}; - -function indexFilesLoaded() { - return moduleSearchIndex - && packageSearchIndex - && typeSearchIndex - && memberSearchIndex - && tagSearchIndex; -} - -// Workaround for scroll position not being included in browser history (8249133) -document.addEventListener("DOMContentLoaded", function(e) { - var contentDiv = document.querySelector("div.flex-content"); - window.addEventListener("popstate", function(e) { - if (e.state !== null) { - contentDiv.scrollTop = e.state; +function updateTabs(type) +{ + for (var value in tabs) { + var sNode = document.getElementById(tabs[value][0]); + var spanNode = sNode.firstChild; + if (value == type) { + sNode.className = activeTableTab; + spanNode.innerHTML = tabs[value][1]; } - }); - window.addEventListener("hashchange", function(e) { - history.replaceState(contentDiv.scrollTop, document.title); - }); - contentDiv.addEventListener("scroll", function(e) { - var timeoutID; - if (!timeoutID) { - timeoutID = setTimeout(function() { - history.replaceState(contentDiv.scrollTop, document.title); - timeoutID = null; - }, 100); + else { + sNode.className = tableTab; + spanNode.innerHTML = "" + tabs[value][1] + ""; } - }); - if (!location.hash) { - history.replaceState(contentDiv.scrollTop, document.title); } -}); +} diff --git a/docs/jdocs/search.js b/docs/jdocs/search.js deleted file mode 100644 index db3b2f4a6..000000000 --- a/docs/jdocs/search.js +++ /dev/null @@ -1,354 +0,0 @@ -/* - * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. Oracle designates this - * particular file as subject to the "Classpath" exception as provided - * by Oracle in the LICENSE file that accompanied this code. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
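The restored show()/updateTabs() pair only works because javadoc emits, into each generated class page, the lookup tables and CSS class names these functions read (methods, tabs, rowColor, altColor, tableTab, activeTableTab). Those definitions are not part of this change, so the snippet below is only a sketch of the kind of page-level globals the older javadoc layout declares; the concrete ids, bit masks and labels are illustrative assumptions, not values copied from the generated output:

// Illustrative page-level globals (assumed shapes and values, not taken from the generated pages):
var methods = {"i0": 2, "i1": 4};             // table row id -> bit mask of method kinds
var tabs = {65535: ["t0", "All Methods"],     // bit mask -> [tab element id, tab label]
            2: ["t2", "Instance Methods"],
            4: ["t4", "Abstract Methods"]};
var rowColor = "rowColor";                    // CSS classes used for row striping
var altColor = "altColor";
var tableTab = "tableTab";                    // CSS classes used for inactive/active tabs
var activeTableTab = "activeTableTab";
// Clicking a tab then simply calls show(mask), e.g. show(2) to list only instance methods.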
- */ - -var noResult = {l: "No results found"}; -var loading = {l: "Loading search index..."}; -var catModules = "Modules"; -var catPackages = "Packages"; -var catTypes = "Classes and Interfaces"; -var catMembers = "Members"; -var catSearchTags = "Search Tags"; -var highlight = "$&"; -var searchPattern = ""; -var fallbackPattern = ""; -var RANKING_THRESHOLD = 2; -var NO_MATCH = 0xffff; -var MIN_RESULTS = 3; -var MAX_RESULTS = 500; -var UNNAMED = ""; -function escapeHtml(str) { - return str.replace(//g, ">"); -} -function getHighlightedText(item, matcher, fallbackMatcher) { - var escapedItem = escapeHtml(item); - var highlighted = escapedItem.replace(matcher, highlight); - if (highlighted === escapedItem) { - highlighted = escapedItem.replace(fallbackMatcher, highlight) - } - return highlighted; -} -function getURLPrefix(ui) { - var urlPrefix=""; - var slash = "/"; - if (ui.item.category === catModules) { - return ui.item.l + slash; - } else if (ui.item.category === catPackages && ui.item.m) { - return ui.item.m + slash; - } else if (ui.item.category === catTypes || ui.item.category === catMembers) { - if (ui.item.m) { - urlPrefix = ui.item.m + slash; - } else { - $.each(packageSearchIndex, function(index, item) { - if (item.m && ui.item.p === item.l) { - urlPrefix = item.m + slash; - } - }); - } - } - return urlPrefix; -} -function createSearchPattern(term) { - var pattern = ""; - var isWordToken = false; - term.replace(/,\s*/g, ", ").trim().split(/\s+/).forEach(function(w, index) { - if (index > 0) { - // whitespace between identifiers is significant - pattern += (isWordToken && /^\w/.test(w)) ? "\\s+" : "\\s*"; - } - var tokens = w.split(/(?=[A-Z,.()<>[\/])/); - for (var i = 0; i < tokens.length; i++) { - var s = tokens[i]; - if (s === "") { - continue; - } - pattern += $.ui.autocomplete.escapeRegex(s); - isWordToken = /\w$/.test(s); - if (isWordToken) { - pattern += "([a-z0-9_$<>\\[\\]]*?)"; - } - } - }); - return pattern; -} -function createMatcher(pattern, flags) { - var isCamelCase = /[A-Z]/.test(pattern); - return new RegExp(pattern, flags + (isCamelCase ? "" : "i")); -} -var watermark = 'Search'; -$(function() { - var search = $("#search-input"); - var reset = $("#reset-button"); - search.val(''); - search.prop("disabled", false); - reset.prop("disabled", false); - search.val(watermark).addClass('watermark'); - search.blur(function() { - if ($(this).val().length === 0) { - $(this).val(watermark).addClass('watermark'); - } - }); - search.on('click keydown paste', function() { - if ($(this).val() === watermark) { - $(this).val('').removeClass('watermark'); - } - }); - reset.click(function() { - search.val('').focus(); - }); - search.focus()[0].setSelectionRange(0, 0); -}); -$.widget("custom.catcomplete", $.ui.autocomplete, { - _create: function() { - this._super(); - this.widget().menu("option", "items", "> :not(.ui-autocomplete-category)"); - }, - _renderMenu: function(ul, items) { - var rMenu = this; - var currentCategory = ""; - rMenu.menu.bindings = $(); - $.each(items, function(index, item) { - var li; - if (item.category && item.category !== currentCategory) { - ul.append("
<li class=\"ui-autocomplete-category\">" + item.category + "</li>
      • "); - currentCategory = item.category; - } - li = rMenu._renderItemData(ul, item); - if (item.category) { - li.attr("aria-label", item.category + " : " + item.l); - li.attr("class", "result-item"); - } else { - li.attr("aria-label", item.l); - li.attr("class", "result-item"); - } - }); - }, - _renderItem: function(ul, item) { - var label = ""; - var matcher = createMatcher(escapeHtml(searchPattern), "g"); - var fallbackMatcher = new RegExp(fallbackPattern, "gi") - if (item.category === catModules) { - label = getHighlightedText(item.l, matcher, fallbackMatcher); - } else if (item.category === catPackages) { - label = getHighlightedText(item.l, matcher, fallbackMatcher); - } else if (item.category === catTypes) { - label = (item.p && item.p !== UNNAMED) - ? getHighlightedText(item.p + "." + item.l, matcher, fallbackMatcher) - : getHighlightedText(item.l, matcher, fallbackMatcher); - } else if (item.category === catMembers) { - label = (item.p && item.p !== UNNAMED) - ? getHighlightedText(item.p + "." + item.c + "." + item.l, matcher, fallbackMatcher) - : getHighlightedText(item.c + "." + item.l, matcher, fallbackMatcher); - } else if (item.category === catSearchTags) { - label = getHighlightedText(item.l, matcher, fallbackMatcher); - } else { - label = item.l; - } - var li = $("
<li/>").appendTo(ul); - var div = $("<div/>").appendTo(li); - if (item.category === catSearchTags && item.h) { - if (item.d) { - div.html(label + "<span class=\"search-tag-holder-result\"> (" + item.h + ")</span><br><span class=\"search-tag-desc-result\">" - + item.d + "</span><br>
        "); - } else { - div.html(label + " (" + item.h + ")"); - } - } else { - if (item.m) { - div.html(item.m + "/" + label); - } else { - div.html(label); - } - } - return li; - } -}); -function rankMatch(match, category) { - if (!match) { - return NO_MATCH; - } - var index = match.index; - var input = match.input; - var leftBoundaryMatch = 2; - var periferalMatch = 0; - // make sure match is anchored on a left word boundary - if (index === 0 || /\W/.test(input[index - 1]) || "_" === input[index]) { - leftBoundaryMatch = 0; - } else if ("_" === input[index - 1] || (input[index] === input[index].toUpperCase() && !/^[A-Z0-9_$]+$/.test(input))) { - leftBoundaryMatch = 1; - } - var matchEnd = index + match[0].length; - var leftParen = input.indexOf("("); - var endOfName = leftParen > -1 ? leftParen : input.length; - // exclude peripheral matches - if (category !== catModules && category !== catSearchTags) { - var delim = category === catPackages ? "/" : "."; - if (leftParen > -1 && leftParen < index) { - periferalMatch += 2; - } else if (input.lastIndexOf(delim, endOfName) >= matchEnd) { - periferalMatch += 2; - } - } - var delta = match[0].length === endOfName ? 0 : 1; // rank full match higher than partial match - for (var i = 1; i < match.length; i++) { - // lower ranking if parts of the name are missing - if (match[i]) - delta += match[i].length; - } - if (category === catTypes) { - // lower ranking if a type name contains unmatched camel-case parts - if (/[A-Z]/.test(input.substring(matchEnd))) - delta += 5; - if (/[A-Z]/.test(input.substring(0, index))) - delta += 5; - } - return leftBoundaryMatch + periferalMatch + (delta / 200); - -} -function doSearch(request, response) { - var result = []; - searchPattern = createSearchPattern(request.term); - fallbackPattern = createSearchPattern(request.term.toLowerCase()); - if (searchPattern === "") { - return this.close(); - } - var camelCaseMatcher = createMatcher(searchPattern, ""); - var fallbackMatcher = new RegExp(fallbackPattern, "i"); - - function searchIndexWithMatcher(indexArray, matcher, category, nameFunc) { - if (indexArray) { - var newResults = []; - $.each(indexArray, function (i, item) { - item.category = category; - var ranking = rankMatch(matcher.exec(nameFunc(item)), category); - if (ranking < RANKING_THRESHOLD) { - newResults.push({ranking: ranking, item: item}); - } - return newResults.length <= MAX_RESULTS; - }); - return newResults.sort(function(e1, e2) { - return e1.ranking - e2.ranking; - }).map(function(e) { - return e.item; - }); - } - return []; - } - function searchIndex(indexArray, category, nameFunc) { - var primaryResults = searchIndexWithMatcher(indexArray, camelCaseMatcher, category, nameFunc); - result = result.concat(primaryResults); - if (primaryResults.length <= MIN_RESULTS && !camelCaseMatcher.ignoreCase) { - var secondaryResults = searchIndexWithMatcher(indexArray, fallbackMatcher, category, nameFunc); - result = result.concat(secondaryResults.filter(function (item) { - return primaryResults.indexOf(item) === -1; - })); - } - } - - searchIndex(moduleSearchIndex, catModules, function(item) { return item.l; }); - searchIndex(packageSearchIndex, catPackages, function(item) { - return (item.m && request.term.indexOf("/") > -1) - ? (item.m + "/" + item.l) : item.l; - }); - searchIndex(typeSearchIndex, catTypes, function(item) { - return request.term.indexOf(".") > -1 ? item.p + "." 
+ item.l : item.l; - }); - searchIndex(memberSearchIndex, catMembers, function(item) { - return request.term.indexOf(".") > -1 - ? item.p + "." + item.c + "." + item.l : item.l; - }); - searchIndex(tagSearchIndex, catSearchTags, function(item) { return item.l; }); - - if (!indexFilesLoaded()) { - updateSearchResults = function() { - doSearch(request, response); - } - result.unshift(loading); - } else { - updateSearchResults = function() {}; - } - response(result); -} -$(function() { - $("#search-input").catcomplete({ - minLength: 1, - delay: 300, - source: doSearch, - response: function(event, ui) { - if (!ui.content.length) { - ui.content.push(noResult); - } else { - $("#search-input").empty(); - } - }, - autoFocus: true, - focus: function(event, ui) { - return false; - }, - position: { - collision: "flip" - }, - select: function(event, ui) { - if (ui.item.category) { - var url = getURLPrefix(ui); - if (ui.item.category === catModules) { - url += "module-summary.html"; - } else if (ui.item.category === catPackages) { - if (ui.item.u) { - url = ui.item.u; - } else { - url += ui.item.l.replace(/\./g, '/') + "/package-summary.html"; - } - } else if (ui.item.category === catTypes) { - if (ui.item.u) { - url = ui.item.u; - } else if (ui.item.p === UNNAMED) { - url += ui.item.l + ".html"; - } else { - url += ui.item.p.replace(/\./g, '/') + "/" + ui.item.l + ".html"; - } - } else if (ui.item.category === catMembers) { - if (ui.item.p === UNNAMED) { - url += ui.item.c + ".html" + "#"; - } else { - url += ui.item.p.replace(/\./g, '/') + "/" + ui.item.c + ".html" + "#"; - } - if (ui.item.u) { - url += ui.item.u; - } else { - url += ui.item.l; - } - } else if (ui.item.category === catSearchTags) { - url += ui.item.u; - } - if (top !== window) { - parent.classFrame.location = pathtoroot + url; - } else { - window.location.href = pathtoroot + url; - } - $("#search-input").focus(); - } - } - }); -}); diff --git a/docs/jdocs/stylesheet.css b/docs/jdocs/stylesheet.css index 836c62da8..98055b22d 100644 --- a/docs/jdocs/stylesheet.css +++ b/docs/jdocs/stylesheet.css @@ -1,45 +1,36 @@ +/* Javadoc style sheet */ /* - * Javadoc style sheet - */ +Overall document style +*/ @import url('resources/fonts/dejavu.css'); -/* - * Styles for individual HTML elements. - * - * These are styles that are specific to individual HTML elements. Changing them affects the style of a particular - * HTML element throughout the page. 
- */ - body { background-color:#ffffff; color:#353833; font-family:'DejaVu Sans', Arial, Helvetica, sans-serif; font-size:14px; margin:0; - padding:0; - height:100%; - width:100%; -} -iframe { - margin:0; - padding:0; - height:100%; - width:100%; - overflow-y:scroll; - border:none; } a:link, a:visited { text-decoration:none; color:#4A6782; } -a[href]:hover, a[href]:focus { +a:hover, a:focus { text-decoration:none; color:#bb7a2a; } +a:active { + text-decoration:none; + color:#4A6782; +} a[name] { color:#353833; } +a[name]:hover { + text-decoration:none; + color:#353833; +} pre { font-family:'DejaVu Sans Mono', monospace; font-size:14px; @@ -52,24 +43,22 @@ h2 { } h3 { font-size:16px; + font-style:italic; } h4 { - font-size:15px; + font-size:13px; } h5 { - font-size:14px; + font-size:12px; } h6 { - font-size:13px; + font-size:11px; } ul { list-style-type:disc; } code, tt { font-family:'DejaVu Sans Mono', monospace; -} -:not(h1, h2, h3, h4, h5, h6) > code, -:not(h1, h2, h3, h4, h5, h6) > tt { font-size:14px; padding-top:4px; margin-top:8px; @@ -80,7 +69,7 @@ dt code { font-size:14px; padding-top:4px; } -.summary-table dt code { +table tr td dt code { font-family:'DejaVu Sans Mono', monospace; font-size:14px; vertical-align:top; @@ -89,34 +78,31 @@ dt code { sup { font-size:8px; } -button { - font-family: 'DejaVu Sans', Arial, Helvetica, sans-serif; - font-size: 14px; -} -/* - * Styles for HTML generated by javadoc. - * - * These are style classes that are used by the standard doclet to generate HTML documentation. - */ - /* - * Styles for document title and copyright. - */ +Document title and Copyright styles +*/ .clear { clear:both; - height:0; + height:0px; overflow:hidden; } -.about-language { +.aboutLanguage { float:right; - padding:0 21px 8px 8px; + padding:0px 21px; font-size:11px; + z-index:200; margin-top:-9px; - height:2.9em; } -.legal-copy { +.legalCopy { margin-left:.5em; } +.bar a, .bar a:link, .bar a:visited, .bar a:active { + color:#FFFFFF; + text-decoration:none; +} +.bar a:hover, .bar a:focus { + color:#bb7a2a; +} .tab { background-color:#0066FF; color:#ffffff; @@ -125,240 +111,263 @@ button { font-weight:bold; } /* - * Styles for navigation bar. 
- */ -@media screen { - .flex-box { - position:fixed; - display:flex; - flex-direction:column; - height: 100%; - width: 100%; - } - .flex-header { - flex: 0 0 auto; - } - .flex-content { - flex: 1 1 auto; - overflow-y: auto; - } -} -.top-nav { +Navigation bar styles +*/ +.bar { + background-color:#4D7A97; + color:#FFFFFF; + padding:.8em .5em .4em .8em; + height:auto;/*height:1.8em;*/ + font-size:11px; + margin:0; +} +.topNav { + background-color:#4D7A97; + color:#FFFFFF; + float:left; + padding:0; + width:100%; + clear:right; + height:2.8em; + padding-top:10px; + overflow:hidden; + font-size:12px; +} +.bottomNav { + margin-top:10px; background-color:#4D7A97; color:#FFFFFF; float:left; padding:0; width:100%; clear:right; - min-height:2.8em; + height:2.8em; padding-top:10px; overflow:hidden; font-size:12px; } -.sub-nav { +.subNav { background-color:#dee3e9; float:left; width:100%; overflow:hidden; font-size:12px; } -.sub-nav div { +.subNav div { clear:left; float:left; padding:0 0 5px 6px; text-transform:uppercase; } -.sub-nav .nav-list { - padding-top:5px; -} -ul.nav-list { - display:block; - margin:0 25px 0 0; - padding:0; -} -ul.sub-nav-list { +ul.navList, ul.subNavList { float:left; margin:0 25px 0 0; padding:0; } -ul.nav-list li { +ul.navList li{ list-style:none; float:left; padding: 5px 6px; text-transform:uppercase; } -.sub-nav .nav-list-search { - float:right; - margin:0 0 0 0; - padding:5px 6px; - clear:none; -} -.nav-list-search label { - position:relative; - right:-16px; -} -ul.sub-nav-list li { +ul.subNavList li{ list-style:none; float:left; - padding-top:10px; } -.top-nav a:link, .top-nav a:active, .top-nav a:visited { +.topNav a:link, .topNav a:active, .topNav a:visited, .bottomNav a:link, .bottomNav a:active, .bottomNav a:visited { color:#FFFFFF; text-decoration:none; text-transform:uppercase; } -.top-nav a:hover { +.topNav a:hover, .bottomNav a:hover { text-decoration:none; color:#bb7a2a; text-transform:uppercase; } -.nav-bar-cell1-rev { +.navBarCell1Rev { background-color:#F8981D; color:#253441; margin: auto 5px; } -.skip-nav { +.skipNav { position:absolute; top:auto; left:-9999px; overflow:hidden; } /* - * Hide navigation links and search box in print layout - */ -@media print { - ul.nav-list, div.sub-nav { - display:none; - } +Page header and footer styles +*/ +.header, .footer { + clear:both; + margin:0 20px; + padding:5px 0 0 0; +} +.indexHeader { + margin:10px; + position:relative; +} +.indexHeader span{ + margin-right:15px; +} +.indexHeader h1 { + font-size:13px; } -/* - * Styles for page header and footer. - */ .title { color:#2c4557; margin:10px 0; } -.sub-title { +.subTitle { margin:5px 0 0 0; } .header ul { margin:0 0 15px 0; padding:0; } +.footer ul { + margin:20px 0 5px 0; +} .header ul li, .footer ul li { list-style:none; font-size:13px; } /* - * Styles for headings. 
- */ -body.class-declaration-page .summary h2, -body.class-declaration-page .details h2, -body.class-use-page h2, -body.module-declaration-page .block-list h2 { - font-style: italic; - padding:0; - margin:15px 0; +Heading styles +*/ +div.details ul.blockList ul.blockList ul.blockList li.blockList h4, div.details ul.blockList ul.blockList ul.blockListLast li.blockList h4 { + background-color:#dee3e9; + border:1px solid #d0d9e0; + margin:0 0 6px -8px; + padding:7px 5px; } -body.class-declaration-page .summary h3, -body.class-declaration-page .details h3, -body.class-declaration-page .summary .inherited-list h2 { +ul.blockList ul.blockList ul.blockList li.blockList h3 { background-color:#dee3e9; border:1px solid #d0d9e0; margin:0 0 6px -8px; padding:7px 5px; } +ul.blockList ul.blockList li.blockList h3 { + padding:0; + margin:15px 0; +} +ul.blockList li.blockList h2 { + padding:0px 0 20px 0; +} /* - * Styles for page layout containers. - */ -main { +Page layout container styles +*/ +.contentContainer, .sourceContainer, .classUseContainer, .serializedFormContainer, .constantValuesContainer { clear:both; padding:10px 20px; position:relative; } -dl.notes > dt { - font-family: 'DejaVu Sans', Arial, Helvetica, sans-serif; +.indexContainer { + margin:10px; + position:relative; + font-size:12px; +} +.indexContainer h2 { + font-size:13px; + padding:0 0 3px 0; +} +.indexContainer ul { + margin:0; + padding:0; +} +.indexContainer ul li { + list-style:none; + padding-top:2px; +} +.contentContainer .description dl dt, .contentContainer .details dl dt, .serializedFormContainer dl dt { font-size:12px; font-weight:bold; margin:10px 0 0 0; color:#4E4E4E; } -dl.notes > dd { - margin:5px 10px 10px 0; +.contentContainer .description dl dd, .contentContainer .details dl dd, .serializedFormContainer dl dd { + margin:5px 0 10px 0px; font-size:14px; - font-family:'DejaVu Serif', Georgia, "Times New Roman", Times, serif; + font-family:'DejaVu Sans Mono',monospace; } -dl.name-value > dt { +.serializedFormContainer dl.nameValue dt { margin-left:1px; font-size:1.1em; display:inline; font-weight:bold; } -dl.name-value > dd { +.serializedFormContainer dl.nameValue dd { margin:0 0 0 1px; font-size:1.1em; display:inline; } /* - * Styles for lists. 
- */ -li.circle { - list-style:circle; -} +List styles +*/ ul.horizontal li { display:inline; font-size:0.9em; } -div.inheritance { +ul.inheritance { margin:0; padding:0; } -div.inheritance div.inheritance { - margin-left:2em; +ul.inheritance li { + display:inline; + list-style:none; } -ul.block-list, -ul.details-list, -ul.member-list, -ul.summary-list { +ul.inheritance li ul.inheritance { + margin-left:15px; + padding-left:15px; + padding-top:1px; +} +ul.blockList, ul.blockListLast { margin:10px 0 10px 0; padding:0; } -ul.block-list > li, -ul.details-list > li, -ul.member-list > li, -ul.summary-list > li { +ul.blockList li.blockList, ul.blockListLast li.blockList { list-style:none; margin-bottom:15px; line-height:1.4; } -.summary-table dl, .summary-table dl dt, .summary-table dl dd { - margin-top:0; - margin-bottom:1px; +ul.blockList ul.blockList li.blockList, ul.blockList ul.blockListLast li.blockList { + padding:0px 20px 5px 10px; + border:1px solid #ededed; + background-color:#f8f8f8; +} +ul.blockList ul.blockList ul.blockList li.blockList, ul.blockList ul.blockList ul.blockListLast li.blockList { + padding:0 0 5px 8px; + background-color:#ffffff; + border:none; } -ul.see-list, ul.see-list-long { - padding-left: 0; - list-style: none; +ul.blockList ul.blockList ul.blockList ul.blockList li.blockList { + margin-left:0; + padding-left:0; + padding-bottom:15px; + border:none; } -ul.see-list li { - display: inline; +ul.blockList ul.blockList ul.blockList ul.blockList li.blockListLast { + list-style:none; + border-bottom:none; + padding-bottom:0; } -ul.see-list li:not(:last-child):after, -ul.see-list-long li:not(:last-child):after { - content: ", "; - white-space: pre-wrap; +table tr td dl, table tr td dl dt, table tr td dl dd { + margin-top:0; + margin-bottom:1px; } /* - * Styles for tables. 
- */ -.summary-table, .details-table { +Table styles +*/ +.overviewSummary, .memberSummary, .typeSummary, .useSummary, .constantsSummary, .deprecatedSummary { width:100%; - border-spacing:0; - border-left:1px solid #EEE; - border-right:1px solid #EEE; - border-bottom:1px solid #EEE; - padding:0; + border-left:1px solid #EEE; + border-right:1px solid #EEE; + border-bottom:1px solid #EEE; } -.caption { +.overviewSummary, .memberSummary { + padding:0px; +} +.overviewSummary caption, .memberSummary caption, .typeSummary caption, +.useSummary caption, .constantsSummary caption, .deprecatedSummary caption { position:relative; text-align:left; background-repeat:no-repeat; @@ -366,20 +375,24 @@ ul.see-list-long li:not(:last-child):after { font-weight:bold; clear:none; overflow:hidden; - padding:0; + padding:0px; padding-top:10px; padding-left:1px; - margin:0; + margin:0px; white-space:pre; } -.caption a:link, .caption a:visited { - color:#1f389c; -} -.caption a:hover, -.caption a:active { +.overviewSummary caption a:link, .memberSummary caption a:link, .typeSummary caption a:link, +.useSummary caption a:link, .constantsSummary caption a:link, .deprecatedSummary caption a:link, +.overviewSummary caption a:hover, .memberSummary caption a:hover, .typeSummary caption a:hover, +.useSummary caption a:hover, .constantsSummary caption a:hover, .deprecatedSummary caption a:hover, +.overviewSummary caption a:active, .memberSummary caption a:active, .typeSummary caption a:active, +.useSummary caption a:active, .constantsSummary caption a:active, .deprecatedSummary caption a:active, +.overviewSummary caption a:visited, .memberSummary caption a:visited, .typeSummary caption a:visited, +.useSummary caption a:visited, .constantsSummary caption a:visited, .deprecatedSummary caption a:visited { color:#FFFFFF; } -.caption span { +.overviewSummary caption span, .memberSummary caption span, .typeSummary caption span, +.useSummary caption span, .constantsSummary caption span, .deprecatedSummary caption span { white-space:nowrap; padding-top:5px; padding-left:12px; @@ -391,150 +404,144 @@ ul.see-list-long li:not(:last-child):after { border: none; height:16px; } -div.table-tabs { - padding:10px 0 0 1px; - margin:0; +.memberSummary caption span.activeTableTab span { + white-space:nowrap; + padding-top:5px; + padding-left:12px; + padding-right:12px; + margin-right:3px; + display:inline-block; + float:left; + background-color:#F8981D; + height:16px; +} +.memberSummary caption span.tableTab span { + white-space:nowrap; + padding-top:5px; + padding-left:12px; + padding-right:12px; + margin-right:3px; + display:inline-block; + float:left; + background-color:#4D7A97; + height:16px; +} +.memberSummary caption span.tableTab, .memberSummary caption span.activeTableTab { + padding-top:0px; + padding-left:0px; + padding-right:0px; + background-image:none; + float:none; + display:inline; +} +.overviewSummary .tabEnd, .memberSummary .tabEnd, .typeSummary .tabEnd, +.useSummary .tabEnd, .constantsSummary .tabEnd, .deprecatedSummary .tabEnd { + display:none; + width:5px; + position:relative; + float:left; + background-color:#F8981D; } -div.table-tabs > button { - border: none; - cursor: pointer; - padding: 5px 12px 7px 12px; - font-weight: bold; - margin-right: 3px; -} -div.table-tabs > button.active-table-tab { - background: #F8981D; - color: #253441; -} -div.table-tabs > button.table-tab { - background: #4D7A97; - color: #FFFFFF; -} -.two-column-summary { - display: grid; - grid-template-columns: minmax(15%, max-content) minmax(15%, 
auto); -} -.three-column-summary { - display: grid; - grid-template-columns: minmax(10%, max-content) minmax(15%, max-content) minmax(15%, auto); -} -.four-column-summary { - display: grid; - grid-template-columns: minmax(10%, max-content) minmax(10%, max-content) minmax(10%, max-content) minmax(10%, auto); -} -@media screen and (max-width: 600px) { - .two-column-summary { - display: grid; - grid-template-columns: 1fr; - } -} -@media screen and (max-width: 800px) { - .three-column-summary { - display: grid; - grid-template-columns: minmax(10%, max-content) minmax(25%, auto); - } - .three-column-summary .col-last { - grid-column-end: span 2; - } -} -@media screen and (max-width: 1000px) { - .four-column-summary { - display: grid; - grid-template-columns: minmax(15%, max-content) minmax(15%, auto); - } -} -.summary-table > div, .details-table > div { +.memberSummary .activeTableTab .tabEnd { + display:none; + width:5px; + margin-right:3px; + position:relative; + float:left; + background-color:#F8981D; +} +.memberSummary .tableTab .tabEnd { + display:none; + width:5px; + margin-right:3px; + position:relative; + background-color:#4D7A97; + float:left; + +} +.overviewSummary td, .memberSummary td, .typeSummary td, +.useSummary td, .constantsSummary td, .deprecatedSummary td { text-align:left; - padding: 8px 3px 3px 7px; + padding:0px 0px 12px 10px; } -.col-first, .col-second, .col-last, .col-constructor-name, .col-summary-item-name { +th.colOne, th.colFirst, th.colLast, .useSummary th, .constantsSummary th, +td.colOne, td.colFirst, td.colLast, .useSummary td, .constantsSummary td{ vertical-align:top; - padding-right:0; + padding-right:0px; padding-top:8px; padding-bottom:3px; } -.table-header { +th.colFirst, th.colLast, th.colOne, .constantsSummary th { background:#dee3e9; - font-weight: bold; + text-align:left; + padding:8px 3px 3px 7px; +} +td.colFirst, th.colFirst { + white-space:nowrap; + font-size:13px; } -.col-first, .col-first { +td.colLast, th.colLast { font-size:13px; } -.col-second, .col-second, .col-last, .col-constructor-name, .col-summary-item-name, .col-last { +td.colOne, th.colOne { font-size:13px; } -.col-first, .col-second, .col-constructor-name { +.overviewSummary td.colFirst, .overviewSummary th.colFirst, +.useSummary td.colFirst, .useSummary th.colFirst, +.overviewSummary td.colOne, .overviewSummary th.colOne, +.memberSummary td.colFirst, .memberSummary th.colFirst, +.memberSummary td.colOne, .memberSummary th.colOne, +.typeSummary td.colFirst{ + width:25%; vertical-align:top; - overflow: auto; -} -.col-last { - white-space:normal; -} -.col-first a:link, .col-first a:visited, -.col-second a:link, .col-second a:visited, -.col-first a:link, .col-first a:visited, -.col-second a:link, .col-second a:visited, -.col-constructor-name a:link, .col-constructor-name a:visited, -.col-summary-item-name a:link, .col-summary-item-name a:visited, -.constant-values-container a:link, .constant-values-container a:visited, -.all-classes-container a:link, .all-classes-container a:visited, -.all-packages-container a:link, .all-packages-container a:visited { +} +td.colOne a:link, td.colOne a:active, td.colOne a:visited, td.colOne a:hover, td.colFirst a:link, td.colFirst a:active, td.colFirst a:visited, td.colFirst a:hover, td.colLast a:link, td.colLast a:active, td.colLast a:visited, td.colLast a:hover, .constantValuesContainer td a:link, .constantValuesContainer td a:active, .constantValuesContainer td a:visited, .constantValuesContainer td a:hover { font-weight:bold; } -.table-sub-heading-color 
{ +.tableSubHeadingColor { background-color:#EEEEFF; } -.even-row-color, .even-row-color .table-header { +.altColor { background-color:#FFFFFF; } -.odd-row-color, .odd-row-color .table-header { +.rowColor { background-color:#EEEEEF; } /* - * Styles for contents. - */ -.deprecated-content { +Content styles +*/ +.description pre { + margin-top:0; +} +.deprecatedContent { margin:0; padding:10px 0; } +.docSummary { + padding:0; +} + +ul.blockList ul.blockList ul.blockList li.blockList h3 { + font-style:normal; +} + div.block { font-size:14px; font-family:'DejaVu Serif', Georgia, "Times New Roman", Times, serif; } -.col-last div { - padding-top:0; + +td.colLast div { + padding-top:0px; } -.col-last a { + + +td.colLast a { padding-bottom:3px; } -.module-signature, -.package-signature, -.type-signature, -.member-signature { - font-family:'DejaVu Sans Mono', monospace; - font-size:14px; - margin:14px 0; - white-space: pre-wrap; -} -.module-signature, -.package-signature, -.type-signature { - margin-top: 0; -} -.member-signature .type-parameters-long, -.member-signature .parameters, -.member-signature .exceptions { - display: inline-block; - vertical-align: top; - white-space: pre; -} -.member-signature .type-parameters { - white-space: normal; -} /* - * Styles for formatting effect. - */ -.source-line-no { +Formatting effect styles +*/ +.sourceLineNo { color:green; padding:0 30px 0 0; } @@ -545,321 +552,23 @@ h1.hidden { } .block { display:block; - margin:0 10px 5px 0; + margin:3px 10px 2px 0px; color:#474747; } -.deprecated-label, .descfrm-type-label, .implementation-label, .member-name-label, .member-name-link, -.module-label-in-package, .module-label-in-type, .override-specify-label, .package-label-in-type, -.package-hierarchy-label, .type-name-label, .type-name-link, .search-tag-link, .preview-label { +.deprecatedLabel, .descfrmTypeLabel, .memberNameLabel, .memberNameLink, +.overrideSpecifyLabel, .packageHierarchyLabel, .paramLabel, .returnLabel, +.seeLabel, .simpleTagLabel, .throwsLabel, .typeNameLabel, .typeNameLink { font-weight:bold; } -.deprecation-comment, .help-footnote, .preview-comment { +.deprecationComment, .emphasizedPhrase, .interfaceName { font-style:italic; } -.deprecation-block { - font-size:14px; - font-family:'DejaVu Serif', Georgia, "Times New Roman", Times, serif; - border-style:solid; - border-width:thin; - border-radius:10px; - padding:10px; - margin-bottom:10px; - margin-right:10px; - display:inline-block; -} -.preview-block { - font-size:14px; - font-family:'DejaVu Serif', Georgia, "Times New Roman", Times, serif; - border-style:solid; - border-width:thin; - border-radius:10px; - padding:10px; - margin-bottom:10px; - margin-right:10px; - display:inline-block; -} -div.block div.deprecation-comment { + +div.block div.block span.deprecationComment, div.block div.block span.emphasizedPhrase, +div.block div.block span.interfaceName { font-style:normal; } -/* - * Styles specific to HTML5 elements. - */ -main, nav, header, footer, section { - display:block; -} -/* - * Styles for javadoc search. 
- */ -.ui-autocomplete-category { - font-weight:bold; - font-size:15px; - padding:7px 0 7px 3px; - background-color:#4D7A97; - color:#FFFFFF; -} -.result-item { - font-size:13px; -} -.ui-autocomplete { - max-height:85%; - max-width:65%; - overflow-y:scroll; - overflow-x:scroll; - white-space:nowrap; - box-shadow: 0 3px 6px rgba(0,0,0,0.16), 0 3px 6px rgba(0,0,0,0.23); -} -ul.ui-autocomplete { - position:fixed; - z-index:999999; -} -ul.ui-autocomplete li { - float:left; - clear:both; - width:100%; -} -.result-highlight { - font-weight:bold; -} -#search-input { - background-image:url('resources/glass.png'); - background-size:13px; - background-repeat:no-repeat; - background-position:2px 3px; - padding-left:20px; - position:relative; - right:-18px; - width:400px; -} -#reset-button { - background-color: rgb(255,255,255); - background-image:url('resources/x.png'); - background-position:center; - background-repeat:no-repeat; - background-size:12px; - border:0 none; - width:16px; - height:16px; - position:relative; - left:-4px; - top:-4px; - font-size:0px; -} -.watermark { - color:#545454; -} -.search-tag-desc-result { - font-style:italic; - font-size:11px; -} -.search-tag-holder-result { - font-style:italic; - font-size:12px; -} -.search-tag-result:target { - background-color:yellow; -} -.module-graph span { - display:none; - position:absolute; -} -.module-graph:hover span { - display:block; - margin: -100px 0 0 100px; - z-index: 1; -} -.inherited-list { - margin: 10px 0 10px 0; -} -section.class-description { - line-height: 1.4; -} -.summary section[class$="-summary"], .details section[class$="-details"], -.class-uses .detail, .serialized-class-details { - padding: 0px 20px 5px 10px; - border: 1px solid #ededed; - background-color: #f8f8f8; -} -.inherited-list, section[class$="-details"] .detail { - padding:0 0 5px 8px; - background-color:#ffffff; - border:none; -} -.vertical-separator { - padding: 0 5px; -} -ul.help-section-list { - margin: 0; -} -ul.help-subtoc > li { - display: inline-block; - padding-right: 5px; - font-size: smaller; -} -ul.help-subtoc > li::before { - content: "\2022" ; - padding-right:2px; -} -span.help-note { - font-style: italic; -} -/* - * Indicator icon for external links. - */ -main a[href*="://"]::after { - content:""; - display:inline-block; - background-image:url('data:image/svg+xml; utf8, \ - \ - \ - '); - background-size:100% 100%; - width:7px; - height:7px; - margin-left:2px; - margin-bottom:4px; -} -main a[href*="://"]:hover::after, -main a[href*="://"]:focus::after { - background-image:url('data:image/svg+xml; utf8, \ - \ - \ - '); -} -/* - * Styles for user-provided tables. - * - * borderless: - * No borders, vertical margins, styled caption. - * This style is provided for use with existing doc comments. - * In general, borderless tables should not be used for layout purposes. - * - * plain: - * Plain borders around table and cells, vertical margins, styled caption. - * Best for small tables or for complex tables for tables with cells that span - * rows and columns, when the "striped" style does not work well. - * - * striped: - * Borders around the table and vertical borders between cells, striped rows, - * vertical margins, styled caption. - * Best for tables that have a header row, and a body containing a series of simple rows. 
- */ - -table.borderless, -table.plain, -table.striped { - margin-top: 10px; - margin-bottom: 10px; -} -table.borderless > caption, -table.plain > caption, -table.striped > caption { - font-weight: bold; - font-size: smaller; -} -table.borderless th, table.borderless td, -table.plain th, table.plain td, -table.striped th, table.striped td { - padding: 2px 5px; -} -table.borderless, -table.borderless > thead > tr > th, table.borderless > tbody > tr > th, table.borderless > tr > th, -table.borderless > thead > tr > td, table.borderless > tbody > tr > td, table.borderless > tr > td { - border: none; -} -table.borderless > thead > tr, table.borderless > tbody > tr, table.borderless > tr { - background-color: transparent; -} -table.plain { - border-collapse: collapse; - border: 1px solid black; -} -table.plain > thead > tr, table.plain > tbody tr, table.plain > tr { - background-color: transparent; -} -table.plain > thead > tr > th, table.plain > tbody > tr > th, table.plain > tr > th, -table.plain > thead > tr > td, table.plain > tbody > tr > td, table.plain > tr > td { - border: 1px solid black; -} -table.striped { - border-collapse: collapse; - border: 1px solid black; -} -table.striped > thead { - background-color: #E3E3E3; -} -table.striped > thead > tr > th, table.striped > thead > tr > td { - border: 1px solid black; -} -table.striped > tbody > tr:nth-child(even) { - background-color: #EEE -} -table.striped > tbody > tr:nth-child(odd) { - background-color: #FFF -} -table.striped > tbody > tr > th, table.striped > tbody > tr > td { - border-left: 1px solid black; - border-right: 1px solid black; -} -table.striped > tbody > tr > th { - font-weight: normal; -} -/** - * Tweak font sizes and paddings for small screens. - */ -@media screen and (max-width: 1050px) { - #search-input { - width: 300px; - } -} -@media screen and (max-width: 800px) { - #search-input { - width: 200px; - } - .top-nav, - .bottom-nav { - font-size: 11px; - padding-top: 6px; - } - .sub-nav { - font-size: 11px; - } - .about-language { - padding-right: 16px; - } - ul.nav-list li, - .sub-nav .nav-list-search { - padding: 6px; - } - ul.sub-nav-list li { - padding-top: 5px; - } - main { - padding: 10px; - } - .summary section[class$="-summary"], .details section[class$="-details"], - .class-uses .detail, .serialized-class-details { - padding: 0 8px 5px 8px; - } - body { - -webkit-text-size-adjust: none; - } -} -@media screen and (max-width: 500px) { - #search-input { - width: 150px; - } - .top-nav, - .bottom-nav { - font-size: 10px; - } - .sub-nav { - font-size: 10px; - } - .about-language { - font-size: 10px; - padding-right: 12px; - } +div.contentContainer ul.blockList li.blockList h2{ + padding-bottom:0px; } diff --git a/docs/jdocs/tag-search-index.js b/docs/jdocs/tag-search-index.js deleted file mode 100644 index f2a440c7f..000000000 --- a/docs/jdocs/tag-search-index.js +++ /dev/null @@ -1 +0,0 @@ -tagSearchIndex = [{"l":"Constant Field Values","h":"","u":"constant-values.html"}];updateSearchResults(); \ No newline at end of file diff --git a/docs/jdocs/type-search-index.js b/docs/jdocs/type-search-index.js deleted file mode 100644 index 1ef935114..000000000 --- a/docs/jdocs/type-search-index.js +++ /dev/null @@ -1 +0,0 @@ -typeSearchIndex = 
[{"p":"neureka.backend.main.operations.functions","l":"Absolute"},{"p":"neureka.devices","l":"AbstractBaseDevice"},{"p":"neureka.common.composition","l":"AbstractComponentOwner"},{"p":"neureka.backend.main.implementations.convolution","l":"AbstractCPUConvolution"},{"p":"neureka.devices","l":"AbstractDevice"},{"p":"neureka.backend.api.template.algorithms","l":"AbstractDeviceAlgorithm"},{"p":"neureka.devices","l":"AbstractDeviceData"},{"p":"neureka.backend.api.template.algorithms","l":"AbstractFunAlgorithm"},{"p":"neureka.backend.api.template.algorithms","l":"AbstractFunDeviceAlgorithm"},{"p":"neureka.backend.api.template.implementations","l":"AbstractImplementationFor"},{"p":"neureka.ndim.config","l":"AbstractNDC"},{"p":"neureka.backend.api.template.operations","l":"AbstractOperation"},{"p":"neureka.devices","l":"Device.Access"},{"p":"neureka.autograd","l":"ADAction"},{"p":"neureka.backend.api.fun","l":"ADActionSupplier"},{"p":"neureka.optimization.implementations","l":"AdaGrad"},{"p":"neureka.optimization.implementations","l":"AdaGradFactory"},{"p":"neureka.optimization.implementations","l":"ADAM"},{"p":"neureka.optimization.implementations","l":"ADAMFactory"},{"p":"neureka.backend.main.operations.operator","l":"Addition"},{"p":"neureka.backend.api.fun","l":"ADSupportPredicate"},{"p":"neureka.autograd","l":"ADTarget"},{"p":"neureka.backend.api","l":"Algorithm"},{"l":"All Classes and Interfaces","u":"allclasses-index.html"},{"p":"neureka.backend.main.algorithms.internal","l":"AndBackward"},{"p":"neureka.backend.main.implementations","l":"CPUImplementation.AndImplementation"},{"p":"neureka.math.args","l":"Arg"},{"p":"neureka.math.args","l":"Args"},{"p":"neureka.backend.main.operations.other","l":"AssignLeft"},{"p":"neureka.framing.fluent","l":"At"},{"p":"neureka.backend.api","l":"AutoDiffMode"},{"p":"neureka","l":"Neureka.Settings.AutoGrad"},{"p":"neureka.math.args","l":"Arg.Axis"},{"p":"neureka.framing.fluent","l":"AxisFrame"},{"p":"neureka.fluent.slicing.states","l":"AxisOrGet"},{"p":"neureka.fluent.slicing.states","l":"AxisOrGetTensor"},{"p":"neureka.fluent.slicing","l":"AxisSliceBuilder"},{"p":"neureka.backend.main.operations.linear.internal.blas","l":"AXPY"},{"p":"neureka.backend.api","l":"BackendContext"},{"p":"neureka.backend.api","l":"BackendExtension"},{"p":"neureka.backend.api.ini","l":"BackendLoader"},{"p":"neureka.backend.api.ini","l":"BackendRegistry"},{"p":"neureka.devices.host.machine","l":"BasicMachine"},{"p":"neureka.backend.main.algorithms","l":"BiElementwise"},{"p":"neureka.backend.main.algorithms","l":"BiScalarBroadcast"},{"p":"neureka.backend.main.algorithms","l":"Broadcast"},{"p":"neureka.framing.fluent","l":"AxisFrame.Builder"},{"p":"neureka.backend.api","l":"Call.Builder"},{"p":"neureka.backend.api","l":"ExecutionCall.Builder"},{"p":"neureka.view","l":"NdaAsString.Builder"},{"p":"neureka.common.utility","l":"Cache"},{"p":"neureka.backend.api","l":"Call"},{"p":"neureka.math","l":"Function.Callable"},{"p":"neureka.backend.main.operations.other","l":"Cat"},{"p":"neureka.backend.main.operations.functions","l":"Cbrt"},{"p":"neureka.devices","l":"ReferenceCounter.ChangeEvent"},{"p":"neureka.devices","l":"ReferenceCounter.ChangeType"},{"p":"neureka.backend.ocl","l":"CLBackend"},{"p":"neureka.backend.main.implementations.elementwise","l":"CLBiElementwise"},{"p":"neureka.backend.main.implementations.elementwise","l":"CLBiElementwiseAddition"},{"p":"neureka.backend.main.implementations.elementwise","l":"CLBiElementwiseDivision"},{"p":"neureka.backend.main.implementations.element
wise","l":"CLBiElementwiseModulo"},{"p":"neureka.backend.main.implementations.elementwise","l":"CLBiElementwiseMultiplication"},{"p":"neureka.backend.main.implementations.elementwise","l":"CLBiElementwisePower"},{"p":"neureka.backend.main.implementations.elementwise","l":"CLBiElementwiseSubtraction"},{"p":"neureka.backend.main.implementations.broadcast","l":"CLBroadcast"},{"p":"neureka.backend.main.implementations.broadcast","l":"CLBroadcastAddition"},{"p":"neureka.backend.main.implementations.broadcast","l":"CLBroadcastDivision"},{"p":"neureka.backend.main.implementations.broadcast","l":"CLBroadcastModulo"},{"p":"neureka.backend.main.implementations.broadcast","l":"CLBroadcastMultiplication"},{"p":"neureka.backend.main.implementations.broadcast","l":"CLBroadcastPower"},{"p":"neureka.backend.main.implementations.broadcast","l":"CLBroadcastSubtraction"},{"p":"neureka.backend.main.implementations.convolution","l":"CLConvolution"},{"p":"neureka.backend.main.implementations.linear","l":"CLDot"},{"p":"neureka.backend.main.implementations.elementwise","l":"CLElementwiseFunction"},{"p":"neureka.devices.opencl.utility","l":"CLFunctionCompiler"},{"p":"neureka.backend.main.operations.linear.internal.opencl","l":"CLGEMM"},{"p":"neureka.backend.main.implementations","l":"CLImplementation"},{"p":"neureka.backend.main.implementations.matmul","l":"CLMatMul"},{"p":"neureka.backend.main.implementations.elementwise","l":"CLRandomization"},{"p":"neureka.backend.main.operations.linear.internal.opencl","l":"CLReduce"},{"p":"neureka.backend.main.implementations.broadcast","l":"CLScalarBroadcast"},{"p":"neureka.backend.main.implementations.broadcast","l":"CLScalarBroadcastAddition"},{"p":"neureka.backend.main.implementations.broadcast","l":"CLScalarBroadcastDivision"},{"p":"neureka.backend.main.implementations.broadcast","l":"CLScalarBroadcastIdentity"},{"p":"neureka.backend.main.implementations.broadcast","l":"CLScalarBroadcastModulo"},{"p":"neureka.backend.main.implementations.broadcast","l":"CLScalarBroadcastMultiplication"},{"p":"neureka.backend.main.implementations.broadcast","l":"CLScalarBroadcastPower"},{"p":"neureka.backend.main.implementations.broadcast","l":"CLScalarBroadcastSubtraction"},{"p":"neureka.backend.main.implementations.scalar","l":"CLScalarFunction"},{"p":"neureka.backend.ocl","l":"CLSettings"},{"p":"neureka.backend.main.operations.linear.internal.opencl","l":"CLSum"},{"p":"neureka.devices.host.machine","l":"CommonMachine"},{"p":"neureka.common.composition","l":"Component"},{"p":"neureka.common.composition","l":"ComponentOwner"},{"p":"neureka.devices.host.machine","l":"ConcreteMachine"},{"p":"neureka.backend.main.operations.linear","l":"Convolution"},{"p":"neureka.backend.main.operations","l":"ConvUtil"},{"p":"neureka.backend.main.operations.linear.internal.blas","l":"COPY"},{"p":"neureka.backend.main.operations.functions","l":"Cosinus"},{"p":"neureka.devices.host","l":"CPU"},{"p":"neureka.backend.cpu","l":"CPUBackend"},{"p":"neureka.backend.main.implementations.elementwise","l":"CPUBiElementWise"},{"p":"neureka.backend.main.implementations.elementwise","l":"CPUBiElementWiseAddition"},{"p":"neureka.backend.main.implementations.elementwise","l":"CPUBiElementWiseDivision"},{"p":"neureka.backend.main.implementations.elementwise","l":"CPUBiElementWiseModulo"},{"p":"neureka.backend.main.implementations.elementwise","l":"CPUBiElementWiseMultiplication"},{"p":"neureka.backend.main.implementations.elementwise","l":"CPUBiElementWisePower"},{"p":"neureka.backend.main.implementations.elementwise","l":"
CPUBiElementWiseSubtraction"},{"p":"neureka.backend.main.implementations.fun.api","l":"CPUBiFun"},{"p":"neureka.backend.main.implementations.broadcast","l":"CPUBroadcast"},{"p":"neureka.backend.main.implementations.broadcast","l":"CPUBroadcastAddition"},{"p":"neureka.backend.main.implementations.broadcast","l":"CPUBroadcastDivision"},{"p":"neureka.backend.main.implementations.broadcast","l":"CPUBroadcastModulo"},{"p":"neureka.backend.main.implementations.broadcast","l":"CPUBroadcastMultiplication"},{"p":"neureka.backend.main.implementations.broadcast","l":"CPUBroadcastPower"},{"p":"neureka.backend.main.implementations.broadcast","l":"CPUBroadcastSubtraction"},{"p":"neureka.backend.main.implementations.broadcast","l":"CPUBroadcastSummation"},{"p":"neureka.backend.main.implementations.convolution","l":"CPUConvolution"},{"p":"neureka.backend.main.implementations.linear","l":"CPUDot"},{"p":"neureka.backend.main.implementations.elementwise","l":"CPUElementwiseAssignFun"},{"p":"neureka.backend.main.implementations.elementwise","l":"CPUElementwiseFunction"},{"p":"neureka.backend.main.implementations.fun.api","l":"CPUFun"},{"p":"neureka.backend.main.implementations","l":"CPUImplementation"},{"p":"neureka.backend.main.implementations.matmul","l":"CPUMatMul"},{"p":"neureka.backend.main.implementations.elementwise","l":"CPURandomization"},{"p":"neureka.backend.main.operations.other.internal","l":"CPUReduce"},{"p":"neureka.backend.main.implementations.broadcast","l":"CPUScalaBroadcastPower"},{"p":"neureka.backend.main.implementations.broadcast","l":"CPUScalarBroadcast"},{"p":"neureka.backend.main.implementations.broadcast","l":"CPUScalarBroadcastAddition"},{"p":"neureka.backend.main.implementations.broadcast","l":"CPUScalarBroadcastDivision"},{"p":"neureka.backend.main.implementations.scalar","l":"CPUScalarBroadcastFunction"},{"p":"neureka.backend.main.implementations.broadcast","l":"CPUScalarBroadcastIdentity"},{"p":"neureka.backend.main.implementations.broadcast","l":"CPUScalarBroadcastModulo"},{"p":"neureka.backend.main.implementations.broadcast","l":"CPUScalarBroadcastMultiplication"},{"p":"neureka.backend.main.implementations.broadcast","l":"CPUScalarBroadcastSubtraction"},{"p":"neureka.backend.main.implementations.scalar","l":"CPUScalarFunction"},{"p":"neureka.backend.main.operations.other.internal","l":"CPUSum"},{"p":"neureka.devices.file","l":"CSVHandle"},{"p":"neureka.ndim.config.types","l":"D1C"},{"p":"neureka.ndim.config.types","l":"D2C"},{"p":"neureka.ndim.config.types","l":"D3C"},{"p":"neureka","l":"Data"},{"p":"neureka.common.utility","l":"DataConverter"},{"p":"neureka.dtype","l":"DataType"},{"p":"neureka","l":"Neureka.Settings.Debug"},{"p":"neureka.backend.api.template.operations","l":"OperationBuilder.Derivation"},{"p":"neureka.math.args","l":"Arg.Derivative"},{"p":"neureka.math.args","l":"Arg.DerivIdx"},{"p":"neureka.devices","l":"Device"},{"p":"neureka.backend.api","l":"DeviceAlgorithm"},{"p":"neureka.devices","l":"DeviceCleaner"},{"p":"neureka.backend.api","l":"Call.DeviceCondition"},{"p":"neureka.devices","l":"DeviceData"},{"p":"neureka.backend.api","l":"BackendExtension.DeviceOption"},{"p":"neureka.devices.opencl.utility","l":"DeviceQuery"},{"p":"neureka.backend.main.operations.other","l":"DimFit"},{"p":"neureka.backend.main.operations.other","l":"DimTrim"},{"p":"neureka.devices.host.concurrent","l":"WorkScheduler.Divider"},{"p":"neureka.backend.main.operations.operator","l":"Division"},{"p":"neureka.backend.main.operations.linear.internal.blas","l":"DOT"},{"p":"neureka.backend.mai
n.operations.linear","l":"DotProduct"},{"p":"neureka.backend.main.algorithms","l":"DotProductAlgorithm"},{"p":"neureka","l":"Neureka.Settings.DType"},{"p":"neureka.backend.main.algorithms","l":"ElementwiseAlgorithm"},{"p":"neureka.backend.main.operations","l":"ElemWiseUtil"},{"p":"neureka.backend.api","l":"Call.Else"},{"p":"neureka.math.args","l":"Arg.Ends"},{"p":"neureka.backend.api","l":"Call.Validator.Estimator"},{"p":"neureka.backend.api.fun","l":"Execution"},{"p":"neureka.backend.api","l":"ExecutionCall"},{"p":"neureka.backend.api.fun","l":"ExecutionPreparation"},{"p":"neureka.backend.main.operations.functions","l":"Exp"},{"p":"neureka.backend.api","l":"Extensions"},{"p":"neureka.dtype.custom","l":"F32"},{"p":"neureka.dtype.custom","l":"F64"},{"p":"neureka.backend.api.template.algorithms","l":"FallbackAlgorithm"},{"p":"neureka.devices.file","l":"FileDevice"},{"p":"neureka.devices.file","l":"FileHandle"},{"p":"neureka.ndim","l":"Filler"},{"p":"neureka.backend.main.internal","l":"FinalExecutor"},{"p":"neureka.common.utility","l":"DataConverter.ForTensor"},{"p":"neureka.fluent.slicing.states","l":"FromOrAt"},{"p":"neureka.fluent.slicing.states","l":"FromOrAtTensor"},{"p":"neureka.backend.api.template.algorithms","l":"FunAlgorithm"},{"p":"neureka.math","l":"Function"},{"p":"neureka.math","l":"FunctionCache"},{"p":"neureka.math.implementations","l":"FunctionConstant"},{"p":"neureka.math.implementations","l":"FunctionInput"},{"p":"neureka.math.implementations","l":"FunctionNode"},{"p":"neureka.math.parsing","l":"FunctionParser"},{"p":"neureka.math","l":"Functions"},{"p":"neureka.math.implementations","l":"FunctionVariable"},{"p":"neureka.backend.api.template.algorithms","l":"FunDeviceAlgorithm"},{"p":"neureka.backend.main.operations.functions","l":"GaSU"},{"p":"neureka.backend.main.operations.functions","l":"GaTU"},{"p":"neureka.backend.main.operations.functions","l":"Gaussian"},{"p":"neureka.backend.main.operations.functions","l":"GaussianFast"},{"p":"neureka.backend.main.operations.functions","l":"GeLU"},{"p":"neureka.backend.main.operations.linear.internal.blas","l":"GEMM"},{"p":"neureka.framing.fluent","l":"Get"},{"p":"neureka.autograd","l":"GraphNode"},{"p":"neureka.devices.host.machine","l":"Hardware"},{"p":"neureka.dtype.custom","l":"I16"},{"p":"neureka.dtype.custom","l":"I32"},{"p":"neureka.dtype.custom","l":"I64"},{"p":"neureka.dtype.custom","l":"I8"},{"p":"neureka.backend.main.operations.linear.internal.blas","l":"IAXPY"},{"p":"neureka.backend.main.operations.functions","l":"Identity"},{"p":"neureka.backend.main.operations.linear.internal.blas","l":"IDOT"},{"p":"neureka.devices.file","l":"IDXHandle"},{"p":"neureka.backend.main.operations.linear.internal.blas","l":"IGEMM"},{"p":"neureka","l":"Tensor.ImageType"},{"p":"neureka.backend.api","l":"ImplementationFor"},{"p":"neureka.backend.api.ini","l":"ImplementationReceiver"},{"p":"neureka.devices","l":"Device.In"},{"p":"neureka.devices.host","l":"CPU.IndexedWorkload"},{"p":"neureka.ndim.config","l":"NDConfiguration.IndexToIndexFunction"},{"p":"neureka.math.args","l":"Arg.Indices"},{"p":"neureka.common.composition","l":"Component.IsBeing"},{"p":"neureka","l":"MutateNda.Item"},{"p":"neureka","l":"Nda.Item"},{"p":"neureka.fluent.building.states","l":"IterByOrIterFromOrAll"},{"p":"neureka.fluent.building.states","l":"IterByOrIterFromOrAllTensor"},{"p":"neureka.autograd","l":"JITProp"},{"p":"neureka.devices.host","l":"CPU.JVMExecutor"},{"p":"neureka.devices.opencl","l":"KernelCache"},{"p":"neureka.devices.opencl","l":"KernelCaller"},{"p":"n
eureka.devices.opencl","l":"KernelCode"},{"p":"neureka.devices.opencl","l":"KernelSource"},{"p":"neureka.math.args","l":"Arg.Layout"},{"p":"neureka.ndim.config","l":"NDConfiguration.Layout"},{"p":"neureka.common.utility","l":"Cache.LazyEntry"},{"p":"neureka.backend.api","l":"LazyRef"},{"p":"neureka.common.utility","l":"ListReader"},{"p":"neureka.backend.api.ini","l":"LoadingContext"},{"p":"neureka.backend.main.operations.functions","l":"Log10"},{"p":"neureka.backend.main.operations.functions","l":"Logarithm"},{"p":"neureka.common.utility","l":"LogUtil"},{"p":"neureka.backend.main.operations.linear","l":"MatMul"},{"p":"neureka.backend.main.algorithms","l":"MatMulAlgorithm"},{"p":"neureka.backend.main.operations.other","l":"Max"},{"p":"neureka.backend.main.memory","l":"MemUtil"},{"p":"neureka.backend.main.memory","l":"MemValidator"},{"p":"neureka.devices.opencl.utility","l":"Messages"},{"p":"neureka.backend.main.operations.other","l":"Min"},{"p":"neureka.math.args","l":"Arg.MinRank"},{"p":"neureka.backend.main.operations.operator","l":"Modulo"},{"p":"neureka.optimization.implementations","l":"Momentum"},{"p":"neureka.optimization.implementations","l":"MomentumFactory"},{"p":"neureka.backend.main.operations.operator","l":"Multiplication"},{"p":"neureka","l":"MutateNda"},{"p":"neureka","l":"MutateTensor"},{"p":"neureka","l":"Nda"},{"p":"neureka.view","l":"NdaAsString"},{"p":"neureka.fluent.building","l":"NdaBuilder"},{"p":"neureka.ndim.config","l":"NDConfiguration"},{"p":"neureka.ndim","l":"NDConstructor"},{"p":"neureka.backend.main.algorithms","l":"NDConvolution"},{"p":"neureka.framing","l":"NDFrame"},{"p":"neureka","l":"Neureka.Settings.NDim"},{"p":"neureka.ndim","l":"NDimensional"},{"p":"neureka.ndim.iterator","l":"NDIterator"},{"p":"neureka.view","l":"NDPrintSettings"},{"p":"neureka.ndim.config","l":"NDTrait"},{"p":"neureka.ndim","l":"NDUtil"},{"p":"neureka","l":"Neureka"},{"p":"neureka.ndim.iterator","l":"NDIterator.NonVirtual"},{"p":"neureka.dtype","l":"NumericType"},{"p":"neureka.math.args","l":"Arg.Offset"},{"p":"neureka.devices.opencl","l":"OpenCLDevice"},{"p":"neureka.devices.opencl","l":"OpenCLPlatform"},{"p":"neureka.backend.api","l":"Operation"},{"p":"neureka.backend.api.template.operations","l":"OperationBuilder"},{"p":"neureka.backend.api","l":"Call.OperationCondition"},{"p":"neureka.optimization","l":"Optimization"},{"p":"neureka.optimization","l":"Optimizer"},{"p":"neureka.optimization","l":"OptimizerFactory"},{"p":"neureka.common.composition","l":"Component.OwnerChangeRequest"},{"p":"neureka.devices.host.concurrent","l":"Parallelism"},{"p":"neureka.backend.main.implementations","l":"ParsedCLImplementation"},{"p":"neureka.math.parsing","l":"ParseUtil"},{"p":"neureka.backend.main.operations.other","l":"Permute"},{"p":"neureka.ndim.config.types.permuted","l":"Permuted1DConfiguration"},{"p":"neureka.ndim.iterator.types.permuted","l":"Permuted2DCIterator"},{"p":"neureka.ndim.config.types.permuted","l":"Permuted2DConfiguration"},{"p":"neureka.ndim.iterator.types.permuted","l":"Permuted3DCIterator"},{"p":"neureka.ndim.config.types.permuted","l":"Permuted3DConfiguration"},{"p":"neureka.ndim.config.types.permuted","l":"PermutedNDConfiguration"},{"p":"neureka.backend.main.operations.operator","l":"Power"},{"p":"neureka.autograd","l":"GraphNode.Print"},{"p":"neureka.backend.main.operations.indexer","l":"Product"},{"p":"neureka.backend.main.operations.functions","l":"Quadratic"},{"p":"neureka.devices.opencl","l":"OpenCLDevice.Query"},{"p":"neureka.backend.main.operations.other","l":"Rando
mization"},{"p":"neureka.devices.host","l":"CPU.RangeWorkload"},{"p":"neureka.backend.api.ini","l":"ReceiveForDevice"},{"p":"neureka.backend.api.ini","l":"ReceiveForOperation"},{"p":"neureka.devices","l":"ReferenceCounter"},{"p":"neureka.framing","l":"Relation"},{"p":"neureka.backend.main.operations.other","l":"ReLayout"},{"p":"neureka.backend.main.operations.functions","l":"ReLU"},{"p":"neureka.framing.fluent","l":"Replace"},{"p":"neureka.backend.main.operations.other","l":"Reshape"},{"p":"neureka.common.utility","l":"ListReader.Result"},{"p":"neureka.backend.api","l":"Result"},{"p":"neureka.optimization.implementations","l":"RMSProp"},{"p":"neureka.optimization.implementations","l":"RMSPropFactory"},{"p":"neureka.backend.api","l":"BackendContext.Runner"},{"p":"neureka.backend.main.implementations.fun","l":"ScalarAbsolute"},{"p":"neureka.backend.main.algorithms","l":"ScalarAlgorithm"},{"p":"neureka.backend.main.algorithms","l":"ScalarBroadcast"},{"p":"neureka.backend.main.implementations.fun","l":"ScalarCbrt"},{"p":"neureka.backend.main.implementations.fun","l":"ScalarCosinus"},{"p":"neureka.backend.main.implementations.fun","l":"ScalarExp"},{"p":"neureka.backend.main.implementations.fun.api","l":"ScalarFun"},{"p":"neureka.backend.main.implementations.fun","l":"ScalarGaSU"},{"p":"neureka.backend.main.implementations.fun","l":"ScalarGaTU"},{"p":"neureka.backend.main.implementations.fun","l":"ScalarGaussian"},{"p":"neureka.backend.main.implementations.fun","l":"ScalarGaussianFast"},{"p":"neureka.backend.main.implementations.fun","l":"ScalarGeLU"},{"p":"neureka.backend.main.implementations.fun","l":"ScalarIdentity"},{"p":"neureka.backend.main.implementations.fun","l":"ScalarLog10"},{"p":"neureka.backend.main.implementations.fun","l":"ScalarLogarithm"},{"p":"neureka.backend.main.implementations.fun","l":"ScalarQuadratic"},{"p":"neureka.backend.main.implementations.fun","l":"ScalarReLU"},{"p":"neureka.backend.main.implementations.fun","l":"ScalarSeLU"},{"p":"neureka.backend.main.implementations.fun","l":"ScalarSigmoid"},{"p":"neureka.backend.main.implementations.fun","l":"ScalarSiLU"},{"p":"neureka.backend.main.implementations.fun","l":"ScalarSinus"},{"p":"neureka.backend.main.implementations.fun","l":"ScalarSoftplus"},{"p":"neureka.backend.main.implementations.fun","l":"ScalarSoftsign"},{"p":"neureka.backend.main.implementations.fun","l":"ScalarSqrt"},{"p":"neureka.backend.main.algorithms","l":"ScalarSumAlgorithm"},{"p":"neureka.backend.main.implementations.fun","l":"ScalarTanh"},{"p":"neureka.backend.main.implementations.fun","l":"ScalarTanhFast"},{"p":"neureka.math.args","l":"Arg.Seed"},{"p":"neureka.backend.main.operations.functions","l":"SeLU"},{"p":"neureka.framing.fluent","l":"AxisFrame.Set"},{"p":"neureka.framing.fluent","l":"Set"},{"p":"neureka","l":"Neureka.Settings"},{"p":"neureka.common.utility","l":"SettingsLoader"},{"p":"neureka.optimization.implementations","l":"SGD"},{"p":"neureka.optimization.implementations","l":"SGDFactory"},{"p":"neureka.math.args","l":"Arg.Shape"},{"p":"neureka","l":"Shape"},{"p":"neureka.backend.main.operations.functions","l":"Sigmoid"},{"p":"neureka.backend.main.operations.functions","l":"SiLU"},{"p":"neureka.ndim.config.types.simple","l":"Simple0DConfiguration"},{"p":"neureka.ndim.iterator.types.simple","l":"Simple1DCIterator"},{"p":"neureka.ndim.config.types.simple","l":"Simple1DConfiguration"},{"p":"neureka.ndim.iterator.types.simple","l":"Simple2DCIterator"},{"p":"neureka.ndim.config.types.simple","l":"Simple2DConfiguration"},{"p":"neureka.ndim.iterat
or.types.simple","l":"Simple3DCIterator"},{"p":"neureka.ndim.config.types.simple","l":"Simple3DConfiguration"},{"p":"neureka.backend.main.implementations","l":"SimpleCLImplementation"},{"p":"neureka.ndim.config.types.simple","l":"SimpleNDConfiguration"},{"p":"neureka.ndim.config.types.views","l":"SimpleReshapeView"},{"p":"neureka.backend.main.operations.functions","l":"Sinus"},{"p":"neureka.backend.main.operations.other","l":"Slice"},{"p":"neureka.fluent.slicing","l":"SliceBuilder"},{"p":"neureka.ndim.config.types.sliced","l":"Sliced0DConfiguration"},{"p":"neureka.ndim.iterator.types.sliced","l":"Sliced1DCIterator"},{"p":"neureka.ndim.config.types.sliced","l":"Sliced1DConfiguration"},{"p":"neureka.ndim.iterator.types.sliced","l":"Sliced2DCIterator"},{"p":"neureka.ndim.config.types.sliced","l":"Sliced2DConfiguration"},{"p":"neureka.ndim.iterator.types.sliced","l":"Sliced3DCIterator"},{"p":"neureka.ndim.config.types.sliced","l":"Sliced3DConfiguration"},{"p":"neureka.ndim.config.types.sliced","l":"SlicedNDConfiguration"},{"p":"neureka.ndim.iterator.types.sliced","l":"SlicedNDIterator"},{"p":"neureka.fluent.slicing","l":"SmartSlicer"},{"p":"neureka.backend.main.operations.functions","l":"Softplus"},{"p":"neureka.backend.main.operations.functions","l":"Softsign"},{"p":"neureka.backend.main.operations.functions","l":"Sqrt"},{"p":"neureka.devices.opencl","l":"StaticKernelSource"},{"p":"neureka.fluent.building.states","l":"Step"},{"p":"neureka.fluent.building.states","l":"StepForTensor"},{"p":"neureka.fluent.slicing.states","l":"StepsOrAxisOrGet"},{"p":"neureka.fluent.slicing.states","l":"StepsOrAxisOrGetTensor"},{"p":"neureka.devices","l":"Storage"},{"p":"neureka.math.args","l":"Arg.Stride"},{"p":"neureka.backend.api.template.operations","l":"OperationBuilder.Stringifier"},{"p":"neureka.backend.main.operations.operator","l":"Subtraction"},{"p":"neureka.backend.api.fun","l":"SuitabilityPredicate"},{"p":"neureka.backend.main.operations.other","l":"Sum"},{"p":"neureka.backend.main.algorithms","l":"SumAlgorithm"},{"p":"neureka.backend.main.operations.indexer","l":"Summation"},{"p":"neureka.backend.main.operations.functions","l":"Tanh"},{"p":"neureka.backend.main.operations.functions","l":"TanhFast"},{"p":"neureka.math.args","l":"Arg.TargetDevice"},{"p":"neureka","l":"Tensor"},{"p":"neureka.backend.api","l":"Call.TensorCompare"},{"p":"neureka.backend.api","l":"Call.TensorCondition"},{"p":"neureka.backend.api","l":"Call.TensorProperty"},{"p":"neureka.backend.api","l":"Call.TensorsCondition"},{"p":"neureka.devices.opencl.utility","l":"Messages.Tips"},{"p":"neureka.fluent.building.states","l":"To"},{"p":"neureka.fluent.slicing.states","l":"To"},{"p":"neureka.fluent.building.states","l":"ToForTensor"},{"p":"neureka.fluent.slicing.states","l":"ToForTensor"},{"p":"neureka.backend.main.operations.linear.internal.opencl","l":"CLReduce.Type"},{"p":"neureka.backend.main.operations.other.internal","l":"CPUReduce.Type"},{"p":"neureka.devices.opencl","l":"OpenCLDevice.Type"},{"p":"neureka.dtype.custom","l":"UI16"},{"p":"neureka.dtype.custom","l":"UI32"},{"p":"neureka.dtype.custom","l":"UI64"},{"p":"neureka.dtype.custom","l":"UI8"},{"p":"neureka.view","l":"NdaAsString.Util"},{"p":"neureka.backend.main.algorithms","l":"Util"},{"p":"neureka.common.utility","l":"DataConverter.Utility"},{"p":"neureka.ndim.config","l":"NDConfiguration.Utility"},{"p":"neureka","l":"Neureka.Utility"},{"p":"neureka.backend.api","l":"Call.Validator"},{"p":"neureka.math.args","l":"Arg.VarIdx"},{"p":"neureka.backend.main.operations.linear.inte
rnal.blas","l":"GEMM.VectorOperationF32"},{"p":"neureka.backend.main.operations.linear.internal.blas","l":"GEMM.VectorOperationF64"},{"p":"neureka.backend.main.operations.linear.internal.blas","l":"IGEMM.VectorOperationI32"},{"p":"neureka.backend.main.operations.linear.internal.blas","l":"IGEMM.VectorOperationI64"},{"p":"neureka","l":"Neureka.Settings.View"},{"p":"neureka.ndim.config.types.views.virtual","l":"VirtualNDConfiguration"},{"p":"neureka.ndim.iterator.types.virtual","l":"VirtualNDIterator"},{"p":"neureka.framing.fluent","l":"With"},{"p":"neureka.backend.main.algorithms.internal","l":"WithForward"},{"p":"neureka.fluent.building.states","l":"WithShapeOrScalarOrVector"},{"p":"neureka.fluent.building.states","l":"WithShapeOrScalarOrVectorOnDevice"},{"p":"neureka.fluent.building.states","l":"WithShapeOrScalarOrVectorTensor"},{"p":"neureka.devices.host.concurrent","l":"WorkScheduler"},{"p":"neureka.devices","l":"Device.Writer"},{"p":"neureka.backend.main.operations.linear","l":"XConvLeft"},{"p":"neureka.backend.main.operations.linear","l":"XConvRight"}];updateSearchResults(); \ No newline at end of file diff --git a/docs/jdocs/type-search-index.zip b/docs/jdocs/type-search-index.zip deleted file mode 100644 index 3d0a71510..000000000 Binary files a/docs/jdocs/type-search-index.zip and /dev/null differ diff --git a/docs/spock/aggregated/aggregated_report.json b/docs/spock/aggregated/aggregated_report.json index b9ac15cd6..e7aba8a7b 100644 --- a/docs/spock/aggregated/aggregated_report.json +++ b/docs/spock/aggregated/aggregated_report.json @@ -1 +1 @@ -{"Example_Spec.Example_Spec":{"executedFeatures":["Call me feature not unit test!","I am readable and also best practice!","Numbers to the power of two with a fancy data table!","Should be able to remove from list","iAmNotSoReadable"],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":5,"totalFeatures":5,"passed":5,"successRate":1.0,"time":38},"title":"An Introduction to writing Spock Specifications","narrative":"Hello and welcome to the example / template specification of this project.\n This is a simple introduction as to how to get started writing Spock specifications.\n \n Spock works on top of Groovy which is in essence a syntactic super-set of Java.\n That means that one can write Java code in Groovy, and 99% of the time it will \n work the exact same way."},"it.Calculus_Stress_Test":{"executedFeatures":["Activation functions work across types, on large prime sized 1D slices and non sliced 1D tensors.","Activation functions work across types.","Dot operation stress test runs error free and produces expected result","Stress test runs error free and produces expected result","The broadcast operation stress test runs error free and produces expected result"],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":5,"totalFeatures":5,"passed":5,"successRate":1.0,"time":756},"title":"","narrative":""},"it.Cross_Device_Sliced_Tensor_System_Test":{"executedFeatures":["Cross device sliced tensor integration test runs without errors.","Slices can be created using the SliceBuilder."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":2,"totalFeatures":2,"passed":2,"successRate":1.0,"time":70},"title":"Cross Device Tensor Slicing","narrative":""},"it.Cross_Device_Spec":{"executedFeatures":["A gradient of ones can be set by calling the backward method on a tensor sitting on any device.","Convolution can model matrix multiplications across devices.","Cross device system 
test runs successfully.","Mapping tensors works for every device (even if they are not used).","Test simple NN implementation with manual backprop"],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":5,"totalFeatures":5,"passed":5,"successRate":1.0,"time":9201},"title":"Cross Device Stress Test Specification","narrative":"This specification is pretty much a system test which covers\n the behavior of the library as a whole across multiple devices!\n No matter which device is being used for a given stress test, the result should be the same..."},"it.Eleven_Lines_NN_System_Spec":{"executedFeatures":["One can write a simple double based neural network in less than 11 lines of java like code using the \"@\" operator!","One can write a simple float based neural network in less than 11 lines of java like code!","One can write a simple neural network in less than 11 lines of code!","One can write a simple neural network with custom back-prop in 11 lines of code!","The pseudo random number generator works as expected for the weights used in the 11 line NN examples!"],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":5,"totalFeatures":5,"passed":5,"successRate":1.0,"time":295},"title":"NN Code Golfing!","narrative":"This system test specification uses the following Numpy\n code as reference implementation for the equivalent in Neureka\n or similar implementations and variations.\n The code below is a simple neural network in only 11 lines of code.\n\n \u00b4\u00b4\u00b4\n X = np.array([ [0,0,1],[0,1,1],[1,0,1],[1,1,1] ])\n y = np.array([[0,1,1,0]]).T\n W1 = 2*np.random.random((3,4)) - 1\n W2 = 2*np.random.random((4,1)) - 1\n for j in xrange(60000):\n l1 = 1/(1+np.exp(-(np.dot(X,W1))))\n l2 = 1/(1+np.exp(-(np.dot(l1,W2))))\n l2_delta = (y - l2)*(l2*(1-l2))\n l1_delta = l2_delta.dot(W2.T) * (l1 * (1-l1))\n W2 += l1.T.dot(l2_delta)\n W1 += X.T.dot(l1_delta)\n \u00b4\u00b4\u00b4"},"st.Benchmark_System_Test":{"executedFeatures":["Tensor can be constructed by passing List instances.","Test benchmark script and simple tensor constructor."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":2,"totalFeatures":2,"passed":2,"successRate":1.0,"time":1281},"title":"","narrative":""},"st.Broad_System_Test":{"executedFeatures":["The long broad integration test runs successfully."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":1,"totalFeatures":1,"passed":1,"successRate":1.0,"time":33},"title":"","narrative":""},"st.NN_Concepts_Spec":{"executedFeatures":["The attention mechanism (found in the commonly known transformer) demonstrated."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":1,"totalFeatures":1,"passed":1,"successRate":1.0,"time":135},"title":"Examining Neural Network Architecture Snippets","narrative":"This specification is intended to showcase some basic building blocks of \n various neural network architectures."},"st.Training_NNs_Spec":{"executedFeatures":["A simple 3 layer neural network converges.","A very simple 1 layer NN converges.","We can run the attention head test model."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":3,"totalFeatures":3,"passed":3,"successRate":1.0,"time":6994},"title":"Training a Neural Network Class","narrative":"When designing larger neural network architectures, what you would usually do is\n to create a class that represents the whole model (which itself might be composed\n of smaller models). 
\n \n This class would then represent something that can be executed and then trained.\n This Specification shows how to instantiate, execute and train various \n pre-defined example neural network models."},"ut.autograd.AD_And_Computation_Graph_Spec":{"executedFeatures":["Payloads and derivatives are null after garbage collection.","Reshaping produces expected computation graph and also works with reverse mode AD."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":2,"totalFeatures":2,"passed":2,"successRate":1.0,"time":621},"title":"","narrative":""},"ut.autograd.Autograd_Explained":{"executedFeatures":["Simple automatic differentiation and propagation."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":1,"totalFeatures":1,"passed":1,"successRate":1.0,"time":7},"title":"Autograd - Automatic Differentiation","narrative":"Central to all neural networks in Neureka is the autograd package. \n The autograd package provides automatic differentiation for all default operations on Tensors. \n Neureka is a define-by-run library, which means that your backpropagation is defined by how \n your code is run, and that every single iteration can be different. \n \n The class neureka.Tensor is the central class of the main package. \n If you set its attribute 'rqsGradient' to True, Neureka starts to track all operations on it. \n When you finish the forward pass of your network \n you can call .backward() and have all the gradients computed \n and distributed to the tensors requiring them automatically. \n \n The gradient for a tensor will be accumulated into a child tensor (component) which \n can be accessed via the '.getGradient()' method. \n \n To stop a tensor from tracking history, you can call '.detach()' to detach it from the \n computation history, and to prevent future computation from being tracked."},"ut.autograd.Autograd_Flags_Explained":{"executedFeatures":["Advanced backpropagation on all AD-Modes ","We can create a shallow copy of a tensor detached from the computation graph."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":9,"totalFeatures":2,"passed":9,"successRate":1.0,"time":48},"title":"","narrative":""},"ut.autograd.Autograd_NN_Spec":{"executedFeatures":["Autograd work for simple matrix multiplications.","Autograd works for 2 matrix multiplications in a row.","Autograd works in a simple convolutional dot product and float based feed forward neural network.","Autograd works in a simple convolutional dot product based feed forward neural network.","Autograd works in a simple mat-mul based feed forward neural network."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":5,"totalFeatures":5,"passed":5,"successRate":1.0,"time":79},"title":"Simple Neural Network autograd integration test","narrative":"The integration test below has been implemented by using\n the following code and the result it produces as reference : \n https://medium.com/dair-ai/a-simple-neural-network-from-scratch-with-pytorch-and-google-colab-c7f3830618e0 \n \n The following seed has been used to assure reproducibility :\n 'torch.manual_seed(503672689411)'"},"ut.autograd.Autograd_Tensor_Spec":{"executedFeatures":["A tensor used as derivative within a computation graph will throw exception when trying to deleting it.","Second-Test \"x-mul\" autograd behaviour. (Not on device)","Test basic autograd behaviour. 
(Not on device)"],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":3,"totalFeatures":3,"passed":3,"successRate":1.0,"time":20},"title":"","narrative":""},"ut.autograd.JITProp_Autograd_Tensor_Spec":{"executedFeatures":["Gradient auto-apply kicks in when used AD uses JIT prop","Test JIT propagation variant one.","Test JIT propagation variant two.","Test autograd without JIT and auto apply.","Test in-differential and JIT with auto apply","Test no JIT prop when forward AD","Test no preemptive gradient apply when not requested and auto apply and JIT_prop","Test pending error optimization"],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":8,"totalFeatures":8,"passed":8,"successRate":1.0,"time":132},"title":"","narrative":""},"ut.autograd.internal.GraphNode_Instantiation_Exception_Unit_Tests":{"executedFeatures":["GraphNode instantiation throws exception because tensors of ExecutionCall do not return GraphNode instances.","GraphNode throws an exception when trying to execute an inline operation on inputs with active autograd."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":2,"totalFeatures":2,"passed":2,"successRate":1.0,"time":139},"title":"","narrative":""},"ut.autograd.internal.GraphNode_Tensor_Exception_Unit_Tests":{"executedFeatures":["A tensor cannot be deleted if it is part of a graph and the tensor is used as derivative."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":1,"totalFeatures":1,"passed":1,"successRate":1.0,"time":4},"title":"","narrative":""},"ut.backend.Backend_Extension_Spec":{"executedFeatures":["Mock operation interacts with FunctionNode (AbstractFunction) instance as expected."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":1,"totalFeatures":1,"passed":1,"successRate":1.0,"time":53},"title":"","narrative":""},"ut.backend.Backend_MatMul_Extension_Spec":{"executedFeatures":["GEMM matrix multiplication reference implementation can be set as custom OperationType and works as expected.","Test context mock for opencl reference implementations.","Tile parsing for kernel parameter calculation yields expected tile dimensions."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":3,"totalFeatures":3,"passed":3,"successRate":1.0,"time":54},"title":"","narrative":""},"ut.backend.Matrix_Multiplication_Spec":{"executedFeatures":["The \"matMul\" method allows us to perform matrix multiplication.","The simple CPU matrix multiplication implementation works as expected."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":2,"totalFeatures":2,"passed":2,"successRate":1.0,"time":27},"title":"Matrix Multiplication","narrative":"The tensor API exposes a useful method for Matrix Multiplication.\n This specification not only demonstrates how to use this method\n but also shows how matrix multiplication work \n for tensors with both row and column major layouts.\n (typically, column major is faster)"},"ut.backend.core.Backend_Algorithm_AD_Spec":{"executedFeatures":["Activation implementations behave as expected.","Broadcast implementations have expected properties.","Convolution implementations behave as expected.","Operator implementations behave as 
expected."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":4,"totalFeatures":4,"passed":4,"successRate":1.0,"time":52},"title":"","narrative":""},"ut.backend.core.Backend_Algorithm_Implementation_Spec":{"executedFeatures":["Activation implementations have expected Executor instances.","HostExecutors of Operator implementations behave as expected.","Operator implementations have expected Executor instances."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":3,"totalFeatures":3,"passed":3,"successRate":1.0,"time":65},"title":"","narrative":""},"ut.backend.core.Backend_Functional_Algorithm_Spec":{"executedFeatures":["A functional algorithm cannot be used if it was not built properly!","A functional algorithm does not accept null as an answer!","A functional algorithm warns us when modified after it has been built!"],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":3,"totalFeatures":3,"passed":3,"successRate":1.0,"time":11},"title":"","narrative":""},"ut.backend.core.Matrix_Multiplication_Spec":{"executedFeatures":["The CPU matrix multiplication implementation works as expected.","The internal matrix multiplication test script runs!"],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":2,"totalFeatures":2,"passed":2,"successRate":1.0,"time":1154},"title":"Internal CPU based Matrix Multiplication","narrative":"This specification covers library internal matrix multiplication logic,\n specifically the CPU implementation.\n Do not depend on the API used in this specification as it is subject to change!"},"ut.backend.core.OpenCL_Backend_Spec":{"executedFeatures":["The OpenCL backend context can load implementations."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":1,"totalFeatures":1,"passed":1,"successRate":1.0,"time":7},"title":"","narrative":""},"ut.backend.core.Randomization_Spec":{"executedFeatures":["Randomization is in essence the same algorithm as JDKs \"Random\".","The Randomization class can fill various types of arrays with pseudo random numbers.","We can make slices of tensors random."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":3,"totalFeatures":3,"passed":3,"successRate":1.0,"time":22},"title":"","narrative":""},"ut.device.CPU_Spec":{"executedFeatures":["CPU knows the current number of available processor cores!","The CPU device will keep track of the amount of tensors it stores.","The CPU exposes a non null API for executing workloads in parallel.","Thread pool executes given workload in parallel"],"ignoredFeatures":[],"stats":{"failures":1,"errors":0,"skipped":0,"totalRuns":4,"totalFeatures":4,"passed":3,"successRate":0.75,"time":5449},"title":"The CPU device, an API for CPU based execution","narrative":"The CPU class, one of many implementations of the Device interface, \n is simply supposed to be an API for dispatching threaded workloads onto the CPU.\n Contrary to other types of device, the CPU will host tensor data by default, simply\n because the tensors will be stored in RAM if no device was specified."},"ut.device.Cross_Device_IO_Spec":{"executedFeatures":["We can use the access device API to read from a tensor.","We can use the access device API to write to a tensor"],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":2,"totalFeatures":2,"passed":2,"successRate":1.0,"time":16},"title":"Devices manage the states of the tensors they store!","narrative":"Tensors should not 
manage their states\n themselves, simply because the type and location\n of the data is dependent on the device onto which they are stored.\n This specification tests of various device implementations\n enable reading to or writing from the tensors they store."},"ut.device.Cross_Device_Type_Spec":{"executedFeatures":["A device will keep track of the amount of tensors and data objects it stores.","Devices expose an API for accessing (reading and writing) the data of a tensor.","Devices store slices which can also be restored just like any other tensor.","Devices store tensors which can also be restored.","Execution calls containing null arguments will cause an exception to be thrown in device instances.","In total there are 3 different types of methods for finding device instances.","Passing a numeric array to a tensor should modify its contents!","Virtual tensors stay virtual when outsourced.","We can find Device implementations or null by passing search keys to the \"get\" method.","We can query the backend for devices by specifying both the requested type and a key word."],"ignoredFeatures":["Devices cannot store slices whose parents are not already stored."],"stats":{"failures":1,"errors":0,"skipped":1,"totalRuns":10,"totalFeatures":11,"passed":9,"successRate":0.9,"time":10184},"title":"Finding Device Types","narrative":"Neureka introduces a the concept of a `Device` which is an interface\n that represents a computational device used for executing tensor / nd-array operations on them.\n The `Device` interface is implemented by various classes which represent\n different types of accelerator hardware such as `CPUs`, `GPUs`, `TPUs`, `FPGAs`, etc.\n These various `Device` types can not be instantiated directly because they model \n the concrete and finite hardware that is available on any given system Neureka is running on.\n This means that they are usually instantiated lazily upon access request or \n upfront by the library backend (usually a backend extension built fo a specific device).\n In order to find these instances embedded in the library backend the `Device` interface\n exposes various static methods which can be used to find a device instance by name or type."},"ut.device.FileDevice_Spec":{"executedFeatures":["A file device stores tensors in idx files by default.","A file device stores tensors in various file formats.","A tensor loaded from a file device can be loaded again.","The file device can load known files in a directory."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":4,"totalFeatures":4,"passed":4,"successRate":1.0,"time":54},"title":"FileDevice, Storing Tensors in Files","narrative":"The `FileDevice` class, one of many implementations of the `Device` interface, \n represents a file directory which can store and load tensors as files (`idx`, `jpg`, `png`...)."},"ut.device.OpenCLDevice_Exception_Spec":{"executedFeatures":["Ad hoc compilation produces expected exceptions when duplication is found.","Ad hoc compilation produces expected exceptions.","An OpenCLDevice will throw an exception when trying to add a tensor whose \"data parent\" is not outsourced.","Trying to restore a tensor which is not on a device raises exception."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":4,"totalFeatures":4,"passed":4,"successRate":1.0,"time":77},"title":"OpenCLDevice Exception Handling","narrative":"The OpenCLDevice class, one of many implementations of the Device interface, \n represents physical OpenCL devices.\n This 
specification defines how instances of this class deal with exceptional information."},"ut.device.OpenCLDevice_Spec":{"executedFeatures":["Ad hoc compilation produces executable kernel.","Ad hoc compilation works for WIP general purpose matrix multiplication.","Ad hoc compilation works for custom column major based tiled matrix multiplication.","Ad hoc compilation works for custom simple row major based matrix multiplication.","Ad hoc matrix multiplication works for multiple of 16 matrices.","An OpenCLDevice loads tensors in a provided lambda temporarily.","We can get the items of an outsourced tensor as a primitive array.","We can take a look at the underlying data array of an outsourced tensor through the unsafe API."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":8,"totalFeatures":8,"passed":8,"successRate":1.0,"time":2269},"title":"The OpenCLDevice Specification","narrative":"Tensors need devices for execution!\n By default tensors use the `CPU` device, but sometimes we want to\n use something more suitable for large amounts of data and a high degree of parallelization.\n This is were the `OpenCLDevice` comes into play!\n It is a `Device` implementation built on top of the JOCL library, a thin OpenCL API.\n We expect the `OpenCLDevice` to store tensors as well as being able to read and write\n data from and to stored tensors.\n Also, an `OpenCLDevice` should allows us to compile OpenCL kernel code on the fly..."},"ut.device.OpenCL_Spec":{"executedFeatures":["A given OpenCL context can be disposed!","An OpenCLDevice will throw an exception when trying to add a tensor whose \"data parent\" is not outsourced.","First found OpenCLDevice will have realistic numeric properties.","First found OpenCLDevice will have realistic properties inside summary query.","First found OpenCLDevice will have realistic text properties."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":5,"totalFeatures":5,"passed":5,"successRate":1.0,"time":2988},"title":"Working with OpenCL","narrative":"Neureka models the OpenCL API through various types of classes.\n The most fundamental of these is the `OpenCLDevice` class which\n represents a single device with OpenCL support.\n Besides that, there is also the `OpenCLContext` class which\n represents a OpenCL contexts, platforms and multiple devices on said platforms..."},"ut.device.internal.CLFunctionCompiler_Spec":{"executedFeatures":["The CLFunctionCompiler produces an operation which properly integrates to the backend.","The CLFunctionCompiler produces the expected \"ad hoc\" kernel.","The OpenCLDevice produces a working optimized Function for doubles.","The OpenCLDevice produces a working optimized Function for floats."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":4,"totalFeatures":4,"passed":4,"successRate":1.0,"time":173},"title":"Turning functions into kernels.","narrative":"Neureka parses mathematical expressions into an AST representation\n hidden behind the Function interface...\n This feature does not exist without reason, we can use\n this abstract syntax tree to compile to OpenCL kernels\n for optimal execution speed!"},"ut.device.internal.CPU_Kernel_Spec":{"executedFeatures":["The Reduce implementation for the CPU has realistic behaviour","The Sum implementation for the CPU has realistic 
behaviour"],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":2,"totalFeatures":2,"passed":2,"successRate":1.0,"time":28},"title":"","narrative":""},"ut.device.internal.OpenCL_Data_Spec":{"executedFeatures":["The \"Data\" class can represent various OpenCL data types.","The OpenCLDevice specific Data class represents JVM data for OpenCL."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":2,"totalFeatures":2,"passed":2,"successRate":1.0,"time":20},"title":"","narrative":""},"ut.device.internal.OpenCL_Kernel_Unit_Spec":{"executedFeatures":["The CLDot implementation for the OpenCLDevice has realistic behaviour","The GEMM implementation for the OpenCLDevice has realistic behaviour","The Reduce implementation for the OpenCLDevice has realistic behaviour","The Sum implementation for the OpenCLDevice has realistic behaviour","The Sum implementation for the OpenCLDevice has realistic behaviour for when the number of elements is a prime."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":5,"totalFeatures":5,"passed":5,"successRate":1.0,"time":13},"title":"","narrative":""},"ut.dtype.DataType_Spec":{"executedFeatures":["DataType multi-ton instances behave as expected."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":1,"totalFeatures":1,"passed":1,"successRate":1.0,"time":5},"title":"","narrative":""},"ut.dtype.NumericType_Spec":{"executedFeatures":["Conversion goes both ways and produces expected numeric values.","NumericType conversion to holder types yields expected results.","NumericType implementations behave as expected.","NumericType implementations return their expected properties."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":4,"totalFeatures":4,"passed":4,"successRate":1.0,"time":23},"title":"The NumericType and its implementations model their respective numeric data types.","narrative":"This specification covers the behavior of the NumericType interface\n which is responsible for modelling numeric data types which may or may not be native to the JVM. \n These implementations however do not model them in the traditional OO style\n but merely expose useful utility method for converting and representing \n these numeric data types using JVM types."},"ut.framing.Tensor_Framing_Spec":{"executedFeatures":["A matrix (rank 2 tensor) can be labeled and their labels can be used to extract slices / subsets.","A tensor can be labeled partially.","Rank 3 tensors can be labeled and their labels can be used to extract slices / subsets of tensors.","We can add labels to tensors through lists or maps passed to the \"label()\" method."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":4,"totalFeatures":4,"passed":4,"successRate":1.0,"time":16},"title":"Naming Tensors and their Dimensions.","narrative":"A powerful concept in the data science as well as machine learning\n world is something usually referred to as \"Data Frames\".\n These are highly flexible 2D data structures\n used to load and store CSV, CRV, etc... 
files for \n data exploration and further processing.\n Data frames are so powerful because\n their indices are labeled and therefore human readable.\n Neureka's tensors are general purpose data containers\n which may also stored data in 2 dimensions whose\n indices may also be something other than integers."},"ut.introductions.Tensor_NDArray_Spec":{"executedFeatures":["Tensor is a subtype of NdArray.","We can use tensors for numeric calculations (but not nd-arrays)."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":2,"totalFeatures":2,"passed":2,"successRate":1.0,"time":4},"title":"Tensors or Nd-arrays","narrative":"*What is the difference?*\n\nIn the world of machine learning we use something called a **'tensor'** to represent data.\nThey might be called **'nd-arrays'** in some other frameworks,\nbut although they are very similar, \nthere are also some important distinctions to be made between these two concepts.\nBoth are at their core merely multidimensional arrays, however,\nthey are different in their typical usage and API.\nnd-arrays are merely used to represent any type of data as a \ncollection of elements in a multidimensional grid, \ntensors on the other hand have additional requirements.\nThey are a type of nd-array which stores numeric data \nas well as expose various mathematical operations for said data.\nIn that sense it is actually merely a more complex kind of number.\nThis concept actually comes from the field of physics, \nwhere it is used to represent a physical quantity.\n\nNeureka models both concepts through the `Tensor` and the `Nda` interfaces.\n`Nda` is an abbreviation of `NdArray`, and `Tensor` is an abbreviation of `Tensor`.\nThe `Tensor` type is a subtype of the `Nda` type, exposing additional methods\nlike for example `plus`, `minus`, `times` and `divide`.\nBoth can be instantiated through static factory methods (and a fluent builder API)."},"ut.math.BackendContext_Spec":{"executedFeatures":["BackendContext instances can be created by cloning from Singleton instance.","BackendContext instances return Runner instances for easy visiting with return values.","BackendContext instances return Runner instances for easy visiting."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":3,"totalFeatures":3,"passed":3,"successRate":1.0,"time":7},"title":"The BackendContext is a cloneable context which can run Tasks.","narrative":"This specification defines the expected behaviour of the backend context\n which should expose a convenient API to work with.\n This API should allow for tasks to be running on a given context\n which is important for testing and modularity not only\n during library startup but also throughout the runtime."},"ut.math.ConCat_Spec":{"executedFeatures":["We can concatenate 2 float tensors alongside a specified axis!","We can concatenate 2 string tensors alongside a specified axis!","We can concatenate 2 tensors alongside a specified axis!","We can concatenate and then back-propagate 2 simple float tensors alongside a specified axis!","We can concatenate and then back-propagate 3 simple float tensors alongside a specified axis!"],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":5,"totalFeatures":5,"passed":5,"successRate":1.0,"time":17},"title":"Merging Tensors","narrative":"Tensors can not only be sliced, but also merged.\n This is most easily achieved through the concatenation operation, \n which stacks 2 tensors alongside a specified axis.\n This specification 
not only covers how you can concatenate tensors,\n but also how this works alongside autograd and non-numeric tensors."},"ut.math.Function_Exception_Spec":{"executedFeatures":["Function throws exception when arity does not match input number.","Function throws exception when not enough inputs provided."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":2,"totalFeatures":2,"passed":2,"successRate":1.0,"time":2},"title":"","narrative":""},"ut.math.Function_Parsing_Spec":{"executedFeatures":["Functions can derive themselves according to the provided index of the input which ought to be derived.","Parsed equations throw expected error messages.","Test parsed equations when building Function instances."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":3,"totalFeatures":3,"passed":3,"successRate":1.0,"time":17},"title":"Parsing Expressions into Functions","narrative":"Neureka uses the 'Function' interface as a representation of a\n nested structure of operations.\n This means that a 'Function' is simply an abstract syntax trees made up of other 'Function' implementations\n which are assembled together by a parser receiving a string expression.\n In this specification we ensure that function expressions will be properly parsed into\n 'Function' implementations."},"ut.math.Function_Scalar_Spec":{"executedFeatures":["Function \"(I[0]+1/I[0])**-I[0]\" instance returns expected scalar result.","Function \"(cos(I[0]*5)/5+I[0])*(1+sin(I[0])/2)\" instance returns expected scalars.","Function \"1/I[0]\" instance returns expected scalar results.","Function \"I[0]+1/I[0]\" instance returns expected scalar results.","Test scalar results of Function \"sumjs((cos(I[j]*5)/5+I[j])*(1+sin(I[j])/2))\" instance.","Test scalar results of various Function instances."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":6,"totalFeatures":6,"passed":6,"successRate":1.0,"time":22},"title":"Functions for Scalars","narrative":"The Function API and it's implementations \n receive and process arrays of scalars as arguments.\n Functions don't have to be used alongside tensors / nd-arrays,\n they can also compute derivatives based on scalar values."},"ut.math.Function_Spec":{"executedFeatures":["Function implementations ensure that internally created tensors are flagged as \"intermediate\" initially!","Function implementations ensure that outputs which are input members are not flagged as \"intermediate\"!","Function implementations will ensure the \"call\" and \"invoke\" does not return tensors flagged as \"intermediate\".","The library context exposes a set of useful functions."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":4,"totalFeatures":4,"passed":4,"successRate":1.0,"time":23},"title":"Testing Default Methods on Functions","narrative":"This specification tests the default methods on functions\n through a simple dummy implementation of the Function interface."},"ut.math.Tensor_Function_Spec":{"executedFeatures":["Executed tensors are intermediate tensors.","Reshaping on 3D tensors works by instantiate a Function instance built from a String.","Tensor results of various Function instances return expected results.","The \"DimTrim\" operation works forward as well as backward!","The optimization function for the SGD algorithm produces the expected result","The softmax can be calculated alongside multiple axes.","The softmax can be calculated for a particular axis.","The softmax function can be applied to 
tensors with more than one dimension.","The tensor API has built-in methods for applying functions.","We can collect a stream into a tensor."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":10,"totalFeatures":10,"passed":10,"successRate":1.0,"time":78},"title":"Applying Functions to Tensors","narrative":"A tensor would be nothing without being able to apply operations on them.\n However, calling operations manually in order to process your\n tensors can be a verbose and error prone task.\n This is where functions come into play.\n Neureka's functions are composed of operations forming an abstract syntax tree.\n Passing tensors to a function will route them trough this tree and apply\n all of the operations on the tensors for you."},"ut.miscellaneous.Weired_NN_Spec":{"executedFeatures":["Dot based feed forward and activation produces expected result."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":1,"totalFeatures":1,"passed":1,"successRate":1.0,"time":21},"title":"","narrative":"This specification is meant less as feature documentation and more as a\n chaos test for weired neural network architectures\n an unusual usages of the Neureka library."},"ut.ndas.Nda_Assign_Spec":{"executedFeatures":["Assignment can be easily achieved through subscription operators.","We can assign one slice into another one.","We can use the \"mut\" API to assign the contents of one nd-array into another one."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":3,"totalFeatures":3,"passed":3,"successRate":1.0,"time":3},"title":"Nda Inline Assignment","narrative":"In this specification we cover the behaviour of nda's with respect to the assignment operation\n as well as the assignment of individual Nda items."},"ut.ndas.Nda_Framing_Spec":{"executedFeatures":["An Nda can be labeled.","Concatenating 2 labeled nd-arrays will produce a nd-array which is also labeled.","The slice of a labeled vector is labeled too.","We can label the columns and rows of a rank 3 nd-array.","We can label the columns of a rank 2 nd-array.","We can use labels as selectors for slicing."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":6,"totalFeatures":6,"passed":6,"successRate":1.0,"time":9},"title":"Nda framing","narrative":"Immutability is a core concept of the Neureka library.\n This means that the Nda API does not expose mutability directly.\n Instead, the API exposes methods that return new instances of Nda\n that are derived from the original instance.\n \n This is also true for labeling operations, \n meaning that the Nda API does not directly expose methods that mutate labels of an Nda\n but instead provides methods that return new instances of Nda\n with different labels.\n \n Don't be concerned about the performance implications of this,\n because in the vast majority of cases the new instance will be backed by the same data array\n as the original instance!"},"ut.ndas.Nda_Inplace_Framing_Spec":{"executedFeatures":["Concatenating 2 labeled nd-arrays will produce a nd-array which is also labeled.","The slice of a labeled vector is labeled too.","We can concatenate more than 2 nd-arrays.","We can label the columns and rows of a rank 3 nd-array.","We can label the columns of a rank 2 nd-array.","We can use labels as selectors for slicing."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":6,"totalFeatures":6,"passed":6,"successRate":1.0,"time":5},"title":"NDA 
Framing","narrative":"Framing an nd-array is all about naming its axes and then using those names to\n access, read or write its values in a more convenient and human readable way."},"ut.ndas.Nda_Instantiation_Spec":{"executedFeatures":["A vector can be created from an array of values through the \"of\" method.","Common types of nd-arrays are best instantiated using type specific convenience methods.","ND-arrays can be created fluently."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":3,"totalFeatures":3,"passed":3,"successRate":1.0,"time":4},"title":"ND-Array Instantiation","narrative":"In this specification we cover how ND-arrays can be instantiated."},"ut.ndas.Nda_Items_Spec":{"executedFeatures":["An item can be converted to an Optional object.","Other than the \"orElse(T)\" method of the Optional class, the same method of an Item will throw an exception if the provided value is null.","The \"get\" method of an Item object will throw an exception if the item is missing.","We can check if items of a tensor is present or not.","We can get the value of an item.","We can use the \"orElse(T)\" method to avoid null values."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":6,"totalFeatures":6,"passed":6,"successRate":1.0,"time":4},"title":"The Nds Items API","narrative":"Nd-arrays are collections of items similar to other\n collection types in Java. \n One useful way to access the items of an nd-array is\n to use the items API.\n \n Using the `at` methods we can access an `Item` object\n which is a wrapper around the item's value and its\n index in the nd-array.\n \n The `Item` object is a simple data class which\n is very similar to the `Optional` class, meaning\n that it can either be empty or contain a value."},"ut.ndas.Nda_Mutation_Spec":{"executedFeatures":["A ND-Array can be mutated simply using the \"set\" method.","A ND-Array can be mutated using the \"at(..).set(..)\" methods.","A simple vector ND-Array can be mutated using the \"at(..).set(..)\" methods.","A simple vector ND-Array can be mutated using the \"setItemAt\" method.","We can use the subscription operator to mutate a simple vector ND-Array.","We can use the subscription operator to mutate an ND-Array."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":6,"totalFeatures":6,"passed":6,"successRate":1.0,"time":3},"title":"Mutating ND-Arrays","narrative":"ND-Arrays should be considered immutable, so we should prefer creating new \n ND-Arrays from existing ones using wither methods.\n However this is not always a good idea as it can be expensive to create new\n ND-Arrays, especially if the ND-Array is very large.\n The ability to mutate ND-Arrays is therefore provided, but only\n accessible via the mutation API exposed by the `getMut()` method."},"ut.ndas.Nda_Reshape_Spec":{"executedFeatures":["We can create a new Nda instance with a different shape."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":1,"totalFeatures":1,"passed":1,"successRate":1.0,"time":1},"title":"Nda Reshaping","narrative":"Immutability is a core concept of the Neureka library.\n This means that the Nda API does not expose mutability directly.\n Instead, the API exposes methods that return new instances of Nda\n that are derived from the original instance.\n \n This is also true for reshaping operations, \n meaning that the Nda API does not expose methods that mutate the shape of an Nda\n but instead provides methods that return new instances 
of Nda\n with a different shape.\n \n Don't be concerned about the performance implications of this,\n because in the vast majority of cases the new instance will be backed by the same data array\n as the original instance!"},"ut.ndim.NDConfiguration_Spec":{"executedFeatures":["Various NDConfigurations behave exactly like their general purpose implementation."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":1,"totalFeatures":1,"passed":1,"successRate":1.0,"time":28},"title":"Making Arrays N-Dimensional","narrative":"Under the hood Neureka implements powerful indexing \n abstractions through the `NDConfiguration` interface and its various implementations.\n This allows for the creation of tensors/nd-arrays with arbitrary dimensions, \n the ability to slice them into smaller tensors/nd-arrays with the same underlying data,\n and finally the ability to permute their axes (like transposing them for example).\n \n This specification however only focuses on the behaviour of the `NDConfiguration` interface\n which translates various types of indices."},"ut.ndim.Nda_Permute_Spec":{"executedFeatures":["We can use the \"permute\" method to rearrange the dimensions of an nd-array.","We can use the \"transpose\" method to transpose swap 2 dimensions."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":2,"totalFeatures":2,"passed":2,"successRate":1.0,"time":2},"title":"Reshaping Nd-Arrays","narrative":"Permuting an N-dimensional array means rearranging the dimensions/axes of the N-dimensional array.\n It returns a new nd-array with the same data as the original nd-array, \n but with the specified dimensions rearranged. \n It is very useful for example when you want to\n change the order of dimensions, for example, if you have a nd-array with dimensions (batch_size, channels, height, width), \n you can use permute() to rearrange the dimensions to (batch_size, height, width, channels).\n Another useful application of permute() is transposing a matrix.\n For example, if you have a matrix with dimensions (rows, columns), \n you can use permute() to rearrange the dimensions to (columns, rows).\n \n Permuting is a very cheap operation because it does not copy any data but merely\n creates a new view on the same data with a different access pattern."},"ut.ndim.Shape_Spec":{"executedFeatures":["A shape can be created from a list of integers.","A shape can be created from a stream of ints.","A shape can be created from an iterable.","A shape can be mapped to a new shape.","A shape can be sliced.","Use the \"any\" or \"every\" method to check if a predicate holds for any or every value of the shape.","You can use the \"count(Predicate)\" method to count the number of values that satisfy a predicate."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":7,"totalFeatures":7,"passed":7,"successRate":1.0,"time":4},"title":"The Shape Tuple","narrative":"The `Shape` of an nd-array/tensor is in essence merely an immutable tuple of integers\n which define the size of each dimension of the tensor.\n So if you think of an nd-array as a grid of numbers, then the shape of the\n tensor is the size of the grid in each dimension.\n \n This specifications shows you how to create a shape and how to use it."},"ut.ndim.Tensor_NDConfiguration_Spec":{"executedFeatures":["NDConfiguration instances of tensors have expected state and behaviour.","NDConfiguration instances of tensors have expected 
state."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":2,"totalFeatures":2,"passed":2,"successRate":1.0,"time":3},"title":"What it means to be N-Dimensional","narrative":"This specification covers how implementations\n of the `NDConfiguration` interface manage to define\n what it means to be a n-dimensional tensor/nd-array."},"ut.ndim.Tensor_Permute_Spec":{"executedFeatures":["We can use the \"permute\" method to rearrange the dimensions of a tensor.","When matrices are transpose, they will change their layout type as expected."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":2,"totalFeatures":2,"passed":2,"successRate":1.0,"time":5},"title":"Reshaping Tensors","narrative":"Permuting an N-dimensional array means rearranging the dimensions/axes of the N-dimensional array.\n It produces a new tensor with the same data as the original tensor, \n but with the specified dimensions rearranged. \n \n This is very useful for example when you want to\n change the order of dimensions, for example, if you have a tensor with dimensions (batch_size, channels, height, width), \n you can use permute() to rearrange the dimensions to (batch_size, height, width, channels).\n Another useful application of permute() is transposing a matrix.\n For example, if you have a matrix with dimensions (rows, columns), \n you can use permute() to rearrange the dimensions to (columns, rows).\n \n Permuting is a very cheap operation because it does not copy any data but merely\n creates a new view on the same data with a different access pattern."},"ut.ndim.Tensor_Slice_Permute_Spec":{"executedFeatures":["A slice of a tensor changes as expected when reshaping it.","Reshaping a slice works as expected.","Two slices of one big tensor perform matrix multiplication flawless."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":3,"totalFeatures":3,"passed":3,"successRate":1.0,"time":5},"title":"Permuting Slices of Tensors","narrative":"Neureka provides a convenient way to permuting tensors\n even if they are slices of other tensors sharing the same underlying data.\n This is possible because of the under the hood indexing \n abstractions provided by the `NDConfiguration` interface and its various implementations."},"ut.neureka.Neureka_Spec":{"executedFeatures":["Backend related library objects adhere to the same toString formatting convention!","Every Thread instance has their own Neureka instance.","Neureka class instance has expected behaviour.","Neureka settings class can be locked causing its properties to be immutable.","OpenCL related library objects adhere to the same toString formatting convention!","Various library objects adhere to the same toString formatting convention!"],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":6,"totalFeatures":6,"passed":6,"successRate":1.0,"time":8863},"title":"The Neureka context can be used and configured as expected.","narrative":"This specification covers the behavior of the Neureka class which\n exposes a global API for configuring thread local contexts and library settings.\n The purpose of this is to assert that the API exposed by the Neureka class \n is both thread local and configurable.\n This specification also exists to cover standards for the Neureka library in general."},"ut.optimization.ADAM_Spec":{"executedFeatures":["ADAM optimizes according to expected inputs","Equations \"I[0]*I[1]+(1-I[2])*I[3]\" and \"(1-I[0])*I[1]\" used within ADAM return expected 
results.","Equations used by ADAM return expected result."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":3,"totalFeatures":3,"passed":3,"successRate":1.0,"time":46},"title":"","narrative":"ADAM is a more powerful alternative to the classical stochastic gradient descent. \n It combines the best properties of the AdaGrad and the RMSProp algorithms, which makes \n it especially well suited for sparse gradients and noisy data.\n Adam is the most popular among the adaptive optimizers\n because its adaptive learning rate working so well with sparse datasets."},"ut.optimization.AdaGrad_Spec":{"executedFeatures":["AdaGrad optimizes according to expected inputs"],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":1,"totalFeatures":1,"passed":1,"successRate":1.0,"time":15},"title":"","narrative":""},"ut.optimization.Momentum_Spec":{"executedFeatures":["Momentum optimizes according to expected inputs"],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":1,"totalFeatures":1,"passed":1,"successRate":1.0,"time":8},"title":"","narrative":"Momentum is an extension to the gradient descent optimization \n algorithm that allows the search to build inertia in a direction \n in the search space and overcome the oscillations of noisy \n gradients and coast across flat spots of the search space."},"ut.optimization.RMSProp_Spec":{"executedFeatures":["RMSprop optimizes according to expected inputs"],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":1,"totalFeatures":1,"passed":1,"successRate":1.0,"time":14},"title":"","narrative":"**Root Mean Squared Propagation**, or RMSProp, is an extension of gradient \n descent and the AdaGrad version of gradient descent that uses a \n decaying average of partial gradients in the adaptation of the \n step size for each parameter."},"ut.tensors.Copy_Spec":{"executedFeatures":["A deep copy of a slice tensor is also a deep copy of the underlying data array.","A deep copy of a tensor is also a deep copy of the underlying data array.","A shallow copy of a tensor will be flagged as such.","A shallow copy will share the same underlying data as its original tensor.","We can deep copy various types of tensors."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":5,"totalFeatures":5,"passed":5,"successRate":1.0,"time":15},"title":"To Copy or Not to Copy","narrative":"In this specification we cover the behaviour of tensors with respect to their copy methods.\n There are to main ways to copy a tensor:
\n 1. .shallowCopy()\n 2. .deepCopy()\n\n The first method creates a new tensor with the same underlying data array as the original tensor.\n The second method on the other hand creates a new tensor with a new data array.\n\n The first method is the most efficient, but it is not as safe as the second method.\n The second method is the most safe, but it is not as efficient.\n
        \n Besides these 2 main requirements, there are als some corner cases with respect to\n the components of a tensor (like for example its computation graph) which\n will be covered in this specification as well."},"ut.tensors.DimTrim_Spec":{"executedFeatures":["The \"dimTrim\" operation works on slices too!"],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":1,"totalFeatures":1,"passed":1,"successRate":1.0,"time":2},"title":"The 'dimTrim' Method","narrative":"The 'dimTrim' method is used to remove training and leading dimensions of length 1 from a tensor.\n This is useful when you want to perform operations on tensors of different ranks.\n For example, if you want to perform a dot product on two vectors, you can use the 'dimTrim' method\n to remove the dimension of length 1 from the vector, so that it becomes a scalar.\n This way you can perform the dot product on two scalars."},"ut.tensors.Expression_Based_Tensor_Instantiation_Spec":{"executedFeatures":["A tensor can be created from a function as expression.","We can instantiate tensors from various simple string expressions."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":2,"totalFeatures":2,"passed":2,"successRate":1.0,"time":3},"title":"Expression based Tensor Instantiation","narrative":"This specification defines how a tensor can be instantiated\n using string expressions, which define operations to be executed.\n This form of tensor instantiation is very useful to avoid boilerplate code."},"ut.tensors.Fluent_Tensor_Creation_Spec":{"executedFeatures":["Initialization lambda based tensors can be created fluently.","Range based tensors can be created fluently.","Scalars can be created fluently.","Seed based tensors can be created fluently.","Tensors can be created fluently.","Value based tensors can be created fluently.","Vectors can be created fluently."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":7,"totalFeatures":7,"passed":7,"successRate":1.0,"time":20},"title":"","narrative":""},"ut.tensors.Functional_Nda_Spec":{"executedFeatures":["ND-Array mapping lambdas produce expected nd-arrays.","The \"map\" method is a shorter convenience method for mapping to the same type.","We can analyse the values of a nd-array using various predicate receiving methods","We can collect a stream into a nd-array.","We can find both min and max items in a tensor by providing a comparator.","We can find both min and max items in an ND-array by providing a comparator.","We can initialize an ND-Array using a filler lambda mapping indices to items.","We can use the \"filter\" method as a shortcut for \"stream().filter(..)\".","We can use the \"flatMap\" method as a shortcut for \"stream().flatMap(..)\"."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":9,"totalFeatures":9,"passed":9,"successRate":1.0,"time":300},"title":"Functional ND-Arrays","narrative":"ND-Arrays expose a powerful API for performing operations on them\n in a functional style."},"ut.tensors.Functional_Tensor_Spec":{"executedFeatures":["Tensor mapping lambdas produce expected tensors.","The \"map\" method is a shorter convenience method for mapping to the same type.","We can analyse the values of a tensor using various predicate receiving methods","We can find both min and max items in a tensor by providing a comparator.","We can initialize a tensor using a filler lambda mapping indices to items.","We can use the \"filter\" method as a shortcut for 
\"stream().filter(..)\".","We can use the \"flatMap\" method as a shortcut for \"stream().flatMap(..)\"."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":7,"totalFeatures":7,"passed":7,"successRate":1.0,"time":303},"title":"Functional Tensors","narrative":"Tensors expose a powerful API for performing operations on them\n in a functional style."},"ut.tensors.Reshape_Spec":{"executedFeatures":["The reshape operation supports autograd!","We can create a new tensor with a different shape.","We can use `-1` in the desired shape if we want the axis size to be determined automatically."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":3,"totalFeatures":3,"passed":3,"successRate":1.0,"time":2},"title":"Tensor Reshaping","narrative":"This specification demonstrates how to reshape tensors,\n which means to change the shape of a tensor.\n\n Note that immutability is a core concept of the Neureka library.\n This means that the `Tensor` API does not expose mutability directly.\n Instead, it exposes methods that return new instances of `Tensor`\n that are derived from the original instance.\n \n This is also true for reshaping operations.\n \n Don't be concerned about the performance implications of this,\n because in the vast majority of cases the new instance will be backed by the same data array\n as the original instance!"},"ut.tensors.Tensor_As_Container_Spec":{"executedFeatures":["More tensor operations translate to custom data type \"ComplexNumber\".","Plus operator on String tensors works element-wise.","Tensor operations translate to custom data type \"ComplexNumber\".","We can apply predicates on the values of a tensor."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":4,"totalFeatures":4,"passed":4,"successRate":1.0,"time":9},"title":"Why not have a tensor of words?","narrative":"Technically, tensors are merely fancy ND-arrays with some useful mathematical operations\n applicable to them...\n Therefore, there is no reason why a tensor would not also be able to store\n other kinds of objects besides numbers like strings for example.\n This specification ensures that tensors can hold and index many other things..."},"ut.tensors.Tensor_Assign_Spec":{"executedFeatures":["Assignment can be easily achieved through subscription operators.","We can assign one slice into another one.","We can use the \"mut\" API to assign the contents of one tensor into another one."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":3,"totalFeatures":3,"passed":3,"successRate":1.0,"time":3},"title":"Tensor Inline Assignment","narrative":"In this specification we cover the behaviour of tensors with respect to the assignment operation\n as well as the assignment of individual tensor items."},"ut.tensors.Tensor_Conversion_Spec":{"executedFeatures":["Tensors value type can be changed by calling \"toType(...)\".","We can change the data type of all kinds of tensors.","We turn a tensor into a scalar value or string through the \"as\" operator!"],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":3,"totalFeatures":3,"passed":3,"successRate":1.0,"time":11},"title":"Tensor Type Conversion","narrative":"Here we specify how a tensor can be converted to other data types\n like for example another tensor of a different data type."},"ut.tensors.Tensor_Convolution_Spec":{"executedFeatures":["Autograd works with simple 2D convolution.","Convolution can be performed using non-quadratic 
matrix tensors.","Convolution can be performed using tensors with an additional dimension as batch size.","Convolution with tensors of the same shape is equivalent to a dot product.","Manual convolution produces expected result.","Sime convolution works as expected eith autograd.","Tensors have the correct layout after convolution.","The \"x\" (convolution) operator produces expected results (On the CPU).","Very simple manual convolution produces expected result.","We can perform a convolution operation on a 2D tensor."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":10,"totalFeatures":10,"passed":10,"successRate":1.0,"time":653},"title":"Tensor Convolution","narrative":"This specification shows how convolution can be performed on tensors.\n\n Convolution is a linear operation which is not only important for image processing but also\n a central player in the field of machine learning (especially for computer vision).\n It is used to extract features from images and other typically ~2 dimensional data.\n Other than that it is extremely important in the field of signal processing."},"ut.tensors.Tensor_Device_Spec":{"executedFeatures":["Adding OpenCL device to tensor makes tensor be \"outsourced\" and contain the Device instance as component.","Tensors try to migrate themselves to a device that is being added to them as component.","The device of a tensor can be accessed via the \"device()\" method.","When creating slices of tensors then this should trigger a \"parent - child\" relation noticeable to the device!"],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":4,"totalFeatures":4,"passed":4,"successRate":1.0,"time":4},"title":"Tensors on Devices","narrative":"This unit test specification covers \n the expected behavior of tensors when interacting\n with instances of implementations of the Device interface."},"ut.tensors.Tensor_Dot_Product_Spec":{"executedFeatures":["The \"dot\" method calculates the dot product between vectors.","The \"dot\" operation supports autograd.","The dot operation work even when one tensor is virtual.","The dot operation works for virtual tensors as well.","The dot product operation runs on any device.","The dot product works across different types and devices.","You can slice a Matrix into vectors and then used them for dot products."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":7,"totalFeatures":7,"passed":7,"successRate":1.0,"time":135},"title":"Tensor Dot Products","narrative":"A tensor can also be a simple vector, which is a tensor of rank 1.\n This specification demonstrates how to perform dot products on tensors of rank 1."},"ut.tensors.Tensor_Generics_Spec":{"executedFeatures":["1D tensors can be created from primitive arrays.","Anonymous tensor instance has the default datatype class as defined in Neureka settings.","We can create a tensor of strings."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":3,"totalFeatures":3,"passed":3,"successRate":1.0,"time":3},"title":"Tensors as Generic Containers","narrative":"Tensors do not just store numeric data.\n They can hold anything which can be stuffed into a \"Object[]\" array.\n You could even create a tensor of tensors!"},"ut.tensors.Tensor_Gradient_Spec":{"executedFeatures":["Gradient of tensor is being applies regardless of the tensor requiring gradient or not","Tensors can have gradients but not require them.","Tensors that have gradients but do not require them still print 
them."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":3,"totalFeatures":3,"passed":3,"successRate":1.0,"time":3},"title":"Gradients are Tensors which are Components of other Tensors","narrative":"This specification defines the gradient API on tensors.\n So one ought to be able to check wetter or not a tensor has a gradient attached to it or not.\n In that case one should be able to get this gradient and then work with\n it independently of the original tensor to which it belongs to..."},"ut.tensors.Tensor_IO_Spec":{"executedFeatures":["A tensor produced by the static \"Tensor.newRandom(shape)\" has expected \"random\" value.","Indexing after reshaping works as expected.","Tensor value type can not be changed by passing float or double arrays to it.","Tensor values can be manipulated","The tensor data array can be modified by targeting them with an index.","We can manipulate the underlying data array of a tensor through the mut API.","We can re-populate a tensor of shorts from a single scalar value!","When we try to manipulate the underlying data array of a virtual tensor then it will become actual."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":8,"totalFeatures":8,"passed":8,"successRate":1.0,"time":29},"title":"Reading and Writing Tensor Items","narrative":"Tensors are complicated data structures with a wide range of different possible states.\n They can host elements of different types residing on many kinds of different devices.\n Here we want to read from and write to the state of a tensor."},"ut.tensors.Tensor_Instantiation_Spec":{"executedFeatures":["A matrix tensor can be instantiated using lists for it's shape and values.","A simple 2D vector can be instantiated using lists for it's shape and values.","Passing a seed in the form of a String to a tensor produces pseudo random items.","Scalar tensors can be created via static factory methods","Tensors can be instantiated based on arrays for both shapes and values.","Tensors can be instantiated with String seed.","Vector tensors can be instantiated via factory methods."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":7,"totalFeatures":7,"passed":7,"successRate":1.0,"time":11},"title":"Instantiating Tensors","narrative":"Tensors are complicated data structures with a wide range of different possible states.\n They can host elements of different types residing on many kinds of different devices.\n Here we want to show how a tensor can be instantiated in different ways."},"ut.tensors.Tensor_Interop_Spec":{"executedFeatures":["Not all tensor can be converted to images.","Tensor can be converted to buffered images."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":2,"totalFeatures":2,"passed":2,"successRate":1.0,"time":7},"title":"Tensors play well with other data structures!","narrative":"Tensors should have good interoperability with other JDK data structures like images.\n In this specification we define these interoperability requirements."},"ut.tensors.Tensor_Layout_Spec":{"executedFeatures":["A new transposed version of a given tensor will be returned by the \"T()\" method.","Matrix multiplication works for both column and row major matrices across devices."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":2,"totalFeatures":2,"passed":2,"successRate":1.0,"time":144},"title":"Row or Column Major. 
Why not both?","narrative":"Although Neureka exposes tensors as row major tensors from \n a users point of view, it does in fact support both row major and column major \n based tensor layout under the hood.\n Here we cover how the layout of tensors can be modified\n and we ensure the different tensor types still work as expected...\n (The features in this specification involve mutating tensors, be careful when playing around with this yourself)"},"ut.tensors.Tensor_Operation_Spec":{"executedFeatures":["Activation functions work across types on slices and non sliced tensors.","Auto reshaping and broadcasting works and the result can be back propagated.","New method \"asFunction\" of String added at runtime is callable by groovy and also works.","New operator methods added to \"SDK-types\" at runtime are callable by groovy and also work.","Operators \"+,*,**\" produce expected results with gradients which can be accessed via a \"Ig[0]\" Function instance","Overloaded operation methods on tensors produce expected results when called.","Scalar broadcasting works across devices.","Simple slice addition produces expected result.","The \"dot\" operation reshapes and produces valid \"x\" operation result.","The \"matMul\" operation produces the expected result.","The \"random\" function/operation populates tensors randomly.","The transpose operation exposed by the \"T()\" method, supports autograd.","The values of a randomly populated tensor seems to adhere to a gaussian distribution.","You can do matrix multiplication using 2 transposed matrices.","You can do matrix multiplication using transposed matrices as second operand.","You can do matrix multiplication using transposed matrices."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":16,"totalFeatures":16,"passed":16,"successRate":1.0,"time":5000},"title":"Running Tensors through operations","narrative":"This specification shows how to use the tensor API to run tensors through various operations.\n Operations are triggered either by simply calling methods on tensors or by using \n `Function` objects which are used to define custom operations in the form \n of a syntax tree."},"ut.tensors.Tensor_Slicing_Spec":{"executedFeatures":["A tensor can be sliced by passing ranges in the form of lists (Groovy ranges).","A tensor can be sliced by passing ranges in the form of primitive arrays.","Normal slicing will try to do autograd.","Slicing is also a Function with autograd support!","The \"at\" method and the \"from\" / \"to\" methods can be mixed when slicing a tensor.","The slice builder also supports slicing with custom step sizes.","We can avoid autograd when slicing by using the \"detached\" instead of the \"get\" method.","We can slice a scalar tensor from a larger tensor of rank 4.","When Slicing only one axis using the SliceBuilder API, the other axes will be sliced implicitly."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":9,"totalFeatures":9,"passed":9,"successRate":1.0,"time":13},"title":"Tensors within Tensors","narrative":"ND-Array data structures can be \"sliced\" in the sense\n that one can create a subset view of the underlying data inside a tensor\n through a new tensor instance...\n This can be a tedious and complicated procedure.\n Therefore a tensor should expose a various user friendly API for slicing which\n are also fit for various languages.\n This specification covers these APIs for tensor slicing."},"ut.tensors.Tensor_State_Spec":{"executedFeatures":["A tensor can 
be instantiated from a item type class and nested lists.","Numeric tensors as String can be formatted on an entry based level.","Tensor created from shape and datatype has expected state.","Tensors as String can be formatted depending on shape.","Tensors as String can be formatted on an entry based level.","The data and the value of a tensor a 2 different things!","We can create scalar tensors."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":7,"totalFeatures":7,"passed":7,"successRate":1.0,"time":16},"title":"The Tensor Initialization and State Specification","narrative":"This specification defines the expected states of freshly instantiated\n and initialized tensors.\n After a tensor was created successfully we expect it \n to have certain properties like a shape, rank, type and data array\n among other things."},"ut.tensors.Tensor_Stats_Spec":{"executedFeatures":["A tensor can be summed alongside a specific axis.","Both the min and max operation support autograd (back-propagation).","Multiple dimensions of a tensor can selectively be summed up.","The sum operation support autograd (back-propagation).","There is no need to use a function, we can use the min() and max() methods on tensors instead.","We can get pre-instantiated min and max functions from the library context.","We can use the \"sum\" method to sum the items of a tensor.","We can use the max operation as a function"],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":8,"totalFeatures":8,"passed":8,"successRate":1.0,"time":228},"title":"Reducing Tensors","narrative":"Various kinds of operations reduce tensors to scalars,\n the most common ones being the min and max operations \n which find the smallest as well as largest number among all \n items of a tensor.\n Neureka exposes various different ways to achieve this,\n all of which are also differential (autograd support)."},"ut.tensors.Tensor_Version_Spec":{"executedFeatures":["Inline operations cause illegal state exceptions.","Inline operations causes version incrementation.","Non-inline operations do not cause version incrementation.","Storing a tensor on a device should not change the version of a tensor (Even though its data changed technically)."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":4,"totalFeatures":4,"passed":4,"successRate":1.0,"time":57},"title":"Tensor (Data Array) Version","narrative":"There are two fundamental categories of operations\n which can be applied to tensors : \n Inline operations and Non-Inline operations! \n \n Inline operations are often times problematic because they produce\n side effects by changing passed tensors instead of producing new ones... \n One such bad side effect can easily occur for tensors involved in the\n autograd system, more specifically: the recorded computation graph. \n Inline operations can break the mathematically pureness of the back-propagation\n procedure by for example changing partial derivatives...
        \n In order to prevent said errors from occurring unnoticed, tensors\n have versions which will increment when the underlying data of the tensor changes. \n This version will be tracked by the computation graph as well in order to\n match it with the ones stored inside the tensor. \n A mismatch would then yield an exception! \n \n This specification is responsible for defining the behaviour of this\n version number with respect to their wrapping tensors as well as computation graph nodes."},"ut.tensors.exceptions.Tensor_Delete_Exception_Spec":{"executedFeatures":["A deleted tensor will tell you that it has been deleted.","A deleted tensor will throw an exception when accessing its configuration.","A deleted tensor will throw an exception when accessing its data type.","A deleted tensor will throw an exception when accessing its data.","A deleted tensor will throw an exception when modifying its data type.","A deleted tensor will throw an exception when trying to modify its data.","A deleted tensor will throw an exception when trying to set its configuration."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":7,"totalFeatures":7,"passed":7,"successRate":1.0,"time":3},"title":"","narrative":""},"ut.tensors.exceptions.Tensor_Exception_Spec":{"executedFeatures":["Building a tensor with \"null\" as shape argument throws an exception.","Building a tensor with 0 shape arguments throws an exception.","Casting a tensor as something unusual will cuas an exception to be thrown.","Out of dimension bound causes descriptive exception!","Passing an invalid key object into the \"getAt\" method causes a descriptive exception.","Passing an invalid object into Tensor constructor causes descriptive exception.","Passing null to various methods of the tensor API will throw exceptions.","Trying to inject an empty tensor into another causes fitting exception."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":8,"totalFeatures":8,"passed":8,"successRate":1.0,"time":13},"title":"Tensors Exception Behavior","narrative":"This specification covers the behavior of the Tensor class in\n exceptional scenarios which are contrary to its intended use.\n The purpose of this is to assert that the Tensor class will provide\n useful feedback to a user to explain that a misuse of its API\n occurred so that the user can correct this misuse."},"ut.utility.Cleaner_Testing":{"executedFeatures":["The DeviceCleaner triggers registered cleaner actions when things are eligible for GC."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":1,"totalFeatures":1,"passed":1,"successRate":1.0,"time":252},"title":"How Neureka Cleans Up","narrative":"Under the hood \n Neureka deals with large arrays of\n data, which are often times \n native data arrays requiring explicit\n memory freeing!\n This freeing of memory can happen at any time\n during the lifetime of an nd-array, however\n it should happen at least up until the nd-array/tensor\n objects representing their referenced data arrays become\n eligible for garbage collection.\n This specification ensures that the custom garbage\n cleaner implementation used by Neureka fulfills this role"},"ut.utility.DataConverter_Spec":{"executedFeatures":["An array of any type of object may be converted to a array of primitives.","The DataConverter can convert the given array 
data."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":2,"totalFeatures":2,"passed":2,"successRate":1.0,"time":2},"title":"","narrative":""},"ut.utility.FileHandle_Spec":{"executedFeatures":["Fully labeled tenors will be stored with their labels included when saving them as CSV.","Partially labeled tenors will be stored with their labels included when saving them as CSV.","Test reading IDX file format.","Test writing IDX file format.","The FileDevice component \"CSVHead\" can read CSV file formats and load them as tensors.","We can load image files as tensors."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":6,"totalFeatures":6,"passed":6,"successRate":1.0,"time":617},"title":"","narrative":""},"ut.utility.ListReader_Exception_Spec":{"executedFeatures":["The ListReader will detect inconsistent degrees of nesting in the provided data.","The ListReader will detect inconsistent types in the provided data."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":2,"totalFeatures":2,"passed":2,"successRate":1.0,"time":2},"title":"","narrative":""},"ut.utility.ListReader_Spec":{"executedFeatures":["The ListReader can interpret nested lists into a shape list and value list.","The ListReader can interpret nested lists resembling a 3D tensor into a shape list and value list.","The ListReader can interpret nested lists resembling a matrix into a shape list and value list."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":3,"totalFeatures":3,"passed":3,"successRate":1.0,"time":3},"title":"The Internal ListReader turning lists into flat arrays with shape and type data","narrative":"This specification covers an internal class which should not be used\n outside this library, namely the ListReader class.\n This class is simply a converter which turns nested lists\n into flat arrays alongside the type of the elements and the shape of this \"tensor\"."},"ut.utility.Utility_Spec":{"executedFeatures":["Object arrays can be converted to primitive arrays."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":1,"totalFeatures":1,"passed":1,"successRate":1.0,"time":6},"title":"","narrative":""},"ut.device.CPU_Spec":{"executedFeatures":["CPU knows the current number of available processor cores!","The CPU device will keep track of the amount of tensors it stores.","The CPU exposes a non null API for executing workloads in parallel.","Thread pool executes given workload in parallel"],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":4,"totalFeatures":4,"passed":4,"successRate":1.0,"time":704},"title":"The CPU device, an API for CPU based execution","narrative":"The CPU class, one of many implementations of the Device interface, \n is simply supposed to be an API for dispatching threaded workloads onto the CPU.\n Contrary to other types of device, the CPU will host tensor data by default, simply\n because the tensors will be stored in RAM if no device was specified."},"ut.device.Cross_Device_Type_Spec":{"executedFeatures":["A device will keep track of the amount of tensors and data objects it stores.","Devices expose an API for accessing (reading and writing) the data of a tensor.","Devices store slices which can also be restored just like any other tensor.","Devices store tensors which can also be restored.","Execution calls containing null arguments will cause an exception to be thrown in device instances.","In total there are 3 different types of 
methods for finding device instances.","Passing a numeric array to a tensor should modify its contents!","Virtual tensors stay virtual when outsourced.","We can find Device implementations or null by passing search keys to the \"get\" method.","We can query the backend for devices by specifying both the requested type and a key word."],"ignoredFeatures":["Devices cannot store slices whose parents are not already stored."],"stats":{"failures":0,"errors":0,"skipped":1,"totalRuns":10,"totalFeatures":11,"passed":10,"successRate":1.0,"time":834},"title":"Finding Device Types","narrative":"Neureka introduces the concept of a `Device` which is an interface\n that represents a computational device used for executing tensor / nd-array operations on them.\n The `Device` interface is implemented by various classes which represent\n different types of accelerator hardware such as `CPUs`, `GPUs`, `TPUs`, `FPGAs`, etc.\n These various `Device` types can not be instantiated directly because they model \n the concrete and finite hardware that is available on any given system Neureka is running on.\n This means that they are usually instantiated lazily upon access request or \n upfront by the library backend (usually a backend extension built for a specific device).\n In order to find these instances embedded in the library backend the `Device` interface\n exposes various static methods which can be used to find a device instance by name or type."},"Example_Spec.Example_Spec":{"executedFeatures":["Call me feature not unit test!","I am readable and also best practice!","Numbers to the power of two with a fancy data table!","Should be able to remove from list","iAmNotSoReadable"],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":5,"totalFeatures":5,"passed":5,"successRate":1.0,"time":5},"title":"An Introduction to writing Spock Specifications","narrative":"Hello and welcome to the example / template specification of this project.\n This is a simple introduction as to how to get started writing Spock specifications.\n \n Spock works on top of Groovy which is in essence a syntactic super-set of Java.\n That means that one can write Java code in Groovy, and 99% of the time it will \n work the exact same way."},"it.Calculus_Stress_Test":{"executedFeatures":["Activation functions work across types, on large prime sized 1D slices and non sliced 1D tensors.","Activation functions work across types.","Dot operation stress test runs error free and produces expected result","Stress test runs error free and produces expected result","The broadcast operation stress test runs error free and produces expected result"],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":5,"totalFeatures":5,"passed":5,"successRate":1.0,"time":589},"title":"","narrative":""},"it.Cross_Device_Sliced_Tensor_System_Test":{"executedFeatures":["Cross device sliced tensor integration test runs without errors.","Slices can be created using the SliceBuilder."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":2,"totalFeatures":2,"passed":2,"successRate":1.0,"time":70},"title":"Cross Device Tensor Slicing","narrative":""},"it.Cross_Device_Spec":{"executedFeatures":["A gradient of ones can be set by calling the backward method on a tensor sitting on any device.","Convolution can model matrix multiplications across devices.","Cross device system test runs successfully.","Mapping tensors works for every device (even if they are not used).","Test simple NN implementation 
with manual backprop"],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":5,"totalFeatures":5,"passed":5,"successRate":1.0,"time":8704},"title":"Cross Device Stress Test Specification","narrative":"This specification is pretty much a system test which covers\n the behavior of the library as a whole across multiple devices!\n No matter which device is being used for a given stress test, the result should be the same..."},"it.Eleven_Lines_NN_System_Spec":{"executedFeatures":["One can write a simple double based neural network in less than 11 lines of java like code using the \"@\" operator!","One can write a simple float based neural network in less than 11 lines of java like code!","One can write a simple neural network in less than 11 lines of code!","One can write a simple neural network with custom back-prop in 11 lines of code!","The pseudo random number generator works as expected for the weights used in the 11 line NN examples!"],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":5,"totalFeatures":5,"passed":5,"successRate":1.0,"time":281},"title":"NN Code Golfing!","narrative":"This system test specification uses the following Numpy\n code as reference implementation for the equivalent in Neureka\n or similar implementations and variations.\n The code below is a simple neural network in only 11 lines of code.\n\n \u00b4\u00b4\u00b4\n X = np.array([ [0,0,1],[0,1,1],[1,0,1],[1,1,1] ])\n y = np.array([[0,1,1,0]]).T\n W1 = 2*np.random.random((3,4)) - 1\n W2 = 2*np.random.random((4,1)) - 1\n for j in xrange(60000):\n l1 = 1/(1+np.exp(-(np.dot(X,W1))))\n l2 = 1/(1+np.exp(-(np.dot(l1,W2))))\n l2_delta = (y - l2)*(l2*(1-l2))\n l1_delta = l2_delta.dot(W2.T) * (l1 * (1-l1))\n W2 += l1.T.dot(l2_delta)\n W1 += X.T.dot(l1_delta)\n \u00b4\u00b4\u00b4"},"st.Benchmark_System_Test":{"executedFeatures":["Tensor can be constructed by passing List instances.","Test benchmark script and simple tensor constructor."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":2,"totalFeatures":2,"passed":2,"successRate":1.0,"time":910},"title":"","narrative":""},"st.Broad_System_Test":{"executedFeatures":["The long broad integration test runs successfully."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":1,"totalFeatures":1,"passed":1,"successRate":1.0,"time":33},"title":"","narrative":""},"st.NN_Concepts_Spec":{"executedFeatures":["The attention mechanism (found in the commonly known transformer) demonstrated."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":1,"totalFeatures":1,"passed":1,"successRate":1.0,"time":140},"title":"Examining Neural Network Architecture Snippets","narrative":"This specification is intended to showcase some basic building blocks of \n various neural network architectures."},"st.Training_NNs_Spec":{"executedFeatures":["A simple 3 layer neural network converges.","A very simple 1 layer NN converges.","We can run the attention head test model."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":3,"totalFeatures":3,"passed":3,"successRate":1.0,"time":7071},"title":"Training a Neural Network Class","narrative":"When designing larger neural network architectures, what you would usually do is\n to create a class that represents the whole model (which itself might be composed\n of smaller models). 
\n \n This class would then represent something that can be executed and then trained.\n This Specification shows how to instantiate, execute and train various \n pre-defined example neural network models."},"ut.autograd.AD_And_Computation_Graph_Spec":{"executedFeatures":["Payloads and derivatives are null after garbage collection.","Reshaping produces expected computation graph and also works with reverse mode AD."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":2,"totalFeatures":2,"passed":2,"successRate":1.0,"time":605},"title":"","narrative":""},"ut.autograd.Autograd_Explained":{"executedFeatures":["Simple automatic differentiation and propagation."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":1,"totalFeatures":1,"passed":1,"successRate":1.0,"time":6},"title":"Autograd - Automatic Differentiation","narrative":"Central to all neural networks in Neureka is the autograd package. \n The autograd package provides automatic differentiation for all default operations on Tensors. \n Neureka is a define-by-run library, which means that your backpropagation is defined by how \n your code is run, and that every single iteration can be different. \n \n The class neureka.Tensor is the central class of the main package. \n If you set its attribute 'rqsGradient' to True, Neureka starts to track all operations on it. \n When you finish the forward pass of your network \n you can call .backward() and have all the gradients computed \n and distributed to the tensors requiring them automatically. \n \n The gradient for a tensor will be accumulated into a child tensor (component) which \n can be accessed via the '.getGradient()' method. \n \n To stop a tensor from tracking history, you can call '.detach()' to detach it from the \n computation history, and to prevent future computation from being tracked."},"ut.autograd.Autograd_Flags_Explained":{"executedFeatures":["Advanced backpropagation on all AD-Modes ","We can create a shallow copy of a tensor detached from the computation graph."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":9,"totalFeatures":2,"passed":9,"successRate":1.0,"time":48},"title":"","narrative":""},"ut.autograd.Autograd_NN_Spec":{"executedFeatures":["Autograd work for simple matrix multiplications.","Autograd works for 2 matrix multiplications in a row.","Autograd works in a simple convolutional dot product and float based feed forward neural network.","Autograd works in a simple convolutional dot product based feed forward neural network.","Autograd works in a simple mat-mul based feed forward neural network."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":5,"totalFeatures":5,"passed":5,"successRate":1.0,"time":76},"title":"Simple Neural Network autograd integration test","narrative":"The integration test below has been implemented by using\n the following code and the result it produces as reference : \n https://medium.com/dair-ai/a-simple-neural-network-from-scratch-with-pytorch-and-google-colab-c7f3830618e0 \n \n The following seed has been used to assure reproducibility :\n 'torch.manual_seed(503672689411)'"},"ut.autograd.Autograd_Tensor_Spec":{"executedFeatures":["A tensor used as derivative within a computation graph will throw exception when trying to deleting it.","Second-Test \"x-mul\" autograd behaviour. (Not on device)","Test basic autograd behaviour. 
(Not on device)"],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":3,"totalFeatures":3,"passed":3,"successRate":1.0,"time":15},"title":"","narrative":""},"ut.autograd.JITProp_Autograd_Tensor_Spec":{"executedFeatures":["Gradient auto-apply kicks in when used AD uses JIT prop","Test JIT propagation variant one.","Test JIT propagation variant two.","Test autograd without JIT and auto apply.","Test in-differential and JIT with auto apply","Test no JIT prop when forward AD","Test no preemptive gradient apply when not requested and auto apply and JIT_prop","Test pending error optimization"],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":8,"totalFeatures":8,"passed":8,"successRate":1.0,"time":121},"title":"","narrative":""},"ut.autograd.internal.GraphNode_Instantiation_Exception_Unit_Tests":{"executedFeatures":["GraphNode instantiation throws exception because tensors of ExecutionCall do not return GraphNode instances.","GraphNode throws an exception when trying to execute an inline operation on inputs with active autograd."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":2,"totalFeatures":2,"passed":2,"successRate":1.0,"time":71},"title":"","narrative":""},"ut.autograd.internal.GraphNode_Tensor_Exception_Unit_Tests":{"executedFeatures":["A tensor cannot be deleted if it is part of a graph and the tensor is used as derivative."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":1,"totalFeatures":1,"passed":1,"successRate":1.0,"time":4},"title":"","narrative":""},"ut.backend.Backend_Extension_Spec":{"executedFeatures":["Mock operation interacts with FunctionNode (AbstractFunction) instance as expected."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":1,"totalFeatures":1,"passed":1,"successRate":1.0,"time":58},"title":"","narrative":""},"ut.backend.Backend_MatMul_Extension_Spec":{"executedFeatures":["GEMM matrix multiplication reference implementation can be set as custom OperationType and works as expected.","Test context mock for opencl reference implementations.","Tile parsing for kernel parameter calculation yields expected tile dimensions."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":3,"totalFeatures":3,"passed":3,"successRate":1.0,"time":53},"title":"","narrative":""},"ut.backend.Matrix_Multiplication_Spec":{"executedFeatures":["The \"matMul\" method allows us to perform matrix multiplication.","The simple CPU matrix multiplication implementation works as expected."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":2,"totalFeatures":2,"passed":2,"successRate":1.0,"time":21},"title":"Matrix Multiplication","narrative":"The tensor API exposes a useful method for Matrix Multiplication.\n This specification not only demonstrates how to use this method\n but also shows how matrix multiplication work \n for tensors with both row and column major layouts.\n (typically, column major is faster)"},"ut.backend.core.Backend_Algorithm_AD_Spec":{"executedFeatures":["Activation implementations behave as expected.","Broadcast implementations have expected properties.","Convolution implementations behave as expected.","Operator implementations behave as 
expected."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":4,"totalFeatures":4,"passed":4,"successRate":1.0,"time":46},"title":"","narrative":""},"ut.backend.core.Backend_Algorithm_Implementation_Spec":{"executedFeatures":["Activation implementations have expected Executor instances.","HostExecutors of Operator implementations behave as expected.","Operator implementations have expected Executor instances."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":3,"totalFeatures":3,"passed":3,"successRate":1.0,"time":60},"title":"","narrative":""},"ut.backend.core.Backend_Functional_Algorithm_Spec":{"executedFeatures":["A functional algorithm cannot be used if it was not built properly!","A functional algorithm does not accept null as an answer!","A functional algorithm warns us when modified after it has been built!"],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":3,"totalFeatures":3,"passed":3,"successRate":1.0,"time":11},"title":"","narrative":""},"ut.backend.core.Matrix_Multiplication_Spec":{"executedFeatures":["The CPU matrix multiplication implementation works as expected.","The internal matrix multiplication test script runs!"],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":2,"totalFeatures":2,"passed":2,"successRate":1.0,"time":1154},"title":"Internal CPU based Matrix Multiplication","narrative":"This specification covers library internal matrix multiplication logic,\n specifically the CPU implementation.\n Do not depend on the API used in this specification as it is subject to change!"},"ut.backend.core.OpenCL_Backend_Spec":{"executedFeatures":["The OpenCL backend context can load implementations."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":1,"totalFeatures":1,"passed":1,"successRate":1.0,"time":6},"title":"","narrative":""},"ut.backend.core.Randomization_Spec":{"executedFeatures":["Randomization is in essence the same algorithm as JDKs \"Random\".","The Randomization class can fill various types of arrays with pseudo random numbers.","We can make slices of tensors random."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":3,"totalFeatures":3,"passed":3,"successRate":1.0,"time":18},"title":"","narrative":""},"ut.device.Cross_Device_IO_Spec":{"executedFeatures":["We can use the access device API to read from a tensor.","We can use the access device API to write to a tensor"],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":2,"totalFeatures":2,"passed":2,"successRate":1.0,"time":15},"title":"Devices manage the states of the tensors they store!","narrative":"Tensors should not manage their states\n themselves, simply because the type and location\n of the data is dependent on the device onto which they are stored.\n This specification tests of various device implementations\n enable reading to or writing from the tensors they store."},"ut.device.FileDevice_Spec":{"executedFeatures":["A file device stores tensors in idx files by default.","A file device stores tensors in various file formats.","A tensor loaded from a file device can be loaded again.","The file device can load known files in a directory."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":4,"totalFeatures":4,"passed":4,"successRate":1.0,"time":48},"title":"FileDevice, Storing Tensors in Files","narrative":"The `FileDevice` class, one of many implementations of the `Device` interface, 
\n represents a file directory which can store and load tensors as files (`idx`, `jpg`, `png`...)."},"ut.device.OpenCLDevice_Exception_Spec":{"executedFeatures":["Ad hoc compilation produces expected exceptions when duplication is found.","Ad hoc compilation produces expected exceptions.","An OpenCLDevice will throw an exception when trying to add a tensor whose \"data parent\" is not outsourced.","Trying to restore a tensor which is not on a device raises exception."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":4,"totalFeatures":4,"passed":4,"successRate":1.0,"time":79},"title":"OpenCLDevice Exception Handling","narrative":"The OpenCLDevice class, one of many implementations of the Device interface, \n represents physical OpenCL devices.\n This specification defines how instances of this class deal with exceptional information."},"ut.device.OpenCLDevice_Spec":{"executedFeatures":["Ad hoc compilation produces executable kernel.","Ad hoc compilation works for WIP general purpose matrix multiplication.","Ad hoc compilation works for custom column major based tiled matrix multiplication.","Ad hoc compilation works for custom simple row major based matrix multiplication.","Ad hoc matrix multiplication works for multiple of 16 matrices.","An OpenCLDevice loads tensors in a provided lambda temporarily.","We can get the items of an outsourced tensor as a primitive array.","We can take a look at the underlying data array of an outsourced tensor through the unsafe API."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":8,"totalFeatures":8,"passed":8,"successRate":1.0,"time":2217},"title":"The OpenCLDevice Specification","narrative":"Tensors need devices for execution!\n By default tensors use the `CPU` device, but sometimes we want to\n use something more suitable for large amounts of data and a high degree of parallelization.\n This is where the `OpenCLDevice` comes into play!\n It is a `Device` implementation built on top of the JOCL library, a thin OpenCL API.\n We expect the `OpenCLDevice` to store tensors as well as being able to read and write\n data from and to stored tensors.\n Also, an `OpenCLDevice` should allow us to compile OpenCL kernel code on the fly..."},"ut.device.OpenCL_Spec":{"executedFeatures":["A given OpenCL context can be disposed!","An OpenCLDevice will throw an exception when trying to add a tensor whose \"data parent\" is not outsourced.","First found OpenCLDevice will have realistic numeric properties.","First found OpenCLDevice will have realistic properties inside summary query.","First found OpenCLDevice will have realistic text properties."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":5,"totalFeatures":5,"passed":5,"successRate":1.0,"time":3009},"title":"Working with OpenCL","narrative":"Neureka models the OpenCL API through various types of classes.\n The most fundamental of these is the `OpenCLDevice` class which\n represents a single device with OpenCL support.\n Besides that, there is also the `OpenCLContext` class which\n represents OpenCL contexts, platforms and multiple devices on said platforms..."},"ut.device.internal.CLFunctionCompiler_Spec":{"executedFeatures":["The CLFunctionCompiler produces an operation which properly integrates to the backend.","The CLFunctionCompiler produces the expected \"ad hoc\" kernel.","The OpenCLDevice produces a working optimized Function for doubles.","The OpenCLDevice produces a working optimized Function for 
floats."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":4,"totalFeatures":4,"passed":4,"successRate":1.0,"time":170},"title":"Turning functions into kernels.","narrative":"Neureka parses mathematical expressions into an AST representation\n hidden behind the Function interface...\n This feature does not exist without reason, we can use\n this abstract syntax tree to compile to OpenCL kernels\n for optimal execution speed!"},"ut.device.internal.CPU_Kernel_Spec":{"executedFeatures":["The Reduce implementation for the CPU has realistic behaviour","The Sum implementation for the CPU has realistic behaviour"],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":2,"totalFeatures":2,"passed":2,"successRate":1.0,"time":27},"title":"","narrative":""},"ut.device.internal.OpenCL_Data_Spec":{"executedFeatures":["The \"Data\" class can represent various OpenCL data types.","The OpenCLDevice specific Data class represents JVM data for OpenCL."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":2,"totalFeatures":2,"passed":2,"successRate":1.0,"time":19},"title":"","narrative":""},"ut.device.internal.OpenCL_Kernel_Unit_Spec":{"executedFeatures":["The CLDot implementation for the OpenCLDevice has realistic behaviour","The GEMM implementation for the OpenCLDevice has realistic behaviour","The Reduce implementation for the OpenCLDevice has realistic behaviour","The Sum implementation for the OpenCLDevice has realistic behaviour","The Sum implementation for the OpenCLDevice has realistic behaviour for when the number of elements is a prime."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":5,"totalFeatures":5,"passed":5,"successRate":1.0,"time":12},"title":"","narrative":""},"ut.dtype.DataType_Spec":{"executedFeatures":["DataType multi-ton instances behave as expected."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":1,"totalFeatures":1,"passed":1,"successRate":1.0,"time":5},"title":"","narrative":""},"ut.dtype.NumericType_Spec":{"executedFeatures":["Conversion goes both ways and produces expected numeric values.","NumericType conversion to holder types yields expected results.","NumericType implementations behave as expected.","NumericType implementations return their expected properties."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":4,"totalFeatures":4,"passed":4,"successRate":1.0,"time":32},"title":"The NumericType and its implementations model their respective numeric data types.","narrative":"This specification covers the behavior of the NumericType interface\n which is responsible for modelling numeric data types which may or may not be native to the JVM. 
\n These implementations however do not model them in the traditional OO style\n but merely expose useful utility method for converting and representing \n these numeric data types using JVM types."},"ut.framing.Tensor_Framing_Spec":{"executedFeatures":["A matrix (rank 2 tensor) can be labeled and their labels can be used to extract slices / subsets.","A tensor can be labeled partially.","Rank 3 tensors can be labeled and their labels can be used to extract slices / subsets of tensors.","We can add labels to tensors through lists or maps passed to the \"label()\" method."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":4,"totalFeatures":4,"passed":4,"successRate":1.0,"time":14},"title":"Naming Tensors and their Dimensions.","narrative":"A powerful concept in the data science as well as machine learning\n world is something usually referred to as \"Data Frames\".\n These are highly flexible 2D data structures\n used to load and store CSV, CRV, etc... files for \n data exploration and further processing.\n Data frames are so powerful because\n their indices are labeled and therefore human readable.\n Neureka's tensors are general purpose data containers\n which may also stored data in 2 dimensions whose\n indices may also be something other than integers."},"ut.introductions.Tensor_NDArray_Spec":{"executedFeatures":["Tensor is a subtype of NdArray.","We can use tensors for numeric calculations (but not nd-arrays)."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":2,"totalFeatures":2,"passed":2,"successRate":1.0,"time":3},"title":"Tensors or Nd-arrays","narrative":"*What is the difference?*\n\nIn the world of machine learning we use something called a **'tensor'** to represent data.\nThey might be called **'nd-arrays'** in some other frameworks,\nbut although they are very similar, \nthere are also some important distinctions to be made between these two concepts.\nBoth are at their core merely multidimensional arrays, however,\nthey are different in their typical usage and API.\nnd-arrays are merely used to represent any type of data as a \ncollection of elements in a multidimensional grid, \ntensors on the other hand have additional requirements.\nThey are a type of nd-array which stores numeric data \nas well as expose various mathematical operations for said data.\nIn that sense it is actually merely a more complex kind of number.\nThis concept actually comes from the field of physics, \nwhere it is used to represent a physical quantity.\n\nNeureka models both concepts through the `Tensor` and the `Nda` interfaces.\n`Nda` is an abbreviation of `NdArray`, and `Tensor` is an abbreviation of `Tensor`.\nThe `Tensor` type is a subtype of the `Nda` type, exposing additional methods\nlike for example `plus`, `minus`, `times` and `divide`.\nBoth can be instantiated through static factory methods (and a fluent builder API)."},"ut.math.BackendContext_Spec":{"executedFeatures":["BackendContext instances can be created by cloning from Singleton instance.","BackendContext instances return Runner instances for easy visiting with return values.","BackendContext instances return Runner instances for easy visiting."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":3,"totalFeatures":3,"passed":3,"successRate":1.0,"time":6},"title":"The BackendContext is a cloneable context which can run Tasks.","narrative":"This specification defines the expected behaviour of the backend context\n which should expose a convenient API 
to work with.\n This API should allow for tasks to be running on a given context\n which is important for testing and modularity not only\n during library startup but also throughout the runtime."},"ut.math.ConCat_Spec":{"executedFeatures":["We can concatenate 2 float tensors alongside a specified axis!","We can concatenate 2 string tensors alongside a specified axis!","We can concatenate 2 tensors alongside a specified axis!","We can concatenate and then back-propagate 2 simple float tensors alongside a specified axis!","We can concatenate and then back-propagate 3 simple float tensors alongside a specified axis!"],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":5,"totalFeatures":5,"passed":5,"successRate":1.0,"time":17},"title":"Merging Tensors","narrative":"Tensors can not only be sliced, but also merged.\n This is most easily achieved through the concatenation operation, \n which stacks 2 tensors alongside a specified axis.\n This specification not only covers how you can concatenate tensors,\n but also how this works alongside autograd and non-numeric tensors."},"ut.math.Function_Exception_Spec":{"executedFeatures":["Function throws exception when arity does not match input number.","Function throws exception when not enough inputs provided."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":2,"totalFeatures":2,"passed":2,"successRate":1.0,"time":1},"title":"","narrative":""},"ut.math.Function_Parsing_Spec":{"executedFeatures":["Functions can derive themselves according to the provided index of the input which ought to be derived.","Parsed equations throw expected error messages.","Test parsed equations when building Function instances."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":3,"totalFeatures":3,"passed":3,"successRate":1.0,"time":20},"title":"Parsing Expressions into Functions","narrative":"Neureka uses the 'Function' interface as a representation of a\n nested structure of operations.\n This means that a 'Function' is simply an abstract syntax trees made up of other 'Function' implementations\n which are assembled together by a parser receiving a string expression.\n In this specification we ensure that function expressions will be properly parsed into\n 'Function' implementations."},"ut.math.Function_Scalar_Spec":{"executedFeatures":["Function \"(I[0]+1/I[0])**-I[0]\" instance returns expected scalar result.","Function \"(cos(I[0]*5)/5+I[0])*(1+sin(I[0])/2)\" instance returns expected scalars.","Function \"1/I[0]\" instance returns expected scalar results.","Function \"I[0]+1/I[0]\" instance returns expected scalar results.","Test scalar results of Function \"sumjs((cos(I[j]*5)/5+I[j])*(1+sin(I[j])/2))\" instance.","Test scalar results of various Function instances."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":6,"totalFeatures":6,"passed":6,"successRate":1.0,"time":27},"title":"Functions for Scalars","narrative":"The Function API and it's implementations \n receive and process arrays of scalars as arguments.\n Functions don't have to be used alongside tensors / nd-arrays,\n they can also compute derivatives based on scalar values."},"ut.math.Function_Spec":{"executedFeatures":["Function implementations ensure that internally created tensors are flagged as \"intermediate\" initially!","Function implementations ensure that outputs which are input members are not flagged as \"intermediate\"!","Function implementations will ensure the \"call\" and 
\"invoke\" does not return tensors flagged as \"intermediate\".","The library context exposes a set of useful functions."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":4,"totalFeatures":4,"passed":4,"successRate":1.0,"time":23},"title":"Testing Default Methods on Functions","narrative":"This specification tests the default methods on functions\n through a simple dummy implementation of the Function interface."},"ut.math.Tensor_Function_Spec":{"executedFeatures":["Executed tensors are intermediate tensors.","Reshaping on 3D tensors works by instantiate a Function instance built from a String.","Tensor results of various Function instances return expected results.","The \"DimTrim\" operation works forward as well as backward!","The optimization function for the SGD algorithm produces the expected result","The softmax can be calculated alongside multiple axes.","The softmax can be calculated for a particular axis.","The softmax function can be applied to tensors with more than one dimension.","The tensor API has built-in methods for applying functions.","We can collect a stream into a tensor."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":10,"totalFeatures":10,"passed":10,"successRate":1.0,"time":80},"title":"Applying Functions to Tensors","narrative":"A tensor would be nothing without being able to apply operations on them.\n However, calling operations manually in order to process your\n tensors can be a verbose and error prone task.\n This is where functions come into play.\n Neureka's functions are composed of operations forming an abstract syntax tree.\n Passing tensors to a function will route them trough this tree and apply\n all of the operations on the tensors for you."},"ut.miscellaneous.Weired_NN_Spec":{"executedFeatures":["Dot based feed forward and activation produces expected result."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":1,"totalFeatures":1,"passed":1,"successRate":1.0,"time":19},"title":"","narrative":"This specification is meant less as feature documentation and more as a\n chaos test for weired neural network architectures\n an unusual usages of the Neureka library."},"ut.ndas.Nda_Assign_Spec":{"executedFeatures":["Assignment can be easily achieved through subscription operators.","We can assign one slice into another one.","We can use the \"mut\" API to assign the contents of one nd-array into another one."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":3,"totalFeatures":3,"passed":3,"successRate":1.0,"time":2},"title":"Nda Inline Assignment","narrative":"In this specification we cover the behaviour of nda's with respect to the assignment operation\n as well as the assignment of individual Nda items."},"ut.ndas.Nda_Framing_Spec":{"executedFeatures":["An Nda can be labeled.","Concatenating 2 labeled nd-arrays will produce a nd-array which is also labeled.","The slice of a labeled vector is labeled too.","We can label the columns and rows of a rank 3 nd-array.","We can label the columns of a rank 2 nd-array.","We can use labels as selectors for slicing."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":6,"totalFeatures":6,"passed":6,"successRate":1.0,"time":15},"title":"Nda framing","narrative":"Immutability is a core concept of the Neureka library.\n This means that the Nda API does not expose mutability directly.\n Instead, the API exposes methods that return new instances of Nda\n that are derived from the 
original instance.\n \n This is also true for labeling operations, \n meaning that the Nda API does not directly expose methods that mutate labels of an Nda\n but instead provides methods that return new instances of Nda\n with different labels.\n \n Don't be concerned about the performance implications of this,\n because in the vast majority of cases the new instance will be backed by the same data array\n as the original instance!"},"ut.ndas.Nda_Inplace_Framing_Spec":{"executedFeatures":["Concatenating 2 labeled nd-arrays will produce a nd-array which is also labeled.","The slice of a labeled vector is labeled too.","We can concatenate more than 2 nd-arrays.","We can label the columns and rows of a rank 3 nd-array.","We can label the columns of a rank 2 nd-array.","We can use labels as selectors for slicing."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":6,"totalFeatures":6,"passed":6,"successRate":1.0,"time":5},"title":"NDA Framing","narrative":"Framing an nd-array is all about naming its axes and then using those names to\n access, read or write its values in a more convenient and human readable way."},"ut.ndas.Nda_Instantiation_Spec":{"executedFeatures":["A vector can be created from an array of values through the \"of\" method.","Common types of nd-arrays are best instantiated using type specific convenience methods.","ND-arrays can be created fluently."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":3,"totalFeatures":3,"passed":3,"successRate":1.0,"time":4},"title":"ND-Array Instantiation","narrative":"In this specification we cover how ND-arrays can be instantiated."},"ut.ndas.Nda_Items_Spec":{"executedFeatures":["An item can be converted to an Optional object.","Other than the \"orElse(T)\" method of the Optional class, the same method of an Item will throw an exception if the provided value is null.","The \"get\" method of an Item object will throw an exception if the item is missing.","We can check if items of a tensor is present or not.","We can get the value of an item.","We can use the \"orElse(T)\" method to avoid null values."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":6,"totalFeatures":6,"passed":6,"successRate":1.0,"time":4},"title":"The Nds Items API","narrative":"Nd-arrays are collections of items similar to other\n collection types in Java. 
\n One useful way to access the items of an nd-array is\n to use the items API.\n \n Using the `at` methods we can access an `Item` object\n which is a wrapper around the item's value and its\n index in the nd-array.\n \n The `Item` object is a simple data class which\n is very similar to the `Optional` class, meaning\n that it can either be empty or contain a value."},"ut.ndas.Nda_Mutation_Spec":{"executedFeatures":["A ND-Array can be mutated simply using the \"set\" method.","A ND-Array can be mutated using the \"at(..).set(..)\" methods.","A simple vector ND-Array can be mutated using the \"at(..).set(..)\" methods.","A simple vector ND-Array can be mutated using the \"setItemAt\" method.","We can use the subscription operator to mutate a simple vector ND-Array.","We can use the subscription operator to mutate an ND-Array."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":6,"totalFeatures":6,"passed":6,"successRate":1.0,"time":4},"title":"Mutating ND-Arrays","narrative":"ND-Arrays should be considered immutable, so we should prefer creating new \n ND-Arrays from existing ones using wither methods.\n However this is not always a good idea as it can be expensive to create new\n ND-Arrays, especially if the ND-Array is very large.\n The ability to mutate ND-Arrays is therefore provided, but only\n accessible via the mutation API exposed by the `getMut()` method."},"ut.ndas.Nda_Reshape_Spec":{"executedFeatures":["We can create a new Nda instance with a different shape."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":1,"totalFeatures":1,"passed":1,"successRate":1.0,"time":1},"title":"Nda Reshaping","narrative":"Immutability is a core concept of the Neureka library.\n This means that the Nda API does not expose mutability directly.\n Instead, the API exposes methods that return new instances of Nda\n that are derived from the original instance.\n \n This is also true for reshaping operations, \n meaning that the Nda API does not expose methods that mutate the shape of an Nda\n but instead provides methods that return new instances of Nda\n with a different shape.\n \n Don't be concerned about the performance implications of this,\n because in the vast majority of cases the new instance will be backed by the same data array\n as the original instance!"},"ut.ndim.NDConfiguration_Spec":{"executedFeatures":["Various NDConfigurations behave exactly like their general purpose implementation."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":1,"totalFeatures":1,"passed":1,"successRate":1.0,"time":24},"title":"Making Arrays N-Dimensional","narrative":"Under the hood Neureka implements powerful indexing \n abstractions through the `NDConfiguration` interface and its various implementations.\n This allows for the creation of tensors/nd-arrays with arbitrary dimensions, \n the ability to slice them into smaller tensors/nd-arrays with the same underlying data,\n and finally the ability to permute their axes (like transposing them for example).\n \n This specification however only focuses on the behaviour of the `NDConfiguration` interface\n which translates various types of indices."},"ut.ndim.Nda_Permute_Spec":{"executedFeatures":["We can use the \"permute\" method to rearrange the dimensions of an nd-array.","We can use the \"transpose\" method to transpose swap 2 
dimensions."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":2,"totalFeatures":2,"passed":2,"successRate":1.0,"time":3},"title":"Reshaping Nd-Arrays","narrative":"Permuting an N-dimensional array means rearranging the dimensions/axes of the N-dimensional array.\n It returns a new nd-array with the same data as the original nd-array, \n but with the specified dimensions rearranged. \n It is very useful for example when you want to\n change the order of dimensions, for example, if you have a nd-array with dimensions (batch_size, channels, height, width), \n you can use permute() to rearrange the dimensions to (batch_size, height, width, channels).\n Another useful application of permute() is transposing a matrix.\n For example, if you have a matrix with dimensions (rows, columns), \n you can use permute() to rearrange the dimensions to (columns, rows).\n \n Permuting is a very cheap operation because it does not copy any data but merely\n creates a new view on the same data with a different access pattern."},"ut.ndim.Shape_Spec":{"executedFeatures":["A shape can be created from a list of integers.","A shape can be created from a stream of ints.","A shape can be created from an iterable.","A shape can be mapped to a new shape.","A shape can be sliced.","Use the \"any\" or \"every\" method to check if a predicate holds for any or every value of the shape.","You can use the \"count(Predicate)\" method to count the number of values that satisfy a predicate."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":7,"totalFeatures":7,"passed":7,"successRate":1.0,"time":3},"title":"The Shape Tuple","narrative":"The `Shape` of an nd-array/tensor is in essence merely an immutable tuple of integers\n which define the size of each dimension of the tensor.\n So if you think of an nd-array as a grid of numbers, then the shape of the\n tensor is the size of the grid in each dimension.\n \n This specifications shows you how to create a shape and how to use it."},"ut.ndim.Tensor_NDConfiguration_Spec":{"executedFeatures":["NDConfiguration instances of tensors have expected state and behaviour.","NDConfiguration instances of tensors have expected state."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":2,"totalFeatures":2,"passed":2,"successRate":1.0,"time":3},"title":"What it means to be N-Dimensional","narrative":"This specification covers how implementations\n of the `NDConfiguration` interface manage to define\n what it means to be a n-dimensional tensor/nd-array."},"ut.ndim.Tensor_Permute_Spec":{"executedFeatures":["We can use the \"permute\" method to rearrange the dimensions of a tensor.","When matrices are transpose, they will change their layout type as expected."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":2,"totalFeatures":2,"passed":2,"successRate":1.0,"time":3},"title":"Reshaping Tensors","narrative":"Permuting an N-dimensional array means rearranging the dimensions/axes of the N-dimensional array.\n It produces a new tensor with the same data as the original tensor, \n but with the specified dimensions rearranged. 
\n \n This is very useful for example when you want to\n change the order of dimensions, for example, if you have a tensor with dimensions (batch_size, channels, height, width), \n you can use permute() to rearrange the dimensions to (batch_size, height, width, channels).\n Another useful application of permute() is transposing a matrix.\n For example, if you have a matrix with dimensions (rows, columns), \n you can use permute() to rearrange the dimensions to (columns, rows).\n \n Permuting is a very cheap operation because it does not copy any data but merely\n creates a new view on the same data with a different access pattern."},"ut.ndim.Tensor_Slice_Permute_Spec":{"executedFeatures":["A slice of a tensor changes as expected when reshaping it.","Reshaping a slice works as expected.","Two slices of one big tensor perform matrix multiplication flawless."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":3,"totalFeatures":3,"passed":3,"successRate":1.0,"time":5},"title":"Permuting Slices of Tensors","narrative":"Neureka provides a convenient way to permuting tensors\n even if they are slices of other tensors sharing the same underlying data.\n This is possible because of the under the hood indexing \n abstractions provided by the `NDConfiguration` interface and its various implementations."},"ut.neureka.Neureka_Spec":{"executedFeatures":["Backend related library objects adhere to the same toString formatting convention!","Every Thread instance has their own Neureka instance.","Neureka class instance has expected behaviour.","Neureka settings class can be locked causing its properties to be immutable.","OpenCL related library objects adhere to the same toString formatting convention!","Various library objects adhere to the same toString formatting convention!"],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":6,"totalFeatures":6,"passed":6,"successRate":1.0,"time":8598},"title":"The Neureka context can be used and configured as expected.","narrative":"This specification covers the behavior of the Neureka class which\n exposes a global API for configuring thread local contexts and library settings.\n The purpose of this is to assert that the API exposed by the Neureka class \n is both thread local and configurable.\n This specification also exists to cover standards for the Neureka library in general."},"ut.optimization.ADAM_Spec":{"executedFeatures":["ADAM optimizes according to expected inputs","Equations \"I[0]*I[1]+(1-I[2])*I[3]\" and \"(1-I[0])*I[1]\" used within ADAM return expected results.","Equations used by ADAM return expected result."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":3,"totalFeatures":3,"passed":3,"successRate":1.0,"time":50},"title":"","narrative":"ADAM is a more powerful alternative to the classical stochastic gradient descent. 
\n It combines the best properties of the AdaGrad and the RMSProp algorithms, which makes \n it especially well suited for sparse gradients and noisy data.\n Adam is the most popular among the adaptive optimizers\n because its adaptive learning rate working so well with sparse datasets."},"ut.optimization.AdaGrad_Spec":{"executedFeatures":["AdaGrad optimizes according to expected inputs"],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":1,"totalFeatures":1,"passed":1,"successRate":1.0,"time":13},"title":"","narrative":""},"ut.optimization.Momentum_Spec":{"executedFeatures":["Momentum optimizes according to expected inputs"],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":1,"totalFeatures":1,"passed":1,"successRate":1.0,"time":7},"title":"","narrative":"Momentum is an extension to the gradient descent optimization \n algorithm that allows the search to build inertia in a direction \n in the search space and overcome the oscillations of noisy \n gradients and coast across flat spots of the search space."},"ut.optimization.RMSProp_Spec":{"executedFeatures":["RMSprop optimizes according to expected inputs"],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":1,"totalFeatures":1,"passed":1,"successRate":1.0,"time":14},"title":"","narrative":"**Root Mean Squared Propagation**, or RMSProp, is an extension of gradient \n descent and the AdaGrad version of gradient descent that uses a \n decaying average of partial gradients in the adaptation of the \n step size for each parameter."},"ut.tensors.Copy_Spec":{"executedFeatures":["A deep copy of a slice tensor is also a deep copy of the underlying data array.","A deep copy of a tensor is also a deep copy of the underlying data array.","A shallow copy of a tensor will be flagged as such.","A shallow copy will share the same underlying data as its original tensor.","We can deep copy various types of tensors."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":5,"totalFeatures":5,"passed":5,"successRate":1.0,"time":13},"title":"To Copy or Not to Copy","narrative":"In this specification we cover the behaviour of tensors with respect to their copy methods.\n There are to main ways to copy a tensor:
        \n 1. .shallowCopy()
        \n 2. .deepCopy()
        \n
        \n The first method creates a new tensor with the same underlying data array as the original tensor.
        \n The second method, on the other hand, creates a new tensor with a new data array.
        \n
        \n The first method is the most efficient, but it is not as safe as the second method.
        \n The second method is the safest, but it is not as efficient.
        \n
        \n Besides these 2 main requirements, there are als some corner cases with respect to\n the components of a tensor (like for example its computation graph) which\n will be covered in this specification as well."},"ut.tensors.DimTrim_Spec":{"executedFeatures":["The \"dimTrim\" operation works on slices too!"],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":1,"totalFeatures":1,"passed":1,"successRate":1.0,"time":2},"title":"The 'dimTrim' Method","narrative":"The 'dimTrim' method is used to remove training and leading dimensions of length 1 from a tensor.\n This is useful when you want to perform operations on tensors of different ranks.\n For example, if you want to perform a dot product on two vectors, you can use the 'dimTrim' method\n to remove the dimension of length 1 from the vector, so that it becomes a scalar.\n This way you can perform the dot product on two scalars."},"ut.tensors.Expression_Based_Tensor_Instantiation_Spec":{"executedFeatures":["A tensor can be created from a function as expression.","We can instantiate tensors from various simple string expressions."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":2,"totalFeatures":2,"passed":2,"successRate":1.0,"time":3},"title":"Expression based Tensor Instantiation","narrative":"This specification defines how a tensor can be instantiated\n using string expressions, which define operations to be executed.\n This form of tensor instantiation is very useful to avoid boilerplate code."},"ut.tensors.Fluent_Tensor_Creation_Spec":{"executedFeatures":["Initialization lambda based tensors can be created fluently.","Range based tensors can be created fluently.","Scalars can be created fluently.","Seed based tensors can be created fluently.","Tensors can be created fluently.","Value based tensors can be created fluently.","Vectors can be created fluently."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":7,"totalFeatures":7,"passed":7,"successRate":1.0,"time":18},"title":"","narrative":""},"ut.tensors.Functional_Nda_Spec":{"executedFeatures":["ND-Array mapping lambdas produce expected nd-arrays.","The \"map\" method is a shorter convenience method for mapping to the same type.","We can analyse the values of a nd-array using various predicate receiving methods","We can collect a stream into a nd-array.","We can find both min and max items in a tensor by providing a comparator.","We can find both min and max items in an ND-array by providing a comparator.","We can initialize an ND-Array using a filler lambda mapping indices to items.","We can use the \"filter\" method as a shortcut for \"stream().filter(..)\".","We can use the \"flatMap\" method as a shortcut for \"stream().flatMap(..)\"."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":9,"totalFeatures":9,"passed":9,"successRate":1.0,"time":273},"title":"Functional ND-Arrays","narrative":"ND-Arrays expose a powerful API for performing operations on them\n in a functional style."},"ut.tensors.Functional_Tensor_Spec":{"executedFeatures":["Tensor mapping lambdas produce expected tensors.","The \"map\" method is a shorter convenience method for mapping to the same type.","We can analyse the values of a tensor using various predicate receiving methods","We can find both min and max items in a tensor by providing a comparator.","We can initialize a tensor using a filler lambda mapping indices to items.","We can use the \"filter\" method as a shortcut for 
\"stream().filter(..)\".","We can use the \"flatMap\" method as a shortcut for \"stream().flatMap(..)\"."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":7,"totalFeatures":7,"passed":7,"successRate":1.0,"time":301},"title":"Functional Tensors","narrative":"Tensors expose a powerful API for performing operations on them\n in a functional style."},"ut.tensors.Reshape_Spec":{"executedFeatures":["The reshape operation supports autograd!","We can create a new tensor with a different shape.","We can use `-1` in the desired shape if we want the axis size to be determined automatically."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":3,"totalFeatures":3,"passed":3,"successRate":1.0,"time":2},"title":"Tensor Reshaping","narrative":"This specification demonstrates how to reshape tensors,\n which means to change the shape of a tensor.\n\n Note that immutability is a core concept of the Neureka library.\n This means that the `Tensor` API does not expose mutability directly.\n Instead, it exposes methods that return new instances of `Tensor`\n that are derived from the original instance.\n \n This is also true for reshaping operations.\n \n Don't be concerned about the performance implications of this,\n because in the vast majority of cases the new instance will be backed by the same data array\n as the original instance!"},"ut.tensors.Tensor_As_Container_Spec":{"executedFeatures":["More tensor operations translate to custom data type \"ComplexNumber\".","Plus operator on String tensors works element-wise.","Tensor operations translate to custom data type \"ComplexNumber\".","We can apply predicates on the values of a tensor."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":4,"totalFeatures":4,"passed":4,"successRate":1.0,"time":9},"title":"Why not have a tensor of words?","narrative":"Technically, tensors are merely fancy ND-arrays with some useful mathematical operations\n applicable to them...\n Therefore, there is no reason why a tensor would not also be able to store\n other kinds of objects besides numbers like strings for example.\n This specification ensures that tensors can hold and index many other things..."},"ut.tensors.Tensor_Assign_Spec":{"executedFeatures":["Assignment can be easily achieved through subscription operators.","We can assign one slice into another one.","We can use the \"mut\" API to assign the contents of one tensor into another one."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":3,"totalFeatures":3,"passed":3,"successRate":1.0,"time":3},"title":"Tensor Inline Assignment","narrative":"In this specification we cover the behaviour of tensors with respect to the assignment operation\n as well as the assignment of individual tensor items."},"ut.tensors.Tensor_Conversion_Spec":{"executedFeatures":["Tensors value type can be changed by calling \"toType(...)\".","We can change the data type of all kinds of tensors.","We turn a tensor into a scalar value or string through the \"as\" operator!"],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":3,"totalFeatures":3,"passed":3,"successRate":1.0,"time":11},"title":"Tensor Type Conversion","narrative":"Here we specify how a tensor can be converted to other data types\n like for example another tensor of a different data type."},"ut.tensors.Tensor_Convolution_Spec":{"executedFeatures":["Autograd works with simple 2D convolution.","Convolution can be performed using non-quadratic 
matrix tensors.","Convolution can be performed using tensors with an additional dimension as batch size.","Convolution with tensors of the same shape is equivalent to a dot product.","Manual convolution produces expected result.","Sime convolution works as expected eith autograd.","Tensors have the correct layout after convolution.","The \"x\" (convolution) operator produces expected results (On the CPU).","Very simple manual convolution produces expected result.","We can perform a convolution operation on a 2D tensor."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":10,"totalFeatures":10,"passed":10,"successRate":1.0,"time":639},"title":"Tensor Convolution","narrative":"This specification shows how convolution can be performed on tensors.\n\n Convolution is a linear operation which is not only important for image processing but also\n a central player in the field of machine learning (especially for computer vision).\n It is used to extract features from images and other typically ~2 dimensional data.\n Other than that it is extremely important in the field of signal processing."},"ut.tensors.Tensor_Device_Spec":{"executedFeatures":["Adding OpenCL device to tensor makes tensor be \"outsourced\" and contain the Device instance as component.","Tensors try to migrate themselves to a device that is being added to them as component.","The device of a tensor can be accessed via the \"device()\" method.","When creating slices of tensors then this should trigger a \"parent - child\" relation noticeable to the device!"],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":4,"totalFeatures":4,"passed":4,"successRate":1.0,"time":4},"title":"Tensors on Devices","narrative":"This unit test specification covers \n the expected behavior of tensors when interacting\n with instances of implementations of the Device interface."},"ut.tensors.Tensor_Dot_Product_Spec":{"executedFeatures":["The \"dot\" method calculates the dot product between vectors.","The \"dot\" operation supports autograd.","The dot operation work even when one tensor is virtual.","The dot operation works for virtual tensors as well.","The dot product operation runs on any device.","The dot product works across different types and devices.","You can slice a Matrix into vectors and then used them for dot products."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":7,"totalFeatures":7,"passed":7,"successRate":1.0,"time":137},"title":"Tensor Dot Products","narrative":"A tensor can also be a simple vector, which is a tensor of rank 1.\n This specification demonstrates how to perform dot products on tensors of rank 1."},"ut.tensors.Tensor_Generics_Spec":{"executedFeatures":["1D tensors can be created from primitive arrays.","Anonymous tensor instance has the default datatype class as defined in Neureka settings.","We can create a tensor of strings."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":3,"totalFeatures":3,"passed":3,"successRate":1.0,"time":4},"title":"Tensors as Generic Containers","narrative":"Tensors do not just store numeric data.\n They can hold anything which can be stuffed into a \"Object[]\" array.\n You could even create a tensor of tensors!"},"ut.tensors.Tensor_Gradient_Spec":{"executedFeatures":["Gradient of tensor is being applies regardless of the tensor requiring gradient or not","Tensors can have gradients but not require them.","Tensors that have gradients but do not require them still print 
them."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":3,"totalFeatures":3,"passed":3,"successRate":1.0,"time":3},"title":"Gradients are Tensors which are Components of other Tensors","narrative":"This specification defines the gradient API on tensors.\n So one ought to be able to check wetter or not a tensor has a gradient attached to it or not.\n In that case one should be able to get this gradient and then work with\n it independently of the original tensor to which it belongs to..."},"ut.tensors.Tensor_IO_Spec":{"executedFeatures":["A tensor produced by the static \"Tensor.newRandom(shape)\" has expected \"random\" value.","Indexing after reshaping works as expected.","Tensor value type can not be changed by passing float or double arrays to it.","Tensor values can be manipulated","The tensor data array can be modified by targeting them with an index.","We can manipulate the underlying data array of a tensor through the mut API.","We can re-populate a tensor of shorts from a single scalar value!","When we try to manipulate the underlying data array of a virtual tensor then it will become actual."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":8,"totalFeatures":8,"passed":8,"successRate":1.0,"time":31},"title":"Reading and Writing Tensor Items","narrative":"Tensors are complicated data structures with a wide range of different possible states.\n They can host elements of different types residing on many kinds of different devices.\n Here we want to read from and write to the state of a tensor."},"ut.tensors.Tensor_Instantiation_Spec":{"executedFeatures":["A matrix tensor can be instantiated using lists for it's shape and values.","A simple 2D vector can be instantiated using lists for it's shape and values.","Passing a seed in the form of a String to a tensor produces pseudo random items.","Scalar tensors can be created via static factory methods","Tensors can be instantiated based on arrays for both shapes and values.","Tensors can be instantiated with String seed.","Vector tensors can be instantiated via factory methods."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":7,"totalFeatures":7,"passed":7,"successRate":1.0,"time":11},"title":"Instantiating Tensors","narrative":"Tensors are complicated data structures with a wide range of different possible states.\n They can host elements of different types residing on many kinds of different devices.\n Here we want to show how a tensor can be instantiated in different ways."},"ut.tensors.Tensor_Interop_Spec":{"executedFeatures":["Not all tensor can be converted to images.","Tensor can be converted to buffered images."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":2,"totalFeatures":2,"passed":2,"successRate":1.0,"time":8},"title":"Tensors play well with other data structures!","narrative":"Tensors should have good interoperability with other JDK data structures like images.\n In this specification we define these interoperability requirements."},"ut.tensors.Tensor_Layout_Spec":{"executedFeatures":["A new transposed version of a given tensor will be returned by the \"T()\" method.","Matrix multiplication works for both column and row major matrices across devices."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":2,"totalFeatures":2,"passed":2,"successRate":1.0,"time":133},"title":"Row or Column Major. 
Why not both?","narrative":"Although Neureka exposes tensors as row major tensors from \n a users point of view, it does in fact support both row major and column major \n based tensor layout under the hood.\n Here we cover how the layout of tensors can be modified\n and we ensure the different tensor types still work as expected...\n (The features in this specification involve mutating tensors, be careful when playing around with this yourself)"},"ut.tensors.Tensor_Operation_Spec":{"executedFeatures":["Activation functions work across types on slices and non sliced tensors.","Auto reshaping and broadcasting works and the result can be back propagated.","New method \"asFunction\" of String added at runtime is callable by groovy and also works.","New operator methods added to \"SDK-types\" at runtime are callable by groovy and also work.","Operators \"+,*,**\" produce expected results with gradients which can be accessed via a \"Ig[0]\" Function instance","Overloaded operation methods on tensors produce expected results when called.","Scalar broadcasting works across devices.","Simple slice addition produces expected result.","The \"dot\" operation reshapes and produces valid \"x\" operation result.","The \"matMul\" operation produces the expected result.","The \"random\" function/operation populates tensors randomly.","The transpose operation exposed by the \"T()\" method, supports autograd.","The values of a randomly populated tensor seems to adhere to a gaussian distribution.","You can do matrix multiplication using 2 transposed matrices.","You can do matrix multiplication using transposed matrices as second operand.","You can do matrix multiplication using transposed matrices."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":16,"totalFeatures":16,"passed":16,"successRate":1.0,"time":5029},"title":"Running Tensors through operations","narrative":"This specification shows how to use the tensor API to run tensors through various operations.\n Operations are triggered either by simply calling methods on tensors or by using \n `Function` objects which are used to define custom operations in the form \n of a syntax tree."},"ut.tensors.Tensor_Slicing_Spec":{"executedFeatures":["A tensor can be sliced by passing ranges in the form of lists (Groovy ranges).","A tensor can be sliced by passing ranges in the form of primitive arrays.","Normal slicing will try to do autograd.","Slicing is also a Function with autograd support!","The \"at\" method and the \"from\" / \"to\" methods can be mixed when slicing a tensor.","The slice builder also supports slicing with custom step sizes.","We can avoid autograd when slicing by using the \"detached\" instead of the \"get\" method.","We can slice a scalar tensor from a larger tensor of rank 4.","When Slicing only one axis using the SliceBuilder API, the other axes will be sliced implicitly."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":9,"totalFeatures":9,"passed":9,"successRate":1.0,"time":13},"title":"Tensors within Tensors","narrative":"ND-Array data structures can be \"sliced\" in the sense\n that one can create a subset view of the underlying data inside a tensor\n through a new tensor instance...\n This can be a tedious and complicated procedure.\n Therefore a tensor should expose a various user friendly API for slicing which\n are also fit for various languages.\n This specification covers these APIs for tensor slicing."},"ut.tensors.Tensor_State_Spec":{"executedFeatures":["A tensor can 
be instantiated from a item type class and nested lists.","Numeric tensors as String can be formatted on an entry based level.","Tensor created from shape and datatype has expected state.","Tensors as String can be formatted depending on shape.","Tensors as String can be formatted on an entry based level.","The data and the value of a tensor a 2 different things!","We can create scalar tensors."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":7,"totalFeatures":7,"passed":7,"successRate":1.0,"time":16},"title":"The Tensor Initialization and State Specification","narrative":"This specification defines the expected states of freshly instantiated\n and initialized tensors.\n After a tensor was created successfully we expect it \n to have certain properties like a shape, rank, type and data array\n among other things."},"ut.tensors.Tensor_Stats_Spec":{"executedFeatures":["A tensor can be summed alongside a specific axis.","Both the min and max operation support autograd (back-propagation).","Multiple dimensions of a tensor can selectively be summed up.","The sum operation support autograd (back-propagation).","There is no need to use a function, we can use the min() and max() methods on tensors instead.","We can get pre-instantiated min and max functions from the library context.","We can use the \"sum\" method to sum the items of a tensor.","We can use the max operation as a function"],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":8,"totalFeatures":8,"passed":8,"successRate":1.0,"time":230},"title":"Reducing Tensors","narrative":"Various kinds of operations reduce tensors to scalars,\n the most common ones being the min and max operations \n which find the smallest as well as largest number among all \n items of a tensor.\n Neureka exposes various different ways to achieve this,\n all of which are also differential (autograd support)."},"ut.tensors.Tensor_Version_Spec":{"executedFeatures":["Inline operations cause illegal state exceptions.","Inline operations causes version incrementation.","Non-inline operations do not cause version incrementation.","Storing a tensor on a device should not change the version of a tensor (Even though its data changed technically)."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":4,"totalFeatures":4,"passed":4,"successRate":1.0,"time":50},"title":"Tensor (Data Array) Version","narrative":"There are two fundamental categories of operations\n which can be applied to tensors : \n Inline operations and Non-Inline operations! \n \n Inline operations are often times problematic because they produce\n side effects by changing passed tensors instead of producing new ones... \n One such bad side effect can easily occur for tensors involved in the\n autograd system, more specifically: the recorded computation graph. \n Inline operations can break the mathematically pureness of the back-propagation\n procedure by for example changing partial derivatives...
        \n In order to prevent said errors from occurring unnoticed tensors\n have versions which will increment when the underlying data of the tensor changes. \n This version will be tracked by the computation graph as well in order to\n match it with the ones stored inside the tensor. \n A mismatch would then yield an exception! \n \n This specification is responsible for defining the behaviour of this\n version number with respect to their wrapping tensors as well as computation graph nodes."},"ut.tensors.exceptions.Tensor_Delete_Exception_Spec":{"executedFeatures":["A deleted tensor will tell you that it has been deleted.","A deleted tensor will throw an exception when accessing its configuration.","A deleted tensor will throw an exception when accessing its data type.","A deleted tensor will throw an exception when accessing its data.","A deleted tensor will throw an exception when modifying its data type.","A deleted tensor will throw an exception when trying to modify its data.","A deleted tensor will throw an exception when trying to set its configuration."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":7,"totalFeatures":7,"passed":7,"successRate":1.0,"time":3},"title":"","narrative":""},"ut.tensors.exceptions.Tensor_Exception_Spec":{"executedFeatures":["Building a tensor with \"null\" as shape argument throws an exception.","Building a tensor with 0 shape arguments throws an exception.","Casting a tensor as something unusual will cuas an exception to be thrown.","Out of dimension bound causes descriptive exception!","Passing an invalid key object into the \"getAt\" method causes a descriptive exception.","Passing an invalid object into Tensor constructor causes descriptive exception.","Passing null to various methods of the tensor API will throw exceptions.","Trying to inject an empty tensor into another causes fitting exception."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":8,"totalFeatures":8,"passed":8,"successRate":1.0,"time":14},"title":"Tensors Exception Behavior","narrative":"This specification covers the behavior of the Tensor class in\n exceptional scenarios which are contrary to its intended use.\n The purpose of this is to assert that the Tensor class will provide\n useful feedback to a user to explain that a misuse of its API\n occurred so that the user can correct this misuse."},"ut.utility.Cleaner_Testing":{"executedFeatures":["The DeviceCleaner triggers registered cleaner actions when things are eligible for GC."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":1,"totalFeatures":1,"passed":1,"successRate":1.0,"time":260},"title":"How Neureka Cleans Up","narrative":"Under the hood \n Neureka deals whith large arrays of\n data, which are often times \n native data arrays requiring explicit\n memory freeing!\n This freeing of memory can happen at any time\n during the livetime of a nd-array, however\n it should happen at least up until the nd-arra/tensor\n objects representing their referenced data arrays become\n eligible for garbage collection.\n This specification ensures that the custom garbage\n cleaner implementation used by Neureka fulfills this role"},"ut.utility.DataConverter_Spec":{"executedFeatures":["An array of any type of object may be converted to a array of primitives.","The DataConverter can convert the given array 
data."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":2,"totalFeatures":2,"passed":2,"successRate":1.0,"time":2},"title":"","narrative":""},"ut.utility.FileHandle_Spec":{"executedFeatures":["Fully labeled tenors will be stored with their labels included when saving them as CSV.","Partially labeled tenors will be stored with their labels included when saving them as CSV.","Test reading IDX file format.","Test writing IDX file format.","The FileDevice component \"CSVHead\" can read CSV file formats and load them as tensors.","We can load image files as tensors."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":6,"totalFeatures":6,"passed":6,"successRate":1.0,"time":536},"title":"","narrative":""},"ut.utility.ListReader_Exception_Spec":{"executedFeatures":["The ListReader will detect inconsistent degrees of nesting in the provided data.","The ListReader will detect inconsistent types in the provided data."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":2,"totalFeatures":2,"passed":2,"successRate":1.0,"time":3},"title":"","narrative":""},"ut.utility.ListReader_Spec":{"executedFeatures":["The ListReader can interpret nested lists into a shape list and value list.","The ListReader can interpret nested lists resembling a 3D tensor into a shape list and value list.","The ListReader can interpret nested lists resembling a matrix into a shape list and value list."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":3,"totalFeatures":3,"passed":3,"successRate":1.0,"time":3},"title":"The Internal ListReader turning lists into flat arrays with shape and type data","narrative":"This specification covers an internal class which should not be used\n outside this library, namely the ListReader class.\n This class is simply a converter which turns nested lists\n into flat arrays alongside the type of the elements and the shape of this \"tensor\"."},"ut.utility.Utility_Spec":{"executedFeatures":["Object arrays can be converted to primitive arrays."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":1,"totalFeatures":1,"passed":1,"successRate":1.0,"time":6},"title":"","narrative":""}} \ No newline at end of file +{"Example_Spec.Example_Spec":{"executedFeatures":["Call me feature not unit test!","I am readable and also best practice!","Numbers to the power of two with a fancy data table!","Should be able to remove from list","iAmNotSoReadable"],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":7,"totalFeatures":5,"passed":7,"successRate":1.0,"time":68},"title":"An Introduction to writing Spock Specifications","narrative":"Hello and welcome to the example / template specification of this project.\n This is a simple introduction as to how to get started writing Spock specifications.\n \n Spock works on top of Groovy which is in essence a syntactic super-set of Java.\n That means that one can write Java code in Groovy, and 99% of the time it will \n work the exact same way."},"it.Calculus_Stress_Test":{"executedFeatures":["Activation functions work across types, on large prime sized 1D slices and non sliced 1D tensors.","Activation functions work across types.","Dot operation stress test runs error free and produces expected result","Stress test runs error free and produces expected result","The broadcast operation stress test runs error free and produces expected 
result"],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":69,"totalFeatures":5,"passed":69,"successRate":1.0,"time":1086},"title":"","narrative":""},"it.Cross_Device_Sliced_Tensor_System_Test":{"executedFeatures":["Cross device sliced tensor integration test runs without errors.","Slices can be created using the SliceBuilder."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":4,"totalFeatures":2,"passed":4,"successRate":1.0,"time":146},"title":"Cross Device Tensor Slicing","narrative":""},"it.Cross_Device_Spec":{"executedFeatures":["A gradient of ones can be set by calling the backward method on a tensor sitting on any device.","Convolution can model matrix multiplications across devices.","Cross device system test runs successfully.","Mapping tensors works for every device (even if they are not used).","Test simple NN implementation with manual backprop"],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":79,"totalFeatures":5,"passed":79,"successRate":1.0,"time":6066},"title":"Cross Device Stress Test Specification","narrative":"This specification is pretty much a system test which covers\n the behavior of the library as a whole across multiple devices!\n No matter which device is being used for a given stress test, the result should be the same..."},"it.Eleven_Lines_NN_System_Spec":{"executedFeatures":["One can write a simple double based neural network in less than 11 lines of java like code using the \"@\" operator!","One can write a simple float based neural network in less than 11 lines of java like code!","One can write a simple neural network in less than 11 lines of code!","One can write a simple neural network with custom back-prop in 11 lines of code!","The pseudo random number generator works as expected for the weights used in the 11 line NN examples!"],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":5,"totalFeatures":5,"passed":5,"successRate":1.0,"time":491},"title":"NN Code Golfing!","narrative":"This system test specification uses the following Numpy\n code as reference implementation for the equivalent in Neureka\n or similar implementations and variations.\n The code below is a simple neural network in only 11 lines of code.\n\n \u00b4\u00b4\u00b4\n X = np.array([ [0,0,1],[0,1,1],[1,0,1],[1,1,1] ])\n y = np.array([[0,1,1,0]]).T\n W1 = 2*np.random.random((3,4)) - 1\n W2 = 2*np.random.random((4,1)) - 1\n for j in xrange(60000):\n l1 = 1/(1+np.exp(-(np.dot(X,W1))))\n l2 = 1/(1+np.exp(-(np.dot(l1,W2))))\n l2_delta = (y - l2)*(l2*(1-l2))\n l1_delta = l2_delta.dot(W2.T) * (l1 * (1-l1))\n W2 += l1.T.dot(l2_delta)\n W1 += X.T.dot(l1_delta)\n \u00b4\u00b4\u00b4"},"st.Benchmark_System_Test":{"executedFeatures":["Tensor can be constructed by passing List instances.","Test benchmark script and simple tensor constructor."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":2,"totalFeatures":2,"passed":2,"successRate":1.0,"time":1900},"title":"","narrative":""},"st.Broad_System_Test":{"executedFeatures":["A function with expression \"(-3*(2*(i0*-1)))*(-1*i0)\" can be backpropagated.","A function with expression \"softplus((I[0]xI[1])*-100)\" can be backpropagated.","A function with expression \"softplus(tanh(I[0]*I[1]*2)*I[1])\" can be backpropagated.","The long broad integration test runs 
successfully."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":4,"totalFeatures":4,"passed":4,"successRate":1.0,"time":83},"title":"","narrative":""},"st.NN_Concepts_Spec":{"executedFeatures":["The attention mechanism (found in the commonly known transformer) demonstrated."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":1,"totalFeatures":1,"passed":1,"successRate":1.0,"time":201},"title":"Examining Neural Network Architecture Snippets","narrative":"This specification is intended to showcase some basic building blocks of \n various neural network architectures."},"st.Training_NNs_Spec":{"executedFeatures":["A simple 3 layer neural network converges.","A very simple 1 layer NN converges.","We can run the attention head test model."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":5,"totalFeatures":3,"passed":5,"successRate":1.0,"time":12622},"title":"Training a Neural Network Class","narrative":"When designing larger neural network architectures, what you would usually do is\n to create a class that represents the whole model (which itself might be composed\n of smaller models). \n \n This class would then represent something that can be executed and then trained.\n This Specification shows how to instantiate, execute and train various \n pre-defined example neural network models."},"ut.autograd.AD_And_Computation_Graph_Spec":{"executedFeatures":["Payloads and derivatives are null after garbage collection.","Reshaping produces expected computation graph and also works with reverse mode AD."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":2,"totalFeatures":2,"passed":2,"successRate":1.0,"time":1288},"title":"","narrative":""},"ut.autograd.Autograd_Explained":{"executedFeatures":["Simple automatic differentiation and propagation."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":1,"totalFeatures":1,"passed":1,"successRate":1.0,"time":11},"title":"Autograd - Automatic Differentiation","narrative":"Central to all neural networks in Neureka is the autograd package. \n The autograd package provides automatic differentiation for all default operations on Tensors. \n Neureka is a define-by-run library, which means that your backpropagation is defined by how \n your code is run, and that every single iteration can be different. \n \n The class neureka.Tensor is the central class of the main package. \n If you set its attribute 'rqsGradient' to True, Neureka starts to track all operations on it. \n When you finish the forward pass of your network \n you can call .backward() and have all the gradients computed \n and distributed to the tensors requiring them automatically. \n \n The gradient for a tensor will be accumulated into a child tensor (component) which \n can be accessed via the '.getGradient()' method. 
\n \n To stop a tensor from tracking history, you can call '.detach()' to detach it from the \n computation history, and to prevent future computation from being tracked."},"ut.autograd.Autograd_Flags_Explained":{"executedFeatures":["Advanced backpropagation on all AD-Modes ","We can create a shallow copy of a tensor detached from the computation graph."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":9,"totalFeatures":2,"passed":9,"successRate":1.0,"time":149},"title":"","narrative":""},"ut.autograd.Autograd_NN_Spec":{"executedFeatures":["Autograd work for simple matrix multiplications.","Autograd works for 2 matrix multiplications in a row.","Autograd works in a simple convolutional dot product and float based feed forward neural network.","Autograd works in a simple convolutional dot product based feed forward neural network.","Autograd works in a simple mat-mul based feed forward neural network."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":7,"totalFeatures":5,"passed":7,"successRate":1.0,"time":148},"title":"Simple Neural Network autograd integration test","narrative":"The integration test below has been implemented by using\n the following code and the result it produces as reference : \n https://medium.com/dair-ai/a-simple-neural-network-from-scratch-with-pytorch-and-google-colab-c7f3830618e0 \n \n The following seed has been used to assure reproducibility :\n 'torch.manual_seed(503672689411)'"},"ut.autograd.Autograd_Tensor_Spec":{"executedFeatures":["A tensor used as derivative within a computation graph will throw exception when trying to deleting it.","Second-Test \"x-mul\" autograd behaviour. (Not on device)","Test basic autograd behaviour. (Not on device)"],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":4,"totalFeatures":3,"passed":4,"successRate":1.0,"time":52},"title":"","narrative":""},"ut.autograd.JITProp_Autograd_Tensor_Spec":{"executedFeatures":["Gradient auto-apply kicks in when used AD uses JIT prop","Test JIT propagation variant one.","Test JIT propagation variant two.","Test autograd without JIT and auto apply.","Test in-differential and JIT with auto apply","Test no JIT prop when forward AD","Test no preemptive gradient apply when not requested and auto apply and JIT_prop","Test pending error optimization"],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":8,"totalFeatures":8,"passed":8,"successRate":1.0,"time":638},"title":"","narrative":""},"ut.autograd.internal.GraphNode_Instantiation_Exception_Unit_Tests":{"executedFeatures":["GraphNode instantiation throws exception because tensors of ExecutionCall do not return GraphNode instances.","GraphNode throws an exception when trying to execute an inline operation on inputs with active autograd."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":2,"totalFeatures":2,"passed":2,"successRate":1.0,"time":189},"title":"","narrative":""},"ut.autograd.internal.GraphNode_Tensor_Exception_Unit_Tests":{"executedFeatures":["A tensor cannot be deleted if it is part of a graph and the tensor is used as derivative."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":1,"totalFeatures":1,"passed":1,"successRate":1.0,"time":8},"title":"","narrative":""},"ut.backend.Backend_Extension_Spec":{"executedFeatures":["Mock operation interacts with FunctionNode (AbstractFunction) instance as 
expected."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":1,"totalFeatures":1,"passed":1,"successRate":1.0,"time":71},"title":"","narrative":""},"ut.backend.Backend_MatMul_Extension_Spec":{"executedFeatures":["GEMM matrix multiplication reference implementation can be set as custom OperationType and works as expected.","Test context mock for opencl reference implementations.","Tile parsing for kernel parameter calculation yields expected tile dimensions."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":17,"totalFeatures":3,"passed":17,"successRate":1.0,"time":83},"title":"","narrative":""},"ut.backend.Matrix_Multiplication_Spec":{"executedFeatures":["The \"matMul\" method allows us to perform matrix multiplication.","The simple CPU matrix multiplication implementation works as expected."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":16,"totalFeatures":2,"passed":16,"successRate":1.0,"time":63},"title":"Matrix Multiplication","narrative":"The tensor API exposes a useful method for Matrix Multiplication.\n This specification not only demonstrates how to use this method\n but also shows how matrix multiplication work \n for tensors with both row and column major layouts.\n (typically, column major is faster)"},"ut.backend.core.Backend_Algorithm_AD_Spec":{"executedFeatures":["Activation implementations behave as expected.","Broadcast implementations have expected properties.","Convolution implementations behave as expected.","Operator implementations behave as expected."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":37,"totalFeatures":4,"passed":37,"successRate":1.0,"time":91},"title":"","narrative":""},"ut.backend.core.Backend_Algorithm_Implementation_Spec":{"executedFeatures":["Activation implementations have expected Executor instances.","HostExecutors of Operator implementations behave as expected.","Operator implementations have expected Executor instances."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":35,"totalFeatures":3,"passed":35,"successRate":1.0,"time":97},"title":"","narrative":""},"ut.backend.core.Backend_Functional_Algorithm_Spec":{"executedFeatures":["A functional algorithm cannot be used if it was not built properly!","A functional algorithm does not accept null as an answer!","A functional algorithm warns us when modified after it has been built!"],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":7,"totalFeatures":3,"passed":7,"successRate":1.0,"time":19},"title":"","narrative":""},"ut.backend.core.Matrix_Multiplication_Spec":{"executedFeatures":["The CPU matrix multiplication implementation works as expected.","The internal matrix multiplication test script runs!"],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":13,"totalFeatures":2,"passed":13,"successRate":1.0,"time":10522},"title":"Internal CPU based Matrix Multiplication","narrative":"This specification covers library internal matrix multiplication logic,\n specifically the CPU implementation.\n Do not depend on the API used in this specification as it is subject to change!"},"ut.backend.core.OpenCL_Backend_Spec":{"executedFeatures":["The OpenCL backend context can load 
implementations."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":1,"totalFeatures":1,"passed":1,"successRate":1.0,"time":21},"title":"","narrative":""},"ut.backend.core.Randomization_Spec":{"executedFeatures":["Randomization is in essence the same algorithm as JDKs \"Random\".","The Randomization class can fill various types of arrays with pseudo random numbers.","We can make slices of tensors random."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":10,"totalFeatures":3,"passed":10,"successRate":1.0,"time":39},"title":"","narrative":""},"ut.device.CPU_Spec":{"executedFeatures":["CPU knows the current number of available processor cores!","The CPU device will keep track of the amount of tensors it stores.","The CPU exposes a non null API for executing workloads in parallel.","Thread pool executes given workload in parallel"],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":4,"totalFeatures":4,"passed":4,"successRate":1.0,"time":5707},"title":"The CPU device, an API for CPU based execution","narrative":"The CPU class, one of many implementations of the Device interface, \n is simply supposed to be an API for dispatching threaded workloads onto the CPU.\n Contrary to other types of device, the CPU will host tensor data by default, simply\n because the tensors will be stored in RAM if no device was specified."},"ut.device.Cross_Device_IO_Spec":{"executedFeatures":["We can use the access device API to read from a tensor.","We can use the access device API to write to a tensor"],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":15,"totalFeatures":2,"passed":15,"successRate":1.0,"time":72},"title":"Devices manage the states of the tensors they store!","narrative":"Tensors should not manage their states\n themselves, simply because the type and location\n of the data is dependent on the device onto which they are stored.\n This specification tests of various device implementations\n enable reading to or writing from the tensors they store."},"ut.device.Cross_Device_Type_Spec":{"executedFeatures":["A device will keep track of the amount of tensors and data objects it stores.","Devices expose an API for accessing (reading and writing) the data of a tensor.","Devices store slices which can also be restored just like any other tensor.","Devices store tensors which can also be restored.","Execution calls containing null arguments will cause an exception to be thrown in device instances.","In total there are 3 different types of methods for finding device instances.","Passing a numeric array to a tensor should modify its contents!","Virtual tensors stay virtual when outsourced.","We can find Device implementations or null by passing search keys to the \"get\" method.","We can query the backend for devices by specifying both the requested type and a key word."],"ignoredFeatures":["Devices cannot store slices whose parents are not already stored."],"stats":{"failures":0,"errors":0,"skipped":1,"totalRuns":53,"totalFeatures":11,"passed":53,"successRate":1.0,"time":6152},"title":"Finding Device Types","narrative":"Neureka introduces a the concept of a `Device` which is an interface\n that represents a computational device used for executing tensor / nd-array operations on them.\n The `Device` interface is implemented by various classes which represent\n different types of accelerator hardware such as `CPUs`, `GPUs`, `TPUs`, `FPGAs`, etc.\n These various `Device` types can not be 
instantiated directly because they model \n the concrete and finite hardware that is available on any given system Neureka is running on.\n This means that they are usually instantiated lazily upon access request or \n upfront by the library backend (usually a backend extension built fo a specific device).\n In order to find these instances embedded in the library backend the `Device` interface\n exposes various static methods which can be used to find a device instance by name or type."},"ut.device.FileDevice_Spec":{"executedFeatures":["A file device stores tensors in idx files by default.","A file device stores tensors in various file formats.","A tensor loaded from a file device can be loaded again.","The file device can load known files in a directory."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":12,"totalFeatures":4,"passed":12,"successRate":1.0,"time":161},"title":"FileDevice, Storing Tensors in Files","narrative":"The `FileDevice` class, one of many implementations of the `Device` interface, \n represents a file directory which can store and load tensors as files (`idx`, `jpg`, `png`...)."},"ut.device.OpenCLDevice_Exception_Spec":{"executedFeatures":[],"ignoredFeatures":["Ad hoc compilation produces expected exceptions when duplication is found.","Ad hoc compilation produces expected exceptions.","An OpenCLDevice will throw an exception when trying to add a tensor whose \"data parent\" is not outsourced.","Trying to restore a tensor which is not on a device raises exception."],"stats":{"failures":0,"errors":0,"skipped":4,"totalRuns":0,"totalFeatures":4,"passed":0,"successRate":1.0,"time":6},"title":"OpenCLDevice Exception Handling","narrative":"The OpenCLDevice class, one of many implementations of the Device interface, \n represents physical OpenCL devices.\n This specification defines how instances of this class deal with exceptional information."},"ut.device.OpenCLDevice_Spec":{"executedFeatures":[],"ignoredFeatures":["Ad hoc compilation produces executable kernel.","Ad hoc compilation works for WIP general purpose matrix multiplication.","Ad hoc compilation works for custom column major based tiled matrix multiplication.","Ad hoc compilation works for custom simple row major based matrix multiplication.","Ad hoc matrix multiplication works for multiple of 16 matrices.","An OpenCLDevice loads tensors in a provided lambda temporarily.","We can get the items of an outsourced tensor as a primitive array.","We can take a look at the underlying data array of an outsourced tensor through the unsafe API."],"stats":{"failures":0,"errors":0,"skipped":8,"totalRuns":0,"totalFeatures":8,"passed":0,"successRate":1.0,"time":6},"title":"The OpenCLDevice Specification","narrative":"Tensors need devices for execution!\n By default tensors use the `CPU` device, but sometimes we want to\n use something more suitable for large amounts of data and a high degree of parallelization.\n This is were the `OpenCLDevice` comes into play!\n It is a `Device` implementation built on top of the JOCL library, a thin OpenCL API.\n We expect the `OpenCLDevice` to store tensors as well as being able to read and write\n data from and to stored tensors.\n Also, an `OpenCLDevice` should allows us to compile OpenCL kernel code on the fly..."},"ut.device.OpenCL_Spec":{"executedFeatures":[],"ignoredFeatures":["A given OpenCL context can be disposed!","An OpenCLDevice will throw an exception when trying to add a tensor whose \"data parent\" is not outsourced.","First found OpenCLDevice 
will have realistic numeric properties.","First found OpenCLDevice will have realistic properties inside summary query.","First found OpenCLDevice will have realistic text properties."],"stats":{"failures":0,"errors":0,"skipped":5,"totalRuns":0,"totalFeatures":5,"passed":0,"successRate":1.0,"time":1},"title":"Working with OpenCL","narrative":"Neureka models the OpenCL API through various types of classes.\n The most fundamental of these is the `OpenCLDevice` class which\n represents a single device with OpenCL support.\n Besides that, there is also the `OpenCLContext` class which\n represents a OpenCL contexts, platforms and multiple devices on said platforms..."},"ut.device.internal.CLFunctionCompiler_Spec":{"executedFeatures":["The CLFunctionCompiler produces an operation which properly integrates to the backend.","The CLFunctionCompiler produces the expected \"ad hoc\" kernel."],"ignoredFeatures":["The OpenCLDevice produces a working optimized Function for doubles.","The OpenCLDevice produces a working optimized Function for floats."],"stats":{"failures":0,"errors":0,"skipped":2,"totalRuns":2,"totalFeatures":4,"passed":2,"successRate":1.0,"time":206},"title":"Turning functions into kernels.","narrative":"Neureka parses mathematical expressions into an AST representation\n hidden behind the Function interface...\n This feature does not exist without reason, we can use\n this abstract syntax tree to compile to OpenCL kernels\n for optimal execution speed!"},"ut.device.internal.CPU_Kernel_Spec":{"executedFeatures":["The Reduce implementation for the CPU has realistic behaviour","The Sum implementation for the CPU has realistic behaviour"],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":20,"totalFeatures":2,"passed":20,"successRate":1.0,"time":83},"title":"","narrative":""},"ut.device.internal.OpenCL_Data_Spec":{"executedFeatures":["The \"Data\" class can represent various OpenCL data types.","The OpenCLDevice specific Data class represents JVM data for OpenCL."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":39,"totalFeatures":2,"passed":39,"successRate":1.0,"time":89},"title":"","narrative":""},"ut.device.internal.OpenCL_Kernel_Unit_Spec":{"executedFeatures":["The CLDot implementation for the OpenCLDevice has realistic behaviour","The GEMM implementation for the OpenCLDevice has realistic behaviour","The Reduce implementation for the OpenCLDevice has realistic behaviour","The Sum implementation for the OpenCLDevice has realistic behaviour","The Sum implementation for the OpenCLDevice has realistic behaviour for when the number of elements is a prime."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":6,"totalFeatures":5,"passed":6,"successRate":1.0,"time":55},"title":"","narrative":""},"ut.dtype.DataType_Spec":{"executedFeatures":["DataType multi-ton instances behave as expected."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":10,"totalFeatures":1,"passed":10,"successRate":1.0,"time":10},"title":"","narrative":""},"ut.dtype.NumericType_Spec":{"executedFeatures":["Conversion goes both ways and produces expected numeric values.","NumericType conversion to holder types yields expected results.","NumericType implementations behave as expected.","NumericType implementations return their expected properties."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":58,"totalFeatures":4,"passed":58,"successRate":1.0,"time":61},"title":"The NumericType 
and its implementations model their respective numeric data types.","narrative":"This specification covers the behavior of the NumericType interface\n which is responsible for modelling numeric data types which may or may not be native to the JVM. \n These implementations however do not model them in the traditional OO style\n but merely expose useful utility methods for converting and representing \n these numeric data types using JVM types."},"ut.framing.Tensor_Framing_Spec":{"executedFeatures":["A matrix (rank 2 tensor) can be labeled and their labels can be used to extract slices / subsets.","A tensor can be labeled partially.","Rank 3 tensors can be labeled and their labels can be used to extract slices / subsets of tensors.","We can add labels to tensors through lists or maps passed to the \"label()\" method."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":4,"totalFeatures":4,"passed":4,"successRate":1.0,"time":35},"title":"Naming Tensors and their Dimensions.","narrative":"A powerful concept in the data science as well as machine learning\n world is something usually referred to as \"Data Frames\".\n These are highly flexible 2D data structures\n used to load and store CSV, CRV, etc... files for \n data exploration and further processing.\n Data frames are so powerful because\n their indices are labeled and therefore human readable.\n Neureka's tensors are general purpose data containers\n which may also store data in 2 dimensions whose\n indices may also be something other than integers."},"ut.introductions.Tensor_NDArray_Spec":{"executedFeatures":["Tensor is a subtype of NdArray.","We can use tensors for numeric calculations (but not nd-arrays)."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":2,"totalFeatures":2,"passed":2,"successRate":1.0,"time":9},"title":"Tensors or Nd-arrays","narrative":"*What is the difference?*\n\nIn the world of machine learning we use something called a **'tensor'** to represent data.\nThey might be called **'nd-arrays'** in some other frameworks,\nbut although they are very similar, \nthere are also some important distinctions to be made between these two concepts.\nBoth are at their core merely multidimensional arrays, however,\nthey are different in their typical usage and API.\nnd-arrays are merely used to represent any type of data as a \ncollection of elements in a multidimensional grid, \ntensors on the other hand have additional requirements.\nThey are a type of nd-array which stores numeric data \nas well as exposes various mathematical operations for said data.\nIn that sense it is actually merely a more complex kind of number.\nThis concept actually comes from the field of physics, \nwhere it is used to represent a physical quantity.\n\nNeureka models both concepts through the `Tensor` and the `Nda` interfaces.\n`Nda` is an abbreviation of `NdArray`, and `Tensor` is an abbreviation of `Tensor`.\nThe `Tensor` type is a subtype of the `Nda` type, exposing additional methods\nlike for example `plus`, `minus`, `times` and `divide`.\nBoth can be instantiated through static factory methods (and a fluent builder API)."},"ut.math.BackendContext_Spec":{"executedFeatures":["BackendContext instances can be created by cloning from Singleton instance.","BackendContext instances return Runner instances for easy visiting with return values.","BackendContext instances return Runner instances for easy 
visiting."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":5,"totalFeatures":3,"passed":5,"successRate":1.0,"time":13},"title":"The BackendContext is a cloneable context which can run Tasks.","narrative":"This specification defines the expected behaviour of the backend context\n which should expose a convenient API to work with.\n This API should allow for tasks to be running on a given context\n which is important for testing and modularity not only\n during library startup but also throughout the runtime."},"ut.math.ConCat_Spec":{"executedFeatures":["We can concatenate 2 float tensors alongside a specified axis!","We can concatenate 2 string tensors alongside a specified axis!","We can concatenate 2 tensors alongside a specified axis!","We can concatenate and then back-propagate 2 simple float tensors alongside a specified axis!","We can concatenate and then back-propagate 3 simple float tensors alongside a specified axis!"],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":7,"totalFeatures":5,"passed":7,"successRate":1.0,"time":60},"title":"Merging Tensors","narrative":"Tensors can not only be sliced, but also merged.\n This is most easily achieved through the concatenation operation, \n which stacks 2 tensors alongside a specified axis.\n This specification not only covers how you can concatenate tensors,\n but also how this works alongside autograd and non-numeric tensors."},"ut.math.Function_Exception_Spec":{"executedFeatures":["Function throws exception when arity does not match input number.","Function throws exception when not enough inputs provided."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":2,"totalFeatures":2,"passed":2,"successRate":1.0,"time":6},"title":"","narrative":""},"ut.math.Function_Parsing_Spec":{"executedFeatures":["Functions can derive themselves according to the provided index of the input which ought to be derived.","Parsed equations throw expected error messages.","Test parsed equations when building Function instances."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":37,"totalFeatures":3,"passed":37,"successRate":1.0,"time":39},"title":"Parsing Expressions into Functions","narrative":"Neureka uses the 'Function' interface as a representation of a\n nested structure of operations.\n This means that a 'Function' is simply an abstract syntax trees made up of other 'Function' implementations\n which are assembled together by a parser receiving a string expression.\n In this specification we ensure that function expressions will be properly parsed into\n 'Function' implementations."},"ut.math.Function_Scalar_Spec":{"executedFeatures":["Function \"(I[0]+1/I[0])**-I[0]\" instance returns expected scalar result.","Function \"(cos(I[0]*5)/5+I[0])*(1+sin(I[0])/2)\" instance returns expected scalars.","Function \"1/I[0]\" instance returns expected scalar results.","Function \"I[0]+1/I[0]\" instance returns expected scalar results.","Test scalar results of Function \"sumjs((cos(I[j]*5)/5+I[j])*(1+sin(I[j])/2))\" instance.","Test scalar results of various Function instances."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":66,"totalFeatures":6,"passed":66,"successRate":1.0,"time":42},"title":"Functions for Scalars","narrative":"The Function API and it's implementations \n receive and process arrays of scalars as arguments.\n Functions don't have to be used alongside tensors / nd-arrays,\n they can also compute 
derivatives based on scalar values."},"ut.math.Function_Spec":{"executedFeatures":["Function implementations ensure that internally created tensors are flagged as \"intermediate\" initially!","Function implementations ensure that outputs which are input members are not flagged as \"intermediate\"!","Function implementations will ensure the \"call\" and \"invoke\" does not return tensors flagged as \"intermediate\".","The library context exposes a set of useful functions."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":40,"totalFeatures":4,"passed":40,"successRate":1.0,"time":46},"title":"Testing Default Methods on Functions","narrative":"This specification tests the default methods on functions\n through a simple dummy implementation of the Function interface."},"ut.math.Tensor_Function_Spec":{"executedFeatures":["Executed tensors are intermediate tensors.","Reshaping on 3D tensors works by instantiate a Function instance built from a String.","Tensor results of various Function instances return expected results.","The \"DimTrim\" operation works forward as well as backward!","The optimization function for the SGD algorithm produces the expected result","The softmax can be calculated alongside multiple axes.","The softmax can be calculated for a particular axis.","The softmax function can be applied to tensors with more than one dimension.","The tensor API has built-in methods for applying functions.","We can collect a stream into a tensor."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":52,"totalFeatures":10,"passed":52,"successRate":1.0,"time":179},"title":"Applying Functions to Tensors","narrative":"A tensor would be nothing without being able to apply operations on it.\n However, calling operations manually in order to process your\n tensors can be a verbose and error prone task.\n This is where functions come into play.\n Neureka's functions are composed of operations forming an abstract syntax tree.\n Passing tensors to a function will route them through this tree and apply\n all of the operations on the tensors for you."},"ut.miscellaneous.Weired_NN_Spec":{"executedFeatures":["Dot based feed forward and activation produces expected result."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":1,"totalFeatures":1,"passed":1,"successRate":1.0,"time":36},"title":"","narrative":"This specification is meant less as feature documentation and more as a\n chaos test for weird neural network architectures\n and unusual usages of the Neureka library."},"ut.ndas.Nda_Assign_Spec":{"executedFeatures":["Assignment can be easily achieved through subscription operators.","We can assign one slice into another one.","We can use the \"mut\" API to assign the contents of one nd-array into another one."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":3,"totalFeatures":3,"passed":3,"successRate":1.0,"time":7},"title":"Nda Inline Assignment","narrative":"In this specification we cover the behaviour of nda's with respect to the assignment operation\n as well as the assignment of individual Nda items."},"ut.ndas.Nda_Framing_Spec":{"executedFeatures":["An Nda can be labeled.","Concatenating 2 labeled nd-arrays will produce a nd-array which is also labeled.","The slice of a labeled vector is labeled too.","We can label the columns and rows of a rank 3 nd-array.","We can label the columns of a rank 2 nd-array.","We can use labels as selectors for 
slicing."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":6,"totalFeatures":6,"passed":6,"successRate":1.0,"time":28},"title":"Nda framing","narrative":"Immutability is a core concept of the Neureka library.\n This means that the Nda API does not expose mutability directly.\n Instead, the API exposes methods that return new instances of Nda\n that are derived from the original instance.\n \n This is also true for labeling operations, \n meaning that the Nda API does not directly expose methods that mutate labels of an Nda\n but instead provides methods that return new instances of Nda\n with different labels.\n \n Don't be concerned about the performance implications of this,\n because in the vast majority of cases the new instance will be backed by the same data array\n as the original instance!"},"ut.ndas.Nda_Inplace_Framing_Spec":{"executedFeatures":["Concatenating 2 labeled nd-arrays will produce a nd-array which is also labeled.","The slice of a labeled vector is labeled too.","We can concatenate more than 2 nd-arrays.","We can label the columns and rows of a rank 3 nd-array.","We can label the columns of a rank 2 nd-array.","We can use labels as selectors for slicing."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":6,"totalFeatures":6,"passed":6,"successRate":1.0,"time":23},"title":"NDA Framing","narrative":"Framing an nd-array is all about naming its axes and then using those names to\n access, read or write its values in a more convenient and human readable way."},"ut.ndas.Nda_Instantiation_Spec":{"executedFeatures":["A vector can be created from an array of values through the \"of\" method.","Common types of nd-arrays are best instantiated using type specific convenience methods.","ND-arrays can be created fluently."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":8,"totalFeatures":3,"passed":8,"successRate":1.0,"time":16},"title":"ND-Array Instantiation","narrative":"In this specification we cover how ND-arrays can be instantiated."},"ut.ndas.Nda_Items_Spec":{"executedFeatures":["An item can be converted to an Optional object.","Other than the \"orElse(T)\" method of the Optional class, the same method of an Item will throw an exception if the provided value is null.","The \"get\" method of an Item object will throw an exception if the item is missing.","We can check if items of a tensor is present or not.","We can get the value of an item.","We can use the \"orElse(T)\" method to avoid null values."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":6,"totalFeatures":6,"passed":6,"successRate":1.0,"time":10},"title":"The Nds Items API","narrative":"Nd-arrays are collections of items similar to other\n collection types in Java. 
\n One useful way to access the items of an nd-array is\n to use the items API.\n \n Using the `at` methods we can access an `Item` object\n which is a wrapper around the item's value and its\n index in the nd-array.\n \n The `Item` object is a simple data class which\n is very similar to the `Optional` class, meaning\n that it can either be empty or contain a value."},"ut.ndas.Nda_Mutation_Spec":{"executedFeatures":["A ND-Array can be mutated simply using the \"set\" method.","A ND-Array can be mutated using the \"at(..).set(..)\" methods.","A simple vector ND-Array can be mutated using the \"at(..).set(..)\" methods.","A simple vector ND-Array can be mutated using the \"setItemAt\" method.","We can use the subscription operator to mutate a simple vector ND-Array.","We can use the subscription operator to mutate an ND-Array."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":6,"totalFeatures":6,"passed":6,"successRate":1.0,"time":11},"title":"Mutating ND-Arrays","narrative":"ND-Arrays should be considered immutable, so we should prefer creating new \n ND-Arrays from existing ones using wither methods.\n However this is not always a good idea as it can be expensive to create new\n ND-Arrays, especially if the ND-Array is very large.\n The ability to mutate ND-Arrays is therefore provided, but only\n accessible via the mutation API exposed by the `getMut()` method."},"ut.ndas.Nda_Reshape_Spec":{"executedFeatures":["We can create a new Nda instance with a different shape."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":1,"totalFeatures":1,"passed":1,"successRate":1.0,"time":3},"title":"Nda Reshaping","narrative":"Immutability is a core concept of the Neureka library.\n This means that the Nda API does not expose mutability directly.\n Instead, the API exposes methods that return new instances of Nda\n that are derived from the original instance.\n \n This is also true for reshaping operations, \n meaning that the Nda API does not expose methods that mutate the shape of an Nda\n but instead provides methods that return new instances of Nda\n with a different shape.\n \n Don't be concerned about the performance implications of this,\n because in the vast majority of cases the new instance will be backed by the same data array\n as the original instance!"},"ut.ndim.NDConfiguration_Spec":{"executedFeatures":["Various NDConfigurations behave exactly like their general purpose implementation."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":20,"totalFeatures":1,"passed":20,"successRate":1.0,"time":55},"title":"Making Arrays N-Dimensional","narrative":"Under the hood Neureka implements powerful indexing \n abstractions through the `NDConfiguration` interface and its various implementations.\n This allows for the creation of tensors/nd-arrays with arbitrary dimensions, \n the ability to slice them into smaller tensors/nd-arrays with the same underlying data,\n and finally the ability to permute their axes (like transposing them for example).\n \n This specification however only focuses on the behaviour of the `NDConfiguration` interface\n which translates various types of indices."},"ut.ndim.Nda_Permute_Spec":{"executedFeatures":["We can use the \"permute\" method to rearrange the dimensions of an nd-array.","We can use the \"transpose\" method to transpose swap 2 
dimensions."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":2,"totalFeatures":2,"passed":2,"successRate":1.0,"time":6},"title":"Reshaping Nd-Arrays","narrative":"Permuting an N-dimensional array means rearranging the dimensions/axes of the N-dimensional array.\n It returns a new nd-array with the same data as the original nd-array, \n but with the specified dimensions rearranged. \n It is very useful for example when you want to\n change the order of dimensions, for example, if you have a nd-array with dimensions (batch_size, channels, height, width), \n you can use permute() to rearrange the dimensions to (batch_size, height, width, channels).\n Another useful application of permute() is transposing a matrix.\n For example, if you have a matrix with dimensions (rows, columns), \n you can use permute() to rearrange the dimensions to (columns, rows).\n \n Permuting is a very cheap operation because it does not copy any data but merely\n creates a new view on the same data with a different access pattern."},"ut.ndim.Shape_Spec":{"executedFeatures":["A shape can be created from a list of integers.","A shape can be created from a stream of ints.","A shape can be created from an iterable.","A shape can be mapped to a new shape.","A shape can be sliced.","Use the \"any\" or \"every\" method to check if a predicate holds for any or every value of the shape.","You can use the \"count(Predicate)\" method to count the number of values that satisfy a predicate."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":7,"totalFeatures":7,"passed":7,"successRate":1.0,"time":19},"title":"The Shape Tuple","narrative":"The `Shape` of an nd-array/tensor is in essence merely an immutable tuple of integers\n which define the size of each dimension of the tensor.\n So if you think of an nd-array as a grid of numbers, then the shape of the\n tensor is the size of the grid in each dimension.\n \n This specifications shows you how to create a shape and how to use it."},"ut.ndim.Tensor_NDConfiguration_Spec":{"executedFeatures":["NDConfiguration instances of tensors have expected state and behaviour.","NDConfiguration instances of tensors have expected state."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":2,"totalFeatures":2,"passed":2,"successRate":1.0,"time":11},"title":"What it means to be N-Dimensional","narrative":"This specification covers how implementations\n of the `NDConfiguration` interface manage to define\n what it means to be a n-dimensional tensor/nd-array."},"ut.ndim.Tensor_Permute_Spec":{"executedFeatures":["We can use the \"permute\" method to rearrange the dimensions of a tensor.","When matrices are transpose, they will change their layout type as expected."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":2,"totalFeatures":2,"passed":2,"successRate":1.0,"time":9},"title":"Reshaping Tensors","narrative":"Permuting an N-dimensional array means rearranging the dimensions/axes of the N-dimensional array.\n It produces a new tensor with the same data as the original tensor, \n but with the specified dimensions rearranged. 
\n \n This is very useful for example when you want to\n change the order of dimensions, for example, if you have a tensor with dimensions (batch_size, channels, height, width), \n you can use permute() to rearrange the dimensions to (batch_size, height, width, channels).\n Another useful application of permute() is transposing a matrix.\n For example, if you have a matrix with dimensions (rows, columns), \n you can use permute() to rearrange the dimensions to (columns, rows).\n \n Permuting is a very cheap operation because it does not copy any data but merely\n creates a new view on the same data with a different access pattern."},"ut.ndim.Tensor_Slice_Permute_Spec":{"executedFeatures":["A slice of a tensor changes as expected when reshaping it.","Reshaping a slice works as expected.","Two slices of one big tensor perform matrix multiplication flawless."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":3,"totalFeatures":3,"passed":3,"successRate":1.0,"time":11},"title":"Permuting Slices of Tensors","narrative":"Neureka provides a convenient way to permuting tensors\n even if they are slices of other tensors sharing the same underlying data.\n This is possible because of the under the hood indexing \n abstractions provided by the `NDConfiguration` interface and its various implementations."},"ut.neureka.Neureka_Spec":{"executedFeatures":["Backend related library objects adhere to the same toString formatting convention!","Every Thread instance has their own Neureka instance.","Neureka class instance has expected behaviour.","Neureka settings class can be locked causing its properties to be immutable.","Various library objects adhere to the same toString formatting convention!"],"ignoredFeatures":["OpenCL related library objects adhere to the same toString formatting convention!"],"stats":{"failures":0,"errors":0,"skipped":1,"totalRuns":82,"totalFeatures":6,"passed":82,"successRate":1.0,"time":4898},"title":"The Neureka context can be used and configured as expected.","narrative":"This specification covers the behavior of the Neureka class which\n exposes a global API for configuring thread local contexts and library settings.\n The purpose of this is to assert that the API exposed by the Neureka class \n is both thread local and configurable.\n This specification also exists to cover standards for the Neureka library in general."},"ut.optimization.ADAM_Spec":{"executedFeatures":["ADAM optimizes according to expected inputs","Equations \"I[0]*I[1]+(1-I[2])*I[3]\" and \"(1-I[0])*I[1]\" used within ADAM return expected results.","Equations used by ADAM return expected result."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":19,"totalFeatures":3,"passed":19,"successRate":1.0,"time":92},"title":"","narrative":"ADAM is a more powerful alternative to the classical stochastic gradient descent. 
\n It combines the best properties of the AdaGrad and the RMSProp algorithms, which makes \n it especially well suited for sparse gradients and noisy data.\n Adam is the most popular among the adaptive optimizers\n because its adaptive learning rate works so well with sparse datasets."},"ut.optimization.AdaGrad_Spec":{"executedFeatures":["AdaGrad optimizes according to expected inputs"],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":10,"totalFeatures":1,"passed":10,"successRate":1.0,"time":28},"title":"","narrative":""},"ut.optimization.Momentum_Spec":{"executedFeatures":["Momentum optimizes according to expected inputs"],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":10,"totalFeatures":1,"passed":10,"successRate":1.0,"time":18},"title":"","narrative":"Momentum is an extension to the gradient descent optimization \n algorithm that allows the search to build inertia in a direction \n in the search space and overcome the oscillations of noisy \n gradients and coast across flat spots of the search space."},"ut.optimization.RMSProp_Spec":{"executedFeatures":["RMSprop optimizes according to expected inputs"],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":10,"totalFeatures":1,"passed":10,"successRate":1.0,"time":28},"title":"","narrative":"**Root Mean Squared Propagation**, or RMSProp, is an extension of gradient \n descent and the AdaGrad version of gradient descent that uses a \n decaying average of partial gradients in the adaptation of the \n step size for each parameter."},"ut.tensors.Copy_Spec":{"executedFeatures":["A deep copy of a slice tensor is also a deep copy of the underlying data array.","A deep copy of a tensor is also a deep copy of the underlying data array.","A shallow copy of a tensor will be flagged as such.","A shallow copy will share the same underlying data as its original tensor.","We can deep copy various types of tensors."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":17,"totalFeatures":5,"passed":17,"successRate":1.0,"time":38},"title":"To Copy or Not to Copy","narrative":"In this specification we cover the behaviour of tensors with respect to their copy methods.\n There are two main ways to copy a tensor:
        \n 1. .shallowCopy()
        \n 2. .deepCopy()
        \n
        \n The first method creates a new tensor with the same underlying data array as the original tensor.
        \n The second method on the other hand creates a new tensor with a new data array.
        \n
        \n The first method is the most efficient, but it is not as safe as the second method.
        \n The second method is the most safe, but it is not as efficient.
        \n
\n Besides these 2 main requirements, there are also some corner cases with respect to\n the components of a tensor (like for example its computation graph) which\n will be covered in this specification as well."},"ut.tensors.DimTrim_Spec":{"executedFeatures":["The \"dimTrim\" operation works on slices too!"],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":1,"totalFeatures":1,"passed":1,"successRate":1.0,"time":5},"title":"The 'dimTrim' Method","narrative":"The 'dimTrim' method is used to remove trailing and leading dimensions of length 1 from a tensor.\n This is useful when you want to perform operations on tensors of different ranks.\n For example, if you want to perform a dot product on two vectors, you can use the 'dimTrim' method\n to remove the dimension of length 1 from the vector, so that it becomes a scalar.\n This way you can perform the dot product on two scalars."},"ut.tensors.Expression_Based_Tensor_Instantiation_Spec":{"executedFeatures":["A tensor can be created from a function as expression.","We can instantiate tensors from various simple string expressions."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":2,"totalFeatures":2,"passed":2,"successRate":1.0,"time":14},"title":"Expression based Tensor Instantiation","narrative":"This specification defines how a tensor can be instantiated\n using string expressions, which define operations to be executed.\n This form of tensor instantiation is very useful to avoid boilerplate code."},"ut.tensors.Fluent_Tensor_Creation_Spec":{"executedFeatures":["Initialization lambda based tensors can be created fluently.","Range based tensors can be created fluently.","Scalars can be created fluently.","Seed based tensors can be created fluently.","Tensors can be created fluently.","Value based tensors can be created fluently.","Vectors can be created fluently."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":46,"totalFeatures":7,"passed":46,"successRate":1.0,"time":46},"title":"","narrative":""},"ut.tensors.Functional_Nda_Spec":{"executedFeatures":["ND-Array mapping lambdas produce expected nd-arrays.","The \"map\" method is a shorter convenience method for mapping to the same type.","We can analyse the values of a nd-array using various predicate receiving methods","We can collect a stream into a nd-array.","We can find both min and max items in a tensor by providing a comparator.","We can find both min and max items in an ND-array by providing a comparator.","We can initialize an ND-Array using a filler lambda mapping indices to items.","We can use the \"filter\" method as a shortcut for \"stream().filter(..)\".","We can use the \"flatMap\" method as a shortcut for \"stream().flatMap(..)\"."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":9,"totalFeatures":9,"passed":9,"successRate":1.0,"time":558},"title":"Functional ND-Arrays","narrative":"ND-Arrays expose a powerful API for performing operations on them\n in a functional style."},"ut.tensors.Functional_Tensor_Spec":{"executedFeatures":["We can find both min and max items in a tensor by providing a comparator.","We can initialize a tensor using a filler lambda mapping indices to items.","We can use the \"filter\" method as a shortcut for \"stream().filter(..)\".","We can use the \"flatMap\" method as a shortcut for \"stream().flatMap(..)\"."],"ignoredFeatures":["Tensor mapping lambdas produce expected tensors.","The \"map\" method is a shorter convenience 
method for mapping to the same type.","We can analyse the values of a tensor using various predicate receiving methods"],"stats":{"failures":0,"errors":0,"skipped":3,"totalRuns":4,"totalFeatures":7,"passed":4,"successRate":1.0,"time":230},"title":"Functional Tensors","narrative":"Tensors expose a powerful API for performing operations on them\n in a functional style."},"ut.tensors.Reshape_Spec":{"executedFeatures":["The reshape operation supports autograd!","We can create a new tensor with a different shape.","We can use `-1` in the desired shape if we want the axis size to be determined automatically."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":3,"totalFeatures":3,"passed":3,"successRate":1.0,"time":13},"title":"Tensor Reshaping","narrative":"This specification demonstrates how to reshape tensors,\n which means to change the shape of a tensor.\n\n Note that immutability is a core concept of the Neureka library.\n This means that the `Tensor` API does not expose mutability directly.\n Instead, it exposes methods that return new instances of `Tensor`\n that are derived from the original instance.\n \n This is also true for reshaping operations.\n \n Don't be concerned about the performance implications of this,\n because in the vast majority of cases the new instance will be backed by the same data array\n as the original instance!"},"ut.tensors.Tensor_As_Container_Spec":{"executedFeatures":["More tensor operations translate to custom data type \"ComplexNumber\".","Plus operator on String tensors works element-wise.","Tensor operations translate to custom data type \"ComplexNumber\".","We can apply predicates on the values of a tensor."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":4,"totalFeatures":4,"passed":4,"successRate":1.0,"time":55},"title":"Why not have a tensor of words?","narrative":"Technically, tensors are merely fancy ND-arrays with some useful mathematical operations\n applicable to them...\n Therefore, there is no reason why a tensor would not also be able to store\n other kinds of objects besides numbers like strings for example.\n This specification ensures that tensors can hold and index many other things..."},"ut.tensors.Tensor_Assign_Spec":{"executedFeatures":["Assignment can be easily achieved through subscription operators.","We can assign one slice into another one.","We can use the \"mut\" API to assign the contents of one tensor into another one."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":3,"totalFeatures":3,"passed":3,"successRate":1.0,"time":11},"title":"Tensor Inline Assignment","narrative":"In this specification we cover the behaviour of tensors with respect to the assignment operation\n as well as the assignment of individual tensor items."},"ut.tensors.Tensor_Conversion_Spec":{"executedFeatures":["Tensors value type can be changed by calling \"toType(...)\".","We can change the data type of all kinds of tensors.","We turn a tensor into a scalar value or string through the \"as\" operator!"],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":38,"totalFeatures":3,"passed":38,"successRate":1.0,"time":24},"title":"Tensor Type Conversion","narrative":"Here we specify how a tensor can be converted to other data types\n like for example another tensor of a different data type."},"ut.tensors.Tensor_Convolution_Spec":{"executedFeatures":["Autograd works with simple 2D convolution.","Convolution can be performed using non-quadratic matrix 
tensors.","Convolution can be performed using tensors with an additional dimension as batch size.","Convolution with tensors of the same shape is equivalent to a dot product.","Manual convolution produces expected result.","Sime convolution works as expected eith autograd.","Tensors have the correct layout after convolution.","The \"x\" (convolution) operator produces expected results (On the CPU).","Very simple manual convolution produces expected result.","We can perform a convolution operation on a 2D tensor."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":20,"totalFeatures":10,"passed":20,"successRate":1.0,"time":1132},"title":"Tensor Convolution","narrative":"This specification shows how convolution can be performed on tensors.\n\n Convolution is a linear operation which is not only important for image processing but also\n a central player in the field of machine learning (especially for computer vision).\n It is used to extract features from images and other typically ~2 dimensional data.\n Other than that it is extremely important in the field of signal processing."},"ut.tensors.Tensor_Device_Spec":{"executedFeatures":["Tensors try to migrate themselves to a device that is being added to them as component.","The device of a tensor can be accessed via the \"device()\" method.","When creating slices of tensors then this should trigger a \"parent - child\" relation noticeable to the device!"],"ignoredFeatures":["Adding OpenCL device to tensor makes tensor be \"outsourced\" and contain the Device instance as component."],"stats":{"failures":0,"errors":0,"skipped":1,"totalRuns":3,"totalFeatures":4,"passed":3,"successRate":1.0,"time":15},"title":"Tensors on Devices","narrative":"This unit test specification covers \n the expected behavior of tensors when interacting\n with instances of implementations of the Device interface."},"ut.tensors.Tensor_Dot_Product_Spec":{"executedFeatures":["The \"dot\" method calculates the dot product between vectors.","The \"dot\" operation supports autograd.","The dot operation work even when one tensor is virtual.","The dot operation works for virtual tensors as well.","The dot product operation runs on any device.","The dot product works across different types and devices.","You can slice a Matrix into vectors and then used them for dot products."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":21,"totalFeatures":7,"passed":21,"successRate":1.0,"time":37},"title":"Tensor Dot Products","narrative":"A tensor can also be a simple vector, which is a tensor of rank 1.\n This specification demonstrates how to perform dot products on tensors of rank 1."},"ut.tensors.Tensor_Generics_Spec":{"executedFeatures":["1D tensors can be created from primitive arrays.","Anonymous tensor instance has the default datatype class as defined in Neureka settings.","We can create a tensor of strings."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":7,"totalFeatures":3,"passed":7,"successRate":1.0,"time":8},"title":"Tensors as Generic Containers","narrative":"Tensors do not just store numeric data.\n They can hold anything which can be stuffed into a \"Object[]\" array.\n You could even create a tensor of tensors!"},"ut.tensors.Tensor_Gradient_Spec":{"executedFeatures":["Gradient of tensor is being applies regardless of the tensor requiring gradient or not","Tensors can have gradients but not require them.","Tensors that have gradients but do not require them still print 
them."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":4,"totalFeatures":3,"passed":4,"successRate":1.0,"time":8},"title":"Gradients are Tensors which are Components of other Tensors","narrative":"This specification defines the gradient API on tensors.\n So one ought to be able to check wetter or not a tensor has a gradient attached to it or not.\n In that case one should be able to get this gradient and then work with\n it independently of the original tensor to which it belongs to..."},"ut.tensors.Tensor_IO_Spec":{"executedFeatures":["A tensor produced by the static \"Tensor.newRandom(shape)\" has expected \"random\" value.","Indexing after reshaping works as expected.","Tensor value type can not be changed by passing float or double arrays to it.","Tensor values can be manipulated","The tensor data array can be modified by targeting them with an index.","We can manipulate the underlying data array of a tensor through the mut API.","We can re-populate a tensor of shorts from a single scalar value!","When we try to manipulate the underlying data array of a virtual tensor then it will become actual."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":43,"totalFeatures":8,"passed":43,"successRate":1.0,"time":69},"title":"Reading and Writing Tensor Items","narrative":"Tensors are complicated data structures with a wide range of different possible states.\n They can host elements of different types residing on many kinds of different devices.\n Here we want to read from and write to the state of a tensor."},"ut.tensors.Tensor_Instantiation_Spec":{"executedFeatures":["A matrix tensor can be instantiated using lists for it's shape and values.","A simple 2D vector can be instantiated using lists for it's shape and values.","Passing a seed in the form of a String to a tensor produces pseudo random items.","Scalar tensors can be created via static factory methods","Tensors can be instantiated based on arrays for both shapes and values.","Tensors can be instantiated with String seed.","Vector tensors can be instantiated via factory methods."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":25,"totalFeatures":7,"passed":25,"successRate":1.0,"time":27},"title":"Instantiating Tensors","narrative":"Tensors are complicated data structures with a wide range of different possible states.\n They can host elements of different types residing on many kinds of different devices.\n Here we want to show how a tensor can be instantiated in different ways."},"ut.tensors.Tensor_Interop_Spec":{"executedFeatures":["Not all tensor can be converted to images.","Tensor can be converted to buffered images."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":8,"totalFeatures":2,"passed":8,"successRate":1.0,"time":35},"title":"Tensors play well with other data structures!","narrative":"Tensors should have good interoperability with other JDK data structures like images.\n In this specification we define these interoperability requirements."},"ut.tensors.Tensor_Layout_Spec":{"executedFeatures":["A new transposed version of a given tensor will be returned by the \"T()\" method.","Matrix multiplication works for both column and row major matrices across devices."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":3,"totalFeatures":2,"passed":3,"successRate":1.0,"time":184},"title":"Row or Column Major. 
Why not both?","narrative":"Although Neureka exposes tensors as row major tensors from \n a users point of view, it does in fact support both row major and column major \n based tensor layout under the hood.\n Here we cover how the layout of tensors can be modified\n and we ensure the different tensor types still work as expected...\n (The features in this specification involve mutating tensors, be careful when playing around with this yourself)"},"ut.tensors.Tensor_Operation_Spec":{"executedFeatures":["Activation functions work across types on slices and non sliced tensors.","Auto reshaping and broadcasting works and the result can be back propagated.","New method \"asFunction\" of String added at runtime is callable by groovy and also works.","New operator methods added to \"SDK-types\" at runtime are callable by groovy and also work.","Operators \"+,*,**\" produce expected results with gradients which can be accessed via a \"Ig[0]\" Function instance","Overloaded operation methods on tensors produce expected results when called.","Scalar broadcasting works across devices.","Simple slice addition produces expected result.","The \"dot\" operation reshapes and produces valid \"x\" operation result.","The \"matMul\" operation produces the expected result.","The \"random\" function/operation populates tensors randomly.","The transpose operation exposed by the \"T()\" method, supports autograd.","The values of a randomly populated tensor seems to adhere to a gaussian distribution.","You can do matrix multiplication using 2 transposed matrices.","You can do matrix multiplication using transposed matrices as second operand.","You can do matrix multiplication using transposed matrices."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":164,"totalFeatures":16,"passed":164,"successRate":1.0,"time":10225},"title":"Running Tensors through operations","narrative":"This specification shows how to use the tensor API to run tensors through various operations.\n Operations are triggered either by simply calling methods on tensors or by using \n `Function` objects which are used to define custom operations in the form \n of a syntax tree."},"ut.tensors.Tensor_Slicing_Spec":{"executedFeatures":["A tensor can be sliced by passing ranges in the form of lists (Groovy ranges).","A tensor can be sliced by passing ranges in the form of primitive arrays.","Normal slicing will try to do autograd.","Slicing is also a Function with autograd support!","The \"at\" method and the \"from\" / \"to\" methods can be mixed when slicing a tensor.","The slice builder also supports slicing with custom step sizes.","We can avoid autograd when slicing by using the \"detached\" instead of the \"get\" method.","We can slice a scalar tensor from a larger tensor of rank 4.","When Slicing only one axis using the SliceBuilder API, the other axes will be sliced implicitly."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":14,"totalFeatures":9,"passed":14,"successRate":1.0,"time":36},"title":"Tensors within Tensors","narrative":"ND-Array data structures can be \"sliced\" in the sense\n that one can create a subset view of the underlying data inside a tensor\n through a new tensor instance...\n This can be a tedious and complicated procedure.\n Therefore a tensor should expose a various user friendly API for slicing which\n are also fit for various languages.\n This specification covers these APIs for tensor slicing."},"ut.tensors.Tensor_State_Spec":{"executedFeatures":["A tensor 
can be instantiated from a item type class and nested lists.","Numeric tensors as String can be formatted on an entry based level.","Tensor created from shape and datatype has expected state.","Tensors as String can be formatted depending on shape.","Tensors as String can be formatted on an entry based level.","The data and the value of a tensor a 2 different things!","We can create scalar tensors."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":19,"totalFeatures":7,"passed":19,"successRate":1.0,"time":52},"title":"The Tensor Initialization and State Specification","narrative":"This specification defines the expected states of freshly instantiated\n and initialized tensors.\n After a tensor was created successfully we expect it \n to have certain properties like a shape, rank, type and data array\n among other things."},"ut.tensors.Tensor_Stats_Spec":{"executedFeatures":["A tensor can be summed alongside a specific axis.","Both the min and max operation support autograd (back-propagation).","Multiple dimensions of a tensor can selectively be summed up.","The sum operation support autograd (back-propagation).","There is no need to use a function, we can use the min() and max() methods on tensors instead.","We can get pre-instantiated min and max functions from the library context.","We can use the \"sum\" method to sum the items of a tensor.","We can use the max operation as a function"],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":22,"totalFeatures":8,"passed":22,"successRate":1.0,"time":225},"title":"Reducing Tensors","narrative":"Various kinds of operations reduce tensors to scalars,\n the most common ones being the min and max operations \n which find the smallest as well as largest number among all \n items of a tensor.\n Neureka exposes various different ways to achieve this,\n all of which are also differential (autograd support)."},"ut.tensors.Tensor_Version_Spec":{"executedFeatures":["Inline operations cause illegal state exceptions.","Inline operations causes version incrementation.","Non-inline operations do not cause version incrementation."],"ignoredFeatures":["Storing a tensor on a device should not change the version of a tensor (Even though its data changed technically)."],"stats":{"failures":0,"errors":0,"skipped":1,"totalRuns":22,"totalFeatures":4,"passed":22,"successRate":1.0,"time":135},"title":"Tensor (Data Array) Version","narrative":"There are two fundamental categories of operations\n which can be applied to tensors : \n Inline operations and Non-Inline operations! \n \n Inline operations are often times problematic because they produce\n side effects by changing passed tensors instead of producing new ones... \n One such bad side effect can easily occur for tensors involved in the\n autograd system, more specifically: the recorded computation graph. \n Inline operations can break the mathematically pureness of the back-propagation\n procedure by for example changing partial derivatives...
\n In order to prevent said errors from occurring unnoticed, tensors\n have versions which will increment when the underlying data of the tensor changes. \n This version will be tracked by the computation graph as well in order to\n match it with the ones stored inside the tensor. \n A mismatch would then yield an exception! \n \n This specification is responsible for defining the behaviour of this\n version number with respect to their wrapping tensors as well as computation graph nodes."},"ut.tensors.exceptions.Tensor_Delete_Exception_Spec":{"executedFeatures":["A deleted tensor will tell you that it has been deleted.","A deleted tensor will throw an exception when accessing its configuration.","A deleted tensor will throw an exception when accessing its data type.","A deleted tensor will throw an exception when accessing its data.","A deleted tensor will throw an exception when modifying its data type.","A deleted tensor will throw an exception when trying to modify its data.","A deleted tensor will throw an exception when trying to set its configuration."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":7,"totalFeatures":7,"passed":7,"successRate":1.0,"time":14},"title":"","narrative":""},"ut.tensors.exceptions.Tensor_Exception_Spec":{"executedFeatures":["Building a tensor with \"null\" as shape argument throws an exception.","Building a tensor with 0 shape arguments throws an exception.","Casting a tensor as something unusual will cuas an exception to be thrown.","Out of dimension bound causes descriptive exception!","Passing an invalid key object into the \"getAt\" method causes a descriptive exception.","Passing an invalid object into Tensor constructor causes descriptive exception.","Passing null to various methods of the tensor API will throw exceptions.","Trying to inject an empty tensor into another causes fitting exception."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":25,"totalFeatures":8,"passed":25,"successRate":1.0,"time":47},"title":"Tensors Exception Behavior","narrative":"This specification covers the behavior of the Tensor class in\n exceptional scenarios which are contrary to its intended use.\n The purpose of this is to assert that the Tensor class will provide\n useful feedback to a user to explain that a misuse of its API\n occurred so that the user can correct this misuse."},"ut.utility.Cleaner_Testing":{"executedFeatures":["The DeviceCleaner triggers registered cleaner actions when things are eligible for GC."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":1,"totalFeatures":1,"passed":1,"successRate":1.0,"time":1304},"title":"How Neureka Cleans Up","narrative":"Under the hood \n Neureka deals with large arrays of\n data, which are often times \n native data arrays requiring explicit\n memory freeing!\n This freeing of memory can happen at any time\n during the lifetime of an nd-array, however\n it should happen at least up until the nd-array/tensor\n objects representing their referenced data arrays become\n eligible for garbage collection.\n This specification ensures that the custom garbage\n cleaner implementation used by Neureka fulfills this role."},"ut.utility.DataConverter_Spec":{"executedFeatures":["An array of any type of object may be converted to a array of primitives.","The DataConverter can convert the given array 
data."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":2,"totalFeatures":2,"passed":2,"successRate":1.0,"time":9},"title":"","narrative":""},"ut.utility.FileHandle_Spec":{"executedFeatures":["Fully labeled tenors will be stored with their labels included when saving them as CSV.","Partially labeled tenors will be stored with their labels included when saving them as CSV.","Test reading IDX file format.","Test writing IDX file format.","The FileDevice component \"CSVHead\" can read CSV file formats and load them as tensors.","We can load image files as tensors."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":24,"totalFeatures":6,"passed":24,"successRate":1.0,"time":787},"title":"","narrative":""},"ut.utility.ListReader_Exception_Spec":{"executedFeatures":["The ListReader will detect inconsistent degrees of nesting in the provided data.","The ListReader will detect inconsistent types in the provided data."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":2,"totalFeatures":2,"passed":2,"successRate":1.0,"time":6},"title":"","narrative":""},"ut.utility.ListReader_Spec":{"executedFeatures":["The ListReader can interpret nested lists into a shape list and value list.","The ListReader can interpret nested lists resembling a 3D tensor into a shape list and value list.","The ListReader can interpret nested lists resembling a matrix into a shape list and value list."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":8,"totalFeatures":3,"passed":8,"successRate":1.0,"time":7},"title":"The Internal ListReader turning lists into flat arrays with shape and type data","narrative":"This specification covers an internal class which should not be used\n outside this library, namely the ListReader class.\n This class is simply a converter which turns nested lists\n into flat arrays alongside the type of the elements and the shape of this \"tensor\"."},"ut.utility.Utility_Spec":{"executedFeatures":["Object arrays can be converted to primitive arrays."],"ignoredFeatures":[],"stats":{"failures":0,"errors":0,"skipped":0,"totalRuns":8,"totalFeatures":1,"passed":8,"successRate":1.0,"time":26},"title":"","narrative":""}} \ No newline at end of file diff --git a/docs/spock/reports/Example_Spec.Example_Spec.json b/docs/spock/reports/Example_Spec.Example_Spec.json index f56f0713d..e0957a073 100644 --- a/docs/spock/reports/Example_Spec.Example_Spec.json +++ b/docs/spock/reports/Example_Spec.Example_Spec.json @@ -1,22 +1,22 @@ { "className":"Example_Spec.Example_Spec", "title":"An Introduction to writing Spock Specifications", - "narrative":"Hello and welcome to the example / template specification of this project.\n This is a simple introduction as to how to get started writing Spock specifications.\n\n Spock works on top of Groovy which is in essence a syntactic super-set of Java.\n That means that one can write Java code in Groovy, and 99% of the time it will\n work the exact same way.", + "narrative":"Hello and welcome to the example / template specification of this project.\n This is a simple introduction as to how to get started writing Spock specifications.\n\n Spock works on top of Groovy which is in essence a syntactic super-set of Java.\n That means that one can write Java code in Groovy, and 99% of the time it will \n work the exact same way.", "subjects":[], "statistics":{ - "runs":"5", + "runs":"7", "successRate":"100.0%", "failures":"0", "errors":"0", "skipped":"0", - "duration":"0.005 
seconds" + "duration":"0.068 seconds" }, "headers":[],"tags":{},"see":[], "features":[ { "id":"iAmNotSoReadable", "result":"PASS", - "duration":"0", + "duration":"0.007 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -55,7 +55,7 @@ { "id":"Should be able to remove from list", "result":"PASS", - "duration":"0", + "duration":"0.005 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -70,7 +70,37 @@ }, { - "id":"Numbers to the power of two with a fancy data table!", + "id":"Numbers to the power of two with a fancy data table! [0]", + "result":"PASS", + "duration":"0.002 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"Math works!","code":[]}, + + {"kind":"where","text":"We use the following data:","code":{"a":["1"],"b":["2"],"c":["1"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Numbers to the power of two with a fancy data table! [1]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"Math works!","code":[]}, + + {"kind":"where","text":"We use the following data:","code":{"a":["2"],"b":["2"],"c":["4"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Numbers to the power of two with a fancy data table! [2]", "result":"PASS", "duration":"0", "iterations":{ @@ -79,7 +109,7 @@ "blocks":[ {"kind":"expect","text":"Math works!","code":[]}, - {"kind":"where","text":"We use the following data:","code":{"a":["1","2","3"],"b":["2","2","2"],"c":["1","4","9"]}} + {"kind":"where","text":"We use the following data:","code":{"a":["3"],"b":["2"],"c":["9"]}} ], "problems":{"dataValues":[], "errors":[]} } diff --git a/docs/spock/reports/it.Calculus_Stress_Test.json b/docs/spock/reports/it.Calculus_Stress_Test.json index 92de09fcd..c9993fb0b 100644 --- a/docs/spock/reports/it.Calculus_Stress_Test.json +++ b/docs/spock/reports/it.Calculus_Stress_Test.json @@ -4,19 +4,19 @@ "narrative":"", "subjects":[], "statistics":{ - "runs":"5", + "runs":"69", "successRate":"100.0%", "failures":"0", "errors":"0", "skipped":"0", - "duration":"0.589 seconds" + "duration":"1.086 seconds" }, "headers":[],"tags":{},"see":[], "features":[ { - "id":"Stress test runs error free and produces expected result", + "id":"Stress test runs error free and produces expected result [0]", "result":"PASS", - "duration":"0.033 seconds", + "duration":"0.183 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -45,9 +45,78 @@ }, { - "id":"Dot operation stress test runs error free and produces expected result", + "id":"Stress test runs error free and produces expected result [1]", "result":"PASS", - "duration":"0.016 seconds", + "duration":"0.002 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"For this test we tell the CL-Backend to auto-convert to floats.","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = true }"]}, + + {"kind":"and","text":"","code":["def stress = (Tensor t ) -> {"," t = t + Tensor.of( t.shape(), -3d..12d )"," t = t * Tensor.of( t.shape(), 2d..3d )"," t = t / Tensor.of( t.shape(), 1d..2d )"," t = t **Tensor.of( t.shape(), 2d..1d )"," t = t - Tensor.of( t.shape(), -2d..2d )"," return t","}"]}, + + {"kind":"and","text":"","code":["Tensor source = Tensor.of( [3, 3, 3, 3], -1d ).to( device )"]}, + + {"kind":"when","text":"","code":["source.mut[1..2, 0..2, 1..1, 0..2] = Tensor.of( [2, 3, 1, 3], -4d..2d 
)","Tensor s = source[1..2, 0..2, 1..1, 0d..2d]"]}, + + {"kind":"then","text":"","code":["s.toString() == Tensor.of( [2, 3, 1, 3], -4d..2d ).toString()"]}, + + {"kind":"when","text":"","code":["s = stress(s)"]}, + + {"kind":"then","text":"","code":["s.toString({it.hasSlimNumbers = true}) =="," \"(2x3x1x3):[\" +"," \"198, -6.5, \" +"," \"36, -2.5, \" +"," \"2, 6.5, \" +"," \"\" +"," \"101, 0, \" +"," \"15, 4, \" +"," \"146, 13, \" +"," \"\" +"," \"400, 17, \" +"," \"194, 15.5, \" +"," \"101, -4.5\" +"," \"]\""]}, + + {"kind":"and","text":"","code":["(device instanceof OpenCLDevice) || s.mut.data.get() == [198.0, -6.5, 36.0, -2.5, 2.0, 6.5, 101.0, 0.0, 15.0, 4.0, 146.0, 13.0, 400.0, 17.0, 194.0, 15.5, 101.0, -4.5]","(device instanceof OpenCLDevice) || source.mut.data.get() == [-1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -4.0, -3.0, -2.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, 0.0, 1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, 2.0, -4.0, -3.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -2.0, -1.0, 0.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, 1.0, 2.0, -4.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -3.0, -2.0, -1.0, -1.0, -1.0, -1.0]"]}, + + {"kind":"cleanup","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = false }"]}, + + {"kind":"where","text":"","code":{"device":["CPU.get()","Device.get('gpu')"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Dot operation stress test runs error free and produces expected result [0]", + "result":"PASS", + "duration":"0.015 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Tensor t = Tensor.of( shape, -4d..2d )"]}, + + {"kind":"when","text":"","code":["t = t.convDot( t.T() )"]}, + + {"kind":"then","text":"","code":["t.toString() == expected"]}, + + {"kind":"where","text":"","code":{"shape":["[2, 3]","[2, 3]","[2, 1, 3]","[2, 1, 3]"],"expected":["\"(2x1x2):[29.0, 2.0, 2.0, 2.0]\"","\"(2x1x2):[29.0, 2.0, 2.0, 2.0]\"","\"(2x1x1x1x2):[29.0, 2.0, 2.0, 2.0]\"","\"(2x1x1x1x2):[29.0, 2.0, 2.0, 2.0]\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Dot operation stress test runs error free and produces expected result [1]", + "result":"PASS", + "duration":"0.007 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Tensor t = Tensor.of( shape, -4d..2d )"]}, + + {"kind":"when","text":"","code":["t = t.convDot( t.T() )"]}, + + {"kind":"then","text":"","code":["t.toString() == expected"]}, + + {"kind":"where","text":"","code":{"shape":["[2, 3]","[2, 3]","[2, 1, 3]","[2, 1, 3]"],"expected":["\"(2x1x2):[29.0, 2.0, 2.0, 2.0]\"","\"(2x1x2):[29.0, 2.0, 2.0, 2.0]\"","\"(2x1x1x1x2):[29.0, 2.0, 2.0, 2.0]\"","\"(2x1x1x1x2):[29.0, 2.0, 2.0, 2.0]\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Dot operation stress test runs error free and produces expected result [2]", + "result":"PASS", + "duration":"0.009 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -64,9 +133,28 @@ }, { - "id":"The broadcast operation stress test runs error free and produces expected result", + "id":"Dot operation stress test runs error free and produces expected result [3]", "result":"PASS", - "duration":"0.017 seconds", + "duration":"0.006 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + 
{"kind":"given","text":"","code":["Tensor t = Tensor.of( shape, -4d..2d )"]}, + + {"kind":"when","text":"","code":["t = t.convDot( t.T() )"]}, + + {"kind":"then","text":"","code":["t.toString() == expected"]}, + + {"kind":"where","text":"","code":{"shape":["[2, 3]","[2, 3]","[2, 1, 3]","[2, 1, 3]"],"expected":["\"(2x1x2):[29.0, 2.0, 2.0, 2.0]\"","\"(2x1x2):[29.0, 2.0, 2.0, 2.0]\"","\"(2x1x1x1x2):[29.0, 2.0, 2.0, 2.0]\"","\"(2x1x1x1x2):[29.0, 2.0, 2.0, 2.0]\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The broadcast operation stress test runs error free and produces expected result [0]", + "result":"PASS", + "duration":"0.008 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -87,38 +175,1688 @@ }, { - "id":"Activation functions work across types, on large prime sized 1D slices and non sliced 1D tensors.", + "id":"The broadcast operation stress test runs error free and produces expected result [1]", "result":"PASS", - "duration":"0.430 seconds", + "duration":"0.002 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, "blocks":[ - {"kind":"given","text":"We create a function based on the provided expression.","code":["var func = Function.of(funExpression)"]}, + {"kind":"given","text":"For this test we tell the CL-Backend to auto-convert to floats.","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = true }"]}, - {"kind":"and","text":"We use a large prime number to size our tensors in order to stress workload divisibility.","code":["var PRIME_SIZE_1 = 7907","var PRIME_SIZE_2 = 7919"]}, + {"kind":"and","text":"","code":["Tensor t1 = Tensor.of( shape1, -4d..2d ).to( device )","Tensor t2 = Tensor.of( shape2, -3d..5d ).to( device )"]}, - {"kind":"and","text":"We create 2 tensors storing the same values, one sliced and the other a normal tensor.","code":["var t1 = Tensor.of(type).withShape(PRIME_SIZE_1).andSeed(\"Tempeh\")","var t2 = Tensor.of(type).withShape(PRIME_SIZE_2).all(0)[9..7915]","t2.mut[0..t2.size-1] = t1"]}, + {"kind":"when","text":"","code":["Tensor t = Tensor.of( operation, [t1, t2] )"]}, - {"kind":"expect","text":"The types of both tensors should match what was provided during instantiation.","code":["t1.dataType == DataType.of(type)","t1.itemType == type","t2.dataType == DataType.of(type)","t2.itemType == type"]}, + {"kind":"then","text":"","code":["t.toString() == expected"]}, - {"kind":"when","text":"We apply the function to both tensors...","code":["var result1 = func(t1)","var result2 = func(t2)","var data1 = result1.mut.data.get()","var data2 = result2.mut.data.get()"]}, + {"kind":"cleanup","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = false }"]}, - {"kind":"then","text":"First we ensure that both tensors have the correct value/element type.","code":["result1.itemType == type","result2.itemType == type"]}, + {"kind":"where","text":"","code":{"device":["CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('gpu')","Device.get('gpu')","CPU.get()","CPU.get()","Device.get('gpu')","Device.get('gpu')","CPU.get()","CPU.get()","Device.get('gpu')","Device.get('gpu')","CPU.get()","CPU.get()"],"shape1":["[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]"],"shape2":["[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]","[2, 2]","[1, 
3, 2]"],"operation":["'i0%i1'","'i0%i1'","'i0*i1'","'i0*i1'","'i0*i1'","'i0*i1'","'i0+i1'","'i0+i1'","'i0+i1'","'i0+i1'","'i0-i1'","'i0-i1'","'i0-i1'","'i0-i1'","'i0/i1'","'i0/i1'"],"expected":["\"(2x2):[-1.0, -0.0, -0.0, NaN]\"","\"(2x3x2):[-1.0, -0.0, -0.0, NaN, -0.0, -0.0, -1.0, -1.0, 0.0, NaN, 0.0, 1.0]\"","\"(2x2):[12.0, 8.0, 3.0, -0.0]\"","\"(2x3x2):[12.0, 8.0, 3.0, -0.0, -2.0, -4.0, 3.0, 2.0, -0.0, 0.0, 1.0, 2.0]\"","\"(2x2):[12.0, 8.0, 3.0, -0.0]\"","\"(2x3x2):[12.0, 8.0, 3.0, -0.0, -2.0, -4.0, 3.0, 2.0, -0.0, 0.0, 1.0, 2.0]\"","\"(2x2):[-7.0, -6.0, -4.0, -3.0]\"","\"(2x3x2):[-7.0, -6.0, -4.0, -3.0, -1.0, 0.0, -4.0, -3.0, -1.0, 0.0, 2.0, 3.0]\"","\"(2x2):[-7.0, -6.0, -4.0, -3.0]\"","\"(2x3x2):[-7.0, -6.0, -4.0, -3.0, -1.0, 0.0, -4.0, -3.0, -1.0, 0.0, 2.0, 3.0]\"","\"(2x2):[-1.0, -2.0, -2.0, -3.0]\"","\"(2x3x2):[-1.0, -2.0, -2.0, -3.0, -3.0, -4.0, 2.0, 1.0, 1.0, 0.0, 0.0, -1.0]\"","\"(2x2):[-1.0, -2.0, -2.0, -3.0]\"","\"(2x3x2):[-1.0, -2.0, -2.0, -3.0, -3.0, -4.0, 2.0, 1.0, 1.0, 0.0, 0.0, -1.0]\"","\"(2x2):[1.33333, 2.0, 3.0, -∞]\"","\"(2x3x2):[1.33333, 2.0, 3.0, -∞, -2.0, -1.0, 0.33333, 0.5, -0.0, NaN, 1.0, 0.5]\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The broadcast operation stress test runs error free and produces expected result [2]", + "result":"PASS", + "duration":"0.003 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"For this test we tell the CL-Backend to auto-convert to floats.","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = true }"]}, - {"kind":"and","text":"The underlying data object should match the data array type as is defined by the data type!","code":["data1.class == result1.dataType.dataArrayType()","data2.class == result2.dataType.dataArrayType()"]}, + {"kind":"and","text":"","code":["Tensor t1 = Tensor.of( shape1, -4d..2d ).to( device )","Tensor t2 = Tensor.of( shape2, -3d..5d ).to( device )"]}, - {"kind":"and","text":"The data of the first non slice tensor as well as its slice should be as expected.","code":["data1.collect({(it as BigDecimal).round(3)}).sum().round(3) == expected[0]","data2.collect({(it as BigDecimal).round(3)}).sum().round(3) == expected[1]"]}, + {"kind":"when","text":"","code":["Tensor t = Tensor.of( operation, [t1, t2] )"]}, - {"kind":"where","text":"","code":{"type":["Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float"],"funExpression":["'gaus(i0)*100 % i0'","'gaus(i0)*100 % i0'","'gaus(i0)*100 % i0'","'tanh(i0)*100 % i0'","'tanh(i0)*100 % i0'","'tanh(i0)*100 % i0'","'fast_tanh(i0)*100 % i0'","'fast_tanh(i0)*100 % i0'","'fast_tanh(i0)*100 % i0'","'fast_gaus(i0)+i0'","'fast_gaus(i0)+i0'","'fast_gaus(i0)+i0'","'softsign(i0)*100 % i0'","'softsign(i0)*100 % i0'","'softsign(i0)*100 % i0'","'random(i0)'","'random(i0)'"],"expected":["[2840.044, 2840.044]","[2840.043, 2840.043]","[0.000, 0.000]","[625.914, 625.914]","[625.910, 625.910]","[-20900.000, -20900.000]","[637.151, 637.151]","[637.011, 637.011]","[-20900.000, -20900.000]","[6632.311, 6632.311]","[6632.311, 6632.311]","[-233722189118.000, -233722189118.000]","[647.562, 647.562]","[647.564, 647.564]","[-20900.000, -20900.000]","[93.662, 93.662]","[93.662, 93.662]"]}} + {"kind":"then","text":"","code":["t.toString() == expected"]}, + + {"kind":"cleanup","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = false 
}"]}, + + {"kind":"where","text":"","code":{"device":["CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('gpu')","Device.get('gpu')","CPU.get()","CPU.get()","Device.get('gpu')","Device.get('gpu')","CPU.get()","CPU.get()","Device.get('gpu')","Device.get('gpu')","CPU.get()","CPU.get()"],"shape1":["[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]"],"shape2":["[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]"],"operation":["'i0%i1'","'i0%i1'","'i0*i1'","'i0*i1'","'i0*i1'","'i0*i1'","'i0+i1'","'i0+i1'","'i0+i1'","'i0+i1'","'i0-i1'","'i0-i1'","'i0-i1'","'i0-i1'","'i0/i1'","'i0/i1'"],"expected":["\"(2x2):[-1.0, -0.0, -0.0, NaN]\"","\"(2x3x2):[-1.0, -0.0, -0.0, NaN, -0.0, -0.0, -1.0, -1.0, 0.0, NaN, 0.0, 1.0]\"","\"(2x2):[12.0, 8.0, 3.0, -0.0]\"","\"(2x3x2):[12.0, 8.0, 3.0, -0.0, -2.0, -4.0, 3.0, 2.0, -0.0, 0.0, 1.0, 2.0]\"","\"(2x2):[12.0, 8.0, 3.0, -0.0]\"","\"(2x3x2):[12.0, 8.0, 3.0, -0.0, -2.0, -4.0, 3.0, 2.0, -0.0, 0.0, 1.0, 2.0]\"","\"(2x2):[-7.0, -6.0, -4.0, -3.0]\"","\"(2x3x2):[-7.0, -6.0, -4.0, -3.0, -1.0, 0.0, -4.0, -3.0, -1.0, 0.0, 2.0, 3.0]\"","\"(2x2):[-7.0, -6.0, -4.0, -3.0]\"","\"(2x3x2):[-7.0, -6.0, -4.0, -3.0, -1.0, 0.0, -4.0, -3.0, -1.0, 0.0, 2.0, 3.0]\"","\"(2x2):[-1.0, -2.0, -2.0, -3.0]\"","\"(2x3x2):[-1.0, -2.0, -2.0, -3.0, -3.0, -4.0, 2.0, 1.0, 1.0, 0.0, 0.0, -1.0]\"","\"(2x2):[-1.0, -2.0, -2.0, -3.0]\"","\"(2x3x2):[-1.0, -2.0, -2.0, -3.0, -3.0, -4.0, 2.0, 1.0, 1.0, 0.0, 0.0, -1.0]\"","\"(2x2):[1.33333, 2.0, 3.0, -∞]\"","\"(2x3x2):[1.33333, 2.0, 3.0, -∞, -2.0, -1.0, 0.33333, 0.5, -0.0, NaN, 1.0, 0.5]\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The broadcast operation stress test runs error free and produces expected result [3]", + "result":"PASS", + "duration":"0.004 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"For this test we tell the CL-Backend to auto-convert to floats.","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = true }"]}, + + {"kind":"and","text":"","code":["Tensor t1 = Tensor.of( shape1, -4d..2d ).to( device )","Tensor t2 = Tensor.of( shape2, -3d..5d ).to( device )"]}, + + {"kind":"when","text":"","code":["Tensor t = Tensor.of( operation, [t1, t2] )"]}, + + {"kind":"then","text":"","code":["t.toString() == expected"]}, + + {"kind":"cleanup","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = false }"]}, + + {"kind":"where","text":"","code":{"device":["CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('gpu')","Device.get('gpu')","CPU.get()","CPU.get()","Device.get('gpu')","Device.get('gpu')","CPU.get()","CPU.get()","Device.get('gpu')","Device.get('gpu')","CPU.get()","CPU.get()"],"shape1":["[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]"],"shape2":["[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]"],"operation":["'i0%i1'","'i0%i1'","'i0*i1'","'i0*i1'","'i0*i1'","'i0*i1'","'i0+i1'","'i0+i1'","'i0+i1'","'i0+i1'","'i0-i1'","'i0-i1'","'i0-i1'","'i0-i1'","'i0/i1'","'i0/i1'"],"expected":["\"(2x2):[-1.0, -0.0, 
-0.0, NaN]\"","\"(2x3x2):[-1.0, -0.0, -0.0, NaN, -0.0, -0.0, -1.0, -1.0, 0.0, NaN, 0.0, 1.0]\"","\"(2x2):[12.0, 8.0, 3.0, -0.0]\"","\"(2x3x2):[12.0, 8.0, 3.0, -0.0, -2.0, -4.0, 3.0, 2.0, -0.0, 0.0, 1.0, 2.0]\"","\"(2x2):[12.0, 8.0, 3.0, -0.0]\"","\"(2x3x2):[12.0, 8.0, 3.0, -0.0, -2.0, -4.0, 3.0, 2.0, -0.0, 0.0, 1.0, 2.0]\"","\"(2x2):[-7.0, -6.0, -4.0, -3.0]\"","\"(2x3x2):[-7.0, -6.0, -4.0, -3.0, -1.0, 0.0, -4.0, -3.0, -1.0, 0.0, 2.0, 3.0]\"","\"(2x2):[-7.0, -6.0, -4.0, -3.0]\"","\"(2x3x2):[-7.0, -6.0, -4.0, -3.0, -1.0, 0.0, -4.0, -3.0, -1.0, 0.0, 2.0, 3.0]\"","\"(2x2):[-1.0, -2.0, -2.0, -3.0]\"","\"(2x3x2):[-1.0, -2.0, -2.0, -3.0, -3.0, -4.0, 2.0, 1.0, 1.0, 0.0, 0.0, -1.0]\"","\"(2x2):[-1.0, -2.0, -2.0, -3.0]\"","\"(2x3x2):[-1.0, -2.0, -2.0, -3.0, -3.0, -4.0, 2.0, 1.0, 1.0, 0.0, 0.0, -1.0]\"","\"(2x2):[1.33333, 2.0, 3.0, -∞]\"","\"(2x3x2):[1.33333, 2.0, 3.0, -∞, -2.0, -1.0, 0.33333, 0.5, -0.0, NaN, 1.0, 0.5]\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The broadcast operation stress test runs error free and produces expected result [4]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"For this test we tell the CL-Backend to auto-convert to floats.","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = true }"]}, + + {"kind":"and","text":"","code":["Tensor t1 = Tensor.of( shape1, -4d..2d ).to( device )","Tensor t2 = Tensor.of( shape2, -3d..5d ).to( device )"]}, + + {"kind":"when","text":"","code":["Tensor t = Tensor.of( operation, [t1, t2] )"]}, + + {"kind":"then","text":"","code":["t.toString() == expected"]}, + + {"kind":"cleanup","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = false }"]}, + + {"kind":"where","text":"","code":{"device":["CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('gpu')","Device.get('gpu')","CPU.get()","CPU.get()","Device.get('gpu')","Device.get('gpu')","CPU.get()","CPU.get()","Device.get('gpu')","Device.get('gpu')","CPU.get()","CPU.get()"],"shape1":["[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]"],"shape2":["[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]"],"operation":["'i0%i1'","'i0%i1'","'i0*i1'","'i0*i1'","'i0*i1'","'i0*i1'","'i0+i1'","'i0+i1'","'i0+i1'","'i0+i1'","'i0-i1'","'i0-i1'","'i0-i1'","'i0-i1'","'i0/i1'","'i0/i1'"],"expected":["\"(2x2):[-1.0, -0.0, -0.0, NaN]\"","\"(2x3x2):[-1.0, -0.0, -0.0, NaN, -0.0, -0.0, -1.0, -1.0, 0.0, NaN, 0.0, 1.0]\"","\"(2x2):[12.0, 8.0, 3.0, -0.0]\"","\"(2x3x2):[12.0, 8.0, 3.0, -0.0, -2.0, -4.0, 3.0, 2.0, -0.0, 0.0, 1.0, 2.0]\"","\"(2x2):[12.0, 8.0, 3.0, -0.0]\"","\"(2x3x2):[12.0, 8.0, 3.0, -0.0, -2.0, -4.0, 3.0, 2.0, -0.0, 0.0, 1.0, 2.0]\"","\"(2x2):[-7.0, -6.0, -4.0, -3.0]\"","\"(2x3x2):[-7.0, -6.0, -4.0, -3.0, -1.0, 0.0, -4.0, -3.0, -1.0, 0.0, 2.0, 3.0]\"","\"(2x2):[-7.0, -6.0, -4.0, -3.0]\"","\"(2x3x2):[-7.0, -6.0, -4.0, -3.0, -1.0, 0.0, -4.0, -3.0, -1.0, 0.0, 2.0, 3.0]\"","\"(2x2):[-1.0, -2.0, -2.0, -3.0]\"","\"(2x3x2):[-1.0, -2.0, -2.0, -3.0, -3.0, -4.0, 2.0, 1.0, 1.0, 0.0, 0.0, -1.0]\"","\"(2x2):[-1.0, -2.0, -2.0, -3.0]\"","\"(2x3x2):[-1.0, -2.0, -2.0, -3.0, -3.0, -4.0, 2.0, 1.0, 1.0, 0.0, 0.0, -1.0]\"","\"(2x2):[1.33333, 2.0, 3.0, -∞]\"","\"(2x3x2):[1.33333, 
2.0, 3.0, -∞, -2.0, -1.0, 0.33333, 0.5, -0.0, NaN, 1.0, 0.5]\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The broadcast operation stress test runs error free and produces expected result [5]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"For this test we tell the CL-Backend to auto-convert to floats.","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = true }"]}, + + {"kind":"and","text":"","code":["Tensor t1 = Tensor.of( shape1, -4d..2d ).to( device )","Tensor t2 = Tensor.of( shape2, -3d..5d ).to( device )"]}, + + {"kind":"when","text":"","code":["Tensor t = Tensor.of( operation, [t1, t2] )"]}, + + {"kind":"then","text":"","code":["t.toString() == expected"]}, + + {"kind":"cleanup","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = false }"]}, + + {"kind":"where","text":"","code":{"device":["CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('gpu')","Device.get('gpu')","CPU.get()","CPU.get()","Device.get('gpu')","Device.get('gpu')","CPU.get()","CPU.get()","Device.get('gpu')","Device.get('gpu')","CPU.get()","CPU.get()"],"shape1":["[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]"],"shape2":["[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]"],"operation":["'i0%i1'","'i0%i1'","'i0*i1'","'i0*i1'","'i0*i1'","'i0*i1'","'i0+i1'","'i0+i1'","'i0+i1'","'i0+i1'","'i0-i1'","'i0-i1'","'i0-i1'","'i0-i1'","'i0/i1'","'i0/i1'"],"expected":["\"(2x2):[-1.0, -0.0, -0.0, NaN]\"","\"(2x3x2):[-1.0, -0.0, -0.0, NaN, -0.0, -0.0, -1.0, -1.0, 0.0, NaN, 0.0, 1.0]\"","\"(2x2):[12.0, 8.0, 3.0, -0.0]\"","\"(2x3x2):[12.0, 8.0, 3.0, -0.0, -2.0, -4.0, 3.0, 2.0, -0.0, 0.0, 1.0, 2.0]\"","\"(2x2):[12.0, 8.0, 3.0, -0.0]\"","\"(2x3x2):[12.0, 8.0, 3.0, -0.0, -2.0, -4.0, 3.0, 2.0, -0.0, 0.0, 1.0, 2.0]\"","\"(2x2):[-7.0, -6.0, -4.0, -3.0]\"","\"(2x3x2):[-7.0, -6.0, -4.0, -3.0, -1.0, 0.0, -4.0, -3.0, -1.0, 0.0, 2.0, 3.0]\"","\"(2x2):[-7.0, -6.0, -4.0, -3.0]\"","\"(2x3x2):[-7.0, -6.0, -4.0, -3.0, -1.0, 0.0, -4.0, -3.0, -1.0, 0.0, 2.0, 3.0]\"","\"(2x2):[-1.0, -2.0, -2.0, -3.0]\"","\"(2x3x2):[-1.0, -2.0, -2.0, -3.0, -3.0, -4.0, 2.0, 1.0, 1.0, 0.0, 0.0, -1.0]\"","\"(2x2):[-1.0, -2.0, -2.0, -3.0]\"","\"(2x3x2):[-1.0, -2.0, -2.0, -3.0, -3.0, -4.0, 2.0, 1.0, 1.0, 0.0, 0.0, -1.0]\"","\"(2x2):[1.33333, 2.0, 3.0, -∞]\"","\"(2x3x2):[1.33333, 2.0, 3.0, -∞, -2.0, -1.0, 0.33333, 0.5, -0.0, NaN, 1.0, 0.5]\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The broadcast operation stress test runs error free and produces expected result [6]", + "result":"PASS", + "duration":"0.003 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"For this test we tell the CL-Backend to auto-convert to floats.","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = true }"]}, + + {"kind":"and","text":"","code":["Tensor t1 = Tensor.of( shape1, -4d..2d ).to( device )","Tensor t2 = Tensor.of( shape2, -3d..5d ).to( device )"]}, + + {"kind":"when","text":"","code":["Tensor t = Tensor.of( operation, [t1, t2] )"]}, + + {"kind":"then","text":"","code":["t.toString() == expected"]}, + + 
{"kind":"cleanup","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = false }"]}, + + {"kind":"where","text":"","code":{"device":["CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('gpu')","Device.get('gpu')","CPU.get()","CPU.get()","Device.get('gpu')","Device.get('gpu')","CPU.get()","CPU.get()","Device.get('gpu')","Device.get('gpu')","CPU.get()","CPU.get()"],"shape1":["[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]"],"shape2":["[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]"],"operation":["'i0%i1'","'i0%i1'","'i0*i1'","'i0*i1'","'i0*i1'","'i0*i1'","'i0+i1'","'i0+i1'","'i0+i1'","'i0+i1'","'i0-i1'","'i0-i1'","'i0-i1'","'i0-i1'","'i0/i1'","'i0/i1'"],"expected":["\"(2x2):[-1.0, -0.0, -0.0, NaN]\"","\"(2x3x2):[-1.0, -0.0, -0.0, NaN, -0.0, -0.0, -1.0, -1.0, 0.0, NaN, 0.0, 1.0]\"","\"(2x2):[12.0, 8.0, 3.0, -0.0]\"","\"(2x3x2):[12.0, 8.0, 3.0, -0.0, -2.0, -4.0, 3.0, 2.0, -0.0, 0.0, 1.0, 2.0]\"","\"(2x2):[12.0, 8.0, 3.0, -0.0]\"","\"(2x3x2):[12.0, 8.0, 3.0, -0.0, -2.0, -4.0, 3.0, 2.0, -0.0, 0.0, 1.0, 2.0]\"","\"(2x2):[-7.0, -6.0, -4.0, -3.0]\"","\"(2x3x2):[-7.0, -6.0, -4.0, -3.0, -1.0, 0.0, -4.0, -3.0, -1.0, 0.0, 2.0, 3.0]\"","\"(2x2):[-7.0, -6.0, -4.0, -3.0]\"","\"(2x3x2):[-7.0, -6.0, -4.0, -3.0, -1.0, 0.0, -4.0, -3.0, -1.0, 0.0, 2.0, 3.0]\"","\"(2x2):[-1.0, -2.0, -2.0, -3.0]\"","\"(2x3x2):[-1.0, -2.0, -2.0, -3.0, -3.0, -4.0, 2.0, 1.0, 1.0, 0.0, 0.0, -1.0]\"","\"(2x2):[-1.0, -2.0, -2.0, -3.0]\"","\"(2x3x2):[-1.0, -2.0, -2.0, -3.0, -3.0, -4.0, 2.0, 1.0, 1.0, 0.0, 0.0, -1.0]\"","\"(2x2):[1.33333, 2.0, 3.0, -∞]\"","\"(2x3x2):[1.33333, 2.0, 3.0, -∞, -2.0, -1.0, 0.33333, 0.5, -0.0, NaN, 1.0, 0.5]\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The broadcast operation stress test runs error free and produces expected result [7]", + "result":"PASS", + "duration":"0.002 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"For this test we tell the CL-Backend to auto-convert to floats.","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = true }"]}, + + {"kind":"and","text":"","code":["Tensor t1 = Tensor.of( shape1, -4d..2d ).to( device )","Tensor t2 = Tensor.of( shape2, -3d..5d ).to( device )"]}, + + {"kind":"when","text":"","code":["Tensor t = Tensor.of( operation, [t1, t2] )"]}, + + {"kind":"then","text":"","code":["t.toString() == expected"]}, + + {"kind":"cleanup","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = false }"]}, + + {"kind":"where","text":"","code":{"device":["CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('gpu')","Device.get('gpu')","CPU.get()","CPU.get()","Device.get('gpu')","Device.get('gpu')","CPU.get()","CPU.get()","Device.get('gpu')","Device.get('gpu')","CPU.get()","CPU.get()"],"shape1":["[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]"],"shape2":["[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 
2]"],"operation":["'i0%i1'","'i0%i1'","'i0*i1'","'i0*i1'","'i0*i1'","'i0*i1'","'i0+i1'","'i0+i1'","'i0+i1'","'i0+i1'","'i0-i1'","'i0-i1'","'i0-i1'","'i0-i1'","'i0/i1'","'i0/i1'"],"expected":["\"(2x2):[-1.0, -0.0, -0.0, NaN]\"","\"(2x3x2):[-1.0, -0.0, -0.0, NaN, -0.0, -0.0, -1.0, -1.0, 0.0, NaN, 0.0, 1.0]\"","\"(2x2):[12.0, 8.0, 3.0, -0.0]\"","\"(2x3x2):[12.0, 8.0, 3.0, -0.0, -2.0, -4.0, 3.0, 2.0, -0.0, 0.0, 1.0, 2.0]\"","\"(2x2):[12.0, 8.0, 3.0, -0.0]\"","\"(2x3x2):[12.0, 8.0, 3.0, -0.0, -2.0, -4.0, 3.0, 2.0, -0.0, 0.0, 1.0, 2.0]\"","\"(2x2):[-7.0, -6.0, -4.0, -3.0]\"","\"(2x3x2):[-7.0, -6.0, -4.0, -3.0, -1.0, 0.0, -4.0, -3.0, -1.0, 0.0, 2.0, 3.0]\"","\"(2x2):[-7.0, -6.0, -4.0, -3.0]\"","\"(2x3x2):[-7.0, -6.0, -4.0, -3.0, -1.0, 0.0, -4.0, -3.0, -1.0, 0.0, 2.0, 3.0]\"","\"(2x2):[-1.0, -2.0, -2.0, -3.0]\"","\"(2x3x2):[-1.0, -2.0, -2.0, -3.0, -3.0, -4.0, 2.0, 1.0, 1.0, 0.0, 0.0, -1.0]\"","\"(2x2):[-1.0, -2.0, -2.0, -3.0]\"","\"(2x3x2):[-1.0, -2.0, -2.0, -3.0, -3.0, -4.0, 2.0, 1.0, 1.0, 0.0, 0.0, -1.0]\"","\"(2x2):[1.33333, 2.0, 3.0, -∞]\"","\"(2x3x2):[1.33333, 2.0, 3.0, -∞, -2.0, -1.0, 0.33333, 0.5, -0.0, NaN, 1.0, 0.5]\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The broadcast operation stress test runs error free and produces expected result [8]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"For this test we tell the CL-Backend to auto-convert to floats.","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = true }"]}, + + {"kind":"and","text":"","code":["Tensor t1 = Tensor.of( shape1, -4d..2d ).to( device )","Tensor t2 = Tensor.of( shape2, -3d..5d ).to( device )"]}, + + {"kind":"when","text":"","code":["Tensor t = Tensor.of( operation, [t1, t2] )"]}, + + {"kind":"then","text":"","code":["t.toString() == expected"]}, + + {"kind":"cleanup","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = false }"]}, + + {"kind":"where","text":"","code":{"device":["CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('gpu')","Device.get('gpu')","CPU.get()","CPU.get()","Device.get('gpu')","Device.get('gpu')","CPU.get()","CPU.get()","Device.get('gpu')","Device.get('gpu')","CPU.get()","CPU.get()"],"shape1":["[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]"],"shape2":["[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]"],"operation":["'i0%i1'","'i0%i1'","'i0*i1'","'i0*i1'","'i0*i1'","'i0*i1'","'i0+i1'","'i0+i1'","'i0+i1'","'i0+i1'","'i0-i1'","'i0-i1'","'i0-i1'","'i0-i1'","'i0/i1'","'i0/i1'"],"expected":["\"(2x2):[-1.0, -0.0, -0.0, NaN]\"","\"(2x3x2):[-1.0, -0.0, -0.0, NaN, -0.0, -0.0, -1.0, -1.0, 0.0, NaN, 0.0, 1.0]\"","\"(2x2):[12.0, 8.0, 3.0, -0.0]\"","\"(2x3x2):[12.0, 8.0, 3.0, -0.0, -2.0, -4.0, 3.0, 2.0, -0.0, 0.0, 1.0, 2.0]\"","\"(2x2):[12.0, 8.0, 3.0, -0.0]\"","\"(2x3x2):[12.0, 8.0, 3.0, -0.0, -2.0, -4.0, 3.0, 2.0, -0.0, 0.0, 1.0, 2.0]\"","\"(2x2):[-7.0, -6.0, -4.0, -3.0]\"","\"(2x3x2):[-7.0, -6.0, -4.0, -3.0, -1.0, 0.0, -4.0, -3.0, -1.0, 0.0, 2.0, 3.0]\"","\"(2x2):[-7.0, -6.0, -4.0, -3.0]\"","\"(2x3x2):[-7.0, -6.0, -4.0, -3.0, -1.0, 0.0, -4.0, -3.0, -1.0, 0.0, 2.0, 3.0]\"","\"(2x2):[-1.0, -2.0, -2.0, -3.0]\"","\"(2x3x2):[-1.0, -2.0, -2.0, -3.0, -3.0, 
-4.0, 2.0, 1.0, 1.0, 0.0, 0.0, -1.0]\"","\"(2x2):[-1.0, -2.0, -2.0, -3.0]\"","\"(2x3x2):[-1.0, -2.0, -2.0, -3.0, -3.0, -4.0, 2.0, 1.0, 1.0, 0.0, 0.0, -1.0]\"","\"(2x2):[1.33333, 2.0, 3.0, -∞]\"","\"(2x3x2):[1.33333, 2.0, 3.0, -∞, -2.0, -1.0, 0.33333, 0.5, -0.0, NaN, 1.0, 0.5]\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The broadcast operation stress test runs error free and produces expected result [9]", + "result":"PASS", + "duration":"0.003 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"For this test we tell the CL-Backend to auto-convert to floats.","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = true }"]}, + + {"kind":"and","text":"","code":["Tensor t1 = Tensor.of( shape1, -4d..2d ).to( device )","Tensor t2 = Tensor.of( shape2, -3d..5d ).to( device )"]}, + + {"kind":"when","text":"","code":["Tensor t = Tensor.of( operation, [t1, t2] )"]}, + + {"kind":"then","text":"","code":["t.toString() == expected"]}, + + {"kind":"cleanup","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = false }"]}, + + {"kind":"where","text":"","code":{"device":["CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('gpu')","Device.get('gpu')","CPU.get()","CPU.get()","Device.get('gpu')","Device.get('gpu')","CPU.get()","CPU.get()","Device.get('gpu')","Device.get('gpu')","CPU.get()","CPU.get()"],"shape1":["[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]"],"shape2":["[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]"],"operation":["'i0%i1'","'i0%i1'","'i0*i1'","'i0*i1'","'i0*i1'","'i0*i1'","'i0+i1'","'i0+i1'","'i0+i1'","'i0+i1'","'i0-i1'","'i0-i1'","'i0-i1'","'i0-i1'","'i0/i1'","'i0/i1'"],"expected":["\"(2x2):[-1.0, -0.0, -0.0, NaN]\"","\"(2x3x2):[-1.0, -0.0, -0.0, NaN, -0.0, -0.0, -1.0, -1.0, 0.0, NaN, 0.0, 1.0]\"","\"(2x2):[12.0, 8.0, 3.0, -0.0]\"","\"(2x3x2):[12.0, 8.0, 3.0, -0.0, -2.0, -4.0, 3.0, 2.0, -0.0, 0.0, 1.0, 2.0]\"","\"(2x2):[12.0, 8.0, 3.0, -0.0]\"","\"(2x3x2):[12.0, 8.0, 3.0, -0.0, -2.0, -4.0, 3.0, 2.0, -0.0, 0.0, 1.0, 2.0]\"","\"(2x2):[-7.0, -6.0, -4.0, -3.0]\"","\"(2x3x2):[-7.0, -6.0, -4.0, -3.0, -1.0, 0.0, -4.0, -3.0, -1.0, 0.0, 2.0, 3.0]\"","\"(2x2):[-7.0, -6.0, -4.0, -3.0]\"","\"(2x3x2):[-7.0, -6.0, -4.0, -3.0, -1.0, 0.0, -4.0, -3.0, -1.0, 0.0, 2.0, 3.0]\"","\"(2x2):[-1.0, -2.0, -2.0, -3.0]\"","\"(2x3x2):[-1.0, -2.0, -2.0, -3.0, -3.0, -4.0, 2.0, 1.0, 1.0, 0.0, 0.0, -1.0]\"","\"(2x2):[-1.0, -2.0, -2.0, -3.0]\"","\"(2x3x2):[-1.0, -2.0, -2.0, -3.0, -3.0, -4.0, 2.0, 1.0, 1.0, 0.0, 0.0, -1.0]\"","\"(2x2):[1.33333, 2.0, 3.0, -∞]\"","\"(2x3x2):[1.33333, 2.0, 3.0, -∞, -2.0, -1.0, 0.33333, 0.5, -0.0, NaN, 1.0, 0.5]\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The broadcast operation stress test runs error free and produces expected result [10]", + "result":"PASS", + "duration":"0.003 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"For this test we tell the CL-Backend to auto-convert to floats.","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = true }"]}, + + {"kind":"and","text":"","code":["Tensor t1 = Tensor.of( shape1, -4d..2d ).to( device )","Tensor t2 = 
Tensor.of( shape2, -3d..5d ).to( device )"]}, + + {"kind":"when","text":"","code":["Tensor t = Tensor.of( operation, [t1, t2] )"]}, + + {"kind":"then","text":"","code":["t.toString() == expected"]}, + + {"kind":"cleanup","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = false }"]}, + + {"kind":"where","text":"","code":{"device":["CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('gpu')","Device.get('gpu')","CPU.get()","CPU.get()","Device.get('gpu')","Device.get('gpu')","CPU.get()","CPU.get()","Device.get('gpu')","Device.get('gpu')","CPU.get()","CPU.get()"],"shape1":["[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]"],"shape2":["[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]"],"operation":["'i0%i1'","'i0%i1'","'i0*i1'","'i0*i1'","'i0*i1'","'i0*i1'","'i0+i1'","'i0+i1'","'i0+i1'","'i0+i1'","'i0-i1'","'i0-i1'","'i0-i1'","'i0-i1'","'i0/i1'","'i0/i1'"],"expected":["\"(2x2):[-1.0, -0.0, -0.0, NaN]\"","\"(2x3x2):[-1.0, -0.0, -0.0, NaN, -0.0, -0.0, -1.0, -1.0, 0.0, NaN, 0.0, 1.0]\"","\"(2x2):[12.0, 8.0, 3.0, -0.0]\"","\"(2x3x2):[12.0, 8.0, 3.0, -0.0, -2.0, -4.0, 3.0, 2.0, -0.0, 0.0, 1.0, 2.0]\"","\"(2x2):[12.0, 8.0, 3.0, -0.0]\"","\"(2x3x2):[12.0, 8.0, 3.0, -0.0, -2.0, -4.0, 3.0, 2.0, -0.0, 0.0, 1.0, 2.0]\"","\"(2x2):[-7.0, -6.0, -4.0, -3.0]\"","\"(2x3x2):[-7.0, -6.0, -4.0, -3.0, -1.0, 0.0, -4.0, -3.0, -1.0, 0.0, 2.0, 3.0]\"","\"(2x2):[-7.0, -6.0, -4.0, -3.0]\"","\"(2x3x2):[-7.0, -6.0, -4.0, -3.0, -1.0, 0.0, -4.0, -3.0, -1.0, 0.0, 2.0, 3.0]\"","\"(2x2):[-1.0, -2.0, -2.0, -3.0]\"","\"(2x3x2):[-1.0, -2.0, -2.0, -3.0, -3.0, -4.0, 2.0, 1.0, 1.0, 0.0, 0.0, -1.0]\"","\"(2x2):[-1.0, -2.0, -2.0, -3.0]\"","\"(2x3x2):[-1.0, -2.0, -2.0, -3.0, -3.0, -4.0, 2.0, 1.0, 1.0, 0.0, 0.0, -1.0]\"","\"(2x2):[1.33333, 2.0, 3.0, -∞]\"","\"(2x3x2):[1.33333, 2.0, 3.0, -∞, -2.0, -1.0, 0.33333, 0.5, -0.0, NaN, 1.0, 0.5]\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The broadcast operation stress test runs error free and produces expected result [11]", + "result":"PASS", + "duration":"0.003 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"For this test we tell the CL-Backend to auto-convert to floats.","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = true }"]}, + + {"kind":"and","text":"","code":["Tensor t1 = Tensor.of( shape1, -4d..2d ).to( device )","Tensor t2 = Tensor.of( shape2, -3d..5d ).to( device )"]}, + + {"kind":"when","text":"","code":["Tensor t = Tensor.of( operation, [t1, t2] )"]}, + + {"kind":"then","text":"","code":["t.toString() == expected"]}, + + {"kind":"cleanup","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = false }"]}, + + {"kind":"where","text":"","code":{"device":["CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('gpu')","Device.get('gpu')","CPU.get()","CPU.get()","Device.get('gpu')","Device.get('gpu')","CPU.get()","CPU.get()","Device.get('gpu')","Device.get('gpu')","CPU.get()","CPU.get()"],"shape1":["[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]"],"shape2":["[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]","[2, 
2]","[1, 3, 2]","[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]"],"operation":["'i0%i1'","'i0%i1'","'i0*i1'","'i0*i1'","'i0*i1'","'i0*i1'","'i0+i1'","'i0+i1'","'i0+i1'","'i0+i1'","'i0-i1'","'i0-i1'","'i0-i1'","'i0-i1'","'i0/i1'","'i0/i1'"],"expected":["\"(2x2):[-1.0, -0.0, -0.0, NaN]\"","\"(2x3x2):[-1.0, -0.0, -0.0, NaN, -0.0, -0.0, -1.0, -1.0, 0.0, NaN, 0.0, 1.0]\"","\"(2x2):[12.0, 8.0, 3.0, -0.0]\"","\"(2x3x2):[12.0, 8.0, 3.0, -0.0, -2.0, -4.0, 3.0, 2.0, -0.0, 0.0, 1.0, 2.0]\"","\"(2x2):[12.0, 8.0, 3.0, -0.0]\"","\"(2x3x2):[12.0, 8.0, 3.0, -0.0, -2.0, -4.0, 3.0, 2.0, -0.0, 0.0, 1.0, 2.0]\"","\"(2x2):[-7.0, -6.0, -4.0, -3.0]\"","\"(2x3x2):[-7.0, -6.0, -4.0, -3.0, -1.0, 0.0, -4.0, -3.0, -1.0, 0.0, 2.0, 3.0]\"","\"(2x2):[-7.0, -6.0, -4.0, -3.0]\"","\"(2x3x2):[-7.0, -6.0, -4.0, -3.0, -1.0, 0.0, -4.0, -3.0, -1.0, 0.0, 2.0, 3.0]\"","\"(2x2):[-1.0, -2.0, -2.0, -3.0]\"","\"(2x3x2):[-1.0, -2.0, -2.0, -3.0, -3.0, -4.0, 2.0, 1.0, 1.0, 0.0, 0.0, -1.0]\"","\"(2x2):[-1.0, -2.0, -2.0, -3.0]\"","\"(2x3x2):[-1.0, -2.0, -2.0, -3.0, -3.0, -4.0, 2.0, 1.0, 1.0, 0.0, 0.0, -1.0]\"","\"(2x2):[1.33333, 2.0, 3.0, -∞]\"","\"(2x3x2):[1.33333, 2.0, 3.0, -∞, -2.0, -1.0, 0.33333, 0.5, -0.0, NaN, 1.0, 0.5]\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The broadcast operation stress test runs error free and produces expected result [12]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"For this test we tell the CL-Backend to auto-convert to floats.","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = true }"]}, + + {"kind":"and","text":"","code":["Tensor t1 = Tensor.of( shape1, -4d..2d ).to( device )","Tensor t2 = Tensor.of( shape2, -3d..5d ).to( device )"]}, + + {"kind":"when","text":"","code":["Tensor t = Tensor.of( operation, [t1, t2] )"]}, + + {"kind":"then","text":"","code":["t.toString() == expected"]}, + + {"kind":"cleanup","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = false }"]}, + + {"kind":"where","text":"","code":{"device":["CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('gpu')","Device.get('gpu')","CPU.get()","CPU.get()","Device.get('gpu')","Device.get('gpu')","CPU.get()","CPU.get()","Device.get('gpu')","Device.get('gpu')","CPU.get()","CPU.get()"],"shape1":["[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]"],"shape2":["[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]"],"operation":["'i0%i1'","'i0%i1'","'i0*i1'","'i0*i1'","'i0*i1'","'i0*i1'","'i0+i1'","'i0+i1'","'i0+i1'","'i0+i1'","'i0-i1'","'i0-i1'","'i0-i1'","'i0-i1'","'i0/i1'","'i0/i1'"],"expected":["\"(2x2):[-1.0, -0.0, -0.0, NaN]\"","\"(2x3x2):[-1.0, -0.0, -0.0, NaN, -0.0, -0.0, -1.0, -1.0, 0.0, NaN, 0.0, 1.0]\"","\"(2x2):[12.0, 8.0, 3.0, -0.0]\"","\"(2x3x2):[12.0, 8.0, 3.0, -0.0, -2.0, -4.0, 3.0, 2.0, -0.0, 0.0, 1.0, 2.0]\"","\"(2x2):[12.0, 8.0, 3.0, -0.0]\"","\"(2x3x2):[12.0, 8.0, 3.0, -0.0, -2.0, -4.0, 3.0, 2.0, -0.0, 0.0, 1.0, 2.0]\"","\"(2x2):[-7.0, -6.0, -4.0, -3.0]\"","\"(2x3x2):[-7.0, -6.0, -4.0, -3.0, -1.0, 0.0, -4.0, -3.0, -1.0, 0.0, 2.0, 3.0]\"","\"(2x2):[-7.0, -6.0, -4.0, -3.0]\"","\"(2x3x2):[-7.0, -6.0, -4.0, -3.0, -1.0, 0.0, 
-4.0, -3.0, -1.0, 0.0, 2.0, 3.0]\"","\"(2x2):[-1.0, -2.0, -2.0, -3.0]\"","\"(2x3x2):[-1.0, -2.0, -2.0, -3.0, -3.0, -4.0, 2.0, 1.0, 1.0, 0.0, 0.0, -1.0]\"","\"(2x2):[-1.0, -2.0, -2.0, -3.0]\"","\"(2x3x2):[-1.0, -2.0, -2.0, -3.0, -3.0, -4.0, 2.0, 1.0, 1.0, 0.0, 0.0, -1.0]\"","\"(2x2):[1.33333, 2.0, 3.0, -∞]\"","\"(2x3x2):[1.33333, 2.0, 3.0, -∞, -2.0, -1.0, 0.33333, 0.5, -0.0, NaN, 1.0, 0.5]\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The broadcast operation stress test runs error free and produces expected result [13]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"For this test we tell the CL-Backend to auto-convert to floats.","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = true }"]}, + + {"kind":"and","text":"","code":["Tensor t1 = Tensor.of( shape1, -4d..2d ).to( device )","Tensor t2 = Tensor.of( shape2, -3d..5d ).to( device )"]}, + + {"kind":"when","text":"","code":["Tensor t = Tensor.of( operation, [t1, t2] )"]}, + + {"kind":"then","text":"","code":["t.toString() == expected"]}, + + {"kind":"cleanup","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = false }"]}, + + {"kind":"where","text":"","code":{"device":["CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('gpu')","Device.get('gpu')","CPU.get()","CPU.get()","Device.get('gpu')","Device.get('gpu')","CPU.get()","CPU.get()","Device.get('gpu')","Device.get('gpu')","CPU.get()","CPU.get()"],"shape1":["[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]"],"shape2":["[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]"],"operation":["'i0%i1'","'i0%i1'","'i0*i1'","'i0*i1'","'i0*i1'","'i0*i1'","'i0+i1'","'i0+i1'","'i0+i1'","'i0+i1'","'i0-i1'","'i0-i1'","'i0-i1'","'i0-i1'","'i0/i1'","'i0/i1'"],"expected":["\"(2x2):[-1.0, -0.0, -0.0, NaN]\"","\"(2x3x2):[-1.0, -0.0, -0.0, NaN, -0.0, -0.0, -1.0, -1.0, 0.0, NaN, 0.0, 1.0]\"","\"(2x2):[12.0, 8.0, 3.0, -0.0]\"","\"(2x3x2):[12.0, 8.0, 3.0, -0.0, -2.0, -4.0, 3.0, 2.0, -0.0, 0.0, 1.0, 2.0]\"","\"(2x2):[12.0, 8.0, 3.0, -0.0]\"","\"(2x3x2):[12.0, 8.0, 3.0, -0.0, -2.0, -4.0, 3.0, 2.0, -0.0, 0.0, 1.0, 2.0]\"","\"(2x2):[-7.0, -6.0, -4.0, -3.0]\"","\"(2x3x2):[-7.0, -6.0, -4.0, -3.0, -1.0, 0.0, -4.0, -3.0, -1.0, 0.0, 2.0, 3.0]\"","\"(2x2):[-7.0, -6.0, -4.0, -3.0]\"","\"(2x3x2):[-7.0, -6.0, -4.0, -3.0, -1.0, 0.0, -4.0, -3.0, -1.0, 0.0, 2.0, 3.0]\"","\"(2x2):[-1.0, -2.0, -2.0, -3.0]\"","\"(2x3x2):[-1.0, -2.0, -2.0, -3.0, -3.0, -4.0, 2.0, 1.0, 1.0, 0.0, 0.0, -1.0]\"","\"(2x2):[-1.0, -2.0, -2.0, -3.0]\"","\"(2x3x2):[-1.0, -2.0, -2.0, -3.0, -3.0, -4.0, 2.0, 1.0, 1.0, 0.0, 0.0, -1.0]\"","\"(2x2):[1.33333, 2.0, 3.0, -∞]\"","\"(2x3x2):[1.33333, 2.0, 3.0, -∞, -2.0, -1.0, 0.33333, 0.5, -0.0, NaN, 1.0, 0.5]\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The broadcast operation stress test runs error free and produces expected result [14]", + "result":"PASS", + "duration":"0.004 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"For this test we tell the CL-Backend to auto-convert to floats.","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = true 
}"]}, + + {"kind":"and","text":"","code":["Tensor t1 = Tensor.of( shape1, -4d..2d ).to( device )","Tensor t2 = Tensor.of( shape2, -3d..5d ).to( device )"]}, + + {"kind":"when","text":"","code":["Tensor t = Tensor.of( operation, [t1, t2] )"]}, + + {"kind":"then","text":"","code":["t.toString() == expected"]}, + + {"kind":"cleanup","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = false }"]}, + + {"kind":"where","text":"","code":{"device":["CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('gpu')","Device.get('gpu')","CPU.get()","CPU.get()","Device.get('gpu')","Device.get('gpu')","CPU.get()","CPU.get()","Device.get('gpu')","Device.get('gpu')","CPU.get()","CPU.get()"],"shape1":["[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]"],"shape2":["[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]"],"operation":["'i0%i1'","'i0%i1'","'i0*i1'","'i0*i1'","'i0*i1'","'i0*i1'","'i0+i1'","'i0+i1'","'i0+i1'","'i0+i1'","'i0-i1'","'i0-i1'","'i0-i1'","'i0-i1'","'i0/i1'","'i0/i1'"],"expected":["\"(2x2):[-1.0, -0.0, -0.0, NaN]\"","\"(2x3x2):[-1.0, -0.0, -0.0, NaN, -0.0, -0.0, -1.0, -1.0, 0.0, NaN, 0.0, 1.0]\"","\"(2x2):[12.0, 8.0, 3.0, -0.0]\"","\"(2x3x2):[12.0, 8.0, 3.0, -0.0, -2.0, -4.0, 3.0, 2.0, -0.0, 0.0, 1.0, 2.0]\"","\"(2x2):[12.0, 8.0, 3.0, -0.0]\"","\"(2x3x2):[12.0, 8.0, 3.0, -0.0, -2.0, -4.0, 3.0, 2.0, -0.0, 0.0, 1.0, 2.0]\"","\"(2x2):[-7.0, -6.0, -4.0, -3.0]\"","\"(2x3x2):[-7.0, -6.0, -4.0, -3.0, -1.0, 0.0, -4.0, -3.0, -1.0, 0.0, 2.0, 3.0]\"","\"(2x2):[-7.0, -6.0, -4.0, -3.0]\"","\"(2x3x2):[-7.0, -6.0, -4.0, -3.0, -1.0, 0.0, -4.0, -3.0, -1.0, 0.0, 2.0, 3.0]\"","\"(2x2):[-1.0, -2.0, -2.0, -3.0]\"","\"(2x3x2):[-1.0, -2.0, -2.0, -3.0, -3.0, -4.0, 2.0, 1.0, 1.0, 0.0, 0.0, -1.0]\"","\"(2x2):[-1.0, -2.0, -2.0, -3.0]\"","\"(2x3x2):[-1.0, -2.0, -2.0, -3.0, -3.0, -4.0, 2.0, 1.0, 1.0, 0.0, 0.0, -1.0]\"","\"(2x2):[1.33333, 2.0, 3.0, -∞]\"","\"(2x3x2):[1.33333, 2.0, 3.0, -∞, -2.0, -1.0, 0.33333, 0.5, -0.0, NaN, 1.0, 0.5]\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The broadcast operation stress test runs error free and produces expected result [15]", + "result":"PASS", + "duration":"0.003 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"For this test we tell the CL-Backend to auto-convert to floats.","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = true }"]}, + + {"kind":"and","text":"","code":["Tensor t1 = Tensor.of( shape1, -4d..2d ).to( device )","Tensor t2 = Tensor.of( shape2, -3d..5d ).to( device )"]}, + + {"kind":"when","text":"","code":["Tensor t = Tensor.of( operation, [t1, t2] )"]}, + + {"kind":"then","text":"","code":["t.toString() == expected"]}, + + {"kind":"cleanup","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = false }"]}, + + {"kind":"where","text":"","code":{"device":["CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('gpu')","Device.get('gpu')","CPU.get()","CPU.get()","Device.get('gpu')","Device.get('gpu')","CPU.get()","CPU.get()","Device.get('gpu')","Device.get('gpu')","CPU.get()","CPU.get()"],"shape1":["[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]","[2, 
1]","[2, 3, 1]","[2, 1]","[2, 3, 1]","[2, 1]","[2, 3, 1]"],"shape2":["[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]","[2, 2]","[1, 3, 2]"],"operation":["'i0%i1'","'i0%i1'","'i0*i1'","'i0*i1'","'i0*i1'","'i0*i1'","'i0+i1'","'i0+i1'","'i0+i1'","'i0+i1'","'i0-i1'","'i0-i1'","'i0-i1'","'i0-i1'","'i0/i1'","'i0/i1'"],"expected":["\"(2x2):[-1.0, -0.0, -0.0, NaN]\"","\"(2x3x2):[-1.0, -0.0, -0.0, NaN, -0.0, -0.0, -1.0, -1.0, 0.0, NaN, 0.0, 1.0]\"","\"(2x2):[12.0, 8.0, 3.0, -0.0]\"","\"(2x3x2):[12.0, 8.0, 3.0, -0.0, -2.0, -4.0, 3.0, 2.0, -0.0, 0.0, 1.0, 2.0]\"","\"(2x2):[12.0, 8.0, 3.0, -0.0]\"","\"(2x3x2):[12.0, 8.0, 3.0, -0.0, -2.0, -4.0, 3.0, 2.0, -0.0, 0.0, 1.0, 2.0]\"","\"(2x2):[-7.0, -6.0, -4.0, -3.0]\"","\"(2x3x2):[-7.0, -6.0, -4.0, -3.0, -1.0, 0.0, -4.0, -3.0, -1.0, 0.0, 2.0, 3.0]\"","\"(2x2):[-7.0, -6.0, -4.0, -3.0]\"","\"(2x3x2):[-7.0, -6.0, -4.0, -3.0, -1.0, 0.0, -4.0, -3.0, -1.0, 0.0, 2.0, 3.0]\"","\"(2x2):[-1.0, -2.0, -2.0, -3.0]\"","\"(2x3x2):[-1.0, -2.0, -2.0, -3.0, -3.0, -4.0, 2.0, 1.0, 1.0, 0.0, 0.0, -1.0]\"","\"(2x2):[-1.0, -2.0, -2.0, -3.0]\"","\"(2x3x2):[-1.0, -2.0, -2.0, -3.0, -3.0, -4.0, 2.0, 1.0, 1.0, 0.0, 0.0, -1.0]\"","\"(2x2):[1.33333, 2.0, 3.0, -∞]\"","\"(2x3x2):[1.33333, 2.0, 3.0, -∞, -2.0, -1.0, 0.33333, 0.5, -0.0, NaN, 1.0, 0.5]\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation functions work across types, on large prime sized 1D slices and non sliced 1D tensors. [0]", + "result":"PASS", + "duration":"0.117 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a function based on the provided expression.","code":["var func = Function.of(funExpression)"]}, + + {"kind":"and","text":"We use a large prime number to size our tensors in order to stress workload divisibility.","code":["var PRIME_SIZE_1 = 7907","var PRIME_SIZE_2 = 7919"]}, + + {"kind":"and","text":"We create 2 tensors storing the same values, one sliced and the other a normal tensor.","code":["var t1 = Tensor.of(type).withShape(PRIME_SIZE_1).andSeed(\"Tempeh\")","var t2 = Tensor.of(type).withShape(PRIME_SIZE_2).all(0)[9..7915]","t2.mut[0..t2.size-1] = t1"]}, + + {"kind":"expect","text":"The types of both tensors should match what was provided during instantiation.","code":["t1.dataType == DataType.of(type)","t1.itemType == type","t2.dataType == DataType.of(type)","t2.itemType == type"]}, + + {"kind":"when","text":"We apply the function to both tensors...","code":["var result1 = func(t1)","var result2 = func(t2)","var data1 = result1.mut.data.get()","var data2 = result2.mut.data.get()"]}, + + {"kind":"then","text":"First we ensure that both tensors have the correct value/element type.","code":["result1.itemType == type","result2.itemType == type"]}, + + {"kind":"and","text":"The underlying data object should match the data array type as is defined by the data type!","code":["data1.class == result1.dataType.dataArrayType()","data2.class == result2.dataType.dataArrayType()"]}, + + {"kind":"and","text":"The data of the first non slice tensor as well as its slice should be as expected.","code":["data1.collect({(it as BigDecimal).round(3)}).sum().round(3) == expected[0]","data2.collect({(it as BigDecimal).round(3)}).sum().round(3) == expected[1]"]}, + + 
{"kind":"where","text":"","code":{"type":["Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float"],"funExpression":["'gaus(i0)*100 % i0'","'gaus(i0)*100 % i0'","'gaus(i0)*100 % i0'","'tanh(i0)*100 % i0'","'tanh(i0)*100 % i0'","'tanh(i0)*100 % i0'","'fast_tanh(i0)*100 % i0'","'fast_tanh(i0)*100 % i0'","'fast_tanh(i0)*100 % i0'","'fast_gaus(i0)+i0'","'fast_gaus(i0)+i0'","'fast_gaus(i0)+i0'","'softsign(i0)*100 % i0'","'softsign(i0)*100 % i0'","'softsign(i0)*100 % i0'","'random(i0)'","'random(i0)'"],"expected":["[2840.044, 2840.044]","[2840.043, 2840.043]","[0.000, 0.000]","[625.914, 625.914]","[625.910, 625.910]","[-20900.000, -20900.000]","[637.151, 637.151]","[637.011, 637.011]","[-20900.000, -20900.000]","[6632.311, 6632.311]","[6632.311, 6632.311]","[-233722189118.000, -233722189118.000]","[647.562, 647.562]","[647.564, 647.564]","[-20900.000, -20900.000]","[93.662, 93.662]","[93.662, 93.662]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation functions work across types, on large prime sized 1D slices and non sliced 1D tensors. [1]", + "result":"PASS", + "duration":"0.062 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a function based on the provided expression.","code":["var func = Function.of(funExpression)"]}, + + {"kind":"and","text":"We use a large prime number to size our tensors in order to stress workload divisibility.","code":["var PRIME_SIZE_1 = 7907","var PRIME_SIZE_2 = 7919"]}, + + {"kind":"and","text":"We create 2 tensors storing the same values, one sliced and the other a normal tensor.","code":["var t1 = Tensor.of(type).withShape(PRIME_SIZE_1).andSeed(\"Tempeh\")","var t2 = Tensor.of(type).withShape(PRIME_SIZE_2).all(0)[9..7915]","t2.mut[0..t2.size-1] = t1"]}, + + {"kind":"expect","text":"The types of both tensors should match what was provided during instantiation.","code":["t1.dataType == DataType.of(type)","t1.itemType == type","t2.dataType == DataType.of(type)","t2.itemType == type"]}, + + {"kind":"when","text":"We apply the function to both tensors...","code":["var result1 = func(t1)","var result2 = func(t2)","var data1 = result1.mut.data.get()","var data2 = result2.mut.data.get()"]}, + + {"kind":"then","text":"First we ensure that both tensors have the correct value/element type.","code":["result1.itemType == type","result2.itemType == type"]}, + + {"kind":"and","text":"The underlying data object should match the data array type as is defined by the data type!","code":["data1.class == result1.dataType.dataArrayType()","data2.class == result2.dataType.dataArrayType()"]}, + + {"kind":"and","text":"The data of the first non slice tensor as well as its slice should be as expected.","code":["data1.collect({(it as BigDecimal).round(3)}).sum().round(3) == expected[0]","data2.collect({(it as BigDecimal).round(3)}).sum().round(3) == expected[1]"]}, + + {"kind":"where","text":"","code":{"type":["Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float"],"funExpression":["'gaus(i0)*100 % i0'","'gaus(i0)*100 % i0'","'gaus(i0)*100 % i0'","'tanh(i0)*100 % i0'","'tanh(i0)*100 % i0'","'tanh(i0)*100 % i0'","'fast_tanh(i0)*100 % i0'","'fast_tanh(i0)*100 % i0'","'fast_tanh(i0)*100 % i0'","'fast_gaus(i0)+i0'","'fast_gaus(i0)+i0'","'fast_gaus(i0)+i0'","'softsign(i0)*100 % i0'","'softsign(i0)*100 % 
i0'","'softsign(i0)*100 % i0'","'random(i0)'","'random(i0)'"],"expected":["[2840.044, 2840.044]","[2840.043, 2840.043]","[0.000, 0.000]","[625.914, 625.914]","[625.910, 625.910]","[-20900.000, -20900.000]","[637.151, 637.151]","[637.011, 637.011]","[-20900.000, -20900.000]","[6632.311, 6632.311]","[6632.311, 6632.311]","[-233722189118.000, -233722189118.000]","[647.562, 647.562]","[647.564, 647.564]","[-20900.000, -20900.000]","[93.662, 93.662]","[93.662, 93.662]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation functions work across types, on large prime sized 1D slices and non sliced 1D tensors. [2]", + "result":"PASS", + "duration":"0.041 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a function based on the provided expression.","code":["var func = Function.of(funExpression)"]}, + + {"kind":"and","text":"We use a large prime number to size our tensors in order to stress workload divisibility.","code":["var PRIME_SIZE_1 = 7907","var PRIME_SIZE_2 = 7919"]}, + + {"kind":"and","text":"We create 2 tensors storing the same values, one sliced and the other a normal tensor.","code":["var t1 = Tensor.of(type).withShape(PRIME_SIZE_1).andSeed(\"Tempeh\")","var t2 = Tensor.of(type).withShape(PRIME_SIZE_2).all(0)[9..7915]","t2.mut[0..t2.size-1] = t1"]}, + + {"kind":"expect","text":"The types of both tensors should match what was provided during instantiation.","code":["t1.dataType == DataType.of(type)","t1.itemType == type","t2.dataType == DataType.of(type)","t2.itemType == type"]}, + + {"kind":"when","text":"We apply the function to both tensors...","code":["var result1 = func(t1)","var result2 = func(t2)","var data1 = result1.mut.data.get()","var data2 = result2.mut.data.get()"]}, + + {"kind":"then","text":"First we ensure that both tensors have the correct value/element type.","code":["result1.itemType == type","result2.itemType == type"]}, + + {"kind":"and","text":"The underlying data object should match the data array type as is defined by the data type!","code":["data1.class == result1.dataType.dataArrayType()","data2.class == result2.dataType.dataArrayType()"]}, + + {"kind":"and","text":"The data of the first non slice tensor as well as its slice should be as expected.","code":["data1.collect({(it as BigDecimal).round(3)}).sum().round(3) == expected[0]","data2.collect({(it as BigDecimal).round(3)}).sum().round(3) == expected[1]"]}, + + {"kind":"where","text":"","code":{"type":["Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float"],"funExpression":["'gaus(i0)*100 % i0'","'gaus(i0)*100 % i0'","'gaus(i0)*100 % i0'","'tanh(i0)*100 % i0'","'tanh(i0)*100 % i0'","'tanh(i0)*100 % i0'","'fast_tanh(i0)*100 % i0'","'fast_tanh(i0)*100 % i0'","'fast_tanh(i0)*100 % i0'","'fast_gaus(i0)+i0'","'fast_gaus(i0)+i0'","'fast_gaus(i0)+i0'","'softsign(i0)*100 % i0'","'softsign(i0)*100 % i0'","'softsign(i0)*100 % i0'","'random(i0)'","'random(i0)'"],"expected":["[2840.044, 2840.044]","[2840.043, 2840.043]","[0.000, 0.000]","[625.914, 625.914]","[625.910, 625.910]","[-20900.000, -20900.000]","[637.151, 637.151]","[637.011, 637.011]","[-20900.000, -20900.000]","[6632.311, 6632.311]","[6632.311, 6632.311]","[-233722189118.000, -233722189118.000]","[647.562, 647.562]","[647.564, 647.564]","[-20900.000, -20900.000]","[93.662, 93.662]","[93.662, 93.662]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + 
"id":"Activation functions work across types, on large prime sized 1D slices and non sliced 1D tensors. [3]", + "result":"PASS", + "duration":"0.043 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a function based on the provided expression.","code":["var func = Function.of(funExpression)"]}, + + {"kind":"and","text":"We use a large prime number to size our tensors in order to stress workload divisibility.","code":["var PRIME_SIZE_1 = 7907","var PRIME_SIZE_2 = 7919"]}, + + {"kind":"and","text":"We create 2 tensors storing the same values, one sliced and the other a normal tensor.","code":["var t1 = Tensor.of(type).withShape(PRIME_SIZE_1).andSeed(\"Tempeh\")","var t2 = Tensor.of(type).withShape(PRIME_SIZE_2).all(0)[9..7915]","t2.mut[0..t2.size-1] = t1"]}, + + {"kind":"expect","text":"The types of both tensors should match what was provided during instantiation.","code":["t1.dataType == DataType.of(type)","t1.itemType == type","t2.dataType == DataType.of(type)","t2.itemType == type"]}, + + {"kind":"when","text":"We apply the function to both tensors...","code":["var result1 = func(t1)","var result2 = func(t2)","var data1 = result1.mut.data.get()","var data2 = result2.mut.data.get()"]}, + + {"kind":"then","text":"First we ensure that both tensors have the correct value/element type.","code":["result1.itemType == type","result2.itemType == type"]}, + + {"kind":"and","text":"The underlying data object should match the data array type as is defined by the data type!","code":["data1.class == result1.dataType.dataArrayType()","data2.class == result2.dataType.dataArrayType()"]}, + + {"kind":"and","text":"The data of the first non slice tensor as well as its slice should be as expected.","code":["data1.collect({(it as BigDecimal).round(3)}).sum().round(3) == expected[0]","data2.collect({(it as BigDecimal).round(3)}).sum().round(3) == expected[1]"]}, + + {"kind":"where","text":"","code":{"type":["Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float"],"funExpression":["'gaus(i0)*100 % i0'","'gaus(i0)*100 % i0'","'gaus(i0)*100 % i0'","'tanh(i0)*100 % i0'","'tanh(i0)*100 % i0'","'tanh(i0)*100 % i0'","'fast_tanh(i0)*100 % i0'","'fast_tanh(i0)*100 % i0'","'fast_tanh(i0)*100 % i0'","'fast_gaus(i0)+i0'","'fast_gaus(i0)+i0'","'fast_gaus(i0)+i0'","'softsign(i0)*100 % i0'","'softsign(i0)*100 % i0'","'softsign(i0)*100 % i0'","'random(i0)'","'random(i0)'"],"expected":["[2840.044, 2840.044]","[2840.043, 2840.043]","[0.000, 0.000]","[625.914, 625.914]","[625.910, 625.910]","[-20900.000, -20900.000]","[637.151, 637.151]","[637.011, 637.011]","[-20900.000, -20900.000]","[6632.311, 6632.311]","[6632.311, 6632.311]","[-233722189118.000, -233722189118.000]","[647.562, 647.562]","[647.564, 647.564]","[-20900.000, -20900.000]","[93.662, 93.662]","[93.662, 93.662]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation functions work across types, on large prime sized 1D slices and non sliced 1D tensors. 
[4]", + "result":"PASS", + "duration":"0.039 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a function based on the provided expression.","code":["var func = Function.of(funExpression)"]}, + + {"kind":"and","text":"We use a large prime number to size our tensors in order to stress workload divisibility.","code":["var PRIME_SIZE_1 = 7907","var PRIME_SIZE_2 = 7919"]}, + + {"kind":"and","text":"We create 2 tensors storing the same values, one sliced and the other a normal tensor.","code":["var t1 = Tensor.of(type).withShape(PRIME_SIZE_1).andSeed(\"Tempeh\")","var t2 = Tensor.of(type).withShape(PRIME_SIZE_2).all(0)[9..7915]","t2.mut[0..t2.size-1] = t1"]}, + + {"kind":"expect","text":"The types of both tensors should match what was provided during instantiation.","code":["t1.dataType == DataType.of(type)","t1.itemType == type","t2.dataType == DataType.of(type)","t2.itemType == type"]}, + + {"kind":"when","text":"We apply the function to both tensors...","code":["var result1 = func(t1)","var result2 = func(t2)","var data1 = result1.mut.data.get()","var data2 = result2.mut.data.get()"]}, + + {"kind":"then","text":"First we ensure that both tensors have the correct value/element type.","code":["result1.itemType == type","result2.itemType == type"]}, + + {"kind":"and","text":"The underlying data object should match the data array type as is defined by the data type!","code":["data1.class == result1.dataType.dataArrayType()","data2.class == result2.dataType.dataArrayType()"]}, + + {"kind":"and","text":"The data of the first non slice tensor as well as its slice should be as expected.","code":["data1.collect({(it as BigDecimal).round(3)}).sum().round(3) == expected[0]","data2.collect({(it as BigDecimal).round(3)}).sum().round(3) == expected[1]"]}, + + {"kind":"where","text":"","code":{"type":["Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float"],"funExpression":["'gaus(i0)*100 % i0'","'gaus(i0)*100 % i0'","'gaus(i0)*100 % i0'","'tanh(i0)*100 % i0'","'tanh(i0)*100 % i0'","'tanh(i0)*100 % i0'","'fast_tanh(i0)*100 % i0'","'fast_tanh(i0)*100 % i0'","'fast_tanh(i0)*100 % i0'","'fast_gaus(i0)+i0'","'fast_gaus(i0)+i0'","'fast_gaus(i0)+i0'","'softsign(i0)*100 % i0'","'softsign(i0)*100 % i0'","'softsign(i0)*100 % i0'","'random(i0)'","'random(i0)'"],"expected":["[2840.044, 2840.044]","[2840.043, 2840.043]","[0.000, 0.000]","[625.914, 625.914]","[625.910, 625.910]","[-20900.000, -20900.000]","[637.151, 637.151]","[637.011, 637.011]","[-20900.000, -20900.000]","[6632.311, 6632.311]","[6632.311, 6632.311]","[-233722189118.000, -233722189118.000]","[647.562, 647.562]","[647.564, 647.564]","[-20900.000, -20900.000]","[93.662, 93.662]","[93.662, 93.662]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation functions work across types, on large prime sized 1D slices and non sliced 1D tensors. 
[5]", + "result":"PASS", + "duration":"0.035 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a function based on the provided expression.","code":["var func = Function.of(funExpression)"]}, + + {"kind":"and","text":"We use a large prime number to size our tensors in order to stress workload divisibility.","code":["var PRIME_SIZE_1 = 7907","var PRIME_SIZE_2 = 7919"]}, + + {"kind":"and","text":"We create 2 tensors storing the same values, one sliced and the other a normal tensor.","code":["var t1 = Tensor.of(type).withShape(PRIME_SIZE_1).andSeed(\"Tempeh\")","var t2 = Tensor.of(type).withShape(PRIME_SIZE_2).all(0)[9..7915]","t2.mut[0..t2.size-1] = t1"]}, + + {"kind":"expect","text":"The types of both tensors should match what was provided during instantiation.","code":["t1.dataType == DataType.of(type)","t1.itemType == type","t2.dataType == DataType.of(type)","t2.itemType == type"]}, + + {"kind":"when","text":"We apply the function to both tensors...","code":["var result1 = func(t1)","var result2 = func(t2)","var data1 = result1.mut.data.get()","var data2 = result2.mut.data.get()"]}, + + {"kind":"then","text":"First we ensure that both tensors have the correct value/element type.","code":["result1.itemType == type","result2.itemType == type"]}, + + {"kind":"and","text":"The underlying data object should match the data array type as is defined by the data type!","code":["data1.class == result1.dataType.dataArrayType()","data2.class == result2.dataType.dataArrayType()"]}, + + {"kind":"and","text":"The data of the first non slice tensor as well as its slice should be as expected.","code":["data1.collect({(it as BigDecimal).round(3)}).sum().round(3) == expected[0]","data2.collect({(it as BigDecimal).round(3)}).sum().round(3) == expected[1]"]}, + + {"kind":"where","text":"","code":{"type":["Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float"],"funExpression":["'gaus(i0)*100 % i0'","'gaus(i0)*100 % i0'","'gaus(i0)*100 % i0'","'tanh(i0)*100 % i0'","'tanh(i0)*100 % i0'","'tanh(i0)*100 % i0'","'fast_tanh(i0)*100 % i0'","'fast_tanh(i0)*100 % i0'","'fast_tanh(i0)*100 % i0'","'fast_gaus(i0)+i0'","'fast_gaus(i0)+i0'","'fast_gaus(i0)+i0'","'softsign(i0)*100 % i0'","'softsign(i0)*100 % i0'","'softsign(i0)*100 % i0'","'random(i0)'","'random(i0)'"],"expected":["[2840.044, 2840.044]","[2840.043, 2840.043]","[0.000, 0.000]","[625.914, 625.914]","[625.910, 625.910]","[-20900.000, -20900.000]","[637.151, 637.151]","[637.011, 637.011]","[-20900.000, -20900.000]","[6632.311, 6632.311]","[6632.311, 6632.311]","[-233722189118.000, -233722189118.000]","[647.562, 647.562]","[647.564, 647.564]","[-20900.000, -20900.000]","[93.662, 93.662]","[93.662, 93.662]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation functions work across types, on large prime sized 1D slices and non sliced 1D tensors. 
[6]", + "result":"PASS", + "duration":"0.035 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a function based on the provided expression.","code":["var func = Function.of(funExpression)"]}, + + {"kind":"and","text":"We use a large prime number to size our tensors in order to stress workload divisibility.","code":["var PRIME_SIZE_1 = 7907","var PRIME_SIZE_2 = 7919"]}, + + {"kind":"and","text":"We create 2 tensors storing the same values, one sliced and the other a normal tensor.","code":["var t1 = Tensor.of(type).withShape(PRIME_SIZE_1).andSeed(\"Tempeh\")","var t2 = Tensor.of(type).withShape(PRIME_SIZE_2).all(0)[9..7915]","t2.mut[0..t2.size-1] = t1"]}, + + {"kind":"expect","text":"The types of both tensors should match what was provided during instantiation.","code":["t1.dataType == DataType.of(type)","t1.itemType == type","t2.dataType == DataType.of(type)","t2.itemType == type"]}, + + {"kind":"when","text":"We apply the function to both tensors...","code":["var result1 = func(t1)","var result2 = func(t2)","var data1 = result1.mut.data.get()","var data2 = result2.mut.data.get()"]}, + + {"kind":"then","text":"First we ensure that both tensors have the correct value/element type.","code":["result1.itemType == type","result2.itemType == type"]}, + + {"kind":"and","text":"The underlying data object should match the data array type as is defined by the data type!","code":["data1.class == result1.dataType.dataArrayType()","data2.class == result2.dataType.dataArrayType()"]}, + + {"kind":"and","text":"The data of the first non slice tensor as well as its slice should be as expected.","code":["data1.collect({(it as BigDecimal).round(3)}).sum().round(3) == expected[0]","data2.collect({(it as BigDecimal).round(3)}).sum().round(3) == expected[1]"]}, + + {"kind":"where","text":"","code":{"type":["Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float"],"funExpression":["'gaus(i0)*100 % i0'","'gaus(i0)*100 % i0'","'gaus(i0)*100 % i0'","'tanh(i0)*100 % i0'","'tanh(i0)*100 % i0'","'tanh(i0)*100 % i0'","'fast_tanh(i0)*100 % i0'","'fast_tanh(i0)*100 % i0'","'fast_tanh(i0)*100 % i0'","'fast_gaus(i0)+i0'","'fast_gaus(i0)+i0'","'fast_gaus(i0)+i0'","'softsign(i0)*100 % i0'","'softsign(i0)*100 % i0'","'softsign(i0)*100 % i0'","'random(i0)'","'random(i0)'"],"expected":["[2840.044, 2840.044]","[2840.043, 2840.043]","[0.000, 0.000]","[625.914, 625.914]","[625.910, 625.910]","[-20900.000, -20900.000]","[637.151, 637.151]","[637.011, 637.011]","[-20900.000, -20900.000]","[6632.311, 6632.311]","[6632.311, 6632.311]","[-233722189118.000, -233722189118.000]","[647.562, 647.562]","[647.564, 647.564]","[-20900.000, -20900.000]","[93.662, 93.662]","[93.662, 93.662]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation functions work across types, on large prime sized 1D slices and non sliced 1D tensors. 
[7]", + "result":"PASS", + "duration":"0.024 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a function based on the provided expression.","code":["var func = Function.of(funExpression)"]}, + + {"kind":"and","text":"We use a large prime number to size our tensors in order to stress workload divisibility.","code":["var PRIME_SIZE_1 = 7907","var PRIME_SIZE_2 = 7919"]}, + + {"kind":"and","text":"We create 2 tensors storing the same values, one sliced and the other a normal tensor.","code":["var t1 = Tensor.of(type).withShape(PRIME_SIZE_1).andSeed(\"Tempeh\")","var t2 = Tensor.of(type).withShape(PRIME_SIZE_2).all(0)[9..7915]","t2.mut[0..t2.size-1] = t1"]}, + + {"kind":"expect","text":"The types of both tensors should match what was provided during instantiation.","code":["t1.dataType == DataType.of(type)","t1.itemType == type","t2.dataType == DataType.of(type)","t2.itemType == type"]}, + + {"kind":"when","text":"We apply the function to both tensors...","code":["var result1 = func(t1)","var result2 = func(t2)","var data1 = result1.mut.data.get()","var data2 = result2.mut.data.get()"]}, + + {"kind":"then","text":"First we ensure that both tensors have the correct value/element type.","code":["result1.itemType == type","result2.itemType == type"]}, + + {"kind":"and","text":"The underlying data object should match the data array type as is defined by the data type!","code":["data1.class == result1.dataType.dataArrayType()","data2.class == result2.dataType.dataArrayType()"]}, + + {"kind":"and","text":"The data of the first non slice tensor as well as its slice should be as expected.","code":["data1.collect({(it as BigDecimal).round(3)}).sum().round(3) == expected[0]","data2.collect({(it as BigDecimal).round(3)}).sum().round(3) == expected[1]"]}, + + {"kind":"where","text":"","code":{"type":["Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float"],"funExpression":["'gaus(i0)*100 % i0'","'gaus(i0)*100 % i0'","'gaus(i0)*100 % i0'","'tanh(i0)*100 % i0'","'tanh(i0)*100 % i0'","'tanh(i0)*100 % i0'","'fast_tanh(i0)*100 % i0'","'fast_tanh(i0)*100 % i0'","'fast_tanh(i0)*100 % i0'","'fast_gaus(i0)+i0'","'fast_gaus(i0)+i0'","'fast_gaus(i0)+i0'","'softsign(i0)*100 % i0'","'softsign(i0)*100 % i0'","'softsign(i0)*100 % i0'","'random(i0)'","'random(i0)'"],"expected":["[2840.044, 2840.044]","[2840.043, 2840.043]","[0.000, 0.000]","[625.914, 625.914]","[625.910, 625.910]","[-20900.000, -20900.000]","[637.151, 637.151]","[637.011, 637.011]","[-20900.000, -20900.000]","[6632.311, 6632.311]","[6632.311, 6632.311]","[-233722189118.000, -233722189118.000]","[647.562, 647.562]","[647.564, 647.564]","[-20900.000, -20900.000]","[93.662, 93.662]","[93.662, 93.662]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation functions work across types, on large prime sized 1D slices and non sliced 1D tensors. 
[8]", + "result":"PASS", + "duration":"0.019 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a function based on the provided expression.","code":["var func = Function.of(funExpression)"]}, + + {"kind":"and","text":"We use a large prime number to size our tensors in order to stress workload divisibility.","code":["var PRIME_SIZE_1 = 7907","var PRIME_SIZE_2 = 7919"]}, + + {"kind":"and","text":"We create 2 tensors storing the same values, one sliced and the other a normal tensor.","code":["var t1 = Tensor.of(type).withShape(PRIME_SIZE_1).andSeed(\"Tempeh\")","var t2 = Tensor.of(type).withShape(PRIME_SIZE_2).all(0)[9..7915]","t2.mut[0..t2.size-1] = t1"]}, + + {"kind":"expect","text":"The types of both tensors should match what was provided during instantiation.","code":["t1.dataType == DataType.of(type)","t1.itemType == type","t2.dataType == DataType.of(type)","t2.itemType == type"]}, + + {"kind":"when","text":"We apply the function to both tensors...","code":["var result1 = func(t1)","var result2 = func(t2)","var data1 = result1.mut.data.get()","var data2 = result2.mut.data.get()"]}, + + {"kind":"then","text":"First we ensure that both tensors have the correct value/element type.","code":["result1.itemType == type","result2.itemType == type"]}, + + {"kind":"and","text":"The underlying data object should match the data array type as is defined by the data type!","code":["data1.class == result1.dataType.dataArrayType()","data2.class == result2.dataType.dataArrayType()"]}, + + {"kind":"and","text":"The data of the first non slice tensor as well as its slice should be as expected.","code":["data1.collect({(it as BigDecimal).round(3)}).sum().round(3) == expected[0]","data2.collect({(it as BigDecimal).round(3)}).sum().round(3) == expected[1]"]}, + + {"kind":"where","text":"","code":{"type":["Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float"],"funExpression":["'gaus(i0)*100 % i0'","'gaus(i0)*100 % i0'","'gaus(i0)*100 % i0'","'tanh(i0)*100 % i0'","'tanh(i0)*100 % i0'","'tanh(i0)*100 % i0'","'fast_tanh(i0)*100 % i0'","'fast_tanh(i0)*100 % i0'","'fast_tanh(i0)*100 % i0'","'fast_gaus(i0)+i0'","'fast_gaus(i0)+i0'","'fast_gaus(i0)+i0'","'softsign(i0)*100 % i0'","'softsign(i0)*100 % i0'","'softsign(i0)*100 % i0'","'random(i0)'","'random(i0)'"],"expected":["[2840.044, 2840.044]","[2840.043, 2840.043]","[0.000, 0.000]","[625.914, 625.914]","[625.910, 625.910]","[-20900.000, -20900.000]","[637.151, 637.151]","[637.011, 637.011]","[-20900.000, -20900.000]","[6632.311, 6632.311]","[6632.311, 6632.311]","[-233722189118.000, -233722189118.000]","[647.562, 647.562]","[647.564, 647.564]","[-20900.000, -20900.000]","[93.662, 93.662]","[93.662, 93.662]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation functions work across types, on large prime sized 1D slices and non sliced 1D tensors. 
[9]", + "result":"PASS", + "duration":"0.024 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a function based on the provided expression.","code":["var func = Function.of(funExpression)"]}, + + {"kind":"and","text":"We use a large prime number to size our tensors in order to stress workload divisibility.","code":["var PRIME_SIZE_1 = 7907","var PRIME_SIZE_2 = 7919"]}, + + {"kind":"and","text":"We create 2 tensors storing the same values, one sliced and the other a normal tensor.","code":["var t1 = Tensor.of(type).withShape(PRIME_SIZE_1).andSeed(\"Tempeh\")","var t2 = Tensor.of(type).withShape(PRIME_SIZE_2).all(0)[9..7915]","t2.mut[0..t2.size-1] = t1"]}, + + {"kind":"expect","text":"The types of both tensors should match what was provided during instantiation.","code":["t1.dataType == DataType.of(type)","t1.itemType == type","t2.dataType == DataType.of(type)","t2.itemType == type"]}, + + {"kind":"when","text":"We apply the function to both tensors...","code":["var result1 = func(t1)","var result2 = func(t2)","var data1 = result1.mut.data.get()","var data2 = result2.mut.data.get()"]}, + + {"kind":"then","text":"First we ensure that both tensors have the correct value/element type.","code":["result1.itemType == type","result2.itemType == type"]}, + + {"kind":"and","text":"The underlying data object should match the data array type as is defined by the data type!","code":["data1.class == result1.dataType.dataArrayType()","data2.class == result2.dataType.dataArrayType()"]}, + + {"kind":"and","text":"The data of the first non slice tensor as well as its slice should be as expected.","code":["data1.collect({(it as BigDecimal).round(3)}).sum().round(3) == expected[0]","data2.collect({(it as BigDecimal).round(3)}).sum().round(3) == expected[1]"]}, + + {"kind":"where","text":"","code":{"type":["Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float"],"funExpression":["'gaus(i0)*100 % i0'","'gaus(i0)*100 % i0'","'gaus(i0)*100 % i0'","'tanh(i0)*100 % i0'","'tanh(i0)*100 % i0'","'tanh(i0)*100 % i0'","'fast_tanh(i0)*100 % i0'","'fast_tanh(i0)*100 % i0'","'fast_tanh(i0)*100 % i0'","'fast_gaus(i0)+i0'","'fast_gaus(i0)+i0'","'fast_gaus(i0)+i0'","'softsign(i0)*100 % i0'","'softsign(i0)*100 % i0'","'softsign(i0)*100 % i0'","'random(i0)'","'random(i0)'"],"expected":["[2840.044, 2840.044]","[2840.043, 2840.043]","[0.000, 0.000]","[625.914, 625.914]","[625.910, 625.910]","[-20900.000, -20900.000]","[637.151, 637.151]","[637.011, 637.011]","[-20900.000, -20900.000]","[6632.311, 6632.311]","[6632.311, 6632.311]","[-233722189118.000, -233722189118.000]","[647.562, 647.562]","[647.564, 647.564]","[-20900.000, -20900.000]","[93.662, 93.662]","[93.662, 93.662]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation functions work across types, on large prime sized 1D slices and non sliced 1D tensors. 
[10]", + "result":"PASS", + "duration":"0.019 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a function based on the provided expression.","code":["var func = Function.of(funExpression)"]}, + + {"kind":"and","text":"We use a large prime number to size our tensors in order to stress workload divisibility.","code":["var PRIME_SIZE_1 = 7907","var PRIME_SIZE_2 = 7919"]}, + + {"kind":"and","text":"We create 2 tensors storing the same values, one sliced and the other a normal tensor.","code":["var t1 = Tensor.of(type).withShape(PRIME_SIZE_1).andSeed(\"Tempeh\")","var t2 = Tensor.of(type).withShape(PRIME_SIZE_2).all(0)[9..7915]","t2.mut[0..t2.size-1] = t1"]}, + + {"kind":"expect","text":"The types of both tensors should match what was provided during instantiation.","code":["t1.dataType == DataType.of(type)","t1.itemType == type","t2.dataType == DataType.of(type)","t2.itemType == type"]}, + + {"kind":"when","text":"We apply the function to both tensors...","code":["var result1 = func(t1)","var result2 = func(t2)","var data1 = result1.mut.data.get()","var data2 = result2.mut.data.get()"]}, + + {"kind":"then","text":"First we ensure that both tensors have the correct value/element type.","code":["result1.itemType == type","result2.itemType == type"]}, + + {"kind":"and","text":"The underlying data object should match the data array type as is defined by the data type!","code":["data1.class == result1.dataType.dataArrayType()","data2.class == result2.dataType.dataArrayType()"]}, + + {"kind":"and","text":"The data of the first non slice tensor as well as its slice should be as expected.","code":["data1.collect({(it as BigDecimal).round(3)}).sum().round(3) == expected[0]","data2.collect({(it as BigDecimal).round(3)}).sum().round(3) == expected[1]"]}, + + {"kind":"where","text":"","code":{"type":["Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float"],"funExpression":["'gaus(i0)*100 % i0'","'gaus(i0)*100 % i0'","'gaus(i0)*100 % i0'","'tanh(i0)*100 % i0'","'tanh(i0)*100 % i0'","'tanh(i0)*100 % i0'","'fast_tanh(i0)*100 % i0'","'fast_tanh(i0)*100 % i0'","'fast_tanh(i0)*100 % i0'","'fast_gaus(i0)+i0'","'fast_gaus(i0)+i0'","'fast_gaus(i0)+i0'","'softsign(i0)*100 % i0'","'softsign(i0)*100 % i0'","'softsign(i0)*100 % i0'","'random(i0)'","'random(i0)'"],"expected":["[2840.044, 2840.044]","[2840.043, 2840.043]","[0.000, 0.000]","[625.914, 625.914]","[625.910, 625.910]","[-20900.000, -20900.000]","[637.151, 637.151]","[637.011, 637.011]","[-20900.000, -20900.000]","[6632.311, 6632.311]","[6632.311, 6632.311]","[-233722189118.000, -233722189118.000]","[647.562, 647.562]","[647.564, 647.564]","[-20900.000, -20900.000]","[93.662, 93.662]","[93.662, 93.662]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation functions work across types, on large prime sized 1D slices and non sliced 1D tensors. 
[11]", + "result":"PASS", + "duration":"0.015 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a function based on the provided expression.","code":["var func = Function.of(funExpression)"]}, + + {"kind":"and","text":"We use a large prime number to size our tensors in order to stress workload divisibility.","code":["var PRIME_SIZE_1 = 7907","var PRIME_SIZE_2 = 7919"]}, + + {"kind":"and","text":"We create 2 tensors storing the same values, one sliced and the other a normal tensor.","code":["var t1 = Tensor.of(type).withShape(PRIME_SIZE_1).andSeed(\"Tempeh\")","var t2 = Tensor.of(type).withShape(PRIME_SIZE_2).all(0)[9..7915]","t2.mut[0..t2.size-1] = t1"]}, + + {"kind":"expect","text":"The types of both tensors should match what was provided during instantiation.","code":["t1.dataType == DataType.of(type)","t1.itemType == type","t2.dataType == DataType.of(type)","t2.itemType == type"]}, + + {"kind":"when","text":"We apply the function to both tensors...","code":["var result1 = func(t1)","var result2 = func(t2)","var data1 = result1.mut.data.get()","var data2 = result2.mut.data.get()"]}, + + {"kind":"then","text":"First we ensure that both tensors have the correct value/element type.","code":["result1.itemType == type","result2.itemType == type"]}, + + {"kind":"and","text":"The underlying data object should match the data array type as is defined by the data type!","code":["data1.class == result1.dataType.dataArrayType()","data2.class == result2.dataType.dataArrayType()"]}, + + {"kind":"and","text":"The data of the first non slice tensor as well as its slice should be as expected.","code":["data1.collect({(it as BigDecimal).round(3)}).sum().round(3) == expected[0]","data2.collect({(it as BigDecimal).round(3)}).sum().round(3) == expected[1]"]}, + + {"kind":"where","text":"","code":{"type":["Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float"],"funExpression":["'gaus(i0)*100 % i0'","'gaus(i0)*100 % i0'","'gaus(i0)*100 % i0'","'tanh(i0)*100 % i0'","'tanh(i0)*100 % i0'","'tanh(i0)*100 % i0'","'fast_tanh(i0)*100 % i0'","'fast_tanh(i0)*100 % i0'","'fast_tanh(i0)*100 % i0'","'fast_gaus(i0)+i0'","'fast_gaus(i0)+i0'","'fast_gaus(i0)+i0'","'softsign(i0)*100 % i0'","'softsign(i0)*100 % i0'","'softsign(i0)*100 % i0'","'random(i0)'","'random(i0)'"],"expected":["[2840.044, 2840.044]","[2840.043, 2840.043]","[0.000, 0.000]","[625.914, 625.914]","[625.910, 625.910]","[-20900.000, -20900.000]","[637.151, 637.151]","[637.011, 637.011]","[-20900.000, -20900.000]","[6632.311, 6632.311]","[6632.311, 6632.311]","[-233722189118.000, -233722189118.000]","[647.562, 647.562]","[647.564, 647.564]","[-20900.000, -20900.000]","[93.662, 93.662]","[93.662, 93.662]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation functions work across types, on large prime sized 1D slices and non sliced 1D tensors. 
[12]", + "result":"PASS", + "duration":"0.027 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a function based on the provided expression.","code":["var func = Function.of(funExpression)"]}, + + {"kind":"and","text":"We use a large prime number to size our tensors in order to stress workload divisibility.","code":["var PRIME_SIZE_1 = 7907","var PRIME_SIZE_2 = 7919"]}, + + {"kind":"and","text":"We create 2 tensors storing the same values, one sliced and the other a normal tensor.","code":["var t1 = Tensor.of(type).withShape(PRIME_SIZE_1).andSeed(\"Tempeh\")","var t2 = Tensor.of(type).withShape(PRIME_SIZE_2).all(0)[9..7915]","t2.mut[0..t2.size-1] = t1"]}, + + {"kind":"expect","text":"The types of both tensors should match what was provided during instantiation.","code":["t1.dataType == DataType.of(type)","t1.itemType == type","t2.dataType == DataType.of(type)","t2.itemType == type"]}, + + {"kind":"when","text":"We apply the function to both tensors...","code":["var result1 = func(t1)","var result2 = func(t2)","var data1 = result1.mut.data.get()","var data2 = result2.mut.data.get()"]}, + + {"kind":"then","text":"First we ensure that both tensors have the correct value/element type.","code":["result1.itemType == type","result2.itemType == type"]}, + + {"kind":"and","text":"The underlying data object should match the data array type as is defined by the data type!","code":["data1.class == result1.dataType.dataArrayType()","data2.class == result2.dataType.dataArrayType()"]}, + + {"kind":"and","text":"The data of the first non slice tensor as well as its slice should be as expected.","code":["data1.collect({(it as BigDecimal).round(3)}).sum().round(3) == expected[0]","data2.collect({(it as BigDecimal).round(3)}).sum().round(3) == expected[1]"]}, + + {"kind":"where","text":"","code":{"type":["Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float"],"funExpression":["'gaus(i0)*100 % i0'","'gaus(i0)*100 % i0'","'gaus(i0)*100 % i0'","'tanh(i0)*100 % i0'","'tanh(i0)*100 % i0'","'tanh(i0)*100 % i0'","'fast_tanh(i0)*100 % i0'","'fast_tanh(i0)*100 % i0'","'fast_tanh(i0)*100 % i0'","'fast_gaus(i0)+i0'","'fast_gaus(i0)+i0'","'fast_gaus(i0)+i0'","'softsign(i0)*100 % i0'","'softsign(i0)*100 % i0'","'softsign(i0)*100 % i0'","'random(i0)'","'random(i0)'"],"expected":["[2840.044, 2840.044]","[2840.043, 2840.043]","[0.000, 0.000]","[625.914, 625.914]","[625.910, 625.910]","[-20900.000, -20900.000]","[637.151, 637.151]","[637.011, 637.011]","[-20900.000, -20900.000]","[6632.311, 6632.311]","[6632.311, 6632.311]","[-233722189118.000, -233722189118.000]","[647.562, 647.562]","[647.564, 647.564]","[-20900.000, -20900.000]","[93.662, 93.662]","[93.662, 93.662]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation functions work across types, on large prime sized 1D slices and non sliced 1D tensors. 
[13]", + "result":"PASS", + "duration":"0.023 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a function based on the provided expression.","code":["var func = Function.of(funExpression)"]}, + + {"kind":"and","text":"We use a large prime number to size our tensors in order to stress workload divisibility.","code":["var PRIME_SIZE_1 = 7907","var PRIME_SIZE_2 = 7919"]}, + + {"kind":"and","text":"We create 2 tensors storing the same values, one sliced and the other a normal tensor.","code":["var t1 = Tensor.of(type).withShape(PRIME_SIZE_1).andSeed(\"Tempeh\")","var t2 = Tensor.of(type).withShape(PRIME_SIZE_2).all(0)[9..7915]","t2.mut[0..t2.size-1] = t1"]}, + + {"kind":"expect","text":"The types of both tensors should match what was provided during instantiation.","code":["t1.dataType == DataType.of(type)","t1.itemType == type","t2.dataType == DataType.of(type)","t2.itemType == type"]}, + + {"kind":"when","text":"We apply the function to both tensors...","code":["var result1 = func(t1)","var result2 = func(t2)","var data1 = result1.mut.data.get()","var data2 = result2.mut.data.get()"]}, + + {"kind":"then","text":"First we ensure that both tensors have the correct value/element type.","code":["result1.itemType == type","result2.itemType == type"]}, + + {"kind":"and","text":"The underlying data object should match the data array type as is defined by the data type!","code":["data1.class == result1.dataType.dataArrayType()","data2.class == result2.dataType.dataArrayType()"]}, + + {"kind":"and","text":"The data of the first non slice tensor as well as its slice should be as expected.","code":["data1.collect({(it as BigDecimal).round(3)}).sum().round(3) == expected[0]","data2.collect({(it as BigDecimal).round(3)}).sum().round(3) == expected[1]"]}, + + {"kind":"where","text":"","code":{"type":["Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float"],"funExpression":["'gaus(i0)*100 % i0'","'gaus(i0)*100 % i0'","'gaus(i0)*100 % i0'","'tanh(i0)*100 % i0'","'tanh(i0)*100 % i0'","'tanh(i0)*100 % i0'","'fast_tanh(i0)*100 % i0'","'fast_tanh(i0)*100 % i0'","'fast_tanh(i0)*100 % i0'","'fast_gaus(i0)+i0'","'fast_gaus(i0)+i0'","'fast_gaus(i0)+i0'","'softsign(i0)*100 % i0'","'softsign(i0)*100 % i0'","'softsign(i0)*100 % i0'","'random(i0)'","'random(i0)'"],"expected":["[2840.044, 2840.044]","[2840.043, 2840.043]","[0.000, 0.000]","[625.914, 625.914]","[625.910, 625.910]","[-20900.000, -20900.000]","[637.151, 637.151]","[637.011, 637.011]","[-20900.000, -20900.000]","[6632.311, 6632.311]","[6632.311, 6632.311]","[-233722189118.000, -233722189118.000]","[647.562, 647.562]","[647.564, 647.564]","[-20900.000, -20900.000]","[93.662, 93.662]","[93.662, 93.662]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation functions work across types, on large prime sized 1D slices and non sliced 1D tensors. 
[14]", + "result":"PASS", + "duration":"0.018 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a function based on the provided expression.","code":["var func = Function.of(funExpression)"]}, + + {"kind":"and","text":"We use a large prime number to size our tensors in order to stress workload divisibility.","code":["var PRIME_SIZE_1 = 7907","var PRIME_SIZE_2 = 7919"]}, + + {"kind":"and","text":"We create 2 tensors storing the same values, one sliced and the other a normal tensor.","code":["var t1 = Tensor.of(type).withShape(PRIME_SIZE_1).andSeed(\"Tempeh\")","var t2 = Tensor.of(type).withShape(PRIME_SIZE_2).all(0)[9..7915]","t2.mut[0..t2.size-1] = t1"]}, + + {"kind":"expect","text":"The types of both tensors should match what was provided during instantiation.","code":["t1.dataType == DataType.of(type)","t1.itemType == type","t2.dataType == DataType.of(type)","t2.itemType == type"]}, + + {"kind":"when","text":"We apply the function to both tensors...","code":["var result1 = func(t1)","var result2 = func(t2)","var data1 = result1.mut.data.get()","var data2 = result2.mut.data.get()"]}, + + {"kind":"then","text":"First we ensure that both tensors have the correct value/element type.","code":["result1.itemType == type","result2.itemType == type"]}, + + {"kind":"and","text":"The underlying data object should match the data array type as is defined by the data type!","code":["data1.class == result1.dataType.dataArrayType()","data2.class == result2.dataType.dataArrayType()"]}, + + {"kind":"and","text":"The data of the first non slice tensor as well as its slice should be as expected.","code":["data1.collect({(it as BigDecimal).round(3)}).sum().round(3) == expected[0]","data2.collect({(it as BigDecimal).round(3)}).sum().round(3) == expected[1]"]}, + + {"kind":"where","text":"","code":{"type":["Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float"],"funExpression":["'gaus(i0)*100 % i0'","'gaus(i0)*100 % i0'","'gaus(i0)*100 % i0'","'tanh(i0)*100 % i0'","'tanh(i0)*100 % i0'","'tanh(i0)*100 % i0'","'fast_tanh(i0)*100 % i0'","'fast_tanh(i0)*100 % i0'","'fast_tanh(i0)*100 % i0'","'fast_gaus(i0)+i0'","'fast_gaus(i0)+i0'","'fast_gaus(i0)+i0'","'softsign(i0)*100 % i0'","'softsign(i0)*100 % i0'","'softsign(i0)*100 % i0'","'random(i0)'","'random(i0)'"],"expected":["[2840.044, 2840.044]","[2840.043, 2840.043]","[0.000, 0.000]","[625.914, 625.914]","[625.910, 625.910]","[-20900.000, -20900.000]","[637.151, 637.151]","[637.011, 637.011]","[-20900.000, -20900.000]","[6632.311, 6632.311]","[6632.311, 6632.311]","[-233722189118.000, -233722189118.000]","[647.562, 647.562]","[647.564, 647.564]","[-20900.000, -20900.000]","[93.662, 93.662]","[93.662, 93.662]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation functions work across types, on large prime sized 1D slices and non sliced 1D tensors. 
[15]", + "result":"PASS", + "duration":"0.020 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a function based on the provided expression.","code":["var func = Function.of(funExpression)"]}, + + {"kind":"and","text":"We use a large prime number to size our tensors in order to stress workload divisibility.","code":["var PRIME_SIZE_1 = 7907","var PRIME_SIZE_2 = 7919"]}, + + {"kind":"and","text":"We create 2 tensors storing the same values, one sliced and the other a normal tensor.","code":["var t1 = Tensor.of(type).withShape(PRIME_SIZE_1).andSeed(\"Tempeh\")","var t2 = Tensor.of(type).withShape(PRIME_SIZE_2).all(0)[9..7915]","t2.mut[0..t2.size-1] = t1"]}, + + {"kind":"expect","text":"The types of both tensors should match what was provided during instantiation.","code":["t1.dataType == DataType.of(type)","t1.itemType == type","t2.dataType == DataType.of(type)","t2.itemType == type"]}, + + {"kind":"when","text":"We apply the function to both tensors...","code":["var result1 = func(t1)","var result2 = func(t2)","var data1 = result1.mut.data.get()","var data2 = result2.mut.data.get()"]}, + + {"kind":"then","text":"First we ensure that both tensors have the correct value/element type.","code":["result1.itemType == type","result2.itemType == type"]}, + + {"kind":"and","text":"The underlying data object should match the data array type as is defined by the data type!","code":["data1.class == result1.dataType.dataArrayType()","data2.class == result2.dataType.dataArrayType()"]}, + + {"kind":"and","text":"The data of the first non slice tensor as well as its slice should be as expected.","code":["data1.collect({(it as BigDecimal).round(3)}).sum().round(3) == expected[0]","data2.collect({(it as BigDecimal).round(3)}).sum().round(3) == expected[1]"]}, + + {"kind":"where","text":"","code":{"type":["Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float"],"funExpression":["'gaus(i0)*100 % i0'","'gaus(i0)*100 % i0'","'gaus(i0)*100 % i0'","'tanh(i0)*100 % i0'","'tanh(i0)*100 % i0'","'tanh(i0)*100 % i0'","'fast_tanh(i0)*100 % i0'","'fast_tanh(i0)*100 % i0'","'fast_tanh(i0)*100 % i0'","'fast_gaus(i0)+i0'","'fast_gaus(i0)+i0'","'fast_gaus(i0)+i0'","'softsign(i0)*100 % i0'","'softsign(i0)*100 % i0'","'softsign(i0)*100 % i0'","'random(i0)'","'random(i0)'"],"expected":["[2840.044, 2840.044]","[2840.043, 2840.043]","[0.000, 0.000]","[625.914, 625.914]","[625.910, 625.910]","[-20900.000, -20900.000]","[637.151, 637.151]","[637.011, 637.011]","[-20900.000, -20900.000]","[6632.311, 6632.311]","[6632.311, 6632.311]","[-233722189118.000, -233722189118.000]","[647.562, 647.562]","[647.564, 647.564]","[-20900.000, -20900.000]","[93.662, 93.662]","[93.662, 93.662]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation functions work across types, on large prime sized 1D slices and non sliced 1D tensors. 
[16]", + "result":"PASS", + "duration":"0.016 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a function based on the provided expression.","code":["var func = Function.of(funExpression)"]}, + + {"kind":"and","text":"We use a large prime number to size our tensors in order to stress workload divisibility.","code":["var PRIME_SIZE_1 = 7907","var PRIME_SIZE_2 = 7919"]}, + + {"kind":"and","text":"We create 2 tensors storing the same values, one sliced and the other a normal tensor.","code":["var t1 = Tensor.of(type).withShape(PRIME_SIZE_1).andSeed(\"Tempeh\")","var t2 = Tensor.of(type).withShape(PRIME_SIZE_2).all(0)[9..7915]","t2.mut[0..t2.size-1] = t1"]}, + + {"kind":"expect","text":"The types of both tensors should match what was provided during instantiation.","code":["t1.dataType == DataType.of(type)","t1.itemType == type","t2.dataType == DataType.of(type)","t2.itemType == type"]}, + + {"kind":"when","text":"We apply the function to both tensors...","code":["var result1 = func(t1)","var result2 = func(t2)","var data1 = result1.mut.data.get()","var data2 = result2.mut.data.get()"]}, + + {"kind":"then","text":"First we ensure that both tensors have the correct value/element type.","code":["result1.itemType == type","result2.itemType == type"]}, + + {"kind":"and","text":"The underlying data object should match the data array type as is defined by the data type!","code":["data1.class == result1.dataType.dataArrayType()","data2.class == result2.dataType.dataArrayType()"]}, + + {"kind":"and","text":"The data of the first non slice tensor as well as its slice should be as expected.","code":["data1.collect({(it as BigDecimal).round(3)}).sum().round(3) == expected[0]","data2.collect({(it as BigDecimal).round(3)}).sum().round(3) == expected[1]"]}, + + {"kind":"where","text":"","code":{"type":["Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float"],"funExpression":["'gaus(i0)*100 % i0'","'gaus(i0)*100 % i0'","'gaus(i0)*100 % i0'","'tanh(i0)*100 % i0'","'tanh(i0)*100 % i0'","'tanh(i0)*100 % i0'","'fast_tanh(i0)*100 % i0'","'fast_tanh(i0)*100 % i0'","'fast_tanh(i0)*100 % i0'","'fast_gaus(i0)+i0'","'fast_gaus(i0)+i0'","'fast_gaus(i0)+i0'","'softsign(i0)*100 % i0'","'softsign(i0)*100 % i0'","'softsign(i0)*100 % i0'","'random(i0)'","'random(i0)'"],"expected":["[2840.044, 2840.044]","[2840.043, 2840.043]","[0.000, 0.000]","[625.914, 625.914]","[625.910, 625.910]","[-20900.000, -20900.000]","[637.151, 637.151]","[637.011, 637.011]","[-20900.000, -20900.000]","[6632.311, 6632.311]","[6632.311, 6632.311]","[-233722189118.000, -233722189118.000]","[647.562, 647.562]","[647.564, 647.564]","[-20900.000, -20900.000]","[93.662, 93.662]","[93.662, 93.662]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation functions work across types. 
[0]", + "result":"PASS", + "duration":"0.009 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a function based on the provided expression.","code":["var func = Function.of(funExpression)"]}, + + {"kind":"and","text":"We use a large prime number to size our tensors in order to stress workload divisibility.","code":["var PRIME_SIZE_1 = 3","var PRIME_SIZE_2 = 5"]}, + + {"kind":"and","text":"We create 2 tensors storing the same values, one sliced and the other a normal tensor.","code":["var t1 = Tensor.of(type).withShape(PRIME_SIZE_1).andSeed(\"Seitan\")","var t2 = Tensor.of(type).withShape(PRIME_SIZE_2).all(0)[1..3]","t2[0..t2.size-1] = t1"]}, + + {"kind":"expect","text":"The types of both tensors should match what was provided during instantiation.","code":["t1.dataType == DataType.of(type)","t1.itemType == type","t2.dataType == DataType.of(type)","t2.itemType == type"]}, + + {"kind":"when","text":"We apply the function to both tensors...","code":["var result1 = ( !derive ? func(t1) : func.derive([t1], 0) )","var result2 = ( !derive ? func(t2) : func.derive([t2], 0) )"]}, + + {"kind":"then","text":"First we ensure that both tensors have the correct value/element type.","code":["result1.itemType == type","result2.itemType == type"]}, + + {"kind":"and","text":"The underlying data object should match the data array type as is defined by the data type!","code":["result1.mut.data.get().class == result1.dataType.dataArrayType()","result2.mut.data.get().class == result2.dataType.dataArrayType()"]}, + + {"kind":"and","text":"The data of the first non slice tensor as well as its slice should be as expected.","code":["result1.items == expected","result2.items == expected"]}, + + {"kind":"where","text":"","code":{"type":["Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer"],"funExpression":["'silu(i0)'","'silu(i0)'","'silu(i0)'","'silu(i0)'","'silu(i0)'","'silu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'"],"derive":["false","false","false","true","true","true","false","false","false","true","true","true","false","false","false","true","true","true","false","false","false","true","true","true","false","false","false","true","true","true"],"expected":["[1.0985150624263118, 2.331551300795844, 0.08745752408303246] as double[]","[1.098515, 2.3315513, 0.08745752] as float[]","[2124371342, 0, 0] as int[]","[1.0198659569612678, 1.0992238228008295, 0.5805713104936336] as double[]","[1.019866, 1.0992239, 0.5805713] as float[]","[1, 0, 0] as int[]","[1.2553019101258691, 2.48514859714065, 0.09199892841280806] as double[]","[1.255302, 2.4851487, 0.09199893] as float[]","[2124371342, 0, 0] as int[]","[1.09968352899801, 1.043758795430269, 0.6360091016582581] as double[]","[1.0996835, 1.0437589, 0.6360091] as float[]","[1, 0, 0] as int[]","[1.4457526798842053, 2.6470118580557593, 0.17005220305511268] as double[]","[1.4457527, 2.647012, 0.1700522] as float[]","[-2062888229, -2, -2] as int[]","[1.0507009873554805, 1.0507009873554805, 1.0507009873554805] as 
double[]","[1.050701, 1.050701, 1.050701] as float[]","[1, 0, 0] as int[]","[0.9891407665275838, 0.9999999999999742, 0.004239423130809827] as double[]","[0.98914075, 1.0, 0.0042394227] as float[]","[1, -1, -1] as int[]","[0.1226918386004856, 9.805489753489383E-13, 0.07858138767615172] as double[]","[0.12269211, 0.0, 0.078581385] as float[]","[0, 0, 0] as int[]","[0.7226245060456667, 0.9411395236107959, 0.004221551478848414] as double[]","[0.72262454, 0.9411396, 0.004221551] as float[]","[1, -1, -1] as int[]","[0.4370057619908791, 0.06596632547000601, 0.07792071781374522] as double[]","[0.43700573, 0.06596632, 0.07792072] as float[]","[0, 0, 0] as int[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation functions work across types. [1]", + "result":"PASS", + "duration":"0.003 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a function based on the provided expression.","code":["var func = Function.of(funExpression)"]}, + + {"kind":"and","text":"We use a large prime number to size our tensors in order to stress workload divisibility.","code":["var PRIME_SIZE_1 = 3","var PRIME_SIZE_2 = 5"]}, + + {"kind":"and","text":"We create 2 tensors storing the same values, one sliced and the other a normal tensor.","code":["var t1 = Tensor.of(type).withShape(PRIME_SIZE_1).andSeed(\"Seitan\")","var t2 = Tensor.of(type).withShape(PRIME_SIZE_2).all(0)[1..3]","t2[0..t2.size-1] = t1"]}, + + {"kind":"expect","text":"The types of both tensors should match what was provided during instantiation.","code":["t1.dataType == DataType.of(type)","t1.itemType == type","t2.dataType == DataType.of(type)","t2.itemType == type"]}, + + {"kind":"when","text":"We apply the function to both tensors...","code":["var result1 = ( !derive ? func(t1) : func.derive([t1], 0) )","var result2 = ( !derive ? 
func(t2) : func.derive([t2], 0) )"]}, + + {"kind":"then","text":"First we ensure that both tensors have the correct value/element type.","code":["result1.itemType == type","result2.itemType == type"]}, + + {"kind":"and","text":"The underlying data object should match the data array type as is defined by the data type!","code":["result1.mut.data.get().class == result1.dataType.dataArrayType()","result2.mut.data.get().class == result2.dataType.dataArrayType()"]}, + + {"kind":"and","text":"The data of the first non slice tensor as well as its slice should be as expected.","code":["result1.items == expected","result2.items == expected"]}, + + {"kind":"where","text":"","code":{"type":["Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer"],"funExpression":["'silu(i0)'","'silu(i0)'","'silu(i0)'","'silu(i0)'","'silu(i0)'","'silu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'"],"derive":["false","false","false","true","true","true","false","false","false","true","true","true","false","false","false","true","true","true","false","false","false","true","true","true","false","false","false","true","true","true"],"expected":["[1.0985150624263118, 2.331551300795844, 0.08745752408303246] as double[]","[1.098515, 2.3315513, 0.08745752] as float[]","[2124371342, 0, 0] as int[]","[1.0198659569612678, 1.0992238228008295, 0.5805713104936336] as double[]","[1.019866, 1.0992239, 0.5805713] as float[]","[1, 0, 0] as int[]","[1.2553019101258691, 2.48514859714065, 0.09199892841280806] as double[]","[1.255302, 2.4851487, 0.09199893] as float[]","[2124371342, 0, 0] as int[]","[1.09968352899801, 1.043758795430269, 0.6360091016582581] as double[]","[1.0996835, 1.0437589, 0.6360091] as float[]","[1, 0, 0] as int[]","[1.4457526798842053, 2.6470118580557593, 0.17005220305511268] as double[]","[1.4457527, 2.647012, 0.1700522] as float[]","[-2062888229, -2, -2] as int[]","[1.0507009873554805, 1.0507009873554805, 1.0507009873554805] as double[]","[1.050701, 1.050701, 1.050701] as float[]","[1, 0, 0] as int[]","[0.9891407665275838, 0.9999999999999742, 0.004239423130809827] as double[]","[0.98914075, 1.0, 0.0042394227] as float[]","[1, -1, -1] as int[]","[0.1226918386004856, 9.805489753489383E-13, 0.07858138767615172] as double[]","[0.12269211, 0.0, 0.078581385] as float[]","[0, 0, 0] as int[]","[0.7226245060456667, 0.9411395236107959, 0.004221551478848414] as double[]","[0.72262454, 0.9411396, 0.004221551] as float[]","[1, -1, -1] as int[]","[0.4370057619908791, 0.06596632547000601, 0.07792071781374522] as double[]","[0.43700573, 0.06596632, 0.07792072] as float[]","[0, 0, 0] as int[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation functions work across types. 
[2]", + "result":"PASS", + "duration":"0.003 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a function based on the provided expression.","code":["var func = Function.of(funExpression)"]}, + + {"kind":"and","text":"We use a large prime number to size our tensors in order to stress workload divisibility.","code":["var PRIME_SIZE_1 = 3","var PRIME_SIZE_2 = 5"]}, + + {"kind":"and","text":"We create 2 tensors storing the same values, one sliced and the other a normal tensor.","code":["var t1 = Tensor.of(type).withShape(PRIME_SIZE_1).andSeed(\"Seitan\")","var t2 = Tensor.of(type).withShape(PRIME_SIZE_2).all(0)[1..3]","t2[0..t2.size-1] = t1"]}, + + {"kind":"expect","text":"The types of both tensors should match what was provided during instantiation.","code":["t1.dataType == DataType.of(type)","t1.itemType == type","t2.dataType == DataType.of(type)","t2.itemType == type"]}, + + {"kind":"when","text":"We apply the function to both tensors...","code":["var result1 = ( !derive ? func(t1) : func.derive([t1], 0) )","var result2 = ( !derive ? func(t2) : func.derive([t2], 0) )"]}, + + {"kind":"then","text":"First we ensure that both tensors have the correct value/element type.","code":["result1.itemType == type","result2.itemType == type"]}, + + {"kind":"and","text":"The underlying data object should match the data array type as is defined by the data type!","code":["result1.mut.data.get().class == result1.dataType.dataArrayType()","result2.mut.data.get().class == result2.dataType.dataArrayType()"]}, + + {"kind":"and","text":"The data of the first non slice tensor as well as its slice should be as expected.","code":["result1.items == expected","result2.items == expected"]}, + + {"kind":"where","text":"","code":{"type":["Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer"],"funExpression":["'silu(i0)'","'silu(i0)'","'silu(i0)'","'silu(i0)'","'silu(i0)'","'silu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'"],"derive":["false","false","false","true","true","true","false","false","false","true","true","true","false","false","false","true","true","true","false","false","false","true","true","true","false","false","false","true","true","true"],"expected":["[1.0985150624263118, 2.331551300795844, 0.08745752408303246] as double[]","[1.098515, 2.3315513, 0.08745752] as float[]","[2124371342, 0, 0] as int[]","[1.0198659569612678, 1.0992238228008295, 0.5805713104936336] as double[]","[1.019866, 1.0992239, 0.5805713] as float[]","[1, 0, 0] as int[]","[1.2553019101258691, 2.48514859714065, 0.09199892841280806] as double[]","[1.255302, 2.4851487, 0.09199893] as float[]","[2124371342, 0, 0] as int[]","[1.09968352899801, 1.043758795430269, 0.6360091016582581] as double[]","[1.0996835, 1.0437589, 0.6360091] as float[]","[1, 0, 0] as int[]","[1.4457526798842053, 2.6470118580557593, 0.17005220305511268] as double[]","[1.4457527, 2.647012, 0.1700522] as float[]","[-2062888229, -2, -2] as int[]","[1.0507009873554805, 1.0507009873554805, 1.0507009873554805] as 
double[]","[1.050701, 1.050701, 1.050701] as float[]","[1, 0, 0] as int[]","[0.9891407665275838, 0.9999999999999742, 0.004239423130809827] as double[]","[0.98914075, 1.0, 0.0042394227] as float[]","[1, -1, -1] as int[]","[0.1226918386004856, 9.805489753489383E-13, 0.07858138767615172] as double[]","[0.12269211, 0.0, 0.078581385] as float[]","[0, 0, 0] as int[]","[0.7226245060456667, 0.9411395236107959, 0.004221551478848414] as double[]","[0.72262454, 0.9411396, 0.004221551] as float[]","[1, -1, -1] as int[]","[0.4370057619908791, 0.06596632547000601, 0.07792071781374522] as double[]","[0.43700573, 0.06596632, 0.07792072] as float[]","[0, 0, 0] as int[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation functions work across types. [3]", + "result":"PASS", + "duration":"0.006 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a function based on the provided expression.","code":["var func = Function.of(funExpression)"]}, + + {"kind":"and","text":"We use a large prime number to size our tensors in order to stress workload divisibility.","code":["var PRIME_SIZE_1 = 3","var PRIME_SIZE_2 = 5"]}, + + {"kind":"and","text":"We create 2 tensors storing the same values, one sliced and the other a normal tensor.","code":["var t1 = Tensor.of(type).withShape(PRIME_SIZE_1).andSeed(\"Seitan\")","var t2 = Tensor.of(type).withShape(PRIME_SIZE_2).all(0)[1..3]","t2[0..t2.size-1] = t1"]}, + + {"kind":"expect","text":"The types of both tensors should match what was provided during instantiation.","code":["t1.dataType == DataType.of(type)","t1.itemType == type","t2.dataType == DataType.of(type)","t2.itemType == type"]}, + + {"kind":"when","text":"We apply the function to both tensors...","code":["var result1 = ( !derive ? func(t1) : func.derive([t1], 0) )","var result2 = ( !derive ? 
func(t2) : func.derive([t2], 0) )"]}, + + {"kind":"then","text":"First we ensure that both tensors have the correct value/element type.","code":["result1.itemType == type","result2.itemType == type"]}, + + {"kind":"and","text":"The underlying data object should match the data array type as is defined by the data type!","code":["result1.mut.data.get().class == result1.dataType.dataArrayType()","result2.mut.data.get().class == result2.dataType.dataArrayType()"]}, + + {"kind":"and","text":"The data of the first non slice tensor as well as its slice should be as expected.","code":["result1.items == expected","result2.items == expected"]}, + + {"kind":"where","text":"","code":{"type":["Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer"],"funExpression":["'silu(i0)'","'silu(i0)'","'silu(i0)'","'silu(i0)'","'silu(i0)'","'silu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'"],"derive":["false","false","false","true","true","true","false","false","false","true","true","true","false","false","false","true","true","true","false","false","false","true","true","true","false","false","false","true","true","true"],"expected":["[1.0985150624263118, 2.331551300795844, 0.08745752408303246] as double[]","[1.098515, 2.3315513, 0.08745752] as float[]","[2124371342, 0, 0] as int[]","[1.0198659569612678, 1.0992238228008295, 0.5805713104936336] as double[]","[1.019866, 1.0992239, 0.5805713] as float[]","[1, 0, 0] as int[]","[1.2553019101258691, 2.48514859714065, 0.09199892841280806] as double[]","[1.255302, 2.4851487, 0.09199893] as float[]","[2124371342, 0, 0] as int[]","[1.09968352899801, 1.043758795430269, 0.6360091016582581] as double[]","[1.0996835, 1.0437589, 0.6360091] as float[]","[1, 0, 0] as int[]","[1.4457526798842053, 2.6470118580557593, 0.17005220305511268] as double[]","[1.4457527, 2.647012, 0.1700522] as float[]","[-2062888229, -2, -2] as int[]","[1.0507009873554805, 1.0507009873554805, 1.0507009873554805] as double[]","[1.050701, 1.050701, 1.050701] as float[]","[1, 0, 0] as int[]","[0.9891407665275838, 0.9999999999999742, 0.004239423130809827] as double[]","[0.98914075, 1.0, 0.0042394227] as float[]","[1, -1, -1] as int[]","[0.1226918386004856, 9.805489753489383E-13, 0.07858138767615172] as double[]","[0.12269211, 0.0, 0.078581385] as float[]","[0, 0, 0] as int[]","[0.7226245060456667, 0.9411395236107959, 0.004221551478848414] as double[]","[0.72262454, 0.9411396, 0.004221551] as float[]","[1, -1, -1] as int[]","[0.4370057619908791, 0.06596632547000601, 0.07792071781374522] as double[]","[0.43700573, 0.06596632, 0.07792072] as float[]","[0, 0, 0] as int[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation functions work across types. 
[4]", + "result":"PASS", + "duration":"0.002 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a function based on the provided expression.","code":["var func = Function.of(funExpression)"]}, + + {"kind":"and","text":"We use a large prime number to size our tensors in order to stress workload divisibility.","code":["var PRIME_SIZE_1 = 3","var PRIME_SIZE_2 = 5"]}, + + {"kind":"and","text":"We create 2 tensors storing the same values, one sliced and the other a normal tensor.","code":["var t1 = Tensor.of(type).withShape(PRIME_SIZE_1).andSeed(\"Seitan\")","var t2 = Tensor.of(type).withShape(PRIME_SIZE_2).all(0)[1..3]","t2[0..t2.size-1] = t1"]}, + + {"kind":"expect","text":"The types of both tensors should match what was provided during instantiation.","code":["t1.dataType == DataType.of(type)","t1.itemType == type","t2.dataType == DataType.of(type)","t2.itemType == type"]}, + + {"kind":"when","text":"We apply the function to both tensors...","code":["var result1 = ( !derive ? func(t1) : func.derive([t1], 0) )","var result2 = ( !derive ? func(t2) : func.derive([t2], 0) )"]}, + + {"kind":"then","text":"First we ensure that both tensors have the correct value/element type.","code":["result1.itemType == type","result2.itemType == type"]}, + + {"kind":"and","text":"The underlying data object should match the data array type as is defined by the data type!","code":["result1.mut.data.get().class == result1.dataType.dataArrayType()","result2.mut.data.get().class == result2.dataType.dataArrayType()"]}, + + {"kind":"and","text":"The data of the first non slice tensor as well as its slice should be as expected.","code":["result1.items == expected","result2.items == expected"]}, + + {"kind":"where","text":"","code":{"type":["Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer"],"funExpression":["'silu(i0)'","'silu(i0)'","'silu(i0)'","'silu(i0)'","'silu(i0)'","'silu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'"],"derive":["false","false","false","true","true","true","false","false","false","true","true","true","false","false","false","true","true","true","false","false","false","true","true","true","false","false","false","true","true","true"],"expected":["[1.0985150624263118, 2.331551300795844, 0.08745752408303246] as double[]","[1.098515, 2.3315513, 0.08745752] as float[]","[2124371342, 0, 0] as int[]","[1.0198659569612678, 1.0992238228008295, 0.5805713104936336] as double[]","[1.019866, 1.0992239, 0.5805713] as float[]","[1, 0, 0] as int[]","[1.2553019101258691, 2.48514859714065, 0.09199892841280806] as double[]","[1.255302, 2.4851487, 0.09199893] as float[]","[2124371342, 0, 0] as int[]","[1.09968352899801, 1.043758795430269, 0.6360091016582581] as double[]","[1.0996835, 1.0437589, 0.6360091] as float[]","[1, 0, 0] as int[]","[1.4457526798842053, 2.6470118580557593, 0.17005220305511268] as double[]","[1.4457527, 2.647012, 0.1700522] as float[]","[-2062888229, -2, -2] as int[]","[1.0507009873554805, 1.0507009873554805, 1.0507009873554805] as 
double[]","[1.050701, 1.050701, 1.050701] as float[]","[1, 0, 0] as int[]","[0.9891407665275838, 0.9999999999999742, 0.004239423130809827] as double[]","[0.98914075, 1.0, 0.0042394227] as float[]","[1, -1, -1] as int[]","[0.1226918386004856, 9.805489753489383E-13, 0.07858138767615172] as double[]","[0.12269211, 0.0, 0.078581385] as float[]","[0, 0, 0] as int[]","[0.7226245060456667, 0.9411395236107959, 0.004221551478848414] as double[]","[0.72262454, 0.9411396, 0.004221551] as float[]","[1, -1, -1] as int[]","[0.4370057619908791, 0.06596632547000601, 0.07792071781374522] as double[]","[0.43700573, 0.06596632, 0.07792072] as float[]","[0, 0, 0] as int[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation functions work across types. [5]", + "result":"PASS", + "duration":"0.002 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a function based on the provided expression.","code":["var func = Function.of(funExpression)"]}, + + {"kind":"and","text":"We use a large prime number to size our tensors in order to stress workload divisibility.","code":["var PRIME_SIZE_1 = 3","var PRIME_SIZE_2 = 5"]}, + + {"kind":"and","text":"We create 2 tensors storing the same values, one sliced and the other a normal tensor.","code":["var t1 = Tensor.of(type).withShape(PRIME_SIZE_1).andSeed(\"Seitan\")","var t2 = Tensor.of(type).withShape(PRIME_SIZE_2).all(0)[1..3]","t2[0..t2.size-1] = t1"]}, + + {"kind":"expect","text":"The types of both tensors should match what was provided during instantiation.","code":["t1.dataType == DataType.of(type)","t1.itemType == type","t2.dataType == DataType.of(type)","t2.itemType == type"]}, + + {"kind":"when","text":"We apply the function to both tensors...","code":["var result1 = ( !derive ? func(t1) : func.derive([t1], 0) )","var result2 = ( !derive ? 
func(t2) : func.derive([t2], 0) )"]}, + + {"kind":"then","text":"First we ensure that both tensors have the correct value/element type.","code":["result1.itemType == type","result2.itemType == type"]}, + + {"kind":"and","text":"The underlying data object should match the data array type as is defined by the data type!","code":["result1.mut.data.get().class == result1.dataType.dataArrayType()","result2.mut.data.get().class == result2.dataType.dataArrayType()"]}, + + {"kind":"and","text":"The data of the first non slice tensor as well as its slice should be as expected.","code":["result1.items == expected","result2.items == expected"]}, + + {"kind":"where","text":"","code":{"type":["Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer"],"funExpression":["'silu(i0)'","'silu(i0)'","'silu(i0)'","'silu(i0)'","'silu(i0)'","'silu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'"],"derive":["false","false","false","true","true","true","false","false","false","true","true","true","false","false","false","true","true","true","false","false","false","true","true","true","false","false","false","true","true","true"],"expected":["[1.0985150624263118, 2.331551300795844, 0.08745752408303246] as double[]","[1.098515, 2.3315513, 0.08745752] as float[]","[2124371342, 0, 0] as int[]","[1.0198659569612678, 1.0992238228008295, 0.5805713104936336] as double[]","[1.019866, 1.0992239, 0.5805713] as float[]","[1, 0, 0] as int[]","[1.2553019101258691, 2.48514859714065, 0.09199892841280806] as double[]","[1.255302, 2.4851487, 0.09199893] as float[]","[2124371342, 0, 0] as int[]","[1.09968352899801, 1.043758795430269, 0.6360091016582581] as double[]","[1.0996835, 1.0437589, 0.6360091] as float[]","[1, 0, 0] as int[]","[1.4457526798842053, 2.6470118580557593, 0.17005220305511268] as double[]","[1.4457527, 2.647012, 0.1700522] as float[]","[-2062888229, -2, -2] as int[]","[1.0507009873554805, 1.0507009873554805, 1.0507009873554805] as double[]","[1.050701, 1.050701, 1.050701] as float[]","[1, 0, 0] as int[]","[0.9891407665275838, 0.9999999999999742, 0.004239423130809827] as double[]","[0.98914075, 1.0, 0.0042394227] as float[]","[1, -1, -1] as int[]","[0.1226918386004856, 9.805489753489383E-13, 0.07858138767615172] as double[]","[0.12269211, 0.0, 0.078581385] as float[]","[0, 0, 0] as int[]","[0.7226245060456667, 0.9411395236107959, 0.004221551478848414] as double[]","[0.72262454, 0.9411396, 0.004221551] as float[]","[1, -1, -1] as int[]","[0.4370057619908791, 0.06596632547000601, 0.07792071781374522] as double[]","[0.43700573, 0.06596632, 0.07792072] as float[]","[0, 0, 0] as int[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation functions work across types. 
[6]", + "result":"PASS", + "duration":"0.003 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a function based on the provided expression.","code":["var func = Function.of(funExpression)"]}, + + {"kind":"and","text":"We use a large prime number to size our tensors in order to stress workload divisibility.","code":["var PRIME_SIZE_1 = 3","var PRIME_SIZE_2 = 5"]}, + + {"kind":"and","text":"We create 2 tensors storing the same values, one sliced and the other a normal tensor.","code":["var t1 = Tensor.of(type).withShape(PRIME_SIZE_1).andSeed(\"Seitan\")","var t2 = Tensor.of(type).withShape(PRIME_SIZE_2).all(0)[1..3]","t2[0..t2.size-1] = t1"]}, + + {"kind":"expect","text":"The types of both tensors should match what was provided during instantiation.","code":["t1.dataType == DataType.of(type)","t1.itemType == type","t2.dataType == DataType.of(type)","t2.itemType == type"]}, + + {"kind":"when","text":"We apply the function to both tensors...","code":["var result1 = ( !derive ? func(t1) : func.derive([t1], 0) )","var result2 = ( !derive ? func(t2) : func.derive([t2], 0) )"]}, + + {"kind":"then","text":"First we ensure that both tensors have the correct value/element type.","code":["result1.itemType == type","result2.itemType == type"]}, + + {"kind":"and","text":"The underlying data object should match the data array type as is defined by the data type!","code":["result1.mut.data.get().class == result1.dataType.dataArrayType()","result2.mut.data.get().class == result2.dataType.dataArrayType()"]}, + + {"kind":"and","text":"The data of the first non slice tensor as well as its slice should be as expected.","code":["result1.items == expected","result2.items == expected"]}, + + {"kind":"where","text":"","code":{"type":["Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer"],"funExpression":["'silu(i0)'","'silu(i0)'","'silu(i0)'","'silu(i0)'","'silu(i0)'","'silu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'"],"derive":["false","false","false","true","true","true","false","false","false","true","true","true","false","false","false","true","true","true","false","false","false","true","true","true","false","false","false","true","true","true"],"expected":["[1.0985150624263118, 2.331551300795844, 0.08745752408303246] as double[]","[1.098515, 2.3315513, 0.08745752] as float[]","[2124371342, 0, 0] as int[]","[1.0198659569612678, 1.0992238228008295, 0.5805713104936336] as double[]","[1.019866, 1.0992239, 0.5805713] as float[]","[1, 0, 0] as int[]","[1.2553019101258691, 2.48514859714065, 0.09199892841280806] as double[]","[1.255302, 2.4851487, 0.09199893] as float[]","[2124371342, 0, 0] as int[]","[1.09968352899801, 1.043758795430269, 0.6360091016582581] as double[]","[1.0996835, 1.0437589, 0.6360091] as float[]","[1, 0, 0] as int[]","[1.4457526798842053, 2.6470118580557593, 0.17005220305511268] as double[]","[1.4457527, 2.647012, 0.1700522] as float[]","[-2062888229, -2, -2] as int[]","[1.0507009873554805, 1.0507009873554805, 1.0507009873554805] as 
double[]","[1.050701, 1.050701, 1.050701] as float[]","[1, 0, 0] as int[]","[0.9891407665275838, 0.9999999999999742, 0.004239423130809827] as double[]","[0.98914075, 1.0, 0.0042394227] as float[]","[1, -1, -1] as int[]","[0.1226918386004856, 9.805489753489383E-13, 0.07858138767615172] as double[]","[0.12269211, 0.0, 0.078581385] as float[]","[0, 0, 0] as int[]","[0.7226245060456667, 0.9411395236107959, 0.004221551478848414] as double[]","[0.72262454, 0.9411396, 0.004221551] as float[]","[1, -1, -1] as int[]","[0.4370057619908791, 0.06596632547000601, 0.07792071781374522] as double[]","[0.43700573, 0.06596632, 0.07792072] as float[]","[0, 0, 0] as int[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation functions work across types. [7]", + "result":"PASS", + "duration":"0.002 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a function based on the provided expression.","code":["var func = Function.of(funExpression)"]}, + + {"kind":"and","text":"We use a large prime number to size our tensors in order to stress workload divisibility.","code":["var PRIME_SIZE_1 = 3","var PRIME_SIZE_2 = 5"]}, + + {"kind":"and","text":"We create 2 tensors storing the same values, one sliced and the other a normal tensor.","code":["var t1 = Tensor.of(type).withShape(PRIME_SIZE_1).andSeed(\"Seitan\")","var t2 = Tensor.of(type).withShape(PRIME_SIZE_2).all(0)[1..3]","t2[0..t2.size-1] = t1"]}, + + {"kind":"expect","text":"The types of both tensors should match what was provided during instantiation.","code":["t1.dataType == DataType.of(type)","t1.itemType == type","t2.dataType == DataType.of(type)","t2.itemType == type"]}, + + {"kind":"when","text":"We apply the function to both tensors...","code":["var result1 = ( !derive ? func(t1) : func.derive([t1], 0) )","var result2 = ( !derive ? 
func(t2) : func.derive([t2], 0) )"]}, + + {"kind":"then","text":"First we ensure that both tensors have the correct value/element type.","code":["result1.itemType == type","result2.itemType == type"]}, + + {"kind":"and","text":"The underlying data object should match the data array type as is defined by the data type!","code":["result1.mut.data.get().class == result1.dataType.dataArrayType()","result2.mut.data.get().class == result2.dataType.dataArrayType()"]}, + + {"kind":"and","text":"The data of the first non slice tensor as well as its slice should be as expected.","code":["result1.items == expected","result2.items == expected"]}, + + {"kind":"where","text":"","code":{"type":["Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer"],"funExpression":["'silu(i0)'","'silu(i0)'","'silu(i0)'","'silu(i0)'","'silu(i0)'","'silu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'"],"derive":["false","false","false","true","true","true","false","false","false","true","true","true","false","false","false","true","true","true","false","false","false","true","true","true","false","false","false","true","true","true"],"expected":["[1.0985150624263118, 2.331551300795844, 0.08745752408303246] as double[]","[1.098515, 2.3315513, 0.08745752] as float[]","[2124371342, 0, 0] as int[]","[1.0198659569612678, 1.0992238228008295, 0.5805713104936336] as double[]","[1.019866, 1.0992239, 0.5805713] as float[]","[1, 0, 0] as int[]","[1.2553019101258691, 2.48514859714065, 0.09199892841280806] as double[]","[1.255302, 2.4851487, 0.09199893] as float[]","[2124371342, 0, 0] as int[]","[1.09968352899801, 1.043758795430269, 0.6360091016582581] as double[]","[1.0996835, 1.0437589, 0.6360091] as float[]","[1, 0, 0] as int[]","[1.4457526798842053, 2.6470118580557593, 0.17005220305511268] as double[]","[1.4457527, 2.647012, 0.1700522] as float[]","[-2062888229, -2, -2] as int[]","[1.0507009873554805, 1.0507009873554805, 1.0507009873554805] as double[]","[1.050701, 1.050701, 1.050701] as float[]","[1, 0, 0] as int[]","[0.9891407665275838, 0.9999999999999742, 0.004239423130809827] as double[]","[0.98914075, 1.0, 0.0042394227] as float[]","[1, -1, -1] as int[]","[0.1226918386004856, 9.805489753489383E-13, 0.07858138767615172] as double[]","[0.12269211, 0.0, 0.078581385] as float[]","[0, 0, 0] as int[]","[0.7226245060456667, 0.9411395236107959, 0.004221551478848414] as double[]","[0.72262454, 0.9411396, 0.004221551] as float[]","[1, -1, -1] as int[]","[0.4370057619908791, 0.06596632547000601, 0.07792071781374522] as double[]","[0.43700573, 0.06596632, 0.07792072] as float[]","[0, 0, 0] as int[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation functions work across types. 
[8]", + "result":"PASS", + "duration":"0.002 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a function based on the provided expression.","code":["var func = Function.of(funExpression)"]}, + + {"kind":"and","text":"We use a large prime number to size our tensors in order to stress workload divisibility.","code":["var PRIME_SIZE_1 = 3","var PRIME_SIZE_2 = 5"]}, + + {"kind":"and","text":"We create 2 tensors storing the same values, one sliced and the other a normal tensor.","code":["var t1 = Tensor.of(type).withShape(PRIME_SIZE_1).andSeed(\"Seitan\")","var t2 = Tensor.of(type).withShape(PRIME_SIZE_2).all(0)[1..3]","t2[0..t2.size-1] = t1"]}, + + {"kind":"expect","text":"The types of both tensors should match what was provided during instantiation.","code":["t1.dataType == DataType.of(type)","t1.itemType == type","t2.dataType == DataType.of(type)","t2.itemType == type"]}, + + {"kind":"when","text":"We apply the function to both tensors...","code":["var result1 = ( !derive ? func(t1) : func.derive([t1], 0) )","var result2 = ( !derive ? func(t2) : func.derive([t2], 0) )"]}, + + {"kind":"then","text":"First we ensure that both tensors have the correct value/element type.","code":["result1.itemType == type","result2.itemType == type"]}, + + {"kind":"and","text":"The underlying data object should match the data array type as is defined by the data type!","code":["result1.mut.data.get().class == result1.dataType.dataArrayType()","result2.mut.data.get().class == result2.dataType.dataArrayType()"]}, + + {"kind":"and","text":"The data of the first non slice tensor as well as its slice should be as expected.","code":["result1.items == expected","result2.items == expected"]}, + + {"kind":"where","text":"","code":{"type":["Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer"],"funExpression":["'silu(i0)'","'silu(i0)'","'silu(i0)'","'silu(i0)'","'silu(i0)'","'silu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'"],"derive":["false","false","false","true","true","true","false","false","false","true","true","true","false","false","false","true","true","true","false","false","false","true","true","true","false","false","false","true","true","true"],"expected":["[1.0985150624263118, 2.331551300795844, 0.08745752408303246] as double[]","[1.098515, 2.3315513, 0.08745752] as float[]","[2124371342, 0, 0] as int[]","[1.0198659569612678, 1.0992238228008295, 0.5805713104936336] as double[]","[1.019866, 1.0992239, 0.5805713] as float[]","[1, 0, 0] as int[]","[1.2553019101258691, 2.48514859714065, 0.09199892841280806] as double[]","[1.255302, 2.4851487, 0.09199893] as float[]","[2124371342, 0, 0] as int[]","[1.09968352899801, 1.043758795430269, 0.6360091016582581] as double[]","[1.0996835, 1.0437589, 0.6360091] as float[]","[1, 0, 0] as int[]","[1.4457526798842053, 2.6470118580557593, 0.17005220305511268] as double[]","[1.4457527, 2.647012, 0.1700522] as float[]","[-2062888229, -2, -2] as int[]","[1.0507009873554805, 1.0507009873554805, 1.0507009873554805] as 
double[]","[1.050701, 1.050701, 1.050701] as float[]","[1, 0, 0] as int[]","[0.9891407665275838, 0.9999999999999742, 0.004239423130809827] as double[]","[0.98914075, 1.0, 0.0042394227] as float[]","[1, -1, -1] as int[]","[0.1226918386004856, 9.805489753489383E-13, 0.07858138767615172] as double[]","[0.12269211, 0.0, 0.078581385] as float[]","[0, 0, 0] as int[]","[0.7226245060456667, 0.9411395236107959, 0.004221551478848414] as double[]","[0.72262454, 0.9411396, 0.004221551] as float[]","[1, -1, -1] as int[]","[0.4370057619908791, 0.06596632547000601, 0.07792071781374522] as double[]","[0.43700573, 0.06596632, 0.07792072] as float[]","[0, 0, 0] as int[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation functions work across types. [9]", + "result":"PASS", + "duration":"0.002 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a function based on the provided expression.","code":["var func = Function.of(funExpression)"]}, + + {"kind":"and","text":"We use a large prime number to size our tensors in order to stress workload divisibility.","code":["var PRIME_SIZE_1 = 3","var PRIME_SIZE_2 = 5"]}, + + {"kind":"and","text":"We create 2 tensors storing the same values, one sliced and the other a normal tensor.","code":["var t1 = Tensor.of(type).withShape(PRIME_SIZE_1).andSeed(\"Seitan\")","var t2 = Tensor.of(type).withShape(PRIME_SIZE_2).all(0)[1..3]","t2[0..t2.size-1] = t1"]}, + + {"kind":"expect","text":"The types of both tensors should match what was provided during instantiation.","code":["t1.dataType == DataType.of(type)","t1.itemType == type","t2.dataType == DataType.of(type)","t2.itemType == type"]}, + + {"kind":"when","text":"We apply the function to both tensors...","code":["var result1 = ( !derive ? func(t1) : func.derive([t1], 0) )","var result2 = ( !derive ? 
func(t2) : func.derive([t2], 0) )"]}, + + {"kind":"then","text":"First we ensure that both tensors have the correct value/element type.","code":["result1.itemType == type","result2.itemType == type"]}, + + {"kind":"and","text":"The underlying data object should match the data array type as is defined by the data type!","code":["result1.mut.data.get().class == result1.dataType.dataArrayType()","result2.mut.data.get().class == result2.dataType.dataArrayType()"]}, + + {"kind":"and","text":"The data of the first non slice tensor as well as its slice should be as expected.","code":["result1.items == expected","result2.items == expected"]}, + + {"kind":"where","text":"","code":{"type":["Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer"],"funExpression":["'silu(i0)'","'silu(i0)'","'silu(i0)'","'silu(i0)'","'silu(i0)'","'silu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'"],"derive":["false","false","false","true","true","true","false","false","false","true","true","true","false","false","false","true","true","true","false","false","false","true","true","true","false","false","false","true","true","true"],"expected":["[1.0985150624263118, 2.331551300795844, 0.08745752408303246] as double[]","[1.098515, 2.3315513, 0.08745752] as float[]","[2124371342, 0, 0] as int[]","[1.0198659569612678, 1.0992238228008295, 0.5805713104936336] as double[]","[1.019866, 1.0992239, 0.5805713] as float[]","[1, 0, 0] as int[]","[1.2553019101258691, 2.48514859714065, 0.09199892841280806] as double[]","[1.255302, 2.4851487, 0.09199893] as float[]","[2124371342, 0, 0] as int[]","[1.09968352899801, 1.043758795430269, 0.6360091016582581] as double[]","[1.0996835, 1.0437589, 0.6360091] as float[]","[1, 0, 0] as int[]","[1.4457526798842053, 2.6470118580557593, 0.17005220305511268] as double[]","[1.4457527, 2.647012, 0.1700522] as float[]","[-2062888229, -2, -2] as int[]","[1.0507009873554805, 1.0507009873554805, 1.0507009873554805] as double[]","[1.050701, 1.050701, 1.050701] as float[]","[1, 0, 0] as int[]","[0.9891407665275838, 0.9999999999999742, 0.004239423130809827] as double[]","[0.98914075, 1.0, 0.0042394227] as float[]","[1, -1, -1] as int[]","[0.1226918386004856, 9.805489753489383E-13, 0.07858138767615172] as double[]","[0.12269211, 0.0, 0.078581385] as float[]","[0, 0, 0] as int[]","[0.7226245060456667, 0.9411395236107959, 0.004221551478848414] as double[]","[0.72262454, 0.9411396, 0.004221551] as float[]","[1, -1, -1] as int[]","[0.4370057619908791, 0.06596632547000601, 0.07792071781374522] as double[]","[0.43700573, 0.06596632, 0.07792072] as float[]","[0, 0, 0] as int[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation functions work across types. 
[10]", + "result":"PASS", + "duration":"0.002 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a function based on the provided expression.","code":["var func = Function.of(funExpression)"]}, + + {"kind":"and","text":"We use a large prime number to size our tensors in order to stress workload divisibility.","code":["var PRIME_SIZE_1 = 3","var PRIME_SIZE_2 = 5"]}, + + {"kind":"and","text":"We create 2 tensors storing the same values, one sliced and the other a normal tensor.","code":["var t1 = Tensor.of(type).withShape(PRIME_SIZE_1).andSeed(\"Seitan\")","var t2 = Tensor.of(type).withShape(PRIME_SIZE_2).all(0)[1..3]","t2[0..t2.size-1] = t1"]}, + + {"kind":"expect","text":"The types of both tensors should match what was provided during instantiation.","code":["t1.dataType == DataType.of(type)","t1.itemType == type","t2.dataType == DataType.of(type)","t2.itemType == type"]}, + + {"kind":"when","text":"We apply the function to both tensors...","code":["var result1 = ( !derive ? func(t1) : func.derive([t1], 0) )","var result2 = ( !derive ? func(t2) : func.derive([t2], 0) )"]}, + + {"kind":"then","text":"First we ensure that both tensors have the correct value/element type.","code":["result1.itemType == type","result2.itemType == type"]}, + + {"kind":"and","text":"The underlying data object should match the data array type as is defined by the data type!","code":["result1.mut.data.get().class == result1.dataType.dataArrayType()","result2.mut.data.get().class == result2.dataType.dataArrayType()"]}, + + {"kind":"and","text":"The data of the first non slice tensor as well as its slice should be as expected.","code":["result1.items == expected","result2.items == expected"]}, + + {"kind":"where","text":"","code":{"type":["Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer"],"funExpression":["'silu(i0)'","'silu(i0)'","'silu(i0)'","'silu(i0)'","'silu(i0)'","'silu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'"],"derive":["false","false","false","true","true","true","false","false","false","true","true","true","false","false","false","true","true","true","false","false","false","true","true","true","false","false","false","true","true","true"],"expected":["[1.0985150624263118, 2.331551300795844, 0.08745752408303246] as double[]","[1.098515, 2.3315513, 0.08745752] as float[]","[2124371342, 0, 0] as int[]","[1.0198659569612678, 1.0992238228008295, 0.5805713104936336] as double[]","[1.019866, 1.0992239, 0.5805713] as float[]","[1, 0, 0] as int[]","[1.2553019101258691, 2.48514859714065, 0.09199892841280806] as double[]","[1.255302, 2.4851487, 0.09199893] as float[]","[2124371342, 0, 0] as int[]","[1.09968352899801, 1.043758795430269, 0.6360091016582581] as double[]","[1.0996835, 1.0437589, 0.6360091] as float[]","[1, 0, 0] as int[]","[1.4457526798842053, 2.6470118580557593, 0.17005220305511268] as double[]","[1.4457527, 2.647012, 0.1700522] as float[]","[-2062888229, -2, -2] as int[]","[1.0507009873554805, 1.0507009873554805, 1.0507009873554805] as 
double[]","[1.050701, 1.050701, 1.050701] as float[]","[1, 0, 0] as int[]","[0.9891407665275838, 0.9999999999999742, 0.004239423130809827] as double[]","[0.98914075, 1.0, 0.0042394227] as float[]","[1, -1, -1] as int[]","[0.1226918386004856, 9.805489753489383E-13, 0.07858138767615172] as double[]","[0.12269211, 0.0, 0.078581385] as float[]","[0, 0, 0] as int[]","[0.7226245060456667, 0.9411395236107959, 0.004221551478848414] as double[]","[0.72262454, 0.9411396, 0.004221551] as float[]","[1, -1, -1] as int[]","[0.4370057619908791, 0.06596632547000601, 0.07792071781374522] as double[]","[0.43700573, 0.06596632, 0.07792072] as float[]","[0, 0, 0] as int[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation functions work across types. [11]", + "result":"PASS", + "duration":"0.002 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a function based on the provided expression.","code":["var func = Function.of(funExpression)"]}, + + {"kind":"and","text":"We use a large prime number to size our tensors in order to stress workload divisibility.","code":["var PRIME_SIZE_1 = 3","var PRIME_SIZE_2 = 5"]}, + + {"kind":"and","text":"We create 2 tensors storing the same values, one sliced and the other a normal tensor.","code":["var t1 = Tensor.of(type).withShape(PRIME_SIZE_1).andSeed(\"Seitan\")","var t2 = Tensor.of(type).withShape(PRIME_SIZE_2).all(0)[1..3]","t2[0..t2.size-1] = t1"]}, + + {"kind":"expect","text":"The types of both tensors should match what was provided during instantiation.","code":["t1.dataType == DataType.of(type)","t1.itemType == type","t2.dataType == DataType.of(type)","t2.itemType == type"]}, + + {"kind":"when","text":"We apply the function to both tensors...","code":["var result1 = ( !derive ? func(t1) : func.derive([t1], 0) )","var result2 = ( !derive ? 
func(t2) : func.derive([t2], 0) )"]}, + + {"kind":"then","text":"First we ensure that both tensors have the correct value/element type.","code":["result1.itemType == type","result2.itemType == type"]}, + + {"kind":"and","text":"The underlying data object should match the data array type as is defined by the data type!","code":["result1.mut.data.get().class == result1.dataType.dataArrayType()","result2.mut.data.get().class == result2.dataType.dataArrayType()"]}, + + {"kind":"and","text":"The data of the first non slice tensor as well as its slice should be as expected.","code":["result1.items == expected","result2.items == expected"]}, + + {"kind":"where","text":"","code":{"type":["Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer"],"funExpression":["'silu(i0)'","'silu(i0)'","'silu(i0)'","'silu(i0)'","'silu(i0)'","'silu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'"],"derive":["false","false","false","true","true","true","false","false","false","true","true","true","false","false","false","true","true","true","false","false","false","true","true","true","false","false","false","true","true","true"],"expected":["[1.0985150624263118, 2.331551300795844, 0.08745752408303246] as double[]","[1.098515, 2.3315513, 0.08745752] as float[]","[2124371342, 0, 0] as int[]","[1.0198659569612678, 1.0992238228008295, 0.5805713104936336] as double[]","[1.019866, 1.0992239, 0.5805713] as float[]","[1, 0, 0] as int[]","[1.2553019101258691, 2.48514859714065, 0.09199892841280806] as double[]","[1.255302, 2.4851487, 0.09199893] as float[]","[2124371342, 0, 0] as int[]","[1.09968352899801, 1.043758795430269, 0.6360091016582581] as double[]","[1.0996835, 1.0437589, 0.6360091] as float[]","[1, 0, 0] as int[]","[1.4457526798842053, 2.6470118580557593, 0.17005220305511268] as double[]","[1.4457527, 2.647012, 0.1700522] as float[]","[-2062888229, -2, -2] as int[]","[1.0507009873554805, 1.0507009873554805, 1.0507009873554805] as double[]","[1.050701, 1.050701, 1.050701] as float[]","[1, 0, 0] as int[]","[0.9891407665275838, 0.9999999999999742, 0.004239423130809827] as double[]","[0.98914075, 1.0, 0.0042394227] as float[]","[1, -1, -1] as int[]","[0.1226918386004856, 9.805489753489383E-13, 0.07858138767615172] as double[]","[0.12269211, 0.0, 0.078581385] as float[]","[0, 0, 0] as int[]","[0.7226245060456667, 0.9411395236107959, 0.004221551478848414] as double[]","[0.72262454, 0.9411396, 0.004221551] as float[]","[1, -1, -1] as int[]","[0.4370057619908791, 0.06596632547000601, 0.07792071781374522] as double[]","[0.43700573, 0.06596632, 0.07792072] as float[]","[0, 0, 0] as int[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation functions work across types. 
[12]", + "result":"PASS", + "duration":"0.002 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a function based on the provided expression.","code":["var func = Function.of(funExpression)"]}, + + {"kind":"and","text":"We use a large prime number to size our tensors in order to stress workload divisibility.","code":["var PRIME_SIZE_1 = 3","var PRIME_SIZE_2 = 5"]}, + + {"kind":"and","text":"We create 2 tensors storing the same values, one sliced and the other a normal tensor.","code":["var t1 = Tensor.of(type).withShape(PRIME_SIZE_1).andSeed(\"Seitan\")","var t2 = Tensor.of(type).withShape(PRIME_SIZE_2).all(0)[1..3]","t2[0..t2.size-1] = t1"]}, + + {"kind":"expect","text":"The types of both tensors should match what was provided during instantiation.","code":["t1.dataType == DataType.of(type)","t1.itemType == type","t2.dataType == DataType.of(type)","t2.itemType == type"]}, + + {"kind":"when","text":"We apply the function to both tensors...","code":["var result1 = ( !derive ? func(t1) : func.derive([t1], 0) )","var result2 = ( !derive ? func(t2) : func.derive([t2], 0) )"]}, + + {"kind":"then","text":"First we ensure that both tensors have the correct value/element type.","code":["result1.itemType == type","result2.itemType == type"]}, + + {"kind":"and","text":"The underlying data object should match the data array type as is defined by the data type!","code":["result1.mut.data.get().class == result1.dataType.dataArrayType()","result2.mut.data.get().class == result2.dataType.dataArrayType()"]}, + + {"kind":"and","text":"The data of the first non slice tensor as well as its slice should be as expected.","code":["result1.items == expected","result2.items == expected"]}, + + {"kind":"where","text":"","code":{"type":["Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer"],"funExpression":["'silu(i0)'","'silu(i0)'","'silu(i0)'","'silu(i0)'","'silu(i0)'","'silu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'"],"derive":["false","false","false","true","true","true","false","false","false","true","true","true","false","false","false","true","true","true","false","false","false","true","true","true","false","false","false","true","true","true"],"expected":["[1.0985150624263118, 2.331551300795844, 0.08745752408303246] as double[]","[1.098515, 2.3315513, 0.08745752] as float[]","[2124371342, 0, 0] as int[]","[1.0198659569612678, 1.0992238228008295, 0.5805713104936336] as double[]","[1.019866, 1.0992239, 0.5805713] as float[]","[1, 0, 0] as int[]","[1.2553019101258691, 2.48514859714065, 0.09199892841280806] as double[]","[1.255302, 2.4851487, 0.09199893] as float[]","[2124371342, 0, 0] as int[]","[1.09968352899801, 1.043758795430269, 0.6360091016582581] as double[]","[1.0996835, 1.0437589, 0.6360091] as float[]","[1, 0, 0] as int[]","[1.4457526798842053, 2.6470118580557593, 0.17005220305511268] as double[]","[1.4457527, 2.647012, 0.1700522] as float[]","[-2062888229, -2, -2] as int[]","[1.0507009873554805, 1.0507009873554805, 1.0507009873554805] as 
double[]","[1.050701, 1.050701, 1.050701] as float[]","[1, 0, 0] as int[]","[0.9891407665275838, 0.9999999999999742, 0.004239423130809827] as double[]","[0.98914075, 1.0, 0.0042394227] as float[]","[1, -1, -1] as int[]","[0.1226918386004856, 9.805489753489383E-13, 0.07858138767615172] as double[]","[0.12269211, 0.0, 0.078581385] as float[]","[0, 0, 0] as int[]","[0.7226245060456667, 0.9411395236107959, 0.004221551478848414] as double[]","[0.72262454, 0.9411396, 0.004221551] as float[]","[1, -1, -1] as int[]","[0.4370057619908791, 0.06596632547000601, 0.07792071781374522] as double[]","[0.43700573, 0.06596632, 0.07792072] as float[]","[0, 0, 0] as int[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation functions work across types. [13]", + "result":"PASS", + "duration":"0.002 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a function based on the provided expression.","code":["var func = Function.of(funExpression)"]}, + + {"kind":"and","text":"We use a large prime number to size our tensors in order to stress workload divisibility.","code":["var PRIME_SIZE_1 = 3","var PRIME_SIZE_2 = 5"]}, + + {"kind":"and","text":"We create 2 tensors storing the same values, one sliced and the other a normal tensor.","code":["var t1 = Tensor.of(type).withShape(PRIME_SIZE_1).andSeed(\"Seitan\")","var t2 = Tensor.of(type).withShape(PRIME_SIZE_2).all(0)[1..3]","t2[0..t2.size-1] = t1"]}, + + {"kind":"expect","text":"The types of both tensors should match what was provided during instantiation.","code":["t1.dataType == DataType.of(type)","t1.itemType == type","t2.dataType == DataType.of(type)","t2.itemType == type"]}, + + {"kind":"when","text":"We apply the function to both tensors...","code":["var result1 = ( !derive ? func(t1) : func.derive([t1], 0) )","var result2 = ( !derive ? 
func(t2) : func.derive([t2], 0) )"]}, + + {"kind":"then","text":"First we ensure that both tensors have the correct value/element type.","code":["result1.itemType == type","result2.itemType == type"]}, + + {"kind":"and","text":"The underlying data object should match the data array type as is defined by the data type!","code":["result1.mut.data.get().class == result1.dataType.dataArrayType()","result2.mut.data.get().class == result2.dataType.dataArrayType()"]}, + + {"kind":"and","text":"The data of the first non slice tensor as well as its slice should be as expected.","code":["result1.items == expected","result2.items == expected"]}, + + {"kind":"where","text":"","code":{"type":["Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer"],"funExpression":["'silu(i0)'","'silu(i0)'","'silu(i0)'","'silu(i0)'","'silu(i0)'","'silu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'"],"derive":["false","false","false","true","true","true","false","false","false","true","true","true","false","false","false","true","true","true","false","false","false","true","true","true","false","false","false","true","true","true"],"expected":["[1.0985150624263118, 2.331551300795844, 0.08745752408303246] as double[]","[1.098515, 2.3315513, 0.08745752] as float[]","[2124371342, 0, 0] as int[]","[1.0198659569612678, 1.0992238228008295, 0.5805713104936336] as double[]","[1.019866, 1.0992239, 0.5805713] as float[]","[1, 0, 0] as int[]","[1.2553019101258691, 2.48514859714065, 0.09199892841280806] as double[]","[1.255302, 2.4851487, 0.09199893] as float[]","[2124371342, 0, 0] as int[]","[1.09968352899801, 1.043758795430269, 0.6360091016582581] as double[]","[1.0996835, 1.0437589, 0.6360091] as float[]","[1, 0, 0] as int[]","[1.4457526798842053, 2.6470118580557593, 0.17005220305511268] as double[]","[1.4457527, 2.647012, 0.1700522] as float[]","[-2062888229, -2, -2] as int[]","[1.0507009873554805, 1.0507009873554805, 1.0507009873554805] as double[]","[1.050701, 1.050701, 1.050701] as float[]","[1, 0, 0] as int[]","[0.9891407665275838, 0.9999999999999742, 0.004239423130809827] as double[]","[0.98914075, 1.0, 0.0042394227] as float[]","[1, -1, -1] as int[]","[0.1226918386004856, 9.805489753489383E-13, 0.07858138767615172] as double[]","[0.12269211, 0.0, 0.078581385] as float[]","[0, 0, 0] as int[]","[0.7226245060456667, 0.9411395236107959, 0.004221551478848414] as double[]","[0.72262454, 0.9411396, 0.004221551] as float[]","[1, -1, -1] as int[]","[0.4370057619908791, 0.06596632547000601, 0.07792071781374522] as double[]","[0.43700573, 0.06596632, 0.07792072] as float[]","[0, 0, 0] as int[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation functions work across types. 
[14]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a function based on the provided expression.","code":["var func = Function.of(funExpression)"]}, + + {"kind":"and","text":"We use a large prime number to size our tensors in order to stress workload divisibility.","code":["var PRIME_SIZE_1 = 3","var PRIME_SIZE_2 = 5"]}, + + {"kind":"and","text":"We create 2 tensors storing the same values, one sliced and the other a normal tensor.","code":["var t1 = Tensor.of(type).withShape(PRIME_SIZE_1).andSeed(\"Seitan\")","var t2 = Tensor.of(type).withShape(PRIME_SIZE_2).all(0)[1..3]","t2[0..t2.size-1] = t1"]}, + + {"kind":"expect","text":"The types of both tensors should match what was provided during instantiation.","code":["t1.dataType == DataType.of(type)","t1.itemType == type","t2.dataType == DataType.of(type)","t2.itemType == type"]}, + + {"kind":"when","text":"We apply the function to both tensors...","code":["var result1 = ( !derive ? func(t1) : func.derive([t1], 0) )","var result2 = ( !derive ? func(t2) : func.derive([t2], 0) )"]}, + + {"kind":"then","text":"First we ensure that both tensors have the correct value/element type.","code":["result1.itemType == type","result2.itemType == type"]}, + + {"kind":"and","text":"The underlying data object should match the data array type as is defined by the data type!","code":["result1.mut.data.get().class == result1.dataType.dataArrayType()","result2.mut.data.get().class == result2.dataType.dataArrayType()"]}, + + {"kind":"and","text":"The data of the first non slice tensor as well as its slice should be as expected.","code":["result1.items == expected","result2.items == expected"]}, + + {"kind":"where","text":"","code":{"type":["Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer"],"funExpression":["'silu(i0)'","'silu(i0)'","'silu(i0)'","'silu(i0)'","'silu(i0)'","'silu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'"],"derive":["false","false","false","true","true","true","false","false","false","true","true","true","false","false","false","true","true","true","false","false","false","true","true","true","false","false","false","true","true","true"],"expected":["[1.0985150624263118, 2.331551300795844, 0.08745752408303246] as double[]","[1.098515, 2.3315513, 0.08745752] as float[]","[2124371342, 0, 0] as int[]","[1.0198659569612678, 1.0992238228008295, 0.5805713104936336] as double[]","[1.019866, 1.0992239, 0.5805713] as float[]","[1, 0, 0] as int[]","[1.2553019101258691, 2.48514859714065, 0.09199892841280806] as double[]","[1.255302, 2.4851487, 0.09199893] as float[]","[2124371342, 0, 0] as int[]","[1.09968352899801, 1.043758795430269, 0.6360091016582581] as double[]","[1.0996835, 1.0437589, 0.6360091] as float[]","[1, 0, 0] as int[]","[1.4457526798842053, 2.6470118580557593, 0.17005220305511268] as double[]","[1.4457527, 2.647012, 0.1700522] as float[]","[-2062888229, -2, -2] as int[]","[1.0507009873554805, 1.0507009873554805, 1.0507009873554805] as 
double[]","[1.050701, 1.050701, 1.050701] as float[]","[1, 0, 0] as int[]","[0.9891407665275838, 0.9999999999999742, 0.004239423130809827] as double[]","[0.98914075, 1.0, 0.0042394227] as float[]","[1, -1, -1] as int[]","[0.1226918386004856, 9.805489753489383E-13, 0.07858138767615172] as double[]","[0.12269211, 0.0, 0.078581385] as float[]","[0, 0, 0] as int[]","[0.7226245060456667, 0.9411395236107959, 0.004221551478848414] as double[]","[0.72262454, 0.9411396, 0.004221551] as float[]","[1, -1, -1] as int[]","[0.4370057619908791, 0.06596632547000601, 0.07792071781374522] as double[]","[0.43700573, 0.06596632, 0.07792072] as float[]","[0, 0, 0] as int[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation functions work across types. [15]", + "result":"PASS", + "duration":"0.002 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a function based on the provided expression.","code":["var func = Function.of(funExpression)"]}, + + {"kind":"and","text":"We use a large prime number to size our tensors in order to stress workload divisibility.","code":["var PRIME_SIZE_1 = 3","var PRIME_SIZE_2 = 5"]}, + + {"kind":"and","text":"We create 2 tensors storing the same values, one sliced and the other a normal tensor.","code":["var t1 = Tensor.of(type).withShape(PRIME_SIZE_1).andSeed(\"Seitan\")","var t2 = Tensor.of(type).withShape(PRIME_SIZE_2).all(0)[1..3]","t2[0..t2.size-1] = t1"]}, + + {"kind":"expect","text":"The types of both tensors should match what was provided during instantiation.","code":["t1.dataType == DataType.of(type)","t1.itemType == type","t2.dataType == DataType.of(type)","t2.itemType == type"]}, + + {"kind":"when","text":"We apply the function to both tensors...","code":["var result1 = ( !derive ? func(t1) : func.derive([t1], 0) )","var result2 = ( !derive ? 
func(t2) : func.derive([t2], 0) )"]}, + + {"kind":"then","text":"First we ensure that both tensors have the correct value/element type.","code":["result1.itemType == type","result2.itemType == type"]}, + + {"kind":"and","text":"The underlying data object should match the data array type as is defined by the data type!","code":["result1.mut.data.get().class == result1.dataType.dataArrayType()","result2.mut.data.get().class == result2.dataType.dataArrayType()"]}, + + {"kind":"and","text":"The data of the first non slice tensor as well as its slice should be as expected.","code":["result1.items == expected","result2.items == expected"]}, + + {"kind":"where","text":"","code":{"type":["Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer"],"funExpression":["'silu(i0)'","'silu(i0)'","'silu(i0)'","'silu(i0)'","'silu(i0)'","'silu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'"],"derive":["false","false","false","true","true","true","false","false","false","true","true","true","false","false","false","true","true","true","false","false","false","true","true","true","false","false","false","true","true","true"],"expected":["[1.0985150624263118, 2.331551300795844, 0.08745752408303246] as double[]","[1.098515, 2.3315513, 0.08745752] as float[]","[2124371342, 0, 0] as int[]","[1.0198659569612678, 1.0992238228008295, 0.5805713104936336] as double[]","[1.019866, 1.0992239, 0.5805713] as float[]","[1, 0, 0] as int[]","[1.2553019101258691, 2.48514859714065, 0.09199892841280806] as double[]","[1.255302, 2.4851487, 0.09199893] as float[]","[2124371342, 0, 0] as int[]","[1.09968352899801, 1.043758795430269, 0.6360091016582581] as double[]","[1.0996835, 1.0437589, 0.6360091] as float[]","[1, 0, 0] as int[]","[1.4457526798842053, 2.6470118580557593, 0.17005220305511268] as double[]","[1.4457527, 2.647012, 0.1700522] as float[]","[-2062888229, -2, -2] as int[]","[1.0507009873554805, 1.0507009873554805, 1.0507009873554805] as double[]","[1.050701, 1.050701, 1.050701] as float[]","[1, 0, 0] as int[]","[0.9891407665275838, 0.9999999999999742, 0.004239423130809827] as double[]","[0.98914075, 1.0, 0.0042394227] as float[]","[1, -1, -1] as int[]","[0.1226918386004856, 9.805489753489383E-13, 0.07858138767615172] as double[]","[0.12269211, 0.0, 0.078581385] as float[]","[0, 0, 0] as int[]","[0.7226245060456667, 0.9411395236107959, 0.004221551478848414] as double[]","[0.72262454, 0.9411396, 0.004221551] as float[]","[1, -1, -1] as int[]","[0.4370057619908791, 0.06596632547000601, 0.07792071781374522] as double[]","[0.43700573, 0.06596632, 0.07792072] as float[]","[0, 0, 0] as int[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation functions work across types. 
[16]", + "result":"PASS", + "duration":"0.002 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a function based on the provided expression.","code":["var func = Function.of(funExpression)"]}, + + {"kind":"and","text":"We use a large prime number to size our tensors in order to stress workload divisibility.","code":["var PRIME_SIZE_1 = 3","var PRIME_SIZE_2 = 5"]}, + + {"kind":"and","text":"We create 2 tensors storing the same values, one sliced and the other a normal tensor.","code":["var t1 = Tensor.of(type).withShape(PRIME_SIZE_1).andSeed(\"Seitan\")","var t2 = Tensor.of(type).withShape(PRIME_SIZE_2).all(0)[1..3]","t2[0..t2.size-1] = t1"]}, + + {"kind":"expect","text":"The types of both tensors should match what was provided during instantiation.","code":["t1.dataType == DataType.of(type)","t1.itemType == type","t2.dataType == DataType.of(type)","t2.itemType == type"]}, + + {"kind":"when","text":"We apply the function to both tensors...","code":["var result1 = ( !derive ? func(t1) : func.derive([t1], 0) )","var result2 = ( !derive ? func(t2) : func.derive([t2], 0) )"]}, + + {"kind":"then","text":"First we ensure that both tensors have the correct value/element type.","code":["result1.itemType == type","result2.itemType == type"]}, + + {"kind":"and","text":"The underlying data object should match the data array type as is defined by the data type!","code":["result1.mut.data.get().class == result1.dataType.dataArrayType()","result2.mut.data.get().class == result2.dataType.dataArrayType()"]}, + + {"kind":"and","text":"The data of the first non slice tensor as well as its slice should be as expected.","code":["result1.items == expected","result2.items == expected"]}, + + {"kind":"where","text":"","code":{"type":["Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer"],"funExpression":["'silu(i0)'","'silu(i0)'","'silu(i0)'","'silu(i0)'","'silu(i0)'","'silu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'"],"derive":["false","false","false","true","true","true","false","false","false","true","true","true","false","false","false","true","true","true","false","false","false","true","true","true","false","false","false","true","true","true"],"expected":["[1.0985150624263118, 2.331551300795844, 0.08745752408303246] as double[]","[1.098515, 2.3315513, 0.08745752] as float[]","[2124371342, 0, 0] as int[]","[1.0198659569612678, 1.0992238228008295, 0.5805713104936336] as double[]","[1.019866, 1.0992239, 0.5805713] as float[]","[1, 0, 0] as int[]","[1.2553019101258691, 2.48514859714065, 0.09199892841280806] as double[]","[1.255302, 2.4851487, 0.09199893] as float[]","[2124371342, 0, 0] as int[]","[1.09968352899801, 1.043758795430269, 0.6360091016582581] as double[]","[1.0996835, 1.0437589, 0.6360091] as float[]","[1, 0, 0] as int[]","[1.4457526798842053, 2.6470118580557593, 0.17005220305511268] as double[]","[1.4457527, 2.647012, 0.1700522] as float[]","[-2062888229, -2, -2] as int[]","[1.0507009873554805, 1.0507009873554805, 1.0507009873554805] as 
double[]","[1.050701, 1.050701, 1.050701] as float[]","[1, 0, 0] as int[]","[0.9891407665275838, 0.9999999999999742, 0.004239423130809827] as double[]","[0.98914075, 1.0, 0.0042394227] as float[]","[1, -1, -1] as int[]","[0.1226918386004856, 9.805489753489383E-13, 0.07858138767615172] as double[]","[0.12269211, 0.0, 0.078581385] as float[]","[0, 0, 0] as int[]","[0.7226245060456667, 0.9411395236107959, 0.004221551478848414] as double[]","[0.72262454, 0.9411396, 0.004221551] as float[]","[1, -1, -1] as int[]","[0.4370057619908791, 0.06596632547000601, 0.07792071781374522] as double[]","[0.43700573, 0.06596632, 0.07792072] as float[]","[0, 0, 0] as int[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation functions work across types. [17]", + "result":"PASS", + "duration":"0.002 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a function based on the provided expression.","code":["var func = Function.of(funExpression)"]}, + + {"kind":"and","text":"We use a large prime number to size our tensors in order to stress workload divisibility.","code":["var PRIME_SIZE_1 = 3","var PRIME_SIZE_2 = 5"]}, + + {"kind":"and","text":"We create 2 tensors storing the same values, one sliced and the other a normal tensor.","code":["var t1 = Tensor.of(type).withShape(PRIME_SIZE_1).andSeed(\"Seitan\")","var t2 = Tensor.of(type).withShape(PRIME_SIZE_2).all(0)[1..3]","t2[0..t2.size-1] = t1"]}, + + {"kind":"expect","text":"The types of both tensors should match what was provided during instantiation.","code":["t1.dataType == DataType.of(type)","t1.itemType == type","t2.dataType == DataType.of(type)","t2.itemType == type"]}, + + {"kind":"when","text":"We apply the function to both tensors...","code":["var result1 = ( !derive ? func(t1) : func.derive([t1], 0) )","var result2 = ( !derive ? 
func(t2) : func.derive([t2], 0) )"]}, + + {"kind":"then","text":"First we ensure that both tensors have the correct value/element type.","code":["result1.itemType == type","result2.itemType == type"]}, + + {"kind":"and","text":"The underlying data object should match the data array type as is defined by the data type!","code":["result1.mut.data.get().class == result1.dataType.dataArrayType()","result2.mut.data.get().class == result2.dataType.dataArrayType()"]}, + + {"kind":"and","text":"The data of the first non slice tensor as well as its slice should be as expected.","code":["result1.items == expected","result2.items == expected"]}, + + {"kind":"where","text":"","code":{"type":["Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer"],"funExpression":["'silu(i0)'","'silu(i0)'","'silu(i0)'","'silu(i0)'","'silu(i0)'","'silu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'"],"derive":["false","false","false","true","true","true","false","false","false","true","true","true","false","false","false","true","true","true","false","false","false","true","true","true","false","false","false","true","true","true"],"expected":["[1.0985150624263118, 2.331551300795844, 0.08745752408303246] as double[]","[1.098515, 2.3315513, 0.08745752] as float[]","[2124371342, 0, 0] as int[]","[1.0198659569612678, 1.0992238228008295, 0.5805713104936336] as double[]","[1.019866, 1.0992239, 0.5805713] as float[]","[1, 0, 0] as int[]","[1.2553019101258691, 2.48514859714065, 0.09199892841280806] as double[]","[1.255302, 2.4851487, 0.09199893] as float[]","[2124371342, 0, 0] as int[]","[1.09968352899801, 1.043758795430269, 0.6360091016582581] as double[]","[1.0996835, 1.0437589, 0.6360091] as float[]","[1, 0, 0] as int[]","[1.4457526798842053, 2.6470118580557593, 0.17005220305511268] as double[]","[1.4457527, 2.647012, 0.1700522] as float[]","[-2062888229, -2, -2] as int[]","[1.0507009873554805, 1.0507009873554805, 1.0507009873554805] as double[]","[1.050701, 1.050701, 1.050701] as float[]","[1, 0, 0] as int[]","[0.9891407665275838, 0.9999999999999742, 0.004239423130809827] as double[]","[0.98914075, 1.0, 0.0042394227] as float[]","[1, -1, -1] as int[]","[0.1226918386004856, 9.805489753489383E-13, 0.07858138767615172] as double[]","[0.12269211, 0.0, 0.078581385] as float[]","[0, 0, 0] as int[]","[0.7226245060456667, 0.9411395236107959, 0.004221551478848414] as double[]","[0.72262454, 0.9411396, 0.004221551] as float[]","[1, -1, -1] as int[]","[0.4370057619908791, 0.06596632547000601, 0.07792071781374522] as double[]","[0.43700573, 0.06596632, 0.07792072] as float[]","[0, 0, 0] as int[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation functions work across types. 
[18]", + "result":"PASS", + "duration":"0.002 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a function based on the provided expression.","code":["var func = Function.of(funExpression)"]}, + + {"kind":"and","text":"We use a large prime number to size our tensors in order to stress workload divisibility.","code":["var PRIME_SIZE_1 = 3","var PRIME_SIZE_2 = 5"]}, + + {"kind":"and","text":"We create 2 tensors storing the same values, one sliced and the other a normal tensor.","code":["var t1 = Tensor.of(type).withShape(PRIME_SIZE_1).andSeed(\"Seitan\")","var t2 = Tensor.of(type).withShape(PRIME_SIZE_2).all(0)[1..3]","t2[0..t2.size-1] = t1"]}, + + {"kind":"expect","text":"The types of both tensors should match what was provided during instantiation.","code":["t1.dataType == DataType.of(type)","t1.itemType == type","t2.dataType == DataType.of(type)","t2.itemType == type"]}, + + {"kind":"when","text":"We apply the function to both tensors...","code":["var result1 = ( !derive ? func(t1) : func.derive([t1], 0) )","var result2 = ( !derive ? func(t2) : func.derive([t2], 0) )"]}, + + {"kind":"then","text":"First we ensure that both tensors have the correct value/element type.","code":["result1.itemType == type","result2.itemType == type"]}, + + {"kind":"and","text":"The underlying data object should match the data array type as is defined by the data type!","code":["result1.mut.data.get().class == result1.dataType.dataArrayType()","result2.mut.data.get().class == result2.dataType.dataArrayType()"]}, + + {"kind":"and","text":"The data of the first non slice tensor as well as its slice should be as expected.","code":["result1.items == expected","result2.items == expected"]}, + + {"kind":"where","text":"","code":{"type":["Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer"],"funExpression":["'silu(i0)'","'silu(i0)'","'silu(i0)'","'silu(i0)'","'silu(i0)'","'silu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'"],"derive":["false","false","false","true","true","true","false","false","false","true","true","true","false","false","false","true","true","true","false","false","false","true","true","true","false","false","false","true","true","true"],"expected":["[1.0985150624263118, 2.331551300795844, 0.08745752408303246] as double[]","[1.098515, 2.3315513, 0.08745752] as float[]","[2124371342, 0, 0] as int[]","[1.0198659569612678, 1.0992238228008295, 0.5805713104936336] as double[]","[1.019866, 1.0992239, 0.5805713] as float[]","[1, 0, 0] as int[]","[1.2553019101258691, 2.48514859714065, 0.09199892841280806] as double[]","[1.255302, 2.4851487, 0.09199893] as float[]","[2124371342, 0, 0] as int[]","[1.09968352899801, 1.043758795430269, 0.6360091016582581] as double[]","[1.0996835, 1.0437589, 0.6360091] as float[]","[1, 0, 0] as int[]","[1.4457526798842053, 2.6470118580557593, 0.17005220305511268] as double[]","[1.4457527, 2.647012, 0.1700522] as float[]","[-2062888229, -2, -2] as int[]","[1.0507009873554805, 1.0507009873554805, 1.0507009873554805] as 
double[]","[1.050701, 1.050701, 1.050701] as float[]","[1, 0, 0] as int[]","[0.9891407665275838, 0.9999999999999742, 0.004239423130809827] as double[]","[0.98914075, 1.0, 0.0042394227] as float[]","[1, -1, -1] as int[]","[0.1226918386004856, 9.805489753489383E-13, 0.07858138767615172] as double[]","[0.12269211, 0.0, 0.078581385] as float[]","[0, 0, 0] as int[]","[0.7226245060456667, 0.9411395236107959, 0.004221551478848414] as double[]","[0.72262454, 0.9411396, 0.004221551] as float[]","[1, -1, -1] as int[]","[0.4370057619908791, 0.06596632547000601, 0.07792071781374522] as double[]","[0.43700573, 0.06596632, 0.07792072] as float[]","[0, 0, 0] as int[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation functions work across types. [19]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a function based on the provided expression.","code":["var func = Function.of(funExpression)"]}, + + {"kind":"and","text":"We use a large prime number to size our tensors in order to stress workload divisibility.","code":["var PRIME_SIZE_1 = 3","var PRIME_SIZE_2 = 5"]}, + + {"kind":"and","text":"We create 2 tensors storing the same values, one sliced and the other a normal tensor.","code":["var t1 = Tensor.of(type).withShape(PRIME_SIZE_1).andSeed(\"Seitan\")","var t2 = Tensor.of(type).withShape(PRIME_SIZE_2).all(0)[1..3]","t2[0..t2.size-1] = t1"]}, + + {"kind":"expect","text":"The types of both tensors should match what was provided during instantiation.","code":["t1.dataType == DataType.of(type)","t1.itemType == type","t2.dataType == DataType.of(type)","t2.itemType == type"]}, + + {"kind":"when","text":"We apply the function to both tensors...","code":["var result1 = ( !derive ? func(t1) : func.derive([t1], 0) )","var result2 = ( !derive ? 
func(t2) : func.derive([t2], 0) )"]}, + + {"kind":"then","text":"First we ensure that both tensors have the correct value/element type.","code":["result1.itemType == type","result2.itemType == type"]}, + + {"kind":"and","text":"The underlying data object should match the data array type as is defined by the data type!","code":["result1.mut.data.get().class == result1.dataType.dataArrayType()","result2.mut.data.get().class == result2.dataType.dataArrayType()"]}, + + {"kind":"and","text":"The data of the first non slice tensor as well as its slice should be as expected.","code":["result1.items == expected","result2.items == expected"]}, + + {"kind":"where","text":"","code":{"type":["Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer"],"funExpression":["'silu(i0)'","'silu(i0)'","'silu(i0)'","'silu(i0)'","'silu(i0)'","'silu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'"],"derive":["false","false","false","true","true","true","false","false","false","true","true","true","false","false","false","true","true","true","false","false","false","true","true","true","false","false","false","true","true","true"],"expected":["[1.0985150624263118, 2.331551300795844, 0.08745752408303246] as double[]","[1.098515, 2.3315513, 0.08745752] as float[]","[2124371342, 0, 0] as int[]","[1.0198659569612678, 1.0992238228008295, 0.5805713104936336] as double[]","[1.019866, 1.0992239, 0.5805713] as float[]","[1, 0, 0] as int[]","[1.2553019101258691, 2.48514859714065, 0.09199892841280806] as double[]","[1.255302, 2.4851487, 0.09199893] as float[]","[2124371342, 0, 0] as int[]","[1.09968352899801, 1.043758795430269, 0.6360091016582581] as double[]","[1.0996835, 1.0437589, 0.6360091] as float[]","[1, 0, 0] as int[]","[1.4457526798842053, 2.6470118580557593, 0.17005220305511268] as double[]","[1.4457527, 2.647012, 0.1700522] as float[]","[-2062888229, -2, -2] as int[]","[1.0507009873554805, 1.0507009873554805, 1.0507009873554805] as double[]","[1.050701, 1.050701, 1.050701] as float[]","[1, 0, 0] as int[]","[0.9891407665275838, 0.9999999999999742, 0.004239423130809827] as double[]","[0.98914075, 1.0, 0.0042394227] as float[]","[1, -1, -1] as int[]","[0.1226918386004856, 9.805489753489383E-13, 0.07858138767615172] as double[]","[0.12269211, 0.0, 0.078581385] as float[]","[0, 0, 0] as int[]","[0.7226245060456667, 0.9411395236107959, 0.004221551478848414] as double[]","[0.72262454, 0.9411396, 0.004221551] as float[]","[1, -1, -1] as int[]","[0.4370057619908791, 0.06596632547000601, 0.07792071781374522] as double[]","[0.43700573, 0.06596632, 0.07792072] as float[]","[0, 0, 0] as int[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation functions work across types. 
[20]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a function based on the provided expression.","code":["var func = Function.of(funExpression)"]}, + + {"kind":"and","text":"We use a large prime number to size our tensors in order to stress workload divisibility.","code":["var PRIME_SIZE_1 = 3","var PRIME_SIZE_2 = 5"]}, + + {"kind":"and","text":"We create 2 tensors storing the same values, one sliced and the other a normal tensor.","code":["var t1 = Tensor.of(type).withShape(PRIME_SIZE_1).andSeed(\"Seitan\")","var t2 = Tensor.of(type).withShape(PRIME_SIZE_2).all(0)[1..3]","t2[0..t2.size-1] = t1"]}, + + {"kind":"expect","text":"The types of both tensors should match what was provided during instantiation.","code":["t1.dataType == DataType.of(type)","t1.itemType == type","t2.dataType == DataType.of(type)","t2.itemType == type"]}, + + {"kind":"when","text":"We apply the function to both tensors...","code":["var result1 = ( !derive ? func(t1) : func.derive([t1], 0) )","var result2 = ( !derive ? func(t2) : func.derive([t2], 0) )"]}, + + {"kind":"then","text":"First we ensure that both tensors have the correct value/element type.","code":["result1.itemType == type","result2.itemType == type"]}, + + {"kind":"and","text":"The underlying data object should match the data array type as is defined by the data type!","code":["result1.mut.data.get().class == result1.dataType.dataArrayType()","result2.mut.data.get().class == result2.dataType.dataArrayType()"]}, + + {"kind":"and","text":"The data of the first non slice tensor as well as its slice should be as expected.","code":["result1.items == expected","result2.items == expected"]}, + + {"kind":"where","text":"","code":{"type":["Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer"],"funExpression":["'silu(i0)'","'silu(i0)'","'silu(i0)'","'silu(i0)'","'silu(i0)'","'silu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'"],"derive":["false","false","false","true","true","true","false","false","false","true","true","true","false","false","false","true","true","true","false","false","false","true","true","true","false","false","false","true","true","true"],"expected":["[1.0985150624263118, 2.331551300795844, 0.08745752408303246] as double[]","[1.098515, 2.3315513, 0.08745752] as float[]","[2124371342, 0, 0] as int[]","[1.0198659569612678, 1.0992238228008295, 0.5805713104936336] as double[]","[1.019866, 1.0992239, 0.5805713] as float[]","[1, 0, 0] as int[]","[1.2553019101258691, 2.48514859714065, 0.09199892841280806] as double[]","[1.255302, 2.4851487, 0.09199893] as float[]","[2124371342, 0, 0] as int[]","[1.09968352899801, 1.043758795430269, 0.6360091016582581] as double[]","[1.0996835, 1.0437589, 0.6360091] as float[]","[1, 0, 0] as int[]","[1.4457526798842053, 2.6470118580557593, 0.17005220305511268] as double[]","[1.4457527, 2.647012, 0.1700522] as float[]","[-2062888229, -2, -2] as int[]","[1.0507009873554805, 1.0507009873554805, 1.0507009873554805] as 
double[]","[1.050701, 1.050701, 1.050701] as float[]","[1, 0, 0] as int[]","[0.9891407665275838, 0.9999999999999742, 0.004239423130809827] as double[]","[0.98914075, 1.0, 0.0042394227] as float[]","[1, -1, -1] as int[]","[0.1226918386004856, 9.805489753489383E-13, 0.07858138767615172] as double[]","[0.12269211, 0.0, 0.078581385] as float[]","[0, 0, 0] as int[]","[0.7226245060456667, 0.9411395236107959, 0.004221551478848414] as double[]","[0.72262454, 0.9411396, 0.004221551] as float[]","[1, -1, -1] as int[]","[0.4370057619908791, 0.06596632547000601, 0.07792071781374522] as double[]","[0.43700573, 0.06596632, 0.07792072] as float[]","[0, 0, 0] as int[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation functions work across types. [21]", + "result":"PASS", + "duration":"0.002 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a function based on the provided expression.","code":["var func = Function.of(funExpression)"]}, + + {"kind":"and","text":"We use a large prime number to size our tensors in order to stress workload divisibility.","code":["var PRIME_SIZE_1 = 3","var PRIME_SIZE_2 = 5"]}, + + {"kind":"and","text":"We create 2 tensors storing the same values, one sliced and the other a normal tensor.","code":["var t1 = Tensor.of(type).withShape(PRIME_SIZE_1).andSeed(\"Seitan\")","var t2 = Tensor.of(type).withShape(PRIME_SIZE_2).all(0)[1..3]","t2[0..t2.size-1] = t1"]}, + + {"kind":"expect","text":"The types of both tensors should match what was provided during instantiation.","code":["t1.dataType == DataType.of(type)","t1.itemType == type","t2.dataType == DataType.of(type)","t2.itemType == type"]}, + + {"kind":"when","text":"We apply the function to both tensors...","code":["var result1 = ( !derive ? func(t1) : func.derive([t1], 0) )","var result2 = ( !derive ? 
func(t2) : func.derive([t2], 0) )"]}, + + {"kind":"then","text":"First we ensure that both tensors have the correct value/element type.","code":["result1.itemType == type","result2.itemType == type"]}, + + {"kind":"and","text":"The underlying data object should match the data array type as is defined by the data type!","code":["result1.mut.data.get().class == result1.dataType.dataArrayType()","result2.mut.data.get().class == result2.dataType.dataArrayType()"]}, + + {"kind":"and","text":"The data of the first non slice tensor as well as its slice should be as expected.","code":["result1.items == expected","result2.items == expected"]}, + + {"kind":"where","text":"","code":{"type":["Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer"],"funExpression":["'silu(i0)'","'silu(i0)'","'silu(i0)'","'silu(i0)'","'silu(i0)'","'silu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'"],"derive":["false","false","false","true","true","true","false","false","false","true","true","true","false","false","false","true","true","true","false","false","false","true","true","true","false","false","false","true","true","true"],"expected":["[1.0985150624263118, 2.331551300795844, 0.08745752408303246] as double[]","[1.098515, 2.3315513, 0.08745752] as float[]","[2124371342, 0, 0] as int[]","[1.0198659569612678, 1.0992238228008295, 0.5805713104936336] as double[]","[1.019866, 1.0992239, 0.5805713] as float[]","[1, 0, 0] as int[]","[1.2553019101258691, 2.48514859714065, 0.09199892841280806] as double[]","[1.255302, 2.4851487, 0.09199893] as float[]","[2124371342, 0, 0] as int[]","[1.09968352899801, 1.043758795430269, 0.6360091016582581] as double[]","[1.0996835, 1.0437589, 0.6360091] as float[]","[1, 0, 0] as int[]","[1.4457526798842053, 2.6470118580557593, 0.17005220305511268] as double[]","[1.4457527, 2.647012, 0.1700522] as float[]","[-2062888229, -2, -2] as int[]","[1.0507009873554805, 1.0507009873554805, 1.0507009873554805] as double[]","[1.050701, 1.050701, 1.050701] as float[]","[1, 0, 0] as int[]","[0.9891407665275838, 0.9999999999999742, 0.004239423130809827] as double[]","[0.98914075, 1.0, 0.0042394227] as float[]","[1, -1, -1] as int[]","[0.1226918386004856, 9.805489753489383E-13, 0.07858138767615172] as double[]","[0.12269211, 0.0, 0.078581385] as float[]","[0, 0, 0] as int[]","[0.7226245060456667, 0.9411395236107959, 0.004221551478848414] as double[]","[0.72262454, 0.9411396, 0.004221551] as float[]","[1, -1, -1] as int[]","[0.4370057619908791, 0.06596632547000601, 0.07792071781374522] as double[]","[0.43700573, 0.06596632, 0.07792072] as float[]","[0, 0, 0] as int[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation functions work across types. 
[22]", + "result":"PASS", + "duration":"0.002 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a function based on the provided expression.","code":["var func = Function.of(funExpression)"]}, + + {"kind":"and","text":"We use a large prime number to size our tensors in order to stress workload divisibility.","code":["var PRIME_SIZE_1 = 3","var PRIME_SIZE_2 = 5"]}, + + {"kind":"and","text":"We create 2 tensors storing the same values, one sliced and the other a normal tensor.","code":["var t1 = Tensor.of(type).withShape(PRIME_SIZE_1).andSeed(\"Seitan\")","var t2 = Tensor.of(type).withShape(PRIME_SIZE_2).all(0)[1..3]","t2[0..t2.size-1] = t1"]}, + + {"kind":"expect","text":"The types of both tensors should match what was provided during instantiation.","code":["t1.dataType == DataType.of(type)","t1.itemType == type","t2.dataType == DataType.of(type)","t2.itemType == type"]}, + + {"kind":"when","text":"We apply the function to both tensors...","code":["var result1 = ( !derive ? func(t1) : func.derive([t1], 0) )","var result2 = ( !derive ? func(t2) : func.derive([t2], 0) )"]}, + + {"kind":"then","text":"First we ensure that both tensors have the correct value/element type.","code":["result1.itemType == type","result2.itemType == type"]}, + + {"kind":"and","text":"The underlying data object should match the data array type as is defined by the data type!","code":["result1.mut.data.get().class == result1.dataType.dataArrayType()","result2.mut.data.get().class == result2.dataType.dataArrayType()"]}, + + {"kind":"and","text":"The data of the first non slice tensor as well as its slice should be as expected.","code":["result1.items == expected","result2.items == expected"]}, + + {"kind":"where","text":"","code":{"type":["Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer"],"funExpression":["'silu(i0)'","'silu(i0)'","'silu(i0)'","'silu(i0)'","'silu(i0)'","'silu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'"],"derive":["false","false","false","true","true","true","false","false","false","true","true","true","false","false","false","true","true","true","false","false","false","true","true","true","false","false","false","true","true","true"],"expected":["[1.0985150624263118, 2.331551300795844, 0.08745752408303246] as double[]","[1.098515, 2.3315513, 0.08745752] as float[]","[2124371342, 0, 0] as int[]","[1.0198659569612678, 1.0992238228008295, 0.5805713104936336] as double[]","[1.019866, 1.0992239, 0.5805713] as float[]","[1, 0, 0] as int[]","[1.2553019101258691, 2.48514859714065, 0.09199892841280806] as double[]","[1.255302, 2.4851487, 0.09199893] as float[]","[2124371342, 0, 0] as int[]","[1.09968352899801, 1.043758795430269, 0.6360091016582581] as double[]","[1.0996835, 1.0437589, 0.6360091] as float[]","[1, 0, 0] as int[]","[1.4457526798842053, 2.6470118580557593, 0.17005220305511268] as double[]","[1.4457527, 2.647012, 0.1700522] as float[]","[-2062888229, -2, -2] as int[]","[1.0507009873554805, 1.0507009873554805, 1.0507009873554805] as 
double[]","[1.050701, 1.050701, 1.050701] as float[]","[1, 0, 0] as int[]","[0.9891407665275838, 0.9999999999999742, 0.004239423130809827] as double[]","[0.98914075, 1.0, 0.0042394227] as float[]","[1, -1, -1] as int[]","[0.1226918386004856, 9.805489753489383E-13, 0.07858138767615172] as double[]","[0.12269211, 0.0, 0.078581385] as float[]","[0, 0, 0] as int[]","[0.7226245060456667, 0.9411395236107959, 0.004221551478848414] as double[]","[0.72262454, 0.9411396, 0.004221551] as float[]","[1, -1, -1] as int[]","[0.4370057619908791, 0.06596632547000601, 0.07792071781374522] as double[]","[0.43700573, 0.06596632, 0.07792072] as float[]","[0, 0, 0] as int[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation functions work across types. [23]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a function based on the provided expression.","code":["var func = Function.of(funExpression)"]}, + + {"kind":"and","text":"We use a large prime number to size our tensors in order to stress workload divisibility.","code":["var PRIME_SIZE_1 = 3","var PRIME_SIZE_2 = 5"]}, + + {"kind":"and","text":"We create 2 tensors storing the same values, one sliced and the other a normal tensor.","code":["var t1 = Tensor.of(type).withShape(PRIME_SIZE_1).andSeed(\"Seitan\")","var t2 = Tensor.of(type).withShape(PRIME_SIZE_2).all(0)[1..3]","t2[0..t2.size-1] = t1"]}, + + {"kind":"expect","text":"The types of both tensors should match what was provided during instantiation.","code":["t1.dataType == DataType.of(type)","t1.itemType == type","t2.dataType == DataType.of(type)","t2.itemType == type"]}, + + {"kind":"when","text":"We apply the function to both tensors...","code":["var result1 = ( !derive ? func(t1) : func.derive([t1], 0) )","var result2 = ( !derive ? 
func(t2) : func.derive([t2], 0) )"]}, + + {"kind":"then","text":"First we ensure that both tensors have the correct value/element type.","code":["result1.itemType == type","result2.itemType == type"]}, + + {"kind":"and","text":"The underlying data object should match the data array type as is defined by the data type!","code":["result1.mut.data.get().class == result1.dataType.dataArrayType()","result2.mut.data.get().class == result2.dataType.dataArrayType()"]}, + + {"kind":"and","text":"The data of the first non slice tensor as well as its slice should be as expected.","code":["result1.items == expected","result2.items == expected"]}, + + {"kind":"where","text":"","code":{"type":["Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer"],"funExpression":["'silu(i0)'","'silu(i0)'","'silu(i0)'","'silu(i0)'","'silu(i0)'","'silu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'"],"derive":["false","false","false","true","true","true","false","false","false","true","true","true","false","false","false","true","true","true","false","false","false","true","true","true","false","false","false","true","true","true"],"expected":["[1.0985150624263118, 2.331551300795844, 0.08745752408303246] as double[]","[1.098515, 2.3315513, 0.08745752] as float[]","[2124371342, 0, 0] as int[]","[1.0198659569612678, 1.0992238228008295, 0.5805713104936336] as double[]","[1.019866, 1.0992239, 0.5805713] as float[]","[1, 0, 0] as int[]","[1.2553019101258691, 2.48514859714065, 0.09199892841280806] as double[]","[1.255302, 2.4851487, 0.09199893] as float[]","[2124371342, 0, 0] as int[]","[1.09968352899801, 1.043758795430269, 0.6360091016582581] as double[]","[1.0996835, 1.0437589, 0.6360091] as float[]","[1, 0, 0] as int[]","[1.4457526798842053, 2.6470118580557593, 0.17005220305511268] as double[]","[1.4457527, 2.647012, 0.1700522] as float[]","[-2062888229, -2, -2] as int[]","[1.0507009873554805, 1.0507009873554805, 1.0507009873554805] as double[]","[1.050701, 1.050701, 1.050701] as float[]","[1, 0, 0] as int[]","[0.9891407665275838, 0.9999999999999742, 0.004239423130809827] as double[]","[0.98914075, 1.0, 0.0042394227] as float[]","[1, -1, -1] as int[]","[0.1226918386004856, 9.805489753489383E-13, 0.07858138767615172] as double[]","[0.12269211, 0.0, 0.078581385] as float[]","[0, 0, 0] as int[]","[0.7226245060456667, 0.9411395236107959, 0.004221551478848414] as double[]","[0.72262454, 0.9411396, 0.004221551] as float[]","[1, -1, -1] as int[]","[0.4370057619908791, 0.06596632547000601, 0.07792071781374522] as double[]","[0.43700573, 0.06596632, 0.07792072] as float[]","[0, 0, 0] as int[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation functions work across types. 
[24]", + "result":"PASS", + "duration":"0.002 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a function based on the provided expression.","code":["var func = Function.of(funExpression)"]}, + + {"kind":"and","text":"We use a large prime number to size our tensors in order to stress workload divisibility.","code":["var PRIME_SIZE_1 = 3","var PRIME_SIZE_2 = 5"]}, + + {"kind":"and","text":"We create 2 tensors storing the same values, one sliced and the other a normal tensor.","code":["var t1 = Tensor.of(type).withShape(PRIME_SIZE_1).andSeed(\"Seitan\")","var t2 = Tensor.of(type).withShape(PRIME_SIZE_2).all(0)[1..3]","t2[0..t2.size-1] = t1"]}, + + {"kind":"expect","text":"The types of both tensors should match what was provided during instantiation.","code":["t1.dataType == DataType.of(type)","t1.itemType == type","t2.dataType == DataType.of(type)","t2.itemType == type"]}, + + {"kind":"when","text":"We apply the function to both tensors...","code":["var result1 = ( !derive ? func(t1) : func.derive([t1], 0) )","var result2 = ( !derive ? func(t2) : func.derive([t2], 0) )"]}, + + {"kind":"then","text":"First we ensure that both tensors have the correct value/element type.","code":["result1.itemType == type","result2.itemType == type"]}, + + {"kind":"and","text":"The underlying data object should match the data array type as is defined by the data type!","code":["result1.mut.data.get().class == result1.dataType.dataArrayType()","result2.mut.data.get().class == result2.dataType.dataArrayType()"]}, + + {"kind":"and","text":"The data of the first non slice tensor as well as its slice should be as expected.","code":["result1.items == expected","result2.items == expected"]}, + + {"kind":"where","text":"","code":{"type":["Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer"],"funExpression":["'silu(i0)'","'silu(i0)'","'silu(i0)'","'silu(i0)'","'silu(i0)'","'silu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'"],"derive":["false","false","false","true","true","true","false","false","false","true","true","true","false","false","false","true","true","true","false","false","false","true","true","true","false","false","false","true","true","true"],"expected":["[1.0985150624263118, 2.331551300795844, 0.08745752408303246] as double[]","[1.098515, 2.3315513, 0.08745752] as float[]","[2124371342, 0, 0] as int[]","[1.0198659569612678, 1.0992238228008295, 0.5805713104936336] as double[]","[1.019866, 1.0992239, 0.5805713] as float[]","[1, 0, 0] as int[]","[1.2553019101258691, 2.48514859714065, 0.09199892841280806] as double[]","[1.255302, 2.4851487, 0.09199893] as float[]","[2124371342, 0, 0] as int[]","[1.09968352899801, 1.043758795430269, 0.6360091016582581] as double[]","[1.0996835, 1.0437589, 0.6360091] as float[]","[1, 0, 0] as int[]","[1.4457526798842053, 2.6470118580557593, 0.17005220305511268] as double[]","[1.4457527, 2.647012, 0.1700522] as float[]","[-2062888229, -2, -2] as int[]","[1.0507009873554805, 1.0507009873554805, 1.0507009873554805] as 
double[]","[1.050701, 1.050701, 1.050701] as float[]","[1, 0, 0] as int[]","[0.9891407665275838, 0.9999999999999742, 0.004239423130809827] as double[]","[0.98914075, 1.0, 0.0042394227] as float[]","[1, -1, -1] as int[]","[0.1226918386004856, 9.805489753489383E-13, 0.07858138767615172] as double[]","[0.12269211, 0.0, 0.078581385] as float[]","[0, 0, 0] as int[]","[0.7226245060456667, 0.9411395236107959, 0.004221551478848414] as double[]","[0.72262454, 0.9411396, 0.004221551] as float[]","[1, -1, -1] as int[]","[0.4370057619908791, 0.06596632547000601, 0.07792071781374522] as double[]","[0.43700573, 0.06596632, 0.07792072] as float[]","[0, 0, 0] as int[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation functions work across types. [25]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a function based on the provided expression.","code":["var func = Function.of(funExpression)"]}, + + {"kind":"and","text":"We use a large prime number to size our tensors in order to stress workload divisibility.","code":["var PRIME_SIZE_1 = 3","var PRIME_SIZE_2 = 5"]}, + + {"kind":"and","text":"We create 2 tensors storing the same values, one sliced and the other a normal tensor.","code":["var t1 = Tensor.of(type).withShape(PRIME_SIZE_1).andSeed(\"Seitan\")","var t2 = Tensor.of(type).withShape(PRIME_SIZE_2).all(0)[1..3]","t2[0..t2.size-1] = t1"]}, + + {"kind":"expect","text":"The types of both tensors should match what was provided during instantiation.","code":["t1.dataType == DataType.of(type)","t1.itemType == type","t2.dataType == DataType.of(type)","t2.itemType == type"]}, + + {"kind":"when","text":"We apply the function to both tensors...","code":["var result1 = ( !derive ? func(t1) : func.derive([t1], 0) )","var result2 = ( !derive ? 
func(t2) : func.derive([t2], 0) )"]}, + + {"kind":"then","text":"First we ensure that both tensors have the correct value/element type.","code":["result1.itemType == type","result2.itemType == type"]}, + + {"kind":"and","text":"The underlying data object should match the data array type as is defined by the data type!","code":["result1.mut.data.get().class == result1.dataType.dataArrayType()","result2.mut.data.get().class == result2.dataType.dataArrayType()"]}, + + {"kind":"and","text":"The data of the first non slice tensor as well as its slice should be as expected.","code":["result1.items == expected","result2.items == expected"]}, + + {"kind":"where","text":"","code":{"type":["Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer"],"funExpression":["'silu(i0)'","'silu(i0)'","'silu(i0)'","'silu(i0)'","'silu(i0)'","'silu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'"],"derive":["false","false","false","true","true","true","false","false","false","true","true","true","false","false","false","true","true","true","false","false","false","true","true","true","false","false","false","true","true","true"],"expected":["[1.0985150624263118, 2.331551300795844, 0.08745752408303246] as double[]","[1.098515, 2.3315513, 0.08745752] as float[]","[2124371342, 0, 0] as int[]","[1.0198659569612678, 1.0992238228008295, 0.5805713104936336] as double[]","[1.019866, 1.0992239, 0.5805713] as float[]","[1, 0, 0] as int[]","[1.2553019101258691, 2.48514859714065, 0.09199892841280806] as double[]","[1.255302, 2.4851487, 0.09199893] as float[]","[2124371342, 0, 0] as int[]","[1.09968352899801, 1.043758795430269, 0.6360091016582581] as double[]","[1.0996835, 1.0437589, 0.6360091] as float[]","[1, 0, 0] as int[]","[1.4457526798842053, 2.6470118580557593, 0.17005220305511268] as double[]","[1.4457527, 2.647012, 0.1700522] as float[]","[-2062888229, -2, -2] as int[]","[1.0507009873554805, 1.0507009873554805, 1.0507009873554805] as double[]","[1.050701, 1.050701, 1.050701] as float[]","[1, 0, 0] as int[]","[0.9891407665275838, 0.9999999999999742, 0.004239423130809827] as double[]","[0.98914075, 1.0, 0.0042394227] as float[]","[1, -1, -1] as int[]","[0.1226918386004856, 9.805489753489383E-13, 0.07858138767615172] as double[]","[0.12269211, 0.0, 0.078581385] as float[]","[0, 0, 0] as int[]","[0.7226245060456667, 0.9411395236107959, 0.004221551478848414] as double[]","[0.72262454, 0.9411396, 0.004221551] as float[]","[1, -1, -1] as int[]","[0.4370057619908791, 0.06596632547000601, 0.07792071781374522] as double[]","[0.43700573, 0.06596632, 0.07792072] as float[]","[0, 0, 0] as int[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation functions work across types. 
[26]", + "result":"PASS", + "duration":"0.002 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a function based on the provided expression.","code":["var func = Function.of(funExpression)"]}, + + {"kind":"and","text":"We use a large prime number to size our tensors in order to stress workload divisibility.","code":["var PRIME_SIZE_1 = 3","var PRIME_SIZE_2 = 5"]}, + + {"kind":"and","text":"We create 2 tensors storing the same values, one sliced and the other a normal tensor.","code":["var t1 = Tensor.of(type).withShape(PRIME_SIZE_1).andSeed(\"Seitan\")","var t2 = Tensor.of(type).withShape(PRIME_SIZE_2).all(0)[1..3]","t2[0..t2.size-1] = t1"]}, + + {"kind":"expect","text":"The types of both tensors should match what was provided during instantiation.","code":["t1.dataType == DataType.of(type)","t1.itemType == type","t2.dataType == DataType.of(type)","t2.itemType == type"]}, + + {"kind":"when","text":"We apply the function to both tensors...","code":["var result1 = ( !derive ? func(t1) : func.derive([t1], 0) )","var result2 = ( !derive ? func(t2) : func.derive([t2], 0) )"]}, + + {"kind":"then","text":"First we ensure that both tensors have the correct value/element type.","code":["result1.itemType == type","result2.itemType == type"]}, + + {"kind":"and","text":"The underlying data object should match the data array type as is defined by the data type!","code":["result1.mut.data.get().class == result1.dataType.dataArrayType()","result2.mut.data.get().class == result2.dataType.dataArrayType()"]}, + + {"kind":"and","text":"The data of the first non slice tensor as well as its slice should be as expected.","code":["result1.items == expected","result2.items == expected"]}, + + {"kind":"where","text":"","code":{"type":["Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer"],"funExpression":["'silu(i0)'","'silu(i0)'","'silu(i0)'","'silu(i0)'","'silu(i0)'","'silu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'"],"derive":["false","false","false","true","true","true","false","false","false","true","true","true","false","false","false","true","true","true","false","false","false","true","true","true","false","false","false","true","true","true"],"expected":["[1.0985150624263118, 2.331551300795844, 0.08745752408303246] as double[]","[1.098515, 2.3315513, 0.08745752] as float[]","[2124371342, 0, 0] as int[]","[1.0198659569612678, 1.0992238228008295, 0.5805713104936336] as double[]","[1.019866, 1.0992239, 0.5805713] as float[]","[1, 0, 0] as int[]","[1.2553019101258691, 2.48514859714065, 0.09199892841280806] as double[]","[1.255302, 2.4851487, 0.09199893] as float[]","[2124371342, 0, 0] as int[]","[1.09968352899801, 1.043758795430269, 0.6360091016582581] as double[]","[1.0996835, 1.0437589, 0.6360091] as float[]","[1, 0, 0] as int[]","[1.4457526798842053, 2.6470118580557593, 0.17005220305511268] as double[]","[1.4457527, 2.647012, 0.1700522] as float[]","[-2062888229, -2, -2] as int[]","[1.0507009873554805, 1.0507009873554805, 1.0507009873554805] as 
double[]","[1.050701, 1.050701, 1.050701] as float[]","[1, 0, 0] as int[]","[0.9891407665275838, 0.9999999999999742, 0.004239423130809827] as double[]","[0.98914075, 1.0, 0.0042394227] as float[]","[1, -1, -1] as int[]","[0.1226918386004856, 9.805489753489383E-13, 0.07858138767615172] as double[]","[0.12269211, 0.0, 0.078581385] as float[]","[0, 0, 0] as int[]","[0.7226245060456667, 0.9411395236107959, 0.004221551478848414] as double[]","[0.72262454, 0.9411396, 0.004221551] as float[]","[1, -1, -1] as int[]","[0.4370057619908791, 0.06596632547000601, 0.07792071781374522] as double[]","[0.43700573, 0.06596632, 0.07792072] as float[]","[0, 0, 0] as int[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation functions work across types. [27]", + "result":"PASS", + "duration":"0.003 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a function based on the provided expression.","code":["var func = Function.of(funExpression)"]}, + + {"kind":"and","text":"We use a large prime number to size our tensors in order to stress workload divisibility.","code":["var PRIME_SIZE_1 = 3","var PRIME_SIZE_2 = 5"]}, + + {"kind":"and","text":"We create 2 tensors storing the same values, one sliced and the other a normal tensor.","code":["var t1 = Tensor.of(type).withShape(PRIME_SIZE_1).andSeed(\"Seitan\")","var t2 = Tensor.of(type).withShape(PRIME_SIZE_2).all(0)[1..3]","t2[0..t2.size-1] = t1"]}, + + {"kind":"expect","text":"The types of both tensors should match what was provided during instantiation.","code":["t1.dataType == DataType.of(type)","t1.itemType == type","t2.dataType == DataType.of(type)","t2.itemType == type"]}, + + {"kind":"when","text":"We apply the function to both tensors...","code":["var result1 = ( !derive ? func(t1) : func.derive([t1], 0) )","var result2 = ( !derive ? 
func(t2) : func.derive([t2], 0) )"]}, + + {"kind":"then","text":"First we ensure that both tensors have the correct value/element type.","code":["result1.itemType == type","result2.itemType == type"]}, + + {"kind":"and","text":"The underlying data object should match the data array type as is defined by the data type!","code":["result1.mut.data.get().class == result1.dataType.dataArrayType()","result2.mut.data.get().class == result2.dataType.dataArrayType()"]}, + + {"kind":"and","text":"The data of the first non slice tensor as well as its slice should be as expected.","code":["result1.items == expected","result2.items == expected"]}, + + {"kind":"where","text":"","code":{"type":["Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer"],"funExpression":["'silu(i0)'","'silu(i0)'","'silu(i0)'","'silu(i0)'","'silu(i0)'","'silu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'"],"derive":["false","false","false","true","true","true","false","false","false","true","true","true","false","false","false","true","true","true","false","false","false","true","true","true","false","false","false","true","true","true"],"expected":["[1.0985150624263118, 2.331551300795844, 0.08745752408303246] as double[]","[1.098515, 2.3315513, 0.08745752] as float[]","[2124371342, 0, 0] as int[]","[1.0198659569612678, 1.0992238228008295, 0.5805713104936336] as double[]","[1.019866, 1.0992239, 0.5805713] as float[]","[1, 0, 0] as int[]","[1.2553019101258691, 2.48514859714065, 0.09199892841280806] as double[]","[1.255302, 2.4851487, 0.09199893] as float[]","[2124371342, 0, 0] as int[]","[1.09968352899801, 1.043758795430269, 0.6360091016582581] as double[]","[1.0996835, 1.0437589, 0.6360091] as float[]","[1, 0, 0] as int[]","[1.4457526798842053, 2.6470118580557593, 0.17005220305511268] as double[]","[1.4457527, 2.647012, 0.1700522] as float[]","[-2062888229, -2, -2] as int[]","[1.0507009873554805, 1.0507009873554805, 1.0507009873554805] as double[]","[1.050701, 1.050701, 1.050701] as float[]","[1, 0, 0] as int[]","[0.9891407665275838, 0.9999999999999742, 0.004239423130809827] as double[]","[0.98914075, 1.0, 0.0042394227] as float[]","[1, -1, -1] as int[]","[0.1226918386004856, 9.805489753489383E-13, 0.07858138767615172] as double[]","[0.12269211, 0.0, 0.078581385] as float[]","[0, 0, 0] as int[]","[0.7226245060456667, 0.9411395236107959, 0.004221551478848414] as double[]","[0.72262454, 0.9411396, 0.004221551] as float[]","[1, -1, -1] as int[]","[0.4370057619908791, 0.06596632547000601, 0.07792071781374522] as double[]","[0.43700573, 0.06596632, 0.07792072] as float[]","[0, 0, 0] as int[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation functions work across types. 
[28]", + "result":"PASS", + "duration":"0.002 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a function based on the provided expression.","code":["var func = Function.of(funExpression)"]}, + + {"kind":"and","text":"We use a large prime number to size our tensors in order to stress workload divisibility.","code":["var PRIME_SIZE_1 = 3","var PRIME_SIZE_2 = 5"]}, + + {"kind":"and","text":"We create 2 tensors storing the same values, one sliced and the other a normal tensor.","code":["var t1 = Tensor.of(type).withShape(PRIME_SIZE_1).andSeed(\"Seitan\")","var t2 = Tensor.of(type).withShape(PRIME_SIZE_2).all(0)[1..3]","t2[0..t2.size-1] = t1"]}, + + {"kind":"expect","text":"The types of both tensors should match what was provided during instantiation.","code":["t1.dataType == DataType.of(type)","t1.itemType == type","t2.dataType == DataType.of(type)","t2.itemType == type"]}, + + {"kind":"when","text":"We apply the function to both tensors...","code":["var result1 = ( !derive ? func(t1) : func.derive([t1], 0) )","var result2 = ( !derive ? func(t2) : func.derive([t2], 0) )"]}, + + {"kind":"then","text":"First we ensure that both tensors have the correct value/element type.","code":["result1.itemType == type","result2.itemType == type"]}, + + {"kind":"and","text":"The underlying data object should match the data array type as is defined by the data type!","code":["result1.mut.data.get().class == result1.dataType.dataArrayType()","result2.mut.data.get().class == result2.dataType.dataArrayType()"]}, + + {"kind":"and","text":"The data of the first non slice tensor as well as its slice should be as expected.","code":["result1.items == expected","result2.items == expected"]}, + + {"kind":"where","text":"","code":{"type":["Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer"],"funExpression":["'silu(i0)'","'silu(i0)'","'silu(i0)'","'silu(i0)'","'silu(i0)'","'silu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'gelu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'selu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gatu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'","'gasu(i0)'"],"derive":["false","false","false","true","true","true","false","false","false","true","true","true","false","false","false","true","true","true","false","false","false","true","true","true","false","false","false","true","true","true"],"expected":["[1.0985150624263118, 2.331551300795844, 0.08745752408303246] as double[]","[1.098515, 2.3315513, 0.08745752] as float[]","[2124371342, 0, 0] as int[]","[1.0198659569612678, 1.0992238228008295, 0.5805713104936336] as double[]","[1.019866, 1.0992239, 0.5805713] as float[]","[1, 0, 0] as int[]","[1.2553019101258691, 2.48514859714065, 0.09199892841280806] as double[]","[1.255302, 2.4851487, 0.09199893] as float[]","[2124371342, 0, 0] as int[]","[1.09968352899801, 1.043758795430269, 0.6360091016582581] as double[]","[1.0996835, 1.0437589, 0.6360091] as float[]","[1, 0, 0] as int[]","[1.4457526798842053, 2.6470118580557593, 0.17005220305511268] as double[]","[1.4457527, 2.647012, 0.1700522] as float[]","[-2062888229, -2, -2] as int[]","[1.0507009873554805, 1.0507009873554805, 1.0507009873554805] as 
double[]","[1.050701, 1.050701, 1.050701] as float[]","[1, 0, 0] as int[]","[0.9891407665275838, 0.9999999999999742, 0.004239423130809827] as double[]","[0.98914075, 1.0, 0.0042394227] as float[]","[1, -1, -1] as int[]","[0.1226918386004856, 9.805489753489383E-13, 0.07858138767615172] as double[]","[0.12269211, 0.0, 0.078581385] as float[]","[0, 0, 0] as int[]","[0.7226245060456667, 0.9411395236107959, 0.004221551478848414] as double[]","[0.72262454, 0.9411396, 0.004221551] as float[]","[1, -1, -1] as int[]","[0.4370057619908791, 0.06596632547000601, 0.07792071781374522] as double[]","[0.43700573, 0.06596632, 0.07792072] as float[]","[0, 0, 0] as int[]"]}} ], "problems":{"dataValues":[], "errors":[]} }, { - "id":"Activation functions work across types.", + "id":"Activation functions work across types. [29]", "result":"PASS", - "duration":"0.017 seconds", + "duration":"0.002 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, diff --git a/docs/spock/reports/it.Cross_Device_Sliced_Tensor_System_Test.json b/docs/spock/reports/it.Cross_Device_Sliced_Tensor_System_Test.json index f633b7f52..79d8532ec 100644 --- a/docs/spock/reports/it.Cross_Device_Sliced_Tensor_System_Test.json +++ b/docs/spock/reports/it.Cross_Device_Sliced_Tensor_System_Test.json @@ -4,19 +4,19 @@ "narrative":"", "subjects":[], "statistics":{ - "runs":"2", + "runs":"4", "successRate":"100.0%", "failures":"0", "errors":"0", "skipped":"0", - "duration":"0.070 seconds" + "duration":"0.146 seconds" }, "headers":[" \n

        \n This specification covers the behavior of tensors when being sliced\n on multiple different device types in conjunction with \n the autograd system.\n Autograd should work on slices as well. \n

        \n "],"tags":{},"see":[], "features":[ { - "id":"Slices can be created using the SliceBuilder.", + "id":"Slices can be created using the SliceBuilder. [0]", "result":"PASS", - "duration":"0.023 seconds", + "duration":"0.004 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -43,9 +43,107 @@ }, { - "id":"Cross device sliced tensor integration test runs without errors.", + "id":"Slices can be created using the SliceBuilder. [1]", "result":"PASS", - "duration":"0.040 seconds", + "duration":"0.039 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"For this test we tell the CL-Backend to auto-convert to floats.","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = true }"]}, + + {"kind":"and","text":"","code":["if ( device == null ) return","Neureka.get().settings().autograd().isApplyingGradientWhenTensorIsUsed = false","Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(false)","if ( device instanceof OpenCLDevice && !Neureka.get().canAccessOpenCLDevice() ) return"]}, + + {"kind":"and","text":"A tensor which ought to be sliced:","code":["var a = Tensor.of([4, 6], ["," 1d, 2d, 3d, 4d, 5d, 6d,"," 7d, 8d, 9d, 1d, 2d, 3d,"," 4d, 5d, 6d, 7d, 8d, 9d,"," 1d, 2d, 3d, 4d, 5d, 6d"," ])","device.store(a)"]}, + + {"kind":"when","text":"","code":["var b = a.slice() // [-1..-3, -6..-3]"," .axis(0).from(-1).to(-3)"," .axis(1).from(-6).to(-3)"," .get()","var s = a.slice() // [1, -2]"," .axis(0).at(1)"," .axis(1).at(-2)"," .get()","s.rqsGradient = true"]}, + + {"kind":"then","text":"","code":["s.toString() == \"(1x1):[2.0]:g:[null]\"","s.item(0) == 2.0","s.rqsGradient()","b.toString().contains(\"7.0, 8.0, 9.0, 1.0, 4.0, 5.0, 6.0, 7.0, 1.0, 2.0, 3.0, 4.0\")","b.spread() != null"]}, + + {"kind":"when","text":"","code":["var y = ( s * 4 ) ** 1.5"]}, + + {"kind":"then","text":"","code":["y.toString() == '(1x1):[22.6274]; ->d(1x1):[16.9706]'"]}, + + {"kind":"cleanup","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = false }"]}, + + {"kind":"where","text":"","code":{"device":["Device.get('gpu')","CPU.get()"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Cross device sliced tensor integration test runs without errors. 
[0]", + "result":"PASS", + "duration":"0.089 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["if ( device == null ) return"]}, + + {"kind":"and","text":"For this test we tell the CL-Backend to auto-convert to floats.","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = true }","Neureka.get().settings().autograd().isApplyingGradientWhenTensorIsUsed = false","Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(true)","if ( device instanceof OpenCLDevice && !Neureka.get().canAccessOpenCLDevice() ) return"]}, + + {"kind":"when","text":"","code":["var x = Tensor.of([1], 3d).setRqsGradient(true)","var b = Tensor.of([1], -4d)","var w = Tensor.of([1], 2d)","device.store(x).store(b).store(w)","var y = Tensor.of(\"((i0+i1)*i2)**2\", [x, b, w])"]}, + + {"kind":"then","text":"","code":["y.indicesMap() != null","y.toString().contains(\"[1]:(4.0); ->d[1]:(-8.0)\")"]}, + + {"kind":"when","text":"","code":["y.backward(Tensor.of(2d))","y = ( ( x + b ) * w )**2"]}, + + {"kind":"then","text":"","code":["y.toString().contains(\"[1]:(4.0); ->d[1]:(-8.0)\")"]}, + + {"kind":"when","text":"","code":["y.backward(Tensor.of(2d))","x.toString().contains(\"-32.0\")","y = b + w * x","var a = Tensor.of([4, 6], ["," 1d, 2d, 3d, 4d, 5d, 6d,"," 7d, 8d, 9d, 1d, 2d, 3d,"," 4d, 5d, 6d, 7d, 8d, 9d,"," 1d, 2d, 3d, 4d, 5d, 6d"," ])","device.store(a)","b = a[[-1..-3, -6..-3]]","var s = a[[1, -2]]"]}, + + {"kind":"then","text":"","code":["s.toString() == \"[1x1]:(2.0)\"","s.item(0) == 2.0","b.toString().contains(\"7.0, 8.0, 9.0, 1.0, 4.0, 5.0, 6.0, 7.0, 1.0, 2.0, 3.0, 4.0\")","b.spread() != null"]}, + + {"kind":"when","text":"","code":["b = a[-3..-1, 0..3]","s = a[1, -2]"]}, + + {"kind":"then","text":"","code":["s.toString() == \"[1x1]:(2.0)\"","s.item(0) == 2.0","s.getDataAt(0) == 1.0","s.getDataAt(1) == 2.0","b.toString().contains(\"7.0, 8.0, 9.0, 1.0, 4.0, 5.0, 6.0, 7.0, 1.0, 2.0, 3.0, 4.0\")","b.spread() != null"]}, + + {"kind":"when","text":"","code":["if( device instanceof DummyDevice ) {"," a.getDataAs( double[].class )[1] = a.getDataAs( double[].class )[1] * 6"," a.getDataAs( double[].class )[7] = a.getDataAs( double[].class )[7] * 2","} else {"," var k = Tensor.of([4, 6], ["," 1d, 6d, 1d, 1d,"," 1d, 1d, 1d, 2d,"," 1d, 1d, 1d, 1d,"," 1d, 1d, 1d, 1d,"," 1d, 1d, 1d, 1d,"," 1d, 1d, 1d, 1d"," ])"," device.store( k )"," a.mut[] = a * k","}"]}, + + {"kind":"then","text":"","code":["b.toString().contains(\"7.0, 16.0, 9.0, 1.0, 4.0, 5.0, 6.0, 7.0, 1.0, 2.0, 3.0, 4.0\")"]}, + + {"kind":"when","text":"","code":["var c = Tensor.of([3, 4], ["," -3d, 2d, 3d,"," 5d, 6d, 2d,"," -1d, 1d, 2d,"," 3d, 4d, 2d,"," ])","var d = b + c"]}, + + {"kind":"then","text":"","code":["(d.NDConf.asInlineArray() as List) == ( [3, 4, 4, 1, 4, 1, 0, 0, 1, 1] )","(b.NDConf.asInlineArray() as List) == ( [3, 4, 6, 1, 4, 1, 1, 0, 1, 1] )","(c.NDConf.asInlineArray() as List) == ( [3, 4, 4, 1, 4, 1, 0, 0, 1, 1] )","d.toString().contains("," \"4.0, 18.0, 12.0, 6.0, \"+"," \"10.0, 7.0, 5.0, 8.0, \"+"," \"3.0, 5.0, 7.0, 6.0\"",")"]}, + + {"kind":"when","text":"","code":["b = a[1..3, 2..4]"]}, + + {"kind":"then","text":"","code":["b.toString().contains(\"9.0, 1.0, 2.0, 6.0, 7.0, 8.0, 3.0, 4.0, 5.0\")","b.spread() != null"]}, + + {"kind":"when","text":"","code":["b = a[[[0..3]:2, [1..4]:2]]"]}, + + {"kind":"then","text":"","code":["b.toString().contains( \"12.0, 4.0, 5.0, 7.0\" )","b.spread() != null"]}, + + {"kind":"when","text":"","code":["var p = 
Tensor.of([2, 2], [2d, 55d, 4d, 7d]).to((device instanceof DummyDevice)?null:device)","var u = Tensor.of([2, 2], [5d, 2d, 7d, 34d]).to((device instanceof DummyDevice)?null:device)","p.mut[] = u"]}, + + {"kind":"then","text":"","code":["p.toString().contains(\"5.0, 2.0, 7.0, 34.0\")"]}, + + {"kind":"when","text":"","code":["a.mut[[[0..3]:2, [1..4]:2]] = Tensor.of([2, 2], [1d, 2d, 3d, 4d])"]}, + + {"kind":"then","text":"","code":["b.toString().contains(\"1.0, 2.0, 3.0, 4.0\")","a.toString().contains("," \"1.0, 1.0, 3.0, 2.0, 5.0, 6.0, \" +"," \"7.0, 16.0, 9.0, 1.0, 2.0, 3.0, \" +"," \"4.0, 3.0, 6.0, 4.0, 8.0, 9.0, \" +"," \"1.0, 2.0, 3.0, 4.0, 5.0, 6.0\"",")"]}, + + {"kind":"when","text":"","code":["a.mut[1..2, 1..2] = Tensor.of([2, 2], [8, 8, 8, 8])"]}, + + {"kind":"then","text":"","code":["b.toString().contains("," \"1.0, 2.0, \"+"," \"8.0, 4.0\"",")","a.toString().contains("," \"1.0, 1.0, 3.0, 2.0, 5.0, 6.0, \" +"," \"7.0, 8.0, 8.0, 1.0, 2.0, 3.0, \" +"," \"4.0, 8.0, 8.0, 4.0, 8.0, 9.0, \" +"," \"1.0, 2.0, 3.0, 4.0, 5.0, 6.0\"",")"]}, + + {"kind":"when","text":"","code":["b.setRqsGradient(true)","c = Tensor.of([2, 2], ["," -2, 3,//-2 + 24 + 3 + 8"," 1, 2,"," ])","device.store(b).store(c) // -2 + 6 + 8 + 8 = 22","x = Tensor.of(b, \"x\", c) // This test is important because it tests convolution on slices!"]}, + + {"kind":"then","text":"","code":["x.item() == 20"]}, + + {"kind":"and","text":"","code":["x.toString().replace(\".0\", \"\").contains(\"->d[2x2]:(-2, 3, 1, 2)\")"]}, + + {"kind":"cleanup","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = false }"]}, + + {"kind":"where","text":"","code":{"device":["CPU.get()","Device.get('gpu')"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Cross device sliced tensor integration test runs without errors. [1]", + "result":"PASS", + "duration":"0.001 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, diff --git a/docs/spock/reports/it.Cross_Device_Spec.json b/docs/spock/reports/it.Cross_Device_Spec.json index 08b813f26..6269df7ca 100644 --- a/docs/spock/reports/it.Cross_Device_Spec.json +++ b/docs/spock/reports/it.Cross_Device_Spec.json @@ -4,19 +4,19 @@ "narrative":"This specification is pretty much a system test which covers\n the behavior of the library as a whole across multiple devices!\n No matter which device is being used for a given stress test, the result should be the same...", "subjects":["neureka.devices.Device"], "statistics":{ - "runs":"5", + "runs":"79", "successRate":"100.0%", "failures":"0", "errors":"0", "skipped":"0", - "duration":"8.704 seconds" + "duration":"6.066 seconds" }, "headers":[],"tags":{},"see":[], "features":[ { - "id":"Convolution can model matrix multiplications across devices.", + "id":"Convolution can model matrix multiplications across devices. [0]", "result":"PASS", - "duration":"0.009 seconds", + "duration":"0.033 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -37,9 +37,51 @@ }, { - "id":"Cross device system test runs successfully.", + "id":"Convolution can model matrix multiplications across devices. [1]", "result":"PASS", - "duration":"5.102 seconds", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A given device of any type and the settings configured for testing.","code":["Device device = ( deviceType == \"CPU\" ) ? 
CPU.get() : Device.get('first')","Neureka.get().reset()","Neureka.get().settings().debug().isKeepingDerivativeTargetPayloads = true","Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(true)"]}, + + {"kind":"and","text":"Two tensors, one requiring gradients and the other one does not.","code":["var tensor1 = Tensor.of(Shape.of(2, 2, 1),"," Data.of("," 1f, 2f, // 3, 1,"," 2f, -3f, // -2, -1,"," ))"," .setRqsGradient( true )","var tensor2 = Tensor.of(Shape.of(1, 2, 2),"," Data.of("," -2f, 3f, // 0 7"," 1f, 2f, // -7 0"," ))","device.store(tensor1).store(tensor2)"]}, + + {"kind":"and","text":"","code":["Tensor product = Tensor.of(\"i0xi1\", tensor1, tensor2)","product.backward( Tensor.of(Shape.of(2, 1, 2), Data.of(1, 1, 1, 1)) )","String result = product.toString({"," it.rowLimit = 15 // \"rc\""," it.isScientific = false"," it.isMultiline = false"," it.hasGradient = false"," it.cellSize = 1"," it.hasValue = true"," it.hasRecursiveGraph = true"," it.hasDerivatives = false"," it.hasShape = true"," it.isCellBound = false"," it.postfix = \"\""," it.prefix = \"\""," it.hasSlimNumbers = false","})"]}, + + {"kind":"expect","text":"","code":["result.contains("," \"[2x1x2]:(0.0, 7.0, -7.0, 0.0); =>d|[ [1x2x2]:(-2.0, 3.0, 1.0, 2.0) ]|:t{ [2x2x1]:(1.0, 2.0, 2.0, -3.0) }\"",")"]}, + + {"kind":"cleanup","text":"","code":["product.mut.delete()","tensor1.mut.delete()","tensor2.mut.delete()"]}, + + {"kind":"where","text":"The following settings are being used: ","code":{"deviceType":["'CPU'","'GPU'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Cross device system test runs successfully. [0]", + "result":"PASS", + "duration":"3.278 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A given device of any type and the settings configured for testing.","code":["Device device = ( deviceType == \"CPU\" ) ? CPU.get() : Device.get('first')","Neureka.get().settings().debug().isKeepingDerivativeTargetPayloads = true","Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(true)","Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = true }"]}, + + {"kind":"expect","text":"The integration test runs successful.","code":["CrossDeviceSystemTest.on(device)"]}, + + {"kind":"cleanup","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = true }"]}, + + {"kind":"where","text":"The following settings are being used: ","code":{"deviceType":["'CPU'","'GPU'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Cross device system test runs successfully. 
[1]", + "result":"PASS", + "duration":"0.001 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -56,9 +98,32 @@ }, { - "id":"Test simple NN implementation with manual backprop", + "id":"Test simple NN implementation with manual backprop [0]", + "result":"PASS", + "duration":"2.579 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(true)","Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = true }"]}, + + {"kind":"expect","text":"","code":["device != null"]}, + + {"kind":"and","text":"","code":["new SimpleNNSystemTest(SimpleNNSystemTest.Mode.CONVOLUTION).on(device)"]}, + + {"kind":"and","text":"","code":["if ( !(device instanceof OpenCLDevice) )"," new SimpleNNSystemTest(SimpleNNSystemTest.Mode.MAT_MUL).on(device)"]}, + + {"kind":"cleanup","text":"","code":["Neureka.get().backend().find(CLBackend).ifPresent{ it.getSettings().autoConvertToFloat = false }"]}, + + {"kind":"where","text":"","code":{"device":["CPU.get()","Device.get('first gpu')"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Test simple NN implementation with manual backprop [1]", "result":"PASS", - "duration":"3.512 seconds", + "duration":"0", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -79,7 +144,7 @@ }, { - "id":"A gradient of ones can be set by calling the backward method on a tensor sitting on any device.", + "id":"A gradient of ones can be set by calling the backward method on a tensor sitting on any device. [0]", "result":"PASS", "duration":"0.004 seconds", "iterations":{ @@ -106,7 +171,61 @@ }, { - "id":"Mapping tensors works for every device (even if they are not used).", + "id":"A gradient of ones can be set by calling the backward method on a tensor sitting on any device. [1]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We use the legacy representation of tensors for this little test!","code":["Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(true)"]}, + + {"kind":"and","text":"We create a small matrix of 4 fours which requires a gradient and is stored on the provided device!","code":["Tensor t = Tensor.of([2, 2], 4d).setRqsGradient(true).to(device)"]}, + + {"kind":"when","text":"We now call the backward method on the tensor directly without having done any operations...","code":["t.backward(1)"]}, + + {"kind":"and","text":"Then we take the gradient to see what happened.","code":["Tensor g = t.gradient.get()"]}, + + {"kind":"then","text":"We expect this gradient to be all ones with the shape of our matrix!","code":["g.toString().contains(\"[2x2]:(1.0, 1.0, 1.0, 1.0)\")","t.toString().contains(\"[2x2]:(4.0, 4.0, 4.0, 4.0):g:(1.0, 1.0, 1.0, 1.0)\")"]}, + + {"kind":"and","text":"","code":["t.isOutsourced() == !(device instanceof CPU)","g.isOutsourced() == !(device instanceof CPU)"]}, + + {"kind":"and","text":"","code":["t.device == device","g.device == device"]}, + + {"kind":"where","text":"","code":{"device":["new DummyDevice()","Device.get('first gpu')","CPU.get()"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"A gradient of ones can be set by calling the backward method on a tensor sitting on any device. 
[2]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We use the legacy representation of tensors for this little test!","code":["Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(true)"]}, + + {"kind":"and","text":"We create a small matrix of 4 fours which requires a gradient and is stored on the provided device!","code":["Tensor t = Tensor.of([2, 2], 4d).setRqsGradient(true).to(device)"]}, + + {"kind":"when","text":"We now call the backward method on the tensor directly without having done any operations...","code":["t.backward(1)"]}, + + {"kind":"and","text":"Then we take the gradient to see what happened.","code":["Tensor g = t.gradient.get()"]}, + + {"kind":"then","text":"We expect this gradient to be all ones with the shape of our matrix!","code":["g.toString().contains(\"[2x2]:(1.0, 1.0, 1.0, 1.0)\")","t.toString().contains(\"[2x2]:(4.0, 4.0, 4.0, 4.0):g:(1.0, 1.0, 1.0, 1.0)\")"]}, + + {"kind":"and","text":"","code":["t.isOutsourced() == !(device instanceof CPU)","g.isOutsourced() == !(device instanceof CPU)"]}, + + {"kind":"and","text":"","code":["t.device == device","g.device == device"]}, + + {"kind":"where","text":"","code":{"device":["new DummyDevice()","Device.get('first gpu')","CPU.get()"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Mapping tensors works for every device (even if they are not used). [0]", "result":"PASS", "duration":"0.004 seconds", "iterations":{ @@ -130,6 +249,1869 @@ {"kind":"where","text":"We use the following data to test this mapping for a wide range of types and values!","code":{"tensor":["Tensor.of(3.5)","Tensor.of(3.5)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofBytes().scalar(2.7)","Tensor.ofBytes().scalar(2.7)","Tensor.ofInts().scalar(6.1f)","Tensor.ofInts().scalar(6.1f)","Tensor.of( 3.0 )","Tensor.of(-1.0 )","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.of(3.0 )","Tensor.of(-1.0)","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)","Tensor.ofBytes().scalar( 3 
)","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)"],"device":["CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()"],"target":["String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class"],"lambda":["{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}"],"expected":["'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3]'","'(1):[~3]'","'(1):[~2]'","'(1):[~2]'","'(1):[~6]'","'(1):[~6]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5
]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'"]}} ], "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Mapping tensors works for every device (even if they are not used). [1]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We first make a note of the type we started with.","code":["var originalType = tensor.itemType()"]}, + + {"kind":"when","text":"","code":["when : \"\"\""," We start off by storing the provided tensor on the provided device."," This might be any kind of device like for example an $OpenCLDevice."," Which means the tensor might not be sitting in RAM!"," \"\"\"","tensor.to(device)"]}, + + {"kind":"then","text":"After the tensor is stored on the device, we expect it to be still of the original type.","code":["tensor.itemType == originalType"]}, + + {"kind":"when","text":"\n We call the mapping method which is supposed to create a new tensor of the provided type.\n This procedure is only supported when the tensor is stored in RAM, so when\n the tensor is outsourced (stored on a device), then we expect that the mapping method\n temporarily migrates the tensor back and forth internally...\n ","code":["Tensor result = tensor.mapTo(target, lambda)"]}, + + {"kind":"then","text":"We expect the String representation of the tensor to be as expected!","code":["result.toString() == expected"]}, + + {"kind":"and","text":"We expect the result to have the expected target class!","code":["result.itemType == target"]}, + + {"kind":"and","text":"Lastly, the original tensor used as mapping source should be stored on the original device!","code":["tensor.isOutsourced() == !(device instanceof CPU)","tensor.device == device"]}, + + {"kind":"where","text":"We use the following data to test this mapping for a wide range of types and values!","code":{"tensor":["Tensor.of(3.5)","Tensor.of(3.5)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofBytes().scalar(2.7)","Tensor.ofBytes().scalar(2.7)","Tensor.ofInts().scalar(6.1f)","Tensor.ofInts().scalar(6.1f)","Tensor.of( 3.0 )","Tensor.of(-1.0 )","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.of(3.0 )","Tensor.of(-1.0)","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 
90)","Tensor.ofInts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)"],"device":["CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()"],"target":["String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class"],"lambda":["{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}",
"{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}"],"expected":["'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3]'","'(1):[~3]'","'(1):[~2]'","'(1):[~2]'","'(1):[~6]'","'(1):[~6]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Mapping tensors works for every device (even if they are not used). [2]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We first make a note of the type we started with.","code":["var originalType = tensor.itemType()"]}, + + {"kind":"when","text":"","code":["when : \"\"\""," We start off by storing the provided tensor on the provided device."," This might be any kind of device like for example an $OpenCLDevice."," Which means the tensor might not be sitting in RAM!"," \"\"\"","tensor.to(device)"]}, + + {"kind":"then","text":"After the tensor is stored on the device, we expect it to be still of the original type.","code":["tensor.itemType == originalType"]}, + + {"kind":"when","text":"\n We call the mapping method which is supposed to create a new tensor of the provided type.\n This procedure is only supported when the tensor is stored in RAM, so when\n the tensor is outsourced (stored on a device), then we expect that the mapping method\n temporarily migrates the tensor back and forth internally...\n ","code":["Tensor result = tensor.mapTo(target, lambda)"]}, + + {"kind":"then","text":"We expect the String representation of the tensor to be as expected!","code":["result.toString() == expected"]}, + + {"kind":"and","text":"We expect the result to have the expected target class!","code":["result.itemType == target"]}, + + {"kind":"and","text":"Lastly, the original tensor used as mapping source should be stored on the original device!","code":["tensor.isOutsourced() == !(device instanceof CPU)","tensor.device == device"]}, + + {"kind":"where","text":"We use the following data to test this mapping for a wide range of types and values!","code":{"tensor":["Tensor.of(3.5)","Tensor.of(3.5)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofBytes().scalar(2.7)","Tensor.ofBytes().scalar(2.7)","Tensor.ofInts().scalar(6.1f)","Tensor.ofInts().scalar(6.1f)","Tensor.of( 3.0 )","Tensor.of(-1.0 )","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.of(3.0 )","Tensor.of(-1.0)","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f 
)","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)"],"device":["CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()"],"target":["String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.
class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class"],"lambda":["{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}"],"expected":["'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3]'","'(1):[~3]'","'(1):[~2]'","'(1):[~2]'","'(1):[~6]'","'(1):[~6]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Mapping tensors works for every device (even if they are not used). 
[3]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We first make a note of the type we started with.","code":["var originalType = tensor.itemType()"]}, + + {"kind":"when","text":"","code":["when : \"\"\""," We start off by storing the provided tensor on the provided device."," This might be any kind of device like for example an $OpenCLDevice."," Which means the tensor might not be sitting in RAM!"," \"\"\"","tensor.to(device)"]}, + + {"kind":"then","text":"After the tensor is stored on the device, we expect it to be still of the original type.","code":["tensor.itemType == originalType"]}, + + {"kind":"when","text":"\n We call the mapping method which is supposed to create a new tensor of the provided type.\n This procedure is only supported when the tensor is stored in RAM, so when\n the tensor is outsourced (stored on a device), then we expect that the mapping method\n temporarily migrates the tensor back and forth internally...\n ","code":["Tensor result = tensor.mapTo(target, lambda)"]}, + + {"kind":"then","text":"We expect the String representation of the tensor to be as expected!","code":["result.toString() == expected"]}, + + {"kind":"and","text":"We expect the result to have the expected target class!","code":["result.itemType == target"]}, + + {"kind":"and","text":"Lastly, the original tensor used as mapping source should be stored on the original device!","code":["tensor.isOutsourced() == !(device instanceof CPU)","tensor.device == device"]}, + + {"kind":"where","text":"We use the following data to test this mapping for a wide range of types and values!","code":{"tensor":["Tensor.of(3.5)","Tensor.of(3.5)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofBytes().scalar(2.7)","Tensor.ofBytes().scalar(2.7)","Tensor.ofInts().scalar(6.1f)","Tensor.ofInts().scalar(6.1f)","Tensor.of( 3.0 )","Tensor.of(-1.0 )","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.of(3.0 )","Tensor.of(-1.0)","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)","Tensor.ofBytes().scalar( 3 
)","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)"],"device":["CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()"],"target":["String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class"],"lambda":["{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}"],"expected":["'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3]'","'(1):[~3]'","'(1):[~2]'","'(1):[~2]'","'(1):[~6]'","'(1):[~6]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5
]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Mapping tensors works for every device (even if they are not used). [4]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We first make a note of the type we started with.","code":["var originalType = tensor.itemType()"]}, + + {"kind":"when","text":"","code":["when : \"\"\""," We start off by storing the provided tensor on the provided device."," This might be any kind of device like for example an $OpenCLDevice."," Which means the tensor might not be sitting in RAM!"," \"\"\"","tensor.to(device)"]}, + + {"kind":"then","text":"After the tensor is stored on the device, we expect it to be still of the original type.","code":["tensor.itemType == originalType"]}, + + {"kind":"when","text":"\n We call the mapping method which is supposed to create a new tensor of the provided type.\n This procedure is only supported when the tensor is stored in RAM, so when\n the tensor is outsourced (stored on a device), then we expect that the mapping method\n temporarily migrates the tensor back and forth internally...\n ","code":["Tensor result = tensor.mapTo(target, lambda)"]}, + + {"kind":"then","text":"We expect the String representation of the tensor to be as expected!","code":["result.toString() == expected"]}, + + {"kind":"and","text":"We expect the result to have the expected target class!","code":["result.itemType == target"]}, + + {"kind":"and","text":"Lastly, the original tensor used as mapping source should be stored on the original device!","code":["tensor.isOutsourced() == !(device instanceof CPU)","tensor.device == device"]}, + + {"kind":"where","text":"We use the following data to test this mapping for a wide range of types and values!","code":{"tensor":["Tensor.of(3.5)","Tensor.of(3.5)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofBytes().scalar(2.7)","Tensor.ofBytes().scalar(2.7)","Tensor.ofInts().scalar(6.1f)","Tensor.ofInts().scalar(6.1f)","Tensor.of( 3.0 )","Tensor.of(-1.0 )","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.of(3.0 )","Tensor.of(-1.0)","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 
90)","Tensor.ofInts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)"],"device":["CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()"],"target":["String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class"],"lambda":["{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}",
"{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}"],"expected":["'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3]'","'(1):[~3]'","'(1):[~2]'","'(1):[~2]'","'(1):[~6]'","'(1):[~6]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Mapping tensors works for every device (even if they are not used). [5]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We first make a note of the type we started with.","code":["var originalType = tensor.itemType()"]}, + + {"kind":"when","text":"","code":["when : \"\"\""," We start off by storing the provided tensor on the provided device."," This might be any kind of device like for example an $OpenCLDevice."," Which means the tensor might not be sitting in RAM!"," \"\"\"","tensor.to(device)"]}, + + {"kind":"then","text":"After the tensor is stored on the device, we expect it to be still of the original type.","code":["tensor.itemType == originalType"]}, + + {"kind":"when","text":"\n We call the mapping method which is supposed to create a new tensor of the provided type.\n This procedure is only supported when the tensor is stored in RAM, so when\n the tensor is outsourced (stored on a device), then we expect that the mapping method\n temporarily migrates the tensor back and forth internally...\n ","code":["Tensor result = tensor.mapTo(target, lambda)"]}, + + {"kind":"then","text":"We expect the String representation of the tensor to be as expected!","code":["result.toString() == expected"]}, + + {"kind":"and","text":"We expect the result to have the expected target class!","code":["result.itemType == target"]}, + + {"kind":"and","text":"Lastly, the original tensor used as mapping source should be stored on the original device!","code":["tensor.isOutsourced() == !(device instanceof CPU)","tensor.device == device"]}, + + {"kind":"where","text":"We use the following data to test this mapping for a wide range of types and values!","code":{"tensor":["Tensor.of(3.5)","Tensor.of(3.5)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofBytes().scalar(2.7)","Tensor.ofBytes().scalar(2.7)","Tensor.ofInts().scalar(6.1f)","Tensor.ofInts().scalar(6.1f)","Tensor.of( 3.0 )","Tensor.of(-1.0 )","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.of(3.0 )","Tensor.of(-1.0)","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f 
)","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)"],"device":["CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()"],"target":["String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.
class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class"],"lambda":["{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}"],"expected":["'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3]'","'(1):[~3]'","'(1):[~2]'","'(1):[~2]'","'(1):[~6]'","'(1):[~6]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Mapping tensors works for every device (even if they are not used). 
[6]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We first make a note of the type we started with.","code":["var originalType = tensor.itemType()"]}, + + {"kind":"when","text":"","code":["when : \"\"\""," We start off by storing the provided tensor on the provided device."," This might be any kind of device like for example an $OpenCLDevice."," Which means the tensor might not be sitting in RAM!"," \"\"\"","tensor.to(device)"]}, + + {"kind":"then","text":"After the tensor is stored on the device, we expect it to be still of the original type.","code":["tensor.itemType == originalType"]}, + + {"kind":"when","text":"\n We call the mapping method which is supposed to create a new tensor of the provided type.\n This procedure is only supported when the tensor is stored in RAM, so when\n the tensor is outsourced (stored on a device), then we expect that the mapping method\n temporarily migrates the tensor back and forth internally...\n ","code":["Tensor result = tensor.mapTo(target, lambda)"]}, + + {"kind":"then","text":"We expect the String representation of the tensor to be as expected!","code":["result.toString() == expected"]}, + + {"kind":"and","text":"We expect the result to have the expected target class!","code":["result.itemType == target"]}, + + {"kind":"and","text":"Lastly, the original tensor used as mapping source should be stored on the original device!","code":["tensor.isOutsourced() == !(device instanceof CPU)","tensor.device == device"]}, + + {"kind":"where","text":"We use the following data to test this mapping for a wide range of types and values!","code":{"tensor":["Tensor.of(3.5)","Tensor.of(3.5)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofBytes().scalar(2.7)","Tensor.ofBytes().scalar(2.7)","Tensor.ofInts().scalar(6.1f)","Tensor.ofInts().scalar(6.1f)","Tensor.of( 3.0 )","Tensor.of(-1.0 )","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.of(3.0 )","Tensor.of(-1.0)","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 
37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)"],"device":["CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()"],"target":["String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class"],"lambda":["{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}"],"expected":["'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3]'","'(1):[~3]'","'(1):[~2]'","'(1):[~2]'","'(1):[~6]'","'(1):[~6]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1
):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Mapping tensors works for every device (even if they are not used). [7]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We first make a note of the type we started with.","code":["var originalType = tensor.itemType()"]}, + + {"kind":"when","text":"","code":["when : \"\"\""," We start off by storing the provided tensor on the provided device."," This might be any kind of device like for example an $OpenCLDevice."," Which means the tensor might not be sitting in RAM!"," \"\"\"","tensor.to(device)"]}, + + {"kind":"then","text":"After the tensor is stored on the device, we expect it to be still of the original type.","code":["tensor.itemType == originalType"]}, + + {"kind":"when","text":"\n We call the mapping method which is supposed to create a new tensor of the provided type.\n This procedure is only supported when the tensor is stored in RAM, so when\n the tensor is outsourced (stored on a device), then we expect that the mapping method\n temporarily migrates the tensor back and forth internally...\n ","code":["Tensor result = tensor.mapTo(target, lambda)"]}, + + {"kind":"then","text":"We expect the String representation of the tensor to be as expected!","code":["result.toString() == expected"]}, + + {"kind":"and","text":"We expect the result to have the expected target class!","code":["result.itemType == target"]}, + + {"kind":"and","text":"Lastly, the original tensor used as mapping source should be stored on the original device!","code":["tensor.isOutsourced() == !(device instanceof CPU)","tensor.device == device"]}, + + {"kind":"where","text":"We use the following data to test this mapping for a wide range of types and values!","code":{"tensor":["Tensor.of(3.5)","Tensor.of(3.5)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofBytes().scalar(2.7)","Tensor.ofBytes().scalar(2.7)","Tensor.ofInts().scalar(6.1f)","Tensor.ofInts().scalar(6.1f)","Tensor.of( 3.0 )","Tensor.of(-1.0 )","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.of(3.0 )","Tensor.of(-1.0)","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 
70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)"],"device":["CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()"],"target":["String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class"],"lambda":["{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}
","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}"],"expected":["'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3]'","'(1):[~3]'","'(1):[~2]'","'(1):[~2]'","'(1):[~6]'","'(1):[~6]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Mapping tensors works for every device (even if they are not used). [8]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We first make a note of the type we started with.","code":["var originalType = tensor.itemType()"]}, + + {"kind":"when","text":"","code":["when : \"\"\""," We start off by storing the provided tensor on the provided device."," This might be any kind of device like for example an $OpenCLDevice."," Which means the tensor might not be sitting in RAM!"," \"\"\"","tensor.to(device)"]}, + + {"kind":"then","text":"After the tensor is stored on the device, we expect it to be still of the original type.","code":["tensor.itemType == originalType"]}, + + {"kind":"when","text":"\n We call the mapping method which is supposed to create a new tensor of the provided type.\n This procedure is only supported when the tensor is stored in RAM, so when\n the tensor is outsourced (stored on a device), then we expect that the mapping method\n temporarily migrates the tensor back and forth internally...\n ","code":["Tensor result = tensor.mapTo(target, lambda)"]}, + + {"kind":"then","text":"We expect the String representation of the tensor to be as expected!","code":["result.toString() == expected"]}, + + {"kind":"and","text":"We expect the result to have the expected target class!","code":["result.itemType == target"]}, + + {"kind":"and","text":"Lastly, the original tensor used as mapping source should be stored on the original device!","code":["tensor.isOutsourced() == !(device instanceof CPU)","tensor.device == device"]}, + + {"kind":"where","text":"We use the following data to test this mapping for a wide range of types and values!","code":{"tensor":["Tensor.of(3.5)","Tensor.of(3.5)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofBytes().scalar(2.7)","Tensor.ofBytes().scalar(2.7)","Tensor.ofInts().scalar(6.1f)","Tensor.ofInts().scalar(6.1f)","Tensor.of( 3.0 )","Tensor.of(-1.0 )","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.of(3.0 )","Tensor.of(-1.0)","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f 
)","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)"],"device":["CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()"],"target":["String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.
class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class"],"lambda":["{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}"],"expected":["'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3]'","'(1):[~3]'","'(1):[~2]'","'(1):[~2]'","'(1):[~6]'","'(1):[~6]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Mapping tensors works for every device (even if they are not used). 
[9]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We first make a note of the type we started with.","code":["var originalType = tensor.itemType()"]}, + + {"kind":"when","text":"","code":["when : \"\"\""," We start off by storing the provided tensor on the provided device."," This might be any kind of device like for example an $OpenCLDevice."," Which means the tensor might not be sitting in RAM!"," \"\"\"","tensor.to(device)"]}, + + {"kind":"then","text":"After the tensor is stored on the device, we expect it to be still of the original type.","code":["tensor.itemType == originalType"]}, + + {"kind":"when","text":"\n We call the mapping method which is supposed to create a new tensor of the provided type.\n This procedure is only supported when the tensor is stored in RAM, so when\n the tensor is outsourced (stored on a device), then we expect that the mapping method\n temporarily migrates the tensor back and forth internally...\n ","code":["Tensor result = tensor.mapTo(target, lambda)"]}, + + {"kind":"then","text":"We expect the String representation of the tensor to be as expected!","code":["result.toString() == expected"]}, + + {"kind":"and","text":"We expect the result to have the expected target class!","code":["result.itemType == target"]}, + + {"kind":"and","text":"Lastly, the original tensor used as mapping source should be stored on the original device!","code":["tensor.isOutsourced() == !(device instanceof CPU)","tensor.device == device"]}, + + {"kind":"where","text":"We use the following data to test this mapping for a wide range of types and values!","code":{"tensor":["Tensor.of(3.5)","Tensor.of(3.5)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofBytes().scalar(2.7)","Tensor.ofBytes().scalar(2.7)","Tensor.ofInts().scalar(6.1f)","Tensor.ofInts().scalar(6.1f)","Tensor.of( 3.0 )","Tensor.of(-1.0 )","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.of(3.0 )","Tensor.of(-1.0)","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)","Tensor.ofBytes().scalar( 3 
)","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)"],"device":["CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()"],"target":["String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class"],"lambda":["{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}"],"expected":["'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3]'","'(1):[~3]'","'(1):[~2]'","'(1):[~2]'","'(1):[~6]'","'(1):[~6]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5
]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Mapping tensors works for every device (even if they are not used). [10]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We first make a note of the type we started with.","code":["var originalType = tensor.itemType()"]}, + + {"kind":"when","text":"","code":["when : \"\"\""," We start off by storing the provided tensor on the provided device."," This might be any kind of device like for example an $OpenCLDevice."," Which means the tensor might not be sitting in RAM!"," \"\"\"","tensor.to(device)"]}, + + {"kind":"then","text":"After the tensor is stored on the device, we expect it to be still of the original type.","code":["tensor.itemType == originalType"]}, + + {"kind":"when","text":"\n We call the mapping method which is supposed to create a new tensor of the provided type.\n This procedure is only supported when the tensor is stored in RAM, so when\n the tensor is outsourced (stored on a device), then we expect that the mapping method\n temporarily migrates the tensor back and forth internally...\n ","code":["Tensor result = tensor.mapTo(target, lambda)"]}, + + {"kind":"then","text":"We expect the String representation of the tensor to be as expected!","code":["result.toString() == expected"]}, + + {"kind":"and","text":"We expect the result to have the expected target class!","code":["result.itemType == target"]}, + + {"kind":"and","text":"Lastly, the original tensor used as mapping source should be stored on the original device!","code":["tensor.isOutsourced() == !(device instanceof CPU)","tensor.device == device"]}, + + {"kind":"where","text":"We use the following data to test this mapping for a wide range of types and values!","code":{"tensor":["Tensor.of(3.5)","Tensor.of(3.5)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofBytes().scalar(2.7)","Tensor.ofBytes().scalar(2.7)","Tensor.ofInts().scalar(6.1f)","Tensor.ofInts().scalar(6.1f)","Tensor.of( 3.0 )","Tensor.of(-1.0 )","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.of(3.0 )","Tensor.of(-1.0)","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 
90)","Tensor.ofInts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)"],"device":["CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()"],"target":["String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class"],"lambda":["{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}",
"{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}"],"expected":["'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3]'","'(1):[~3]'","'(1):[~2]'","'(1):[~2]'","'(1):[~6]'","'(1):[~6]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Mapping tensors works for every device (even if they are not used). [11]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We first make a note of the type we started with.","code":["var originalType = tensor.itemType()"]}, + + {"kind":"when","text":"","code":["when : \"\"\""," We start off by storing the provided tensor on the provided device."," This might be any kind of device like for example an $OpenCLDevice."," Which means the tensor might not be sitting in RAM!"," \"\"\"","tensor.to(device)"]}, + + {"kind":"then","text":"After the tensor is stored on the device, we expect it to be still of the original type.","code":["tensor.itemType == originalType"]}, + + {"kind":"when","text":"\n We call the mapping method which is supposed to create a new tensor of the provided type.\n This procedure is only supported when the tensor is stored in RAM, so when\n the tensor is outsourced (stored on a device), then we expect that the mapping method\n temporarily migrates the tensor back and forth internally...\n ","code":["Tensor result = tensor.mapTo(target, lambda)"]}, + + {"kind":"then","text":"We expect the String representation of the tensor to be as expected!","code":["result.toString() == expected"]}, + + {"kind":"and","text":"We expect the result to have the expected target class!","code":["result.itemType == target"]}, + + {"kind":"and","text":"Lastly, the original tensor used as mapping source should be stored on the original device!","code":["tensor.isOutsourced() == !(device instanceof CPU)","tensor.device == device"]}, + + {"kind":"where","text":"We use the following data to test this mapping for a wide range of types and values!","code":{"tensor":["Tensor.of(3.5)","Tensor.of(3.5)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofBytes().scalar(2.7)","Tensor.ofBytes().scalar(2.7)","Tensor.ofInts().scalar(6.1f)","Tensor.ofInts().scalar(6.1f)","Tensor.of( 3.0 )","Tensor.of(-1.0 )","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.of(3.0 )","Tensor.of(-1.0)","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f 
)","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)"],"device":["CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()"],"target":["String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.
class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class"],"lambda":["{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}"],"expected":["'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3]'","'(1):[~3]'","'(1):[~2]'","'(1):[~2]'","'(1):[~6]'","'(1):[~6]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Mapping tensors works for every device (even if they are not used). 
[12]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We first make a note of the type we started with.","code":["var originalType = tensor.itemType()"]}, + + {"kind":"when","text":"","code":["when : \"\"\""," We start off by storing the provided tensor on the provided device."," This might be any kind of device like for example an $OpenCLDevice."," Which means the tensor might not be sitting in RAM!"," \"\"\"","tensor.to(device)"]}, + + {"kind":"then","text":"After the tensor is stored on the device, we expect it to be still of the original type.","code":["tensor.itemType == originalType"]}, + + {"kind":"when","text":"\n We call the mapping method which is supposed to create a new tensor of the provided type.\n This procedure is only supported when the tensor is stored in RAM, so when\n the tensor is outsourced (stored on a device), then we expect that the mapping method\n temporarily migrates the tensor back and forth internally...\n ","code":["Tensor result = tensor.mapTo(target, lambda)"]}, + + {"kind":"then","text":"We expect the String representation of the tensor to be as expected!","code":["result.toString() == expected"]}, + + {"kind":"and","text":"We expect the result to have the expected target class!","code":["result.itemType == target"]}, + + {"kind":"and","text":"Lastly, the original tensor used as mapping source should be stored on the original device!","code":["tensor.isOutsourced() == !(device instanceof CPU)","tensor.device == device"]}, + + {"kind":"where","text":"We use the following data to test this mapping for a wide range of types and values!","code":{"tensor":["Tensor.of(3.5)","Tensor.of(3.5)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofBytes().scalar(2.7)","Tensor.ofBytes().scalar(2.7)","Tensor.ofInts().scalar(6.1f)","Tensor.ofInts().scalar(6.1f)","Tensor.of( 3.0 )","Tensor.of(-1.0 )","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.of(3.0 )","Tensor.of(-1.0)","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)","Tensor.ofBytes().scalar( 3 
)","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)"],"device":["CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()"],"target":["String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class"],"lambda":["{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}"],"expected":["'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3]'","'(1):[~3]'","'(1):[~2]'","'(1):[~2]'","'(1):[~6]'","'(1):[~6]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5
]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Mapping tensors works for every device (even if they are not used). [13]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We first make a note of the type we started with.","code":["var originalType = tensor.itemType()"]}, + + {"kind":"when","text":"","code":["when : \"\"\""," We start off by storing the provided tensor on the provided device."," This might be any kind of device like for example an $OpenCLDevice."," Which means the tensor might not be sitting in RAM!"," \"\"\"","tensor.to(device)"]}, + + {"kind":"then","text":"After the tensor is stored on the device, we expect it to be still of the original type.","code":["tensor.itemType == originalType"]}, + + {"kind":"when","text":"\n We call the mapping method which is supposed to create a new tensor of the provided type.\n This procedure is only supported when the tensor is stored in RAM, so when\n the tensor is outsourced (stored on a device), then we expect that the mapping method\n temporarily migrates the tensor back and forth internally...\n ","code":["Tensor result = tensor.mapTo(target, lambda)"]}, + + {"kind":"then","text":"We expect the String representation of the tensor to be as expected!","code":["result.toString() == expected"]}, + + {"kind":"and","text":"We expect the result to have the expected target class!","code":["result.itemType == target"]}, + + {"kind":"and","text":"Lastly, the original tensor used as mapping source should be stored on the original device!","code":["tensor.isOutsourced() == !(device instanceof CPU)","tensor.device == device"]}, + + {"kind":"where","text":"We use the following data to test this mapping for a wide range of types and values!","code":{"tensor":["Tensor.of(3.5)","Tensor.of(3.5)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofBytes().scalar(2.7)","Tensor.ofBytes().scalar(2.7)","Tensor.ofInts().scalar(6.1f)","Tensor.ofInts().scalar(6.1f)","Tensor.of( 3.0 )","Tensor.of(-1.0 )","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.of(3.0 )","Tensor.of(-1.0)","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 
90)","Tensor.ofInts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)"],"device":["CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()"],"target":["String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class"],"lambda":["{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}",
"{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}"],"expected":["'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3]'","'(1):[~3]'","'(1):[~2]'","'(1):[~2]'","'(1):[~6]'","'(1):[~6]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Mapping tensors works for every device (even if they are not used). [14]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We first make a note of the type we started with.","code":["var originalType = tensor.itemType()"]}, + + {"kind":"when","text":"","code":["when : \"\"\""," We start off by storing the provided tensor on the provided device."," This might be any kind of device like for example an $OpenCLDevice."," Which means the tensor might not be sitting in RAM!"," \"\"\"","tensor.to(device)"]}, + + {"kind":"then","text":"After the tensor is stored on the device, we expect it to be still of the original type.","code":["tensor.itemType == originalType"]}, + + {"kind":"when","text":"\n We call the mapping method which is supposed to create a new tensor of the provided type.\n This procedure is only supported when the tensor is stored in RAM, so when\n the tensor is outsourced (stored on a device), then we expect that the mapping method\n temporarily migrates the tensor back and forth internally...\n ","code":["Tensor result = tensor.mapTo(target, lambda)"]}, + + {"kind":"then","text":"We expect the String representation of the tensor to be as expected!","code":["result.toString() == expected"]}, + + {"kind":"and","text":"We expect the result to have the expected target class!","code":["result.itemType == target"]}, + + {"kind":"and","text":"Lastly, the original tensor used as mapping source should be stored on the original device!","code":["tensor.isOutsourced() == !(device instanceof CPU)","tensor.device == device"]}, + + {"kind":"where","text":"We use the following data to test this mapping for a wide range of types and values!","code":{"tensor":["Tensor.of(3.5)","Tensor.of(3.5)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofBytes().scalar(2.7)","Tensor.ofBytes().scalar(2.7)","Tensor.ofInts().scalar(6.1f)","Tensor.ofInts().scalar(6.1f)","Tensor.of( 3.0 )","Tensor.of(-1.0 )","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.of(3.0 )","Tensor.of(-1.0)","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f 
)","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)"],"device":["CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()"],"target":["String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.
class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class"],"lambda":["{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}"],"expected":["'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3]'","'(1):[~3]'","'(1):[~2]'","'(1):[~2]'","'(1):[~6]'","'(1):[~6]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Mapping tensors works for every device (even if they are not used). 
[15]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We first make a note of the type we started with.","code":["var originalType = tensor.itemType()"]}, + + {"kind":"when","text":"","code":["when : \"\"\""," We start off by storing the provided tensor on the provided device."," This might be any kind of device like for example an $OpenCLDevice."," Which means the tensor might not be sitting in RAM!"," \"\"\"","tensor.to(device)"]}, + + {"kind":"then","text":"After the tensor is stored on the device, we expect it to be still of the original type.","code":["tensor.itemType == originalType"]}, + + {"kind":"when","text":"\n We call the mapping method which is supposed to create a new tensor of the provided type.\n This procedure is only supported when the tensor is stored in RAM, so when\n the tensor is outsourced (stored on a device), then we expect that the mapping method\n temporarily migrates the tensor back and forth internally...\n ","code":["Tensor result = tensor.mapTo(target, lambda)"]}, + + {"kind":"then","text":"We expect the String representation of the tensor to be as expected!","code":["result.toString() == expected"]}, + + {"kind":"and","text":"We expect the result to have the expected target class!","code":["result.itemType == target"]}, + + {"kind":"and","text":"Lastly, the original tensor used as mapping source should be stored on the original device!","code":["tensor.isOutsourced() == !(device instanceof CPU)","tensor.device == device"]}, + + {"kind":"where","text":"We use the following data to test this mapping for a wide range of types and values!","code":{"tensor":["Tensor.of(3.5)","Tensor.of(3.5)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofBytes().scalar(2.7)","Tensor.ofBytes().scalar(2.7)","Tensor.ofInts().scalar(6.1f)","Tensor.ofInts().scalar(6.1f)","Tensor.of( 3.0 )","Tensor.of(-1.0 )","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.of(3.0 )","Tensor.of(-1.0)","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)","Tensor.ofBytes().scalar( 3 
)","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)"],"device":["CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()"],"target":["String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class"],"lambda":["{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}"],"expected":["'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3]'","'(1):[~3]'","'(1):[~2]'","'(1):[~2]'","'(1):[~6]'","'(1):[~6]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5
]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Mapping tensors works for every device (even if they are not used). [16]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We first make a note of the type we started with.","code":["var originalType = tensor.itemType()"]}, + + {"kind":"when","text":"","code":["when : \"\"\""," We start off by storing the provided tensor on the provided device."," This might be any kind of device like for example an $OpenCLDevice."," Which means the tensor might not be sitting in RAM!"," \"\"\"","tensor.to(device)"]}, + + {"kind":"then","text":"After the tensor is stored on the device, we expect it to be still of the original type.","code":["tensor.itemType == originalType"]}, + + {"kind":"when","text":"\n We call the mapping method which is supposed to create a new tensor of the provided type.\n This procedure is only supported when the tensor is stored in RAM, so when\n the tensor is outsourced (stored on a device), then we expect that the mapping method\n temporarily migrates the tensor back and forth internally...\n ","code":["Tensor result = tensor.mapTo(target, lambda)"]}, + + {"kind":"then","text":"We expect the String representation of the tensor to be as expected!","code":["result.toString() == expected"]}, + + {"kind":"and","text":"We expect the result to have the expected target class!","code":["result.itemType == target"]}, + + {"kind":"and","text":"Lastly, the original tensor used as mapping source should be stored on the original device!","code":["tensor.isOutsourced() == !(device instanceof CPU)","tensor.device == device"]}, + + {"kind":"where","text":"We use the following data to test this mapping for a wide range of types and values!","code":{"tensor":["Tensor.of(3.5)","Tensor.of(3.5)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofBytes().scalar(2.7)","Tensor.ofBytes().scalar(2.7)","Tensor.ofInts().scalar(6.1f)","Tensor.ofInts().scalar(6.1f)","Tensor.of( 3.0 )","Tensor.of(-1.0 )","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.of(3.0 )","Tensor.of(-1.0)","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 
90)","Tensor.ofInts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)"],"device":["CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()"],"target":["String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class"],"lambda":["{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}",
"{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}"],"expected":["'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3]'","'(1):[~3]'","'(1):[~2]'","'(1):[~2]'","'(1):[~6]'","'(1):[~6]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Mapping tensors works for every device (even if they are not used). [17]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We first make a note of the type we started with.","code":["var originalType = tensor.itemType()"]}, + + {"kind":"when","text":"","code":["when : \"\"\""," We start off by storing the provided tensor on the provided device."," This might be any kind of device like for example an $OpenCLDevice."," Which means the tensor might not be sitting in RAM!"," \"\"\"","tensor.to(device)"]}, + + {"kind":"then","text":"After the tensor is stored on the device, we expect it to be still of the original type.","code":["tensor.itemType == originalType"]}, + + {"kind":"when","text":"\n We call the mapping method which is supposed to create a new tensor of the provided type.\n This procedure is only supported when the tensor is stored in RAM, so when\n the tensor is outsourced (stored on a device), then we expect that the mapping method\n temporarily migrates the tensor back and forth internally...\n ","code":["Tensor result = tensor.mapTo(target, lambda)"]}, + + {"kind":"then","text":"We expect the String representation of the tensor to be as expected!","code":["result.toString() == expected"]}, + + {"kind":"and","text":"We expect the result to have the expected target class!","code":["result.itemType == target"]}, + + {"kind":"and","text":"Lastly, the original tensor used as mapping source should be stored on the original device!","code":["tensor.isOutsourced() == !(device instanceof CPU)","tensor.device == device"]}, + + {"kind":"where","text":"We use the following data to test this mapping for a wide range of types and values!","code":{"tensor":["Tensor.of(3.5)","Tensor.of(3.5)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofBytes().scalar(2.7)","Tensor.ofBytes().scalar(2.7)","Tensor.ofInts().scalar(6.1f)","Tensor.ofInts().scalar(6.1f)","Tensor.of( 3.0 )","Tensor.of(-1.0 )","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.of(3.0 )","Tensor.of(-1.0)","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f 
)","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)"],"device":["CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()"],"target":["String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.
class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class"],"lambda":["{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}"],"expected":["'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3]'","'(1):[~3]'","'(1):[~2]'","'(1):[~2]'","'(1):[~6]'","'(1):[~6]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Mapping tensors works for every device (even if they are not used). 
[18]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We first make a note of the type we started with.","code":["var originalType = tensor.itemType()"]}, + + {"kind":"when","text":"","code":["when : \"\"\""," We start off by storing the provided tensor on the provided device."," This might be any kind of device like for example an $OpenCLDevice."," Which means the tensor might not be sitting in RAM!"," \"\"\"","tensor.to(device)"]}, + + {"kind":"then","text":"After the tensor is stored on the device, we expect it to be still of the original type.","code":["tensor.itemType == originalType"]}, + + {"kind":"when","text":"\n We call the mapping method which is supposed to create a new tensor of the provided type.\n This procedure is only supported when the tensor is stored in RAM, so when\n the tensor is outsourced (stored on a device), then we expect that the mapping method\n temporarily migrates the tensor back and forth internally...\n ","code":["Tensor result = tensor.mapTo(target, lambda)"]}, + + {"kind":"then","text":"We expect the String representation of the tensor to be as expected!","code":["result.toString() == expected"]}, + + {"kind":"and","text":"We expect the result to have the expected target class!","code":["result.itemType == target"]}, + + {"kind":"and","text":"Lastly, the original tensor used as mapping source should be stored on the original device!","code":["tensor.isOutsourced() == !(device instanceof CPU)","tensor.device == device"]}, + + {"kind":"where","text":"We use the following data to test this mapping for a wide range of types and values!","code":{"tensor":["Tensor.of(3.5)","Tensor.of(3.5)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofBytes().scalar(2.7)","Tensor.ofBytes().scalar(2.7)","Tensor.ofInts().scalar(6.1f)","Tensor.ofInts().scalar(6.1f)","Tensor.of( 3.0 )","Tensor.of(-1.0 )","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.of(3.0 )","Tensor.of(-1.0)","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 
37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)"],"device":["CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()"],"target":["String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class"],"lambda":["{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}"],"expected":["'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3]'","'(1):[~3]'","'(1):[~2]'","'(1):[~2]'","'(1):[~6]'","'(1):[~6]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1
):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Mapping tensors works for every device (even if they are not used). [19]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We first make a note of the type we started with.","code":["var originalType = tensor.itemType()"]}, + + {"kind":"when","text":"","code":["when : \"\"\""," We start off by storing the provided tensor on the provided device."," This might be any kind of device like for example an $OpenCLDevice."," Which means the tensor might not be sitting in RAM!"," \"\"\"","tensor.to(device)"]}, + + {"kind":"then","text":"After the tensor is stored on the device, we expect it to be still of the original type.","code":["tensor.itemType == originalType"]}, + + {"kind":"when","text":"\n We call the mapping method which is supposed to create a new tensor of the provided type.\n This procedure is only supported when the tensor is stored in RAM, so when\n the tensor is outsourced (stored on a device), then we expect that the mapping method\n temporarily migrates the tensor back and forth internally...\n ","code":["Tensor result = tensor.mapTo(target, lambda)"]}, + + {"kind":"then","text":"We expect the String representation of the tensor to be as expected!","code":["result.toString() == expected"]}, + + {"kind":"and","text":"We expect the result to have the expected target class!","code":["result.itemType == target"]}, + + {"kind":"and","text":"Lastly, the original tensor used as mapping source should be stored on the original device!","code":["tensor.isOutsourced() == !(device instanceof CPU)","tensor.device == device"]}, + + {"kind":"where","text":"We use the following data to test this mapping for a wide range of types and values!","code":{"tensor":["Tensor.of(3.5)","Tensor.of(3.5)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofBytes().scalar(2.7)","Tensor.ofBytes().scalar(2.7)","Tensor.ofInts().scalar(6.1f)","Tensor.ofInts().scalar(6.1f)","Tensor.of( 3.0 )","Tensor.of(-1.0 )","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.of(3.0 )","Tensor.of(-1.0)","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 
70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)"],"device":["CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()"],"target":["String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class"],"lambda":["{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}
","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}"],"expected":["'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3]'","'(1):[~3]'","'(1):[~2]'","'(1):[~2]'","'(1):[~6]'","'(1):[~6]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Mapping tensors works for every device (even if they are not used). [20]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We first make a note of the type we started with.","code":["var originalType = tensor.itemType()"]}, + + {"kind":"when","text":"","code":["when : \"\"\""," We start off by storing the provided tensor on the provided device."," This might be any kind of device like for example an $OpenCLDevice."," Which means the tensor might not be sitting in RAM!"," \"\"\"","tensor.to(device)"]}, + + {"kind":"then","text":"After the tensor is stored on the device, we expect it to be still of the original type.","code":["tensor.itemType == originalType"]}, + + {"kind":"when","text":"\n We call the mapping method which is supposed to create a new tensor of the provided type.\n This procedure is only supported when the tensor is stored in RAM, so when\n the tensor is outsourced (stored on a device), then we expect that the mapping method\n temporarily migrates the tensor back and forth internally...\n ","code":["Tensor result = tensor.mapTo(target, lambda)"]}, + + {"kind":"then","text":"We expect the String representation of the tensor to be as expected!","code":["result.toString() == expected"]}, + + {"kind":"and","text":"We expect the result to have the expected target class!","code":["result.itemType == target"]}, + + {"kind":"and","text":"Lastly, the original tensor used as mapping source should be stored on the original device!","code":["tensor.isOutsourced() == !(device instanceof CPU)","tensor.device == device"]}, + + {"kind":"where","text":"We use the following data to test this mapping for a wide range of types and values!","code":{"tensor":["Tensor.of(3.5)","Tensor.of(3.5)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofBytes().scalar(2.7)","Tensor.ofBytes().scalar(2.7)","Tensor.ofInts().scalar(6.1f)","Tensor.ofInts().scalar(6.1f)","Tensor.of( 3.0 )","Tensor.of(-1.0 )","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.of(3.0 )","Tensor.of(-1.0)","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f 
)","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)"],"device":["CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()"],"target":["String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.
class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class"],"lambda":["{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}"],"expected":["'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3]'","'(1):[~3]'","'(1):[~2]'","'(1):[~2]'","'(1):[~6]'","'(1):[~6]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Mapping tensors works for every device (even if they are not used). 
[21]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We first make a note of the type we started with.","code":["var originalType = tensor.itemType()"]}, + + {"kind":"when","text":"","code":["when : \"\"\""," We start off by storing the provided tensor on the provided device."," This might be any kind of device like for example an $OpenCLDevice."," Which means the tensor might not be sitting in RAM!"," \"\"\"","tensor.to(device)"]}, + + {"kind":"then","text":"After the tensor is stored on the device, we expect it to be still of the original type.","code":["tensor.itemType == originalType"]}, + + {"kind":"when","text":"\n We call the mapping method which is supposed to create a new tensor of the provided type.\n This procedure is only supported when the tensor is stored in RAM, so when\n the tensor is outsourced (stored on a device), then we expect that the mapping method\n temporarily migrates the tensor back and forth internally...\n ","code":["Tensor result = tensor.mapTo(target, lambda)"]}, + + {"kind":"then","text":"We expect the String representation of the tensor to be as expected!","code":["result.toString() == expected"]}, + + {"kind":"and","text":"We expect the result to have the expected target class!","code":["result.itemType == target"]}, + + {"kind":"and","text":"Lastly, the original tensor used as mapping source should be stored on the original device!","code":["tensor.isOutsourced() == !(device instanceof CPU)","tensor.device == device"]}, + + {"kind":"where","text":"We use the following data to test this mapping for a wide range of types and values!","code":{"tensor":["Tensor.of(3.5)","Tensor.of(3.5)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofBytes().scalar(2.7)","Tensor.ofBytes().scalar(2.7)","Tensor.ofInts().scalar(6.1f)","Tensor.ofInts().scalar(6.1f)","Tensor.of( 3.0 )","Tensor.of(-1.0 )","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.of(3.0 )","Tensor.of(-1.0)","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 
37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)"],"device":["CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()"],"target":["String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class"],"lambda":["{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}"],"expected":["'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3]'","'(1):[~3]'","'(1):[~2]'","'(1):[~2]'","'(1):[~6]'","'(1):[~6]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1
):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Mapping tensors works for every device (even if they are not used). [22]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We first make a note of the type we started with.","code":["var originalType = tensor.itemType()"]}, + + {"kind":"when","text":"","code":["when : \"\"\""," We start off by storing the provided tensor on the provided device."," This might be any kind of device like for example an $OpenCLDevice."," Which means the tensor might not be sitting in RAM!"," \"\"\"","tensor.to(device)"]}, + + {"kind":"then","text":"After the tensor is stored on the device, we expect it to be still of the original type.","code":["tensor.itemType == originalType"]}, + + {"kind":"when","text":"\n We call the mapping method which is supposed to create a new tensor of the provided type.\n This procedure is only supported when the tensor is stored in RAM, so when\n the tensor is outsourced (stored on a device), then we expect that the mapping method\n temporarily migrates the tensor back and forth internally...\n ","code":["Tensor result = tensor.mapTo(target, lambda)"]}, + + {"kind":"then","text":"We expect the String representation of the tensor to be as expected!","code":["result.toString() == expected"]}, + + {"kind":"and","text":"We expect the result to have the expected target class!","code":["result.itemType == target"]}, + + {"kind":"and","text":"Lastly, the original tensor used as mapping source should be stored on the original device!","code":["tensor.isOutsourced() == !(device instanceof CPU)","tensor.device == device"]}, + + {"kind":"where","text":"We use the following data to test this mapping for a wide range of types and values!","code":{"tensor":["Tensor.of(3.5)","Tensor.of(3.5)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofBytes().scalar(2.7)","Tensor.ofBytes().scalar(2.7)","Tensor.ofInts().scalar(6.1f)","Tensor.ofInts().scalar(6.1f)","Tensor.of( 3.0 )","Tensor.of(-1.0 )","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.of(3.0 )","Tensor.of(-1.0)","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 
70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)"],"device":["CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()"],"target":["String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class"],"lambda":["{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}
","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}"],"expected":["'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3]'","'(1):[~3]'","'(1):[~2]'","'(1):[~2]'","'(1):[~6]'","'(1):[~6]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Mapping tensors works for every device (even if they are not used). [23]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We first make a note of the type we started with.","code":["var originalType = tensor.itemType()"]}, + + {"kind":"when","text":"","code":["when : \"\"\""," We start off by storing the provided tensor on the provided device."," This might be any kind of device like for example an $OpenCLDevice."," Which means the tensor might not be sitting in RAM!"," \"\"\"","tensor.to(device)"]}, + + {"kind":"then","text":"After the tensor is stored on the device, we expect it to be still of the original type.","code":["tensor.itemType == originalType"]}, + + {"kind":"when","text":"\n We call the mapping method which is supposed to create a new tensor of the provided type.\n This procedure is only supported when the tensor is stored in RAM, so when\n the tensor is outsourced (stored on a device), then we expect that the mapping method\n temporarily migrates the tensor back and forth internally...\n ","code":["Tensor result = tensor.mapTo(target, lambda)"]}, + + {"kind":"then","text":"We expect the String representation of the tensor to be as expected!","code":["result.toString() == expected"]}, + + {"kind":"and","text":"We expect the result to have the expected target class!","code":["result.itemType == target"]}, + + {"kind":"and","text":"Lastly, the original tensor used as mapping source should be stored on the original device!","code":["tensor.isOutsourced() == !(device instanceof CPU)","tensor.device == device"]}, + + {"kind":"where","text":"We use the following data to test this mapping for a wide range of types and values!","code":{"tensor":["Tensor.of(3.5)","Tensor.of(3.5)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofBytes().scalar(2.7)","Tensor.ofBytes().scalar(2.7)","Tensor.ofInts().scalar(6.1f)","Tensor.ofInts().scalar(6.1f)","Tensor.of( 3.0 )","Tensor.of(-1.0 )","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.of(3.0 )","Tensor.of(-1.0)","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f 
)","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)"],"device":["CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()"],"target":["String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.
class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class"],"lambda":["{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}"],"expected":["'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3]'","'(1):[~3]'","'(1):[~2]'","'(1):[~2]'","'(1):[~6]'","'(1):[~6]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Mapping tensors works for every device (even if they are not used). 
[24]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We first make a note of the type we started with.","code":["var originalType = tensor.itemType()"]}, + + {"kind":"when","text":"","code":["when : \"\"\""," We start off by storing the provided tensor on the provided device."," This might be any kind of device like for example an $OpenCLDevice."," Which means the tensor might not be sitting in RAM!"," \"\"\"","tensor.to(device)"]}, + + {"kind":"then","text":"After the tensor is stored on the device, we expect it to be still of the original type.","code":["tensor.itemType == originalType"]}, + + {"kind":"when","text":"\n We call the mapping method which is supposed to create a new tensor of the provided type.\n This procedure is only supported when the tensor is stored in RAM, so when\n the tensor is outsourced (stored on a device), then we expect that the mapping method\n temporarily migrates the tensor back and forth internally...\n ","code":["Tensor result = tensor.mapTo(target, lambda)"]}, + + {"kind":"then","text":"We expect the String representation of the tensor to be as expected!","code":["result.toString() == expected"]}, + + {"kind":"and","text":"We expect the result to have the expected target class!","code":["result.itemType == target"]}, + + {"kind":"and","text":"Lastly, the original tensor used as mapping source should be stored on the original device!","code":["tensor.isOutsourced() == !(device instanceof CPU)","tensor.device == device"]}, + + {"kind":"where","text":"We use the following data to test this mapping for a wide range of types and values!","code":{"tensor":["Tensor.of(3.5)","Tensor.of(3.5)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofBytes().scalar(2.7)","Tensor.ofBytes().scalar(2.7)","Tensor.ofInts().scalar(6.1f)","Tensor.ofInts().scalar(6.1f)","Tensor.of( 3.0 )","Tensor.of(-1.0 )","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.of(3.0 )","Tensor.of(-1.0)","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)","Tensor.ofBytes().scalar( 3 
)","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)"],"device":["CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()"],"target":["String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class"],"lambda":["{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}"],"expected":["'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3]'","'(1):[~3]'","'(1):[~2]'","'(1):[~2]'","'(1):[~6]'","'(1):[~6]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5
]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Mapping tensors works for every device (even if they are not used). [25]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We first make a note of the type we started with.","code":["var originalType = tensor.itemType()"]}, + + {"kind":"when","text":"","code":["when : \"\"\""," We start off by storing the provided tensor on the provided device."," This might be any kind of device like for example an $OpenCLDevice."," Which means the tensor might not be sitting in RAM!"," \"\"\"","tensor.to(device)"]}, + + {"kind":"then","text":"After the tensor is stored on the device, we expect it to be still of the original type.","code":["tensor.itemType == originalType"]}, + + {"kind":"when","text":"\n We call the mapping method which is supposed to create a new tensor of the provided type.\n This procedure is only supported when the tensor is stored in RAM, so when\n the tensor is outsourced (stored on a device), then we expect that the mapping method\n temporarily migrates the tensor back and forth internally...\n ","code":["Tensor result = tensor.mapTo(target, lambda)"]}, + + {"kind":"then","text":"We expect the String representation of the tensor to be as expected!","code":["result.toString() == expected"]}, + + {"kind":"and","text":"We expect the result to have the expected target class!","code":["result.itemType == target"]}, + + {"kind":"and","text":"Lastly, the original tensor used as mapping source should be stored on the original device!","code":["tensor.isOutsourced() == !(device instanceof CPU)","tensor.device == device"]}, + + {"kind":"where","text":"We use the following data to test this mapping for a wide range of types and values!","code":{"tensor":["Tensor.of(3.5)","Tensor.of(3.5)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofBytes().scalar(2.7)","Tensor.ofBytes().scalar(2.7)","Tensor.ofInts().scalar(6.1f)","Tensor.ofInts().scalar(6.1f)","Tensor.of( 3.0 )","Tensor.of(-1.0 )","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.of(3.0 )","Tensor.of(-1.0)","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 
90)","Tensor.ofInts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)"],"device":["CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()"],"target":["String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class"],"lambda":["{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}",
"{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}"],"expected":["'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3]'","'(1):[~3]'","'(1):[~2]'","'(1):[~2]'","'(1):[~6]'","'(1):[~6]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Mapping tensors works for every device (even if they are not used). [26]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We first make a note of the type we started with.","code":["var originalType = tensor.itemType()"]}, + + {"kind":"when","text":"","code":["when : \"\"\""," We start off by storing the provided tensor on the provided device."," This might be any kind of device like for example an $OpenCLDevice."," Which means the tensor might not be sitting in RAM!"," \"\"\"","tensor.to(device)"]}, + + {"kind":"then","text":"After the tensor is stored on the device, we expect it to be still of the original type.","code":["tensor.itemType == originalType"]}, + + {"kind":"when","text":"\n We call the mapping method which is supposed to create a new tensor of the provided type.\n This procedure is only supported when the tensor is stored in RAM, so when\n the tensor is outsourced (stored on a device), then we expect that the mapping method\n temporarily migrates the tensor back and forth internally...\n ","code":["Tensor result = tensor.mapTo(target, lambda)"]}, + + {"kind":"then","text":"We expect the String representation of the tensor to be as expected!","code":["result.toString() == expected"]}, + + {"kind":"and","text":"We expect the result to have the expected target class!","code":["result.itemType == target"]}, + + {"kind":"and","text":"Lastly, the original tensor used as mapping source should be stored on the original device!","code":["tensor.isOutsourced() == !(device instanceof CPU)","tensor.device == device"]}, + + {"kind":"where","text":"We use the following data to test this mapping for a wide range of types and values!","code":{"tensor":["Tensor.of(3.5)","Tensor.of(3.5)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofBytes().scalar(2.7)","Tensor.ofBytes().scalar(2.7)","Tensor.ofInts().scalar(6.1f)","Tensor.ofInts().scalar(6.1f)","Tensor.of( 3.0 )","Tensor.of(-1.0 )","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.of(3.0 )","Tensor.of(-1.0)","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f 
)","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)"],"device":["CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()"],"target":["String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.
class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class"],"lambda":["{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}"],"expected":["'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3]'","'(1):[~3]'","'(1):[~2]'","'(1):[~2]'","'(1):[~6]'","'(1):[~6]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Mapping tensors works for every device (even if they are not used). 
[27]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We first make a note of the type we started with.","code":["var originalType = tensor.itemType()"]}, + + {"kind":"when","text":"","code":["when : \"\"\""," We start off by storing the provided tensor on the provided device."," This might be any kind of device like for example an $OpenCLDevice."," Which means the tensor might not be sitting in RAM!"," \"\"\"","tensor.to(device)"]}, + + {"kind":"then","text":"After the tensor is stored on the device, we expect it to be still of the original type.","code":["tensor.itemType == originalType"]}, + + {"kind":"when","text":"\n We call the mapping method which is supposed to create a new tensor of the provided type.\n This procedure is only supported when the tensor is stored in RAM, so when\n the tensor is outsourced (stored on a device), then we expect that the mapping method\n temporarily migrates the tensor back and forth internally...\n ","code":["Tensor result = tensor.mapTo(target, lambda)"]}, + + {"kind":"then","text":"We expect the String representation of the tensor to be as expected!","code":["result.toString() == expected"]}, + + {"kind":"and","text":"We expect the result to have the expected target class!","code":["result.itemType == target"]}, + + {"kind":"and","text":"Lastly, the original tensor used as mapping source should be stored on the original device!","code":["tensor.isOutsourced() == !(device instanceof CPU)","tensor.device == device"]}, + + {"kind":"where","text":"We use the following data to test this mapping for a wide range of types and values!","code":{"tensor":["Tensor.of(3.5)","Tensor.of(3.5)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofBytes().scalar(2.7)","Tensor.ofBytes().scalar(2.7)","Tensor.ofInts().scalar(6.1f)","Tensor.ofInts().scalar(6.1f)","Tensor.of( 3.0 )","Tensor.of(-1.0 )","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.of(3.0 )","Tensor.of(-1.0)","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)","Tensor.ofBytes().scalar( 3 
)","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)"],"device":["CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()"],"target":["String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class"],"lambda":["{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}"],"expected":["'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3]'","'(1):[~3]'","'(1):[~2]'","'(1):[~2]'","'(1):[~6]'","'(1):[~6]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5
]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Mapping tensors works for every device (even if they are not used). [28]", + "result":"PASS", + "duration":"0.002 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We first make a note of the type we started with.","code":["var originalType = tensor.itemType()"]}, + + {"kind":"when","text":"","code":["when : \"\"\""," We start off by storing the provided tensor on the provided device."," This might be any kind of device like for example an $OpenCLDevice."," Which means the tensor might not be sitting in RAM!"," \"\"\"","tensor.to(device)"]}, + + {"kind":"then","text":"After the tensor is stored on the device, we expect it to be still of the original type.","code":["tensor.itemType == originalType"]}, + + {"kind":"when","text":"\n We call the mapping method which is supposed to create a new tensor of the provided type.\n This procedure is only supported when the tensor is stored in RAM, so when\n the tensor is outsourced (stored on a device), then we expect that the mapping method\n temporarily migrates the tensor back and forth internally...\n ","code":["Tensor result = tensor.mapTo(target, lambda)"]}, + + {"kind":"then","text":"We expect the String representation of the tensor to be as expected!","code":["result.toString() == expected"]}, + + {"kind":"and","text":"We expect the result to have the expected target class!","code":["result.itemType == target"]}, + + {"kind":"and","text":"Lastly, the original tensor used as mapping source should be stored on the original device!","code":["tensor.isOutsourced() == !(device instanceof CPU)","tensor.device == device"]}, + + {"kind":"where","text":"We use the following data to test this mapping for a wide range of types and values!","code":{"tensor":["Tensor.of(3.5)","Tensor.of(3.5)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofBytes().scalar(2.7)","Tensor.ofBytes().scalar(2.7)","Tensor.ofInts().scalar(6.1f)","Tensor.ofInts().scalar(6.1f)","Tensor.of( 3.0 )","Tensor.of(-1.0 )","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.of(3.0 )","Tensor.of(-1.0)","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 
90)","Tensor.ofInts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)"],"device":["CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()"],"target":["String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class"],"lambda":["{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}",
"{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}"],"expected":["'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3]'","'(1):[~3]'","'(1):[~2]'","'(1):[~2]'","'(1):[~6]'","'(1):[~6]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Mapping tensors works for every device (even if they are not used). [29]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We first make a note of the type we started with.","code":["var originalType = tensor.itemType()"]}, + + {"kind":"when","text":"","code":["when : \"\"\""," We start off by storing the provided tensor on the provided device."," This might be any kind of device like for example an $OpenCLDevice."," Which means the tensor might not be sitting in RAM!"," \"\"\"","tensor.to(device)"]}, + + {"kind":"then","text":"After the tensor is stored on the device, we expect it to be still of the original type.","code":["tensor.itemType == originalType"]}, + + {"kind":"when","text":"\n We call the mapping method which is supposed to create a new tensor of the provided type.\n This procedure is only supported when the tensor is stored in RAM, so when\n the tensor is outsourced (stored on a device), then we expect that the mapping method\n temporarily migrates the tensor back and forth internally...\n ","code":["Tensor result = tensor.mapTo(target, lambda)"]}, + + {"kind":"then","text":"We expect the String representation of the tensor to be as expected!","code":["result.toString() == expected"]}, + + {"kind":"and","text":"We expect the result to have the expected target class!","code":["result.itemType == target"]}, + + {"kind":"and","text":"Lastly, the original tensor used as mapping source should be stored on the original device!","code":["tensor.isOutsourced() == !(device instanceof CPU)","tensor.device == device"]}, + + {"kind":"where","text":"We use the following data to test this mapping for a wide range of types and values!","code":{"tensor":["Tensor.of(3.5)","Tensor.of(3.5)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofBytes().scalar(2.7)","Tensor.ofBytes().scalar(2.7)","Tensor.ofInts().scalar(6.1f)","Tensor.ofInts().scalar(6.1f)","Tensor.of( 3.0 )","Tensor.of(-1.0 )","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.of(3.0 )","Tensor.of(-1.0)","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f 
)","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)"],"device":["CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()"],"target":["String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.
class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class"],"lambda":["{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}"],"expected":["'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3]'","'(1):[~3]'","'(1):[~2]'","'(1):[~2]'","'(1):[~6]'","'(1):[~6]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Mapping tensors works for every device (even if they are not used). 
[30]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We first make a note of the type we started with.","code":["var originalType = tensor.itemType()"]}, + + {"kind":"when","text":"","code":["when : \"\"\""," We start off by storing the provided tensor on the provided device."," This might be any kind of device like for example an $OpenCLDevice."," Which means the tensor might not be sitting in RAM!"," \"\"\"","tensor.to(device)"]}, + + {"kind":"then","text":"After the tensor is stored on the device, we expect it to be still of the original type.","code":["tensor.itemType == originalType"]}, + + {"kind":"when","text":"\n We call the mapping method which is supposed to create a new tensor of the provided type.\n This procedure is only supported when the tensor is stored in RAM, so when\n the tensor is outsourced (stored on a device), then we expect that the mapping method\n temporarily migrates the tensor back and forth internally...\n ","code":["Tensor result = tensor.mapTo(target, lambda)"]}, + + {"kind":"then","text":"We expect the String representation of the tensor to be as expected!","code":["result.toString() == expected"]}, + + {"kind":"and","text":"We expect the result to have the expected target class!","code":["result.itemType == target"]}, + + {"kind":"and","text":"Lastly, the original tensor used as mapping source should be stored on the original device!","code":["tensor.isOutsourced() == !(device instanceof CPU)","tensor.device == device"]}, + + {"kind":"where","text":"We use the following data to test this mapping for a wide range of types and values!","code":{"tensor":["Tensor.of(3.5)","Tensor.of(3.5)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofBytes().scalar(2.7)","Tensor.ofBytes().scalar(2.7)","Tensor.ofInts().scalar(6.1f)","Tensor.ofInts().scalar(6.1f)","Tensor.of( 3.0 )","Tensor.of(-1.0 )","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.of(3.0 )","Tensor.of(-1.0)","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)","Tensor.ofBytes().scalar( 3 
)","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)"],"device":["CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()"],"target":["String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class"],"lambda":["{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}"],"expected":["'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3]'","'(1):[~3]'","'(1):[~2]'","'(1):[~2]'","'(1):[~6]'","'(1):[~6]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5
]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Mapping tensors works for every device (even if they are not used). [31]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We first make a note of the type we started with.","code":["var originalType = tensor.itemType()"]}, + + {"kind":"when","text":"","code":["when : \"\"\""," We start off by storing the provided tensor on the provided device."," This might be any kind of device like for example an $OpenCLDevice."," Which means the tensor might not be sitting in RAM!"," \"\"\"","tensor.to(device)"]}, + + {"kind":"then","text":"After the tensor is stored on the device, we expect it to be still of the original type.","code":["tensor.itemType == originalType"]}, + + {"kind":"when","text":"\n We call the mapping method which is supposed to create a new tensor of the provided type.\n This procedure is only supported when the tensor is stored in RAM, so when\n the tensor is outsourced (stored on a device), then we expect that the mapping method\n temporarily migrates the tensor back and forth internally...\n ","code":["Tensor result = tensor.mapTo(target, lambda)"]}, + + {"kind":"then","text":"We expect the String representation of the tensor to be as expected!","code":["result.toString() == expected"]}, + + {"kind":"and","text":"We expect the result to have the expected target class!","code":["result.itemType == target"]}, + + {"kind":"and","text":"Lastly, the original tensor used as mapping source should be stored on the original device!","code":["tensor.isOutsourced() == !(device instanceof CPU)","tensor.device == device"]}, + + {"kind":"where","text":"We use the following data to test this mapping for a wide range of types and values!","code":{"tensor":["Tensor.of(3.5)","Tensor.of(3.5)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofBytes().scalar(2.7)","Tensor.ofBytes().scalar(2.7)","Tensor.ofInts().scalar(6.1f)","Tensor.ofInts().scalar(6.1f)","Tensor.of( 3.0 )","Tensor.of(-1.0 )","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.of(3.0 )","Tensor.of(-1.0)","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 
90)","Tensor.ofInts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)"],"device":["CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()"],"target":["String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class"],"lambda":["{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}",
"{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}"],"expected":["'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3]'","'(1):[~3]'","'(1):[~2]'","'(1):[~2]'","'(1):[~6]'","'(1):[~6]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Mapping tensors works for every device (even if they are not used). [32]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We first make a note of the type we started with.","code":["var originalType = tensor.itemType()"]}, + + {"kind":"when","text":"","code":["when : \"\"\""," We start off by storing the provided tensor on the provided device."," This might be any kind of device like for example an $OpenCLDevice."," Which means the tensor might not be sitting in RAM!"," \"\"\"","tensor.to(device)"]}, + + {"kind":"then","text":"After the tensor is stored on the device, we expect it to be still of the original type.","code":["tensor.itemType == originalType"]}, + + {"kind":"when","text":"\n We call the mapping method which is supposed to create a new tensor of the provided type.\n This procedure is only supported when the tensor is stored in RAM, so when\n the tensor is outsourced (stored on a device), then we expect that the mapping method\n temporarily migrates the tensor back and forth internally...\n ","code":["Tensor result = tensor.mapTo(target, lambda)"]}, + + {"kind":"then","text":"We expect the String representation of the tensor to be as expected!","code":["result.toString() == expected"]}, + + {"kind":"and","text":"We expect the result to have the expected target class!","code":["result.itemType == target"]}, + + {"kind":"and","text":"Lastly, the original tensor used as mapping source should be stored on the original device!","code":["tensor.isOutsourced() == !(device instanceof CPU)","tensor.device == device"]}, + + {"kind":"where","text":"We use the following data to test this mapping for a wide range of types and values!","code":{"tensor":["Tensor.of(3.5)","Tensor.of(3.5)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofBytes().scalar(2.7)","Tensor.ofBytes().scalar(2.7)","Tensor.ofInts().scalar(6.1f)","Tensor.ofInts().scalar(6.1f)","Tensor.of( 3.0 )","Tensor.of(-1.0 )","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.of(3.0 )","Tensor.of(-1.0)","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f 
)","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)"],"device":["CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()"],"target":["String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.
class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class"],"lambda":["{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}"],"expected":["'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3]'","'(1):[~3]'","'(1):[~2]'","'(1):[~2]'","'(1):[~6]'","'(1):[~6]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Mapping tensors works for every device (even if they are not used). 
[33]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We first make a note of the type we started with.","code":["var originalType = tensor.itemType()"]}, + + {"kind":"when","text":"","code":["when : \"\"\""," We start off by storing the provided tensor on the provided device."," This might be any kind of device like for example an $OpenCLDevice."," Which means the tensor might not be sitting in RAM!"," \"\"\"","tensor.to(device)"]}, + + {"kind":"then","text":"After the tensor is stored on the device, we expect it to be still of the original type.","code":["tensor.itemType == originalType"]}, + + {"kind":"when","text":"\n We call the mapping method which is supposed to create a new tensor of the provided type.\n This procedure is only supported when the tensor is stored in RAM, so when\n the tensor is outsourced (stored on a device), then we expect that the mapping method\n temporarily migrates the tensor back and forth internally...\n ","code":["Tensor result = tensor.mapTo(target, lambda)"]}, + + {"kind":"then","text":"We expect the String representation of the tensor to be as expected!","code":["result.toString() == expected"]}, + + {"kind":"and","text":"We expect the result to have the expected target class!","code":["result.itemType == target"]}, + + {"kind":"and","text":"Lastly, the original tensor used as mapping source should be stored on the original device!","code":["tensor.isOutsourced() == !(device instanceof CPU)","tensor.device == device"]}, + + {"kind":"where","text":"We use the following data to test this mapping for a wide range of types and values!","code":{"tensor":["Tensor.of(3.5)","Tensor.of(3.5)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofBytes().scalar(2.7)","Tensor.ofBytes().scalar(2.7)","Tensor.ofInts().scalar(6.1f)","Tensor.ofInts().scalar(6.1f)","Tensor.of( 3.0 )","Tensor.of(-1.0 )","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.of(3.0 )","Tensor.of(-1.0)","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)","Tensor.ofBytes().scalar( 3 
)","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)"],"device":["CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()"],"target":["String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class"],"lambda":["{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}"],"expected":["'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3]'","'(1):[~3]'","'(1):[~2]'","'(1):[~2]'","'(1):[~6]'","'(1):[~6]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5
]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Mapping tensors works for every device (even if they are not used). [34]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We first make a note of the type we started with.","code":["var originalType = tensor.itemType()"]}, + + {"kind":"when","text":"","code":["when : \"\"\""," We start off by storing the provided tensor on the provided device."," This might be any kind of device like for example an $OpenCLDevice."," Which means the tensor might not be sitting in RAM!"," \"\"\"","tensor.to(device)"]}, + + {"kind":"then","text":"After the tensor is stored on the device, we expect it to be still of the original type.","code":["tensor.itemType == originalType"]}, + + {"kind":"when","text":"\n We call the mapping method which is supposed to create a new tensor of the provided type.\n This procedure is only supported when the tensor is stored in RAM, so when\n the tensor is outsourced (stored on a device), then we expect that the mapping method\n temporarily migrates the tensor back and forth internally...\n ","code":["Tensor result = tensor.mapTo(target, lambda)"]}, + + {"kind":"then","text":"We expect the String representation of the tensor to be as expected!","code":["result.toString() == expected"]}, + + {"kind":"and","text":"We expect the result to have the expected target class!","code":["result.itemType == target"]}, + + {"kind":"and","text":"Lastly, the original tensor used as mapping source should be stored on the original device!","code":["tensor.isOutsourced() == !(device instanceof CPU)","tensor.device == device"]}, + + {"kind":"where","text":"We use the following data to test this mapping for a wide range of types and values!","code":{"tensor":["Tensor.of(3.5)","Tensor.of(3.5)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofBytes().scalar(2.7)","Tensor.ofBytes().scalar(2.7)","Tensor.ofInts().scalar(6.1f)","Tensor.ofInts().scalar(6.1f)","Tensor.of( 3.0 )","Tensor.of(-1.0 )","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.of(3.0 )","Tensor.of(-1.0)","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 
90)","Tensor.ofInts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)"],"device":["CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()"],"target":["String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class"],"lambda":["{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}",
"{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}"],"expected":["'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3]'","'(1):[~3]'","'(1):[~2]'","'(1):[~2]'","'(1):[~6]'","'(1):[~6]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Mapping tensors works for every device (even if they are not used). [35]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We first make a note of the type we started with.","code":["var originalType = tensor.itemType()"]}, + + {"kind":"when","text":"","code":["when : \"\"\""," We start off by storing the provided tensor on the provided device."," This might be any kind of device like for example an $OpenCLDevice."," Which means the tensor might not be sitting in RAM!"," \"\"\"","tensor.to(device)"]}, + + {"kind":"then","text":"After the tensor is stored on the device, we expect it to be still of the original type.","code":["tensor.itemType == originalType"]}, + + {"kind":"when","text":"\n We call the mapping method which is supposed to create a new tensor of the provided type.\n This procedure is only supported when the tensor is stored in RAM, so when\n the tensor is outsourced (stored on a device), then we expect that the mapping method\n temporarily migrates the tensor back and forth internally...\n ","code":["Tensor result = tensor.mapTo(target, lambda)"]}, + + {"kind":"then","text":"We expect the String representation of the tensor to be as expected!","code":["result.toString() == expected"]}, + + {"kind":"and","text":"We expect the result to have the expected target class!","code":["result.itemType == target"]}, + + {"kind":"and","text":"Lastly, the original tensor used as mapping source should be stored on the original device!","code":["tensor.isOutsourced() == !(device instanceof CPU)","tensor.device == device"]}, + + {"kind":"where","text":"We use the following data to test this mapping for a wide range of types and values!","code":{"tensor":["Tensor.of(3.5)","Tensor.of(3.5)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofBytes().scalar(2.7)","Tensor.ofBytes().scalar(2.7)","Tensor.ofInts().scalar(6.1f)","Tensor.ofInts().scalar(6.1f)","Tensor.of( 3.0 )","Tensor.of(-1.0 )","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.of(3.0 )","Tensor.of(-1.0)","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f 
)","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)"],"device":["CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()"],"target":["String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.
class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class"],"lambda":["{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}"],"expected":["'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3]'","'(1):[~3]'","'(1):[~2]'","'(1):[~2]'","'(1):[~6]'","'(1):[~6]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Mapping tensors works for every device (even if they are not used). 
[36]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We first make a note of the type we started with.","code":["var originalType = tensor.itemType()"]}, + + {"kind":"when","text":"","code":["when : \"\"\""," We start off by storing the provided tensor on the provided device."," This might be any kind of device like for example an $OpenCLDevice."," Which means the tensor might not be sitting in RAM!"," \"\"\"","tensor.to(device)"]}, + + {"kind":"then","text":"After the tensor is stored on the device, we expect it to be still of the original type.","code":["tensor.itemType == originalType"]}, + + {"kind":"when","text":"\n We call the mapping method which is supposed to create a new tensor of the provided type.\n This procedure is only supported when the tensor is stored in RAM, so when\n the tensor is outsourced (stored on a device), then we expect that the mapping method\n temporarily migrates the tensor back and forth internally...\n ","code":["Tensor result = tensor.mapTo(target, lambda)"]}, + + {"kind":"then","text":"We expect the String representation of the tensor to be as expected!","code":["result.toString() == expected"]}, + + {"kind":"and","text":"We expect the result to have the expected target class!","code":["result.itemType == target"]}, + + {"kind":"and","text":"Lastly, the original tensor used as mapping source should be stored on the original device!","code":["tensor.isOutsourced() == !(device instanceof CPU)","tensor.device == device"]}, + + {"kind":"where","text":"We use the following data to test this mapping for a wide range of types and values!","code":{"tensor":["Tensor.of(3.5)","Tensor.of(3.5)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofBytes().scalar(2.7)","Tensor.ofBytes().scalar(2.7)","Tensor.ofInts().scalar(6.1f)","Tensor.ofInts().scalar(6.1f)","Tensor.of( 3.0 )","Tensor.of(-1.0 )","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.of(3.0 )","Tensor.of(-1.0)","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)","Tensor.ofBytes().scalar( 3 
)","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)"],"device":["CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()"],"target":["String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class"],"lambda":["{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}"],"expected":["'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3]'","'(1):[~3]'","'(1):[~2]'","'(1):[~2]'","'(1):[~6]'","'(1):[~6]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5
]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Mapping tensors works for every device (even if they are not used). [37]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We first make a note of the type we started with.","code":["var originalType = tensor.itemType()"]}, + + {"kind":"when","text":"","code":["when : \"\"\""," We start off by storing the provided tensor on the provided device."," This might be any kind of device like for example an $OpenCLDevice."," Which means the tensor might not be sitting in RAM!"," \"\"\"","tensor.to(device)"]}, + + {"kind":"then","text":"After the tensor is stored on the device, we expect it to be still of the original type.","code":["tensor.itemType == originalType"]}, + + {"kind":"when","text":"\n We call the mapping method which is supposed to create a new tensor of the provided type.\n This procedure is only supported when the tensor is stored in RAM, so when\n the tensor is outsourced (stored on a device), then we expect that the mapping method\n temporarily migrates the tensor back and forth internally...\n ","code":["Tensor result = tensor.mapTo(target, lambda)"]}, + + {"kind":"then","text":"We expect the String representation of the tensor to be as expected!","code":["result.toString() == expected"]}, + + {"kind":"and","text":"We expect the result to have the expected target class!","code":["result.itemType == target"]}, + + {"kind":"and","text":"Lastly, the original tensor used as mapping source should be stored on the original device!","code":["tensor.isOutsourced() == !(device instanceof CPU)","tensor.device == device"]}, + + {"kind":"where","text":"We use the following data to test this mapping for a wide range of types and values!","code":{"tensor":["Tensor.of(3.5)","Tensor.of(3.5)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofBytes().scalar(2.7)","Tensor.ofBytes().scalar(2.7)","Tensor.ofInts().scalar(6.1f)","Tensor.ofInts().scalar(6.1f)","Tensor.of( 3.0 )","Tensor.of(-1.0 )","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.of(3.0 )","Tensor.of(-1.0)","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 
90)","Tensor.ofInts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)"],"device":["CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()"],"target":["String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class"],"lambda":["{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}",
"{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}"],"expected":["'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3]'","'(1):[~3]'","'(1):[~2]'","'(1):[~2]'","'(1):[~6]'","'(1):[~6]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Mapping tensors works for every device (even if they are not used). [38]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We first make a note of the type we started with.","code":["var originalType = tensor.itemType()"]}, + + {"kind":"when","text":"","code":["when : \"\"\""," We start off by storing the provided tensor on the provided device."," This might be any kind of device like for example an $OpenCLDevice."," Which means the tensor might not be sitting in RAM!"," \"\"\"","tensor.to(device)"]}, + + {"kind":"then","text":"After the tensor is stored on the device, we expect it to be still of the original type.","code":["tensor.itemType == originalType"]}, + + {"kind":"when","text":"\n We call the mapping method which is supposed to create a new tensor of the provided type.\n This procedure is only supported when the tensor is stored in RAM, so when\n the tensor is outsourced (stored on a device), then we expect that the mapping method\n temporarily migrates the tensor back and forth internally...\n ","code":["Tensor result = tensor.mapTo(target, lambda)"]}, + + {"kind":"then","text":"We expect the String representation of the tensor to be as expected!","code":["result.toString() == expected"]}, + + {"kind":"and","text":"We expect the result to have the expected target class!","code":["result.itemType == target"]}, + + {"kind":"and","text":"Lastly, the original tensor used as mapping source should be stored on the original device!","code":["tensor.isOutsourced() == !(device instanceof CPU)","tensor.device == device"]}, + + {"kind":"where","text":"We use the following data to test this mapping for a wide range of types and values!","code":{"tensor":["Tensor.of(3.5)","Tensor.of(3.5)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofBytes().scalar(2.7)","Tensor.ofBytes().scalar(2.7)","Tensor.ofInts().scalar(6.1f)","Tensor.ofInts().scalar(6.1f)","Tensor.of( 3.0 )","Tensor.of(-1.0 )","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.of(3.0 )","Tensor.of(-1.0)","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f 
)","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)"],"device":["CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()"],"target":["String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.
class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class"],"lambda":["{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}"],"expected":["'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3]'","'(1):[~3]'","'(1):[~2]'","'(1):[~2]'","'(1):[~6]'","'(1):[~6]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Mapping tensors works for every device (even if they are not used). 
[39]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We first make a note of the type we started with.","code":["var originalType = tensor.itemType()"]}, + + {"kind":"when","text":"","code":["when : \"\"\""," We start off by storing the provided tensor on the provided device."," This might be any kind of device like for example an $OpenCLDevice."," Which means the tensor might not be sitting in RAM!"," \"\"\"","tensor.to(device)"]}, + + {"kind":"then","text":"After the tensor is stored on the device, we expect it to be still of the original type.","code":["tensor.itemType == originalType"]}, + + {"kind":"when","text":"\n We call the mapping method which is supposed to create a new tensor of the provided type.\n This procedure is only supported when the tensor is stored in RAM, so when\n the tensor is outsourced (stored on a device), then we expect that the mapping method\n temporarily migrates the tensor back and forth internally...\n ","code":["Tensor result = tensor.mapTo(target, lambda)"]}, + + {"kind":"then","text":"We expect the String representation of the tensor to be as expected!","code":["result.toString() == expected"]}, + + {"kind":"and","text":"We expect the result to have the expected target class!","code":["result.itemType == target"]}, + + {"kind":"and","text":"Lastly, the original tensor used as mapping source should be stored on the original device!","code":["tensor.isOutsourced() == !(device instanceof CPU)","tensor.device == device"]}, + + {"kind":"where","text":"We use the following data to test this mapping for a wide range of types and values!","code":{"tensor":["Tensor.of(3.5)","Tensor.of(3.5)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofBytes().scalar(2.7)","Tensor.ofBytes().scalar(2.7)","Tensor.ofInts().scalar(6.1f)","Tensor.ofInts().scalar(6.1f)","Tensor.of( 3.0 )","Tensor.of(-1.0 )","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.of(3.0 )","Tensor.of(-1.0)","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 
37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)"],"device":["CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()"],"target":["String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class"],"lambda":["{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}"],"expected":["'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3]'","'(1):[~3]'","'(1):[~2]'","'(1):[~2]'","'(1):[~6]'","'(1):[~6]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1
):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Mapping tensors works for every device (even if they are not used). [40]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We first make a note of the type we started with.","code":["var originalType = tensor.itemType()"]}, + + {"kind":"when","text":"","code":["when : \"\"\""," We start off by storing the provided tensor on the provided device."," This might be any kind of device like for example an $OpenCLDevice."," Which means the tensor might not be sitting in RAM!"," \"\"\"","tensor.to(device)"]}, + + {"kind":"then","text":"After the tensor is stored on the device, we expect it to be still of the original type.","code":["tensor.itemType == originalType"]}, + + {"kind":"when","text":"\n We call the mapping method which is supposed to create a new tensor of the provided type.\n This procedure is only supported when the tensor is stored in RAM, so when\n the tensor is outsourced (stored on a device), then we expect that the mapping method\n temporarily migrates the tensor back and forth internally...\n ","code":["Tensor result = tensor.mapTo(target, lambda)"]}, + + {"kind":"then","text":"We expect the String representation of the tensor to be as expected!","code":["result.toString() == expected"]}, + + {"kind":"and","text":"We expect the result to have the expected target class!","code":["result.itemType == target"]}, + + {"kind":"and","text":"Lastly, the original tensor used as mapping source should be stored on the original device!","code":["tensor.isOutsourced() == !(device instanceof CPU)","tensor.device == device"]}, + + {"kind":"where","text":"We use the following data to test this mapping for a wide range of types and values!","code":{"tensor":["Tensor.of(3.5)","Tensor.of(3.5)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofBytes().scalar(2.7)","Tensor.ofBytes().scalar(2.7)","Tensor.ofInts().scalar(6.1f)","Tensor.ofInts().scalar(6.1f)","Tensor.of( 3.0 )","Tensor.of(-1.0 )","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.of(3.0 )","Tensor.of(-1.0)","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 
70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)"],"device":["CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()"],"target":["String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class"],"lambda":["{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}
","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}"],"expected":["'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3]'","'(1):[~3]'","'(1):[~2]'","'(1):[~2]'","'(1):[~6]'","'(1):[~6]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Mapping tensors works for every device (even if they are not used). [41]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We first make a note of the type we started with.","code":["var originalType = tensor.itemType()"]}, + + {"kind":"when","text":"","code":["when : \"\"\""," We start off by storing the provided tensor on the provided device."," This might be any kind of device like for example an $OpenCLDevice."," Which means the tensor might not be sitting in RAM!"," \"\"\"","tensor.to(device)"]}, + + {"kind":"then","text":"After the tensor is stored on the device, we expect it to be still of the original type.","code":["tensor.itemType == originalType"]}, + + {"kind":"when","text":"\n We call the mapping method which is supposed to create a new tensor of the provided type.\n This procedure is only supported when the tensor is stored in RAM, so when\n the tensor is outsourced (stored on a device), then we expect that the mapping method\n temporarily migrates the tensor back and forth internally...\n ","code":["Tensor result = tensor.mapTo(target, lambda)"]}, + + {"kind":"then","text":"We expect the String representation of the tensor to be as expected!","code":["result.toString() == expected"]}, + + {"kind":"and","text":"We expect the result to have the expected target class!","code":["result.itemType == target"]}, + + {"kind":"and","text":"Lastly, the original tensor used as mapping source should be stored on the original device!","code":["tensor.isOutsourced() == !(device instanceof CPU)","tensor.device == device"]}, + + {"kind":"where","text":"We use the following data to test this mapping for a wide range of types and values!","code":{"tensor":["Tensor.of(3.5)","Tensor.of(3.5)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofBytes().scalar(2.7)","Tensor.ofBytes().scalar(2.7)","Tensor.ofInts().scalar(6.1f)","Tensor.ofInts().scalar(6.1f)","Tensor.of( 3.0 )","Tensor.of(-1.0 )","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.of(3.0 )","Tensor.of(-1.0)","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f 
)","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)"],"device":["CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()"],"target":["String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.
class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class"],"lambda":["{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}"],"expected":["'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3]'","'(1):[~3]'","'(1):[~2]'","'(1):[~2]'","'(1):[~6]'","'(1):[~6]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Mapping tensors works for every device (even if they are not used). 
[42]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We first make a note of the type we started with.","code":["var originalType = tensor.itemType()"]}, + + {"kind":"when","text":"","code":["when : \"\"\""," We start off by storing the provided tensor on the provided device."," This might be any kind of device like for example an $OpenCLDevice."," Which means the tensor might not be sitting in RAM!"," \"\"\"","tensor.to(device)"]}, + + {"kind":"then","text":"After the tensor is stored on the device, we expect it to be still of the original type.","code":["tensor.itemType == originalType"]}, + + {"kind":"when","text":"\n We call the mapping method which is supposed to create a new tensor of the provided type.\n This procedure is only supported when the tensor is stored in RAM, so when\n the tensor is outsourced (stored on a device), then we expect that the mapping method\n temporarily migrates the tensor back and forth internally...\n ","code":["Tensor result = tensor.mapTo(target, lambda)"]}, + + {"kind":"then","text":"We expect the String representation of the tensor to be as expected!","code":["result.toString() == expected"]}, + + {"kind":"and","text":"We expect the result to have the expected target class!","code":["result.itemType == target"]}, + + {"kind":"and","text":"Lastly, the original tensor used as mapping source should be stored on the original device!","code":["tensor.isOutsourced() == !(device instanceof CPU)","tensor.device == device"]}, + + {"kind":"where","text":"We use the following data to test this mapping for a wide range of types and values!","code":{"tensor":["Tensor.of(3.5)","Tensor.of(3.5)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofBytes().scalar(2.7)","Tensor.ofBytes().scalar(2.7)","Tensor.ofInts().scalar(6.1f)","Tensor.ofInts().scalar(6.1f)","Tensor.of( 3.0 )","Tensor.of(-1.0 )","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.of(3.0 )","Tensor.of(-1.0)","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)","Tensor.ofBytes().scalar( 3 
)","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)"],"device":["CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()"],"target":["String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class"],"lambda":["{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}"],"expected":["'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3]'","'(1):[~3]'","'(1):[~2]'","'(1):[~2]'","'(1):[~6]'","'(1):[~6]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5
]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Mapping tensors works for every device (even if they are not used). [43]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We first make a note of the type we started with.","code":["var originalType = tensor.itemType()"]}, + + {"kind":"when","text":"","code":["when : \"\"\""," We start off by storing the provided tensor on the provided device."," This might be any kind of device like for example an $OpenCLDevice."," Which means the tensor might not be sitting in RAM!"," \"\"\"","tensor.to(device)"]}, + + {"kind":"then","text":"After the tensor is stored on the device, we expect it to be still of the original type.","code":["tensor.itemType == originalType"]}, + + {"kind":"when","text":"\n We call the mapping method which is supposed to create a new tensor of the provided type.\n This procedure is only supported when the tensor is stored in RAM, so when\n the tensor is outsourced (stored on a device), then we expect that the mapping method\n temporarily migrates the tensor back and forth internally...\n ","code":["Tensor result = tensor.mapTo(target, lambda)"]}, + + {"kind":"then","text":"We expect the String representation of the tensor to be as expected!","code":["result.toString() == expected"]}, + + {"kind":"and","text":"We expect the result to have the expected target class!","code":["result.itemType == target"]}, + + {"kind":"and","text":"Lastly, the original tensor used as mapping source should be stored on the original device!","code":["tensor.isOutsourced() == !(device instanceof CPU)","tensor.device == device"]}, + + {"kind":"where","text":"We use the following data to test this mapping for a wide range of types and values!","code":{"tensor":["Tensor.of(3.5)","Tensor.of(3.5)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofBytes().scalar(2.7)","Tensor.ofBytes().scalar(2.7)","Tensor.ofInts().scalar(6.1f)","Tensor.ofInts().scalar(6.1f)","Tensor.of( 3.0 )","Tensor.of(-1.0 )","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.of(3.0 )","Tensor.of(-1.0)","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 
90)","Tensor.ofInts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)"],"device":["CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()"],"target":["String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class"],"lambda":["{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}",
"{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}"],"expected":["'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3]'","'(1):[~3]'","'(1):[~2]'","'(1):[~2]'","'(1):[~6]'","'(1):[~6]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Mapping tensors works for every device (even if they are not used). [44]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We first make a note of the type we started with.","code":["var originalType = tensor.itemType()"]}, + + {"kind":"when","text":"","code":["when : \"\"\""," We start off by storing the provided tensor on the provided device."," This might be any kind of device like for example an $OpenCLDevice."," Which means the tensor might not be sitting in RAM!"," \"\"\"","tensor.to(device)"]}, + + {"kind":"then","text":"After the tensor is stored on the device, we expect it to be still of the original type.","code":["tensor.itemType == originalType"]}, + + {"kind":"when","text":"\n We call the mapping method which is supposed to create a new tensor of the provided type.\n This procedure is only supported when the tensor is stored in RAM, so when\n the tensor is outsourced (stored on a device), then we expect that the mapping method\n temporarily migrates the tensor back and forth internally...\n ","code":["Tensor result = tensor.mapTo(target, lambda)"]}, + + {"kind":"then","text":"We expect the String representation of the tensor to be as expected!","code":["result.toString() == expected"]}, + + {"kind":"and","text":"We expect the result to have the expected target class!","code":["result.itemType == target"]}, + + {"kind":"and","text":"Lastly, the original tensor used as mapping source should be stored on the original device!","code":["tensor.isOutsourced() == !(device instanceof CPU)","tensor.device == device"]}, + + {"kind":"where","text":"We use the following data to test this mapping for a wide range of types and values!","code":{"tensor":["Tensor.of(3.5)","Tensor.of(3.5)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofBytes().scalar(2.7)","Tensor.ofBytes().scalar(2.7)","Tensor.ofInts().scalar(6.1f)","Tensor.ofInts().scalar(6.1f)","Tensor.of( 3.0 )","Tensor.of(-1.0 )","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.of(3.0 )","Tensor.of(-1.0)","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f 
)","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)"],"device":["CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()"],"target":["String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.
class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class"],"lambda":["{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}"],"expected":["'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3]'","'(1):[~3]'","'(1):[~2]'","'(1):[~2]'","'(1):[~6]'","'(1):[~6]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Mapping tensors works for every device (even if they are not used). 
[45]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We first make a note of the type we started with.","code":["var originalType = tensor.itemType()"]}, + + {"kind":"when","text":"","code":["when : \"\"\""," We start off by storing the provided tensor on the provided device."," This might be any kind of device like for example an $OpenCLDevice."," Which means the tensor might not be sitting in RAM!"," \"\"\"","tensor.to(device)"]}, + + {"kind":"then","text":"After the tensor is stored on the device, we expect it to be still of the original type.","code":["tensor.itemType == originalType"]}, + + {"kind":"when","text":"\n We call the mapping method which is supposed to create a new tensor of the provided type.\n This procedure is only supported when the tensor is stored in RAM, so when\n the tensor is outsourced (stored on a device), then we expect that the mapping method\n temporarily migrates the tensor back and forth internally...\n ","code":["Tensor result = tensor.mapTo(target, lambda)"]}, + + {"kind":"then","text":"We expect the String representation of the tensor to be as expected!","code":["result.toString() == expected"]}, + + {"kind":"and","text":"We expect the result to have the expected target class!","code":["result.itemType == target"]}, + + {"kind":"and","text":"Lastly, the original tensor used as mapping source should be stored on the original device!","code":["tensor.isOutsourced() == !(device instanceof CPU)","tensor.device == device"]}, + + {"kind":"where","text":"We use the following data to test this mapping for a wide range of types and values!","code":{"tensor":["Tensor.of(3.5)","Tensor.of(3.5)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofBytes().scalar(2.7)","Tensor.ofBytes().scalar(2.7)","Tensor.ofInts().scalar(6.1f)","Tensor.ofInts().scalar(6.1f)","Tensor.of( 3.0 )","Tensor.of(-1.0 )","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.of(3.0 )","Tensor.of(-1.0)","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 
37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)"],"device":["CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()"],"target":["String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class"],"lambda":["{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}"],"expected":["'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3]'","'(1):[~3]'","'(1):[~2]'","'(1):[~2]'","'(1):[~6]'","'(1):[~6]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1
):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Mapping tensors works for every device (even if they are not used). [46]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We first make a note of the type we started with.","code":["var originalType = tensor.itemType()"]}, + + {"kind":"when","text":"","code":["when : \"\"\""," We start off by storing the provided tensor on the provided device."," This might be any kind of device like for example an $OpenCLDevice."," Which means the tensor might not be sitting in RAM!"," \"\"\"","tensor.to(device)"]}, + + {"kind":"then","text":"After the tensor is stored on the device, we expect it to be still of the original type.","code":["tensor.itemType == originalType"]}, + + {"kind":"when","text":"\n We call the mapping method which is supposed to create a new tensor of the provided type.\n This procedure is only supported when the tensor is stored in RAM, so when\n the tensor is outsourced (stored on a device), then we expect that the mapping method\n temporarily migrates the tensor back and forth internally...\n ","code":["Tensor result = tensor.mapTo(target, lambda)"]}, + + {"kind":"then","text":"We expect the String representation of the tensor to be as expected!","code":["result.toString() == expected"]}, + + {"kind":"and","text":"We expect the result to have the expected target class!","code":["result.itemType == target"]}, + + {"kind":"and","text":"Lastly, the original tensor used as mapping source should be stored on the original device!","code":["tensor.isOutsourced() == !(device instanceof CPU)","tensor.device == device"]}, + + {"kind":"where","text":"We use the following data to test this mapping for a wide range of types and values!","code":{"tensor":["Tensor.of(3.5)","Tensor.of(3.5)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofBytes().scalar(2.7)","Tensor.ofBytes().scalar(2.7)","Tensor.ofInts().scalar(6.1f)","Tensor.ofInts().scalar(6.1f)","Tensor.of( 3.0 )","Tensor.of(-1.0 )","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.of(3.0 )","Tensor.of(-1.0)","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 
70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)"],"device":["CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()"],"target":["String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class"],"lambda":["{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}
","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}"],"expected":["'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3]'","'(1):[~3]'","'(1):[~2]'","'(1):[~2]'","'(1):[~6]'","'(1):[~6]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Mapping tensors works for every device (even if they are not used). [47]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We first make a note of the type we started with.","code":["var originalType = tensor.itemType()"]}, + + {"kind":"when","text":"","code":["when : \"\"\""," We start off by storing the provided tensor on the provided device."," This might be any kind of device like for example an $OpenCLDevice."," Which means the tensor might not be sitting in RAM!"," \"\"\"","tensor.to(device)"]}, + + {"kind":"then","text":"After the tensor is stored on the device, we expect it to be still of the original type.","code":["tensor.itemType == originalType"]}, + + {"kind":"when","text":"\n We call the mapping method which is supposed to create a new tensor of the provided type.\n This procedure is only supported when the tensor is stored in RAM, so when\n the tensor is outsourced (stored on a device), then we expect that the mapping method\n temporarily migrates the tensor back and forth internally...\n ","code":["Tensor result = tensor.mapTo(target, lambda)"]}, + + {"kind":"then","text":"We expect the String representation of the tensor to be as expected!","code":["result.toString() == expected"]}, + + {"kind":"and","text":"We expect the result to have the expected target class!","code":["result.itemType == target"]}, + + {"kind":"and","text":"Lastly, the original tensor used as mapping source should be stored on the original device!","code":["tensor.isOutsourced() == !(device instanceof CPU)","tensor.device == device"]}, + + {"kind":"where","text":"We use the following data to test this mapping for a wide range of types and values!","code":{"tensor":["Tensor.of(3.5)","Tensor.of(3.5)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofBytes().scalar(2.7)","Tensor.ofBytes().scalar(2.7)","Tensor.ofInts().scalar(6.1f)","Tensor.ofInts().scalar(6.1f)","Tensor.of( 3.0 )","Tensor.of(-1.0 )","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.of(3.0 )","Tensor.of(-1.0)","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f 
)","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)"],"device":["CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()"],"target":["String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.
class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class"],"lambda":["{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}"],"expected":["'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3]'","'(1):[~3]'","'(1):[~2]'","'(1):[~2]'","'(1):[~6]'","'(1):[~6]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Mapping tensors works for every device (even if they are not used). 
[48]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We first make a note of the type we started with.","code":["var originalType = tensor.itemType()"]}, + + {"kind":"when","text":"","code":["when : \"\"\""," We start off by storing the provided tensor on the provided device."," This might be any kind of device like for example an $OpenCLDevice."," Which means the tensor might not be sitting in RAM!"," \"\"\"","tensor.to(device)"]}, + + {"kind":"then","text":"After the tensor is stored on the device, we expect it to be still of the original type.","code":["tensor.itemType == originalType"]}, + + {"kind":"when","text":"\n We call the mapping method which is supposed to create a new tensor of the provided type.\n This procedure is only supported when the tensor is stored in RAM, so when\n the tensor is outsourced (stored on a device), then we expect that the mapping method\n temporarily migrates the tensor back and forth internally...\n ","code":["Tensor result = tensor.mapTo(target, lambda)"]}, + + {"kind":"then","text":"We expect the String representation of the tensor to be as expected!","code":["result.toString() == expected"]}, + + {"kind":"and","text":"We expect the result to have the expected target class!","code":["result.itemType == target"]}, + + {"kind":"and","text":"Lastly, the original tensor used as mapping source should be stored on the original device!","code":["tensor.isOutsourced() == !(device instanceof CPU)","tensor.device == device"]}, + + {"kind":"where","text":"We use the following data to test this mapping for a wide range of types and values!","code":{"tensor":["Tensor.of(3.5)","Tensor.of(3.5)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofBytes().scalar(2.7)","Tensor.ofBytes().scalar(2.7)","Tensor.ofInts().scalar(6.1f)","Tensor.ofInts().scalar(6.1f)","Tensor.of( 3.0 )","Tensor.of(-1.0 )","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.of(3.0 )","Tensor.of(-1.0)","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)","Tensor.ofBytes().scalar( 3 
)","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)"],"device":["CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()"],"target":["String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class"],"lambda":["{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}"],"expected":["'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3]'","'(1):[~3]'","'(1):[~2]'","'(1):[~2]'","'(1):[~6]'","'(1):[~6]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5
]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Mapping tensors works for every device (even if they are not used). [49]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We first make a note of the type we started with.","code":["var originalType = tensor.itemType()"]}, + + {"kind":"when","text":"","code":["when : \"\"\""," We start off by storing the provided tensor on the provided device."," This might be any kind of device like for example an $OpenCLDevice."," Which means the tensor might not be sitting in RAM!"," \"\"\"","tensor.to(device)"]}, + + {"kind":"then","text":"After the tensor is stored on the device, we expect it to be still of the original type.","code":["tensor.itemType == originalType"]}, + + {"kind":"when","text":"\n We call the mapping method which is supposed to create a new tensor of the provided type.\n This procedure is only supported when the tensor is stored in RAM, so when\n the tensor is outsourced (stored on a device), then we expect that the mapping method\n temporarily migrates the tensor back and forth internally...\n ","code":["Tensor result = tensor.mapTo(target, lambda)"]}, + + {"kind":"then","text":"We expect the String representation of the tensor to be as expected!","code":["result.toString() == expected"]}, + + {"kind":"and","text":"We expect the result to have the expected target class!","code":["result.itemType == target"]}, + + {"kind":"and","text":"Lastly, the original tensor used as mapping source should be stored on the original device!","code":["tensor.isOutsourced() == !(device instanceof CPU)","tensor.device == device"]}, + + {"kind":"where","text":"We use the following data to test this mapping for a wide range of types and values!","code":{"tensor":["Tensor.of(3.5)","Tensor.of(3.5)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofBytes().scalar(2.7)","Tensor.ofBytes().scalar(2.7)","Tensor.ofInts().scalar(6.1f)","Tensor.ofInts().scalar(6.1f)","Tensor.of( 3.0 )","Tensor.of(-1.0 )","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.of(3.0 )","Tensor.of(-1.0)","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 
90)","Tensor.ofInts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)"],"device":["CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()"],"target":["String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class"],"lambda":["{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}",
"{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}"],"expected":["'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3]'","'(1):[~3]'","'(1):[~2]'","'(1):[~2]'","'(1):[~6]'","'(1):[~6]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Mapping tensors works for every device (even if they are not used). [50]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We first make a note of the type we started with.","code":["var originalType = tensor.itemType()"]}, + + {"kind":"when","text":"","code":["when : \"\"\""," We start off by storing the provided tensor on the provided device."," This might be any kind of device like for example an $OpenCLDevice."," Which means the tensor might not be sitting in RAM!"," \"\"\"","tensor.to(device)"]}, + + {"kind":"then","text":"After the tensor is stored on the device, we expect it to be still of the original type.","code":["tensor.itemType == originalType"]}, + + {"kind":"when","text":"\n We call the mapping method which is supposed to create a new tensor of the provided type.\n This procedure is only supported when the tensor is stored in RAM, so when\n the tensor is outsourced (stored on a device), then we expect that the mapping method\n temporarily migrates the tensor back and forth internally...\n ","code":["Tensor result = tensor.mapTo(target, lambda)"]}, + + {"kind":"then","text":"We expect the String representation of the tensor to be as expected!","code":["result.toString() == expected"]}, + + {"kind":"and","text":"We expect the result to have the expected target class!","code":["result.itemType == target"]}, + + {"kind":"and","text":"Lastly, the original tensor used as mapping source should be stored on the original device!","code":["tensor.isOutsourced() == !(device instanceof CPU)","tensor.device == device"]}, + + {"kind":"where","text":"We use the following data to test this mapping for a wide range of types and values!","code":{"tensor":["Tensor.of(3.5)","Tensor.of(3.5)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofBytes().scalar(2.7)","Tensor.ofBytes().scalar(2.7)","Tensor.ofInts().scalar(6.1f)","Tensor.ofInts().scalar(6.1f)","Tensor.of( 3.0 )","Tensor.of(-1.0 )","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.of(3.0 )","Tensor.of(-1.0)","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f 
)","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)"],"device":["CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()"],"target":["String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.
class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class"],"lambda":["{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}"],"expected":["'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3]'","'(1):[~3]'","'(1):[~2]'","'(1):[~2]'","'(1):[~6]'","'(1):[~6]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Mapping tensors works for every device (even if they are not used). 
[51]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We first make a note of the type we started with.","code":["var originalType = tensor.itemType()"]}, + + {"kind":"when","text":"","code":["when : \"\"\""," We start off by storing the provided tensor on the provided device."," This might be any kind of device like for example an $OpenCLDevice."," Which means the tensor might not be sitting in RAM!"," \"\"\"","tensor.to(device)"]}, + + {"kind":"then","text":"After the tensor is stored on the device, we expect it to be still of the original type.","code":["tensor.itemType == originalType"]}, + + {"kind":"when","text":"\n We call the mapping method which is supposed to create a new tensor of the provided type.\n This procedure is only supported when the tensor is stored in RAM, so when\n the tensor is outsourced (stored on a device), then we expect that the mapping method\n temporarily migrates the tensor back and forth internally...\n ","code":["Tensor result = tensor.mapTo(target, lambda)"]}, + + {"kind":"then","text":"We expect the String representation of the tensor to be as expected!","code":["result.toString() == expected"]}, + + {"kind":"and","text":"We expect the result to have the expected target class!","code":["result.itemType == target"]}, + + {"kind":"and","text":"Lastly, the original tensor used as mapping source should be stored on the original device!","code":["tensor.isOutsourced() == !(device instanceof CPU)","tensor.device == device"]}, + + {"kind":"where","text":"We use the following data to test this mapping for a wide range of types and values!","code":{"tensor":["Tensor.of(3.5)","Tensor.of(3.5)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofBytes().scalar(2.7)","Tensor.ofBytes().scalar(2.7)","Tensor.ofInts().scalar(6.1f)","Tensor.ofInts().scalar(6.1f)","Tensor.of( 3.0 )","Tensor.of(-1.0 )","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.of(3.0 )","Tensor.of(-1.0)","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)","Tensor.ofBytes().scalar( 3 
)","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)"],"device":["CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()"],"target":["String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class"],"lambda":["{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}"],"expected":["'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3]'","'(1):[~3]'","'(1):[~2]'","'(1):[~2]'","'(1):[~6]'","'(1):[~6]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5
]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Mapping tensors works for every device (even if they are not used). [52]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We first make a note of the type we started with.","code":["var originalType = tensor.itemType()"]}, + + {"kind":"when","text":"","code":["when : \"\"\""," We start off by storing the provided tensor on the provided device."," This might be any kind of device like for example an $OpenCLDevice."," Which means the tensor might not be sitting in RAM!"," \"\"\"","tensor.to(device)"]}, + + {"kind":"then","text":"After the tensor is stored on the device, we expect it to be still of the original type.","code":["tensor.itemType == originalType"]}, + + {"kind":"when","text":"\n We call the mapping method which is supposed to create a new tensor of the provided type.\n This procedure is only supported when the tensor is stored in RAM, so when\n the tensor is outsourced (stored on a device), then we expect that the mapping method\n temporarily migrates the tensor back and forth internally...\n ","code":["Tensor result = tensor.mapTo(target, lambda)"]}, + + {"kind":"then","text":"We expect the String representation of the tensor to be as expected!","code":["result.toString() == expected"]}, + + {"kind":"and","text":"We expect the result to have the expected target class!","code":["result.itemType == target"]}, + + {"kind":"and","text":"Lastly, the original tensor used as mapping source should be stored on the original device!","code":["tensor.isOutsourced() == !(device instanceof CPU)","tensor.device == device"]}, + + {"kind":"where","text":"We use the following data to test this mapping for a wide range of types and values!","code":{"tensor":["Tensor.of(3.5)","Tensor.of(3.5)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofBytes().scalar(2.7)","Tensor.ofBytes().scalar(2.7)","Tensor.ofInts().scalar(6.1f)","Tensor.ofInts().scalar(6.1f)","Tensor.of( 3.0 )","Tensor.of(-1.0 )","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.of(3.0 )","Tensor.of(-1.0)","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 
90)","Tensor.ofInts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)"],"device":["CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()"],"target":["String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class"],"lambda":["{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}",
"{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}"],"expected":["'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3]'","'(1):[~3]'","'(1):[~2]'","'(1):[~2]'","'(1):[~6]'","'(1):[~6]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Mapping tensors works for every device (even if they are not used). [53]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We first make a note of the type we started with.","code":["var originalType = tensor.itemType()"]}, + + {"kind":"when","text":"","code":["when : \"\"\""," We start off by storing the provided tensor on the provided device."," This might be any kind of device like for example an $OpenCLDevice."," Which means the tensor might not be sitting in RAM!"," \"\"\"","tensor.to(device)"]}, + + {"kind":"then","text":"After the tensor is stored on the device, we expect it to be still of the original type.","code":["tensor.itemType == originalType"]}, + + {"kind":"when","text":"\n We call the mapping method which is supposed to create a new tensor of the provided type.\n This procedure is only supported when the tensor is stored in RAM, so when\n the tensor is outsourced (stored on a device), then we expect that the mapping method\n temporarily migrates the tensor back and forth internally...\n ","code":["Tensor result = tensor.mapTo(target, lambda)"]}, + + {"kind":"then","text":"We expect the String representation of the tensor to be as expected!","code":["result.toString() == expected"]}, + + {"kind":"and","text":"We expect the result to have the expected target class!","code":["result.itemType == target"]}, + + {"kind":"and","text":"Lastly, the original tensor used as mapping source should be stored on the original device!","code":["tensor.isOutsourced() == !(device instanceof CPU)","tensor.device == device"]}, + + {"kind":"where","text":"We use the following data to test this mapping for a wide range of types and values!","code":{"tensor":["Tensor.of(3.5)","Tensor.of(3.5)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofBytes().scalar(2.7)","Tensor.ofBytes().scalar(2.7)","Tensor.ofInts().scalar(6.1f)","Tensor.ofInts().scalar(6.1f)","Tensor.of( 3.0 )","Tensor.of(-1.0 )","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.of(3.0 )","Tensor.of(-1.0)","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f 
)","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)"],"device":["CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()"],"target":["String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.
class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class"],"lambda":["{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}"],"expected":["'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3]'","'(1):[~3]'","'(1):[~2]'","'(1):[~2]'","'(1):[~6]'","'(1):[~6]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Mapping tensors works for every device (even if they are not used). 
[54]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We first make a note of the type we started with.","code":["var originalType = tensor.itemType()"]}, + + {"kind":"when","text":"","code":["when : \"\"\""," We start off by storing the provided tensor on the provided device."," This might be any kind of device like for example an $OpenCLDevice."," Which means the tensor might not be sitting in RAM!"," \"\"\"","tensor.to(device)"]}, + + {"kind":"then","text":"After the tensor is stored on the device, we expect it to be still of the original type.","code":["tensor.itemType == originalType"]}, + + {"kind":"when","text":"\n We call the mapping method which is supposed to create a new tensor of the provided type.\n This procedure is only supported when the tensor is stored in RAM, so when\n the tensor is outsourced (stored on a device), then we expect that the mapping method\n temporarily migrates the tensor back and forth internally...\n ","code":["Tensor result = tensor.mapTo(target, lambda)"]}, + + {"kind":"then","text":"We expect the String representation of the tensor to be as expected!","code":["result.toString() == expected"]}, + + {"kind":"and","text":"We expect the result to have the expected target class!","code":["result.itemType == target"]}, + + {"kind":"and","text":"Lastly, the original tensor used as mapping source should be stored on the original device!","code":["tensor.isOutsourced() == !(device instanceof CPU)","tensor.device == device"]}, + + {"kind":"where","text":"We use the following data to test this mapping for a wide range of types and values!","code":{"tensor":["Tensor.of(3.5)","Tensor.of(3.5)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofBytes().scalar(2.7)","Tensor.ofBytes().scalar(2.7)","Tensor.ofInts().scalar(6.1f)","Tensor.ofInts().scalar(6.1f)","Tensor.of( 3.0 )","Tensor.of(-1.0 )","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.of(3.0 )","Tensor.of(-1.0)","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)","Tensor.ofBytes().scalar( 3 
)","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)"],"device":["CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()"],"target":["String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class"],"lambda":["{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}"],"expected":["'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3]'","'(1):[~3]'","'(1):[~2]'","'(1):[~2]'","'(1):[~6]'","'(1):[~6]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5
]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Mapping tensors works for every device (even if they are not used). [55]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We first make a note of the type we started with.","code":["var originalType = tensor.itemType()"]}, + + {"kind":"when","text":"","code":["when : \"\"\""," We start off by storing the provided tensor on the provided device."," This might be any kind of device like for example an $OpenCLDevice."," Which means the tensor might not be sitting in RAM!"," \"\"\"","tensor.to(device)"]}, + + {"kind":"then","text":"After the tensor is stored on the device, we expect it to be still of the original type.","code":["tensor.itemType == originalType"]}, + + {"kind":"when","text":"\n We call the mapping method which is supposed to create a new tensor of the provided type.\n This procedure is only supported when the tensor is stored in RAM, so when\n the tensor is outsourced (stored on a device), then we expect that the mapping method\n temporarily migrates the tensor back and forth internally...\n ","code":["Tensor result = tensor.mapTo(target, lambda)"]}, + + {"kind":"then","text":"We expect the String representation of the tensor to be as expected!","code":["result.toString() == expected"]}, + + {"kind":"and","text":"We expect the result to have the expected target class!","code":["result.itemType == target"]}, + + {"kind":"and","text":"Lastly, the original tensor used as mapping source should be stored on the original device!","code":["tensor.isOutsourced() == !(device instanceof CPU)","tensor.device == device"]}, + + {"kind":"where","text":"We use the following data to test this mapping for a wide range of types and values!","code":{"tensor":["Tensor.of(3.5)","Tensor.of(3.5)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofBytes().scalar(2.7)","Tensor.ofBytes().scalar(2.7)","Tensor.ofInts().scalar(6.1f)","Tensor.ofInts().scalar(6.1f)","Tensor.of( 3.0 )","Tensor.of(-1.0 )","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.of(3.0 )","Tensor.of(-1.0)","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 
90)","Tensor.ofInts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)"],"device":["CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()"],"target":["String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class"],"lambda":["{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}",
"{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}"],"expected":["'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3]'","'(1):[~3]'","'(1):[~2]'","'(1):[~2]'","'(1):[~6]'","'(1):[~6]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Mapping tensors works for every device (even if they are not used). [56]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We first make a note of the type we started with.","code":["var originalType = tensor.itemType()"]}, + + {"kind":"when","text":"","code":["when : \"\"\""," We start off by storing the provided tensor on the provided device."," This might be any kind of device like for example an $OpenCLDevice."," Which means the tensor might not be sitting in RAM!"," \"\"\"","tensor.to(device)"]}, + + {"kind":"then","text":"After the tensor is stored on the device, we expect it to be still of the original type.","code":["tensor.itemType == originalType"]}, + + {"kind":"when","text":"\n We call the mapping method which is supposed to create a new tensor of the provided type.\n This procedure is only supported when the tensor is stored in RAM, so when\n the tensor is outsourced (stored on a device), then we expect that the mapping method\n temporarily migrates the tensor back and forth internally...\n ","code":["Tensor result = tensor.mapTo(target, lambda)"]}, + + {"kind":"then","text":"We expect the String representation of the tensor to be as expected!","code":["result.toString() == expected"]}, + + {"kind":"and","text":"We expect the result to have the expected target class!","code":["result.itemType == target"]}, + + {"kind":"and","text":"Lastly, the original tensor used as mapping source should be stored on the original device!","code":["tensor.isOutsourced() == !(device instanceof CPU)","tensor.device == device"]}, + + {"kind":"where","text":"We use the following data to test this mapping for a wide range of types and values!","code":{"tensor":["Tensor.of(3.5)","Tensor.of(3.5)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofBytes().scalar(2.7)","Tensor.ofBytes().scalar(2.7)","Tensor.ofInts().scalar(6.1f)","Tensor.ofInts().scalar(6.1f)","Tensor.of( 3.0 )","Tensor.of(-1.0 )","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.of(3.0 )","Tensor.of(-1.0)","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f 
)","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)"],"device":["CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()"],"target":["String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.
class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class"],"lambda":["{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}"],"expected":["'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3]'","'(1):[~3]'","'(1):[~2]'","'(1):[~2]'","'(1):[~6]'","'(1):[~6]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Mapping tensors works for every device (even if they are not used). 
[57]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We first make a note of the type we started with.","code":["var originalType = tensor.itemType()"]}, + + {"kind":"when","text":"","code":["when : \"\"\""," We start off by storing the provided tensor on the provided device."," This might be any kind of device like for example an $OpenCLDevice."," Which means the tensor might not be sitting in RAM!"," \"\"\"","tensor.to(device)"]}, + + {"kind":"then","text":"After the tensor is stored on the device, we expect it to be still of the original type.","code":["tensor.itemType == originalType"]}, + + {"kind":"when","text":"\n We call the mapping method which is supposed to create a new tensor of the provided type.\n This procedure is only supported when the tensor is stored in RAM, so when\n the tensor is outsourced (stored on a device), then we expect that the mapping method\n temporarily migrates the tensor back and forth internally...\n ","code":["Tensor result = tensor.mapTo(target, lambda)"]}, + + {"kind":"then","text":"We expect the String representation of the tensor to be as expected!","code":["result.toString() == expected"]}, + + {"kind":"and","text":"We expect the result to have the expected target class!","code":["result.itemType == target"]}, + + {"kind":"and","text":"Lastly, the original tensor used as mapping source should be stored on the original device!","code":["tensor.isOutsourced() == !(device instanceof CPU)","tensor.device == device"]}, + + {"kind":"where","text":"We use the following data to test this mapping for a wide range of types and values!","code":{"tensor":["Tensor.of(3.5)","Tensor.of(3.5)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofBytes().scalar(2.7)","Tensor.ofBytes().scalar(2.7)","Tensor.ofInts().scalar(6.1f)","Tensor.ofInts().scalar(6.1f)","Tensor.of( 3.0 )","Tensor.of(-1.0 )","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.of(3.0 )","Tensor.of(-1.0)","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)","Tensor.ofBytes().scalar( 3 
)","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)"],"device":["CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()"],"target":["String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class"],"lambda":["{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}"],"expected":["'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3]'","'(1):[~3]'","'(1):[~2]'","'(1):[~2]'","'(1):[~6]'","'(1):[~6]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5
]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Mapping tensors works for every device (even if they are not used). [58]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We first make a note of the type we started with.","code":["var originalType = tensor.itemType()"]}, + + {"kind":"when","text":"","code":["when : \"\"\""," We start off by storing the provided tensor on the provided device."," This might be any kind of device like for example an $OpenCLDevice."," Which means the tensor might not be sitting in RAM!"," \"\"\"","tensor.to(device)"]}, + + {"kind":"then","text":"After the tensor is stored on the device, we expect it to be still of the original type.","code":["tensor.itemType == originalType"]}, + + {"kind":"when","text":"\n We call the mapping method which is supposed to create a new tensor of the provided type.\n This procedure is only supported when the tensor is stored in RAM, so when\n the tensor is outsourced (stored on a device), then we expect that the mapping method\n temporarily migrates the tensor back and forth internally...\n ","code":["Tensor result = tensor.mapTo(target, lambda)"]}, + + {"kind":"then","text":"We expect the String representation of the tensor to be as expected!","code":["result.toString() == expected"]}, + + {"kind":"and","text":"We expect the result to have the expected target class!","code":["result.itemType == target"]}, + + {"kind":"and","text":"Lastly, the original tensor used as mapping source should be stored on the original device!","code":["tensor.isOutsourced() == !(device instanceof CPU)","tensor.device == device"]}, + + {"kind":"where","text":"We use the following data to test this mapping for a wide range of types and values!","code":{"tensor":["Tensor.of(3.5)","Tensor.of(3.5)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofBytes().scalar(2.7)","Tensor.ofBytes().scalar(2.7)","Tensor.ofInts().scalar(6.1f)","Tensor.ofInts().scalar(6.1f)","Tensor.of( 3.0 )","Tensor.of(-1.0 )","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.of(3.0 )","Tensor.of(-1.0)","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 
90)","Tensor.ofInts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)"],"device":["CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()"],"target":["String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class"],"lambda":["{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}",
"{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}"],"expected":["'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3]'","'(1):[~3]'","'(1):[~2]'","'(1):[~2]'","'(1):[~6]'","'(1):[~6]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Mapping tensors works for every device (even if they are not used). [59]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We first make a note of the type we started with.","code":["var originalType = tensor.itemType()"]}, + + {"kind":"when","text":"","code":["when : \"\"\""," We start off by storing the provided tensor on the provided device."," This might be any kind of device like for example an $OpenCLDevice."," Which means the tensor might not be sitting in RAM!"," \"\"\"","tensor.to(device)"]}, + + {"kind":"then","text":"After the tensor is stored on the device, we expect it to be still of the original type.","code":["tensor.itemType == originalType"]}, + + {"kind":"when","text":"\n We call the mapping method which is supposed to create a new tensor of the provided type.\n This procedure is only supported when the tensor is stored in RAM, so when\n the tensor is outsourced (stored on a device), then we expect that the mapping method\n temporarily migrates the tensor back and forth internally...\n ","code":["Tensor result = tensor.mapTo(target, lambda)"]}, + + {"kind":"then","text":"We expect the String representation of the tensor to be as expected!","code":["result.toString() == expected"]}, + + {"kind":"and","text":"We expect the result to have the expected target class!","code":["result.itemType == target"]}, + + {"kind":"and","text":"Lastly, the original tensor used as mapping source should be stored on the original device!","code":["tensor.isOutsourced() == !(device instanceof CPU)","tensor.device == device"]}, + + {"kind":"where","text":"We use the following data to test this mapping for a wide range of types and values!","code":{"tensor":["Tensor.of(3.5)","Tensor.of(3.5)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofBytes().scalar(2.7)","Tensor.ofBytes().scalar(2.7)","Tensor.ofInts().scalar(6.1f)","Tensor.ofInts().scalar(6.1f)","Tensor.of( 3.0 )","Tensor.of(-1.0 )","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.of(3.0 )","Tensor.of(-1.0)","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f 
)","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)"],"device":["CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()"],"target":["String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.
class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class"],"lambda":["{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}"],"expected":["'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3]'","'(1):[~3]'","'(1):[~2]'","'(1):[~2]'","'(1):[~6]'","'(1):[~6]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Mapping tensors works for every device (even if they are not used). 
[60]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We first make a note of the type we started with.","code":["var originalType = tensor.itemType()"]}, + + {"kind":"when","text":"","code":["when : \"\"\""," We start off by storing the provided tensor on the provided device."," This might be any kind of device like for example an $OpenCLDevice."," Which means the tensor might not be sitting in RAM!"," \"\"\"","tensor.to(device)"]}, + + {"kind":"then","text":"After the tensor is stored on the device, we expect it to be still of the original type.","code":["tensor.itemType == originalType"]}, + + {"kind":"when","text":"\n We call the mapping method which is supposed to create a new tensor of the provided type.\n This procedure is only supported when the tensor is stored in RAM, so when\n the tensor is outsourced (stored on a device), then we expect that the mapping method\n temporarily migrates the tensor back and forth internally...\n ","code":["Tensor result = tensor.mapTo(target, lambda)"]}, + + {"kind":"then","text":"We expect the String representation of the tensor to be as expected!","code":["result.toString() == expected"]}, + + {"kind":"and","text":"We expect the result to have the expected target class!","code":["result.itemType == target"]}, + + {"kind":"and","text":"Lastly, the original tensor used as mapping source should be stored on the original device!","code":["tensor.isOutsourced() == !(device instanceof CPU)","tensor.device == device"]}, + + {"kind":"where","text":"We use the following data to test this mapping for a wide range of types and values!","code":{"tensor":["Tensor.of(3.5)","Tensor.of(3.5)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofBytes().scalar(2.7)","Tensor.ofBytes().scalar(2.7)","Tensor.ofInts().scalar(6.1f)","Tensor.ofInts().scalar(6.1f)","Tensor.of( 3.0 )","Tensor.of(-1.0 )","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.of(3.0 )","Tensor.of(-1.0)","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)","Tensor.ofBytes().scalar( 3 
)","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)"],"device":["CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()"],"target":["String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class"],"lambda":["{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}"],"expected":["'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3]'","'(1):[~3]'","'(1):[~2]'","'(1):[~2]'","'(1):[~6]'","'(1):[~6]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5
]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Mapping tensors works for every device (even if they are not used). [61]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We first make a note of the type we started with.","code":["var originalType = tensor.itemType()"]}, + + {"kind":"when","text":"","code":["when : \"\"\""," We start off by storing the provided tensor on the provided device."," This might be any kind of device like for example an $OpenCLDevice."," Which means the tensor might not be sitting in RAM!"," \"\"\"","tensor.to(device)"]}, + + {"kind":"then","text":"After the tensor is stored on the device, we expect it to be still of the original type.","code":["tensor.itemType == originalType"]}, + + {"kind":"when","text":"\n We call the mapping method which is supposed to create a new tensor of the provided type.\n This procedure is only supported when the tensor is stored in RAM, so when\n the tensor is outsourced (stored on a device), then we expect that the mapping method\n temporarily migrates the tensor back and forth internally...\n ","code":["Tensor result = tensor.mapTo(target, lambda)"]}, + + {"kind":"then","text":"We expect the String representation of the tensor to be as expected!","code":["result.toString() == expected"]}, + + {"kind":"and","text":"We expect the result to have the expected target class!","code":["result.itemType == target"]}, + + {"kind":"and","text":"Lastly, the original tensor used as mapping source should be stored on the original device!","code":["tensor.isOutsourced() == !(device instanceof CPU)","tensor.device == device"]}, + + {"kind":"where","text":"We use the following data to test this mapping for a wide range of types and values!","code":{"tensor":["Tensor.of(3.5)","Tensor.of(3.5)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofBytes().scalar(2.7)","Tensor.ofBytes().scalar(2.7)","Tensor.ofInts().scalar(6.1f)","Tensor.ofInts().scalar(6.1f)","Tensor.of( 3.0 )","Tensor.of(-1.0 )","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.of(3.0 )","Tensor.of(-1.0)","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 
90)","Tensor.ofInts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)"],"device":["CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()"],"target":["String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class"],"lambda":["{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}",
"{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}"],"expected":["'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3]'","'(1):[~3]'","'(1):[~2]'","'(1):[~2]'","'(1):[~6]'","'(1):[~6]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Mapping tensors works for every device (even if they are not used). [62]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We first make a note of the type we started with.","code":["var originalType = tensor.itemType()"]}, + + {"kind":"when","text":"","code":["when : \"\"\""," We start off by storing the provided tensor on the provided device."," This might be any kind of device like for example an $OpenCLDevice."," Which means the tensor might not be sitting in RAM!"," \"\"\"","tensor.to(device)"]}, + + {"kind":"then","text":"After the tensor is stored on the device, we expect it to be still of the original type.","code":["tensor.itemType == originalType"]}, + + {"kind":"when","text":"\n We call the mapping method which is supposed to create a new tensor of the provided type.\n This procedure is only supported when the tensor is stored in RAM, so when\n the tensor is outsourced (stored on a device), then we expect that the mapping method\n temporarily migrates the tensor back and forth internally...\n ","code":["Tensor result = tensor.mapTo(target, lambda)"]}, + + {"kind":"then","text":"We expect the String representation of the tensor to be as expected!","code":["result.toString() == expected"]}, + + {"kind":"and","text":"We expect the result to have the expected target class!","code":["result.itemType == target"]}, + + {"kind":"and","text":"Lastly, the original tensor used as mapping source should be stored on the original device!","code":["tensor.isOutsourced() == !(device instanceof CPU)","tensor.device == device"]}, + + {"kind":"where","text":"We use the following data to test this mapping for a wide range of types and values!","code":{"tensor":["Tensor.of(3.5)","Tensor.of(3.5)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofBytes().scalar(2.7)","Tensor.ofBytes().scalar(2.7)","Tensor.ofInts().scalar(6.1f)","Tensor.ofInts().scalar(6.1f)","Tensor.of( 3.0 )","Tensor.of(-1.0 )","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.of(3.0 )","Tensor.of(-1.0)","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f 
)","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)"],"device":["CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()"],"target":["String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.
class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class"],"lambda":["{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}"],"expected":["'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3]'","'(1):[~3]'","'(1):[~2]'","'(1):[~2]'","'(1):[~6]'","'(1):[~6]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Mapping tensors works for every device (even if they are not used). 
[63]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We first make a note of the type we started with.","code":["var originalType = tensor.itemType()"]}, + + {"kind":"when","text":"","code":["when : \"\"\""," We start off by storing the provided tensor on the provided device."," This might be any kind of device like for example an $OpenCLDevice."," Which means the tensor might not be sitting in RAM!"," \"\"\"","tensor.to(device)"]}, + + {"kind":"then","text":"After the tensor is stored on the device, we expect it to be still of the original type.","code":["tensor.itemType == originalType"]}, + + {"kind":"when","text":"\n We call the mapping method which is supposed to create a new tensor of the provided type.\n This procedure is only supported when the tensor is stored in RAM, so when\n the tensor is outsourced (stored on a device), then we expect that the mapping method\n temporarily migrates the tensor back and forth internally...\n ","code":["Tensor result = tensor.mapTo(target, lambda)"]}, + + {"kind":"then","text":"We expect the String representation of the tensor to be as expected!","code":["result.toString() == expected"]}, + + {"kind":"and","text":"We expect the result to have the expected target class!","code":["result.itemType == target"]}, + + {"kind":"and","text":"Lastly, the original tensor used as mapping source should be stored on the original device!","code":["tensor.isOutsourced() == !(device instanceof CPU)","tensor.device == device"]}, + + {"kind":"where","text":"We use the following data to test this mapping for a wide range of types and values!","code":{"tensor":["Tensor.of(3.5)","Tensor.of(3.5)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofBytes().scalar(2.7)","Tensor.ofBytes().scalar(2.7)","Tensor.ofInts().scalar(6.1f)","Tensor.ofInts().scalar(6.1f)","Tensor.of( 3.0 )","Tensor.of(-1.0 )","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.of(3.0 )","Tensor.of(-1.0)","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)","Tensor.ofBytes().scalar( 3 
)","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)"],"device":["CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()"],"target":["String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class"],"lambda":["{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}"],"expected":["'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3]'","'(1):[~3]'","'(1):[~2]'","'(1):[~2]'","'(1):[~6]'","'(1):[~6]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5
]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Mapping tensors works for every device (even if they are not used). [64]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We first make a note of the type we started with.","code":["var originalType = tensor.itemType()"]}, + + {"kind":"when","text":"","code":["when : \"\"\""," We start off by storing the provided tensor on the provided device."," This might be any kind of device like for example an $OpenCLDevice."," Which means the tensor might not be sitting in RAM!"," \"\"\"","tensor.to(device)"]}, + + {"kind":"then","text":"After the tensor is stored on the device, we expect it to be still of the original type.","code":["tensor.itemType == originalType"]}, + + {"kind":"when","text":"\n We call the mapping method which is supposed to create a new tensor of the provided type.\n This procedure is only supported when the tensor is stored in RAM, so when\n the tensor is outsourced (stored on a device), then we expect that the mapping method\n temporarily migrates the tensor back and forth internally...\n ","code":["Tensor result = tensor.mapTo(target, lambda)"]}, + + {"kind":"then","text":"We expect the String representation of the tensor to be as expected!","code":["result.toString() == expected"]}, + + {"kind":"and","text":"We expect the result to have the expected target class!","code":["result.itemType == target"]}, + + {"kind":"and","text":"Lastly, the original tensor used as mapping source should be stored on the original device!","code":["tensor.isOutsourced() == !(device instanceof CPU)","tensor.device == device"]}, + + {"kind":"where","text":"We use the following data to test this mapping for a wide range of types and values!","code":{"tensor":["Tensor.of(3.5)","Tensor.of(3.5)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofBytes().scalar(2.7)","Tensor.ofBytes().scalar(2.7)","Tensor.ofInts().scalar(6.1f)","Tensor.ofInts().scalar(6.1f)","Tensor.of( 3.0 )","Tensor.of(-1.0 )","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.of(3.0 )","Tensor.of(-1.0)","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 
90)","Tensor.ofInts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)"],"device":["CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()"],"target":["String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class"],"lambda":["{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}",
"{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}"],"expected":["'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3]'","'(1):[~3]'","'(1):[~2]'","'(1):[~2]'","'(1):[~6]'","'(1):[~6]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Mapping tensors works for every device (even if they are not used). [65]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We first make a note of the type we started with.","code":["var originalType = tensor.itemType()"]}, + + {"kind":"when","text":"","code":["when : \"\"\""," We start off by storing the provided tensor on the provided device."," This might be any kind of device like for example an $OpenCLDevice."," Which means the tensor might not be sitting in RAM!"," \"\"\"","tensor.to(device)"]}, + + {"kind":"then","text":"After the tensor is stored on the device, we expect it to be still of the original type.","code":["tensor.itemType == originalType"]}, + + {"kind":"when","text":"\n We call the mapping method which is supposed to create a new tensor of the provided type.\n This procedure is only supported when the tensor is stored in RAM, so when\n the tensor is outsourced (stored on a device), then we expect that the mapping method\n temporarily migrates the tensor back and forth internally...\n ","code":["Tensor result = tensor.mapTo(target, lambda)"]}, + + {"kind":"then","text":"We expect the String representation of the tensor to be as expected!","code":["result.toString() == expected"]}, + + {"kind":"and","text":"We expect the result to have the expected target class!","code":["result.itemType == target"]}, + + {"kind":"and","text":"Lastly, the original tensor used as mapping source should be stored on the original device!","code":["tensor.isOutsourced() == !(device instanceof CPU)","tensor.device == device"]}, + + {"kind":"where","text":"We use the following data to test this mapping for a wide range of types and values!","code":{"tensor":["Tensor.of(3.5)","Tensor.of(3.5)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofBytes().scalar(2.7)","Tensor.ofBytes().scalar(2.7)","Tensor.ofInts().scalar(6.1f)","Tensor.ofInts().scalar(6.1f)","Tensor.of( 3.0 )","Tensor.of(-1.0 )","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.of(3.0 )","Tensor.of(-1.0)","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f 
)","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)"],"device":["CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()"],"target":["String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.
class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class"],"lambda":["{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}"],"expected":["'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3]'","'(1):[~3]'","'(1):[~2]'","'(1):[~2]'","'(1):[~6]'","'(1):[~6]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Mapping tensors works for every device (even if they are not used). 
[66]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We first make a note of the type we started with.","code":["var originalType = tensor.itemType()"]}, + + {"kind":"when","text":"","code":["when : \"\"\""," We start off by storing the provided tensor on the provided device."," This might be any kind of device like for example an $OpenCLDevice."," Which means the tensor might not be sitting in RAM!"," \"\"\"","tensor.to(device)"]}, + + {"kind":"then","text":"After the tensor is stored on the device, we expect it to be still of the original type.","code":["tensor.itemType == originalType"]}, + + {"kind":"when","text":"\n We call the mapping method which is supposed to create a new tensor of the provided type.\n This procedure is only supported when the tensor is stored in RAM, so when\n the tensor is outsourced (stored on a device), then we expect that the mapping method\n temporarily migrates the tensor back and forth internally...\n ","code":["Tensor result = tensor.mapTo(target, lambda)"]}, + + {"kind":"then","text":"We expect the String representation of the tensor to be as expected!","code":["result.toString() == expected"]}, + + {"kind":"and","text":"We expect the result to have the expected target class!","code":["result.itemType == target"]}, + + {"kind":"and","text":"Lastly, the original tensor used as mapping source should be stored on the original device!","code":["tensor.isOutsourced() == !(device instanceof CPU)","tensor.device == device"]}, + + {"kind":"where","text":"We use the following data to test this mapping for a wide range of types and values!","code":{"tensor":["Tensor.of(3.5)","Tensor.of(3.5)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofBytes().scalar(2.7)","Tensor.ofBytes().scalar(2.7)","Tensor.ofInts().scalar(6.1f)","Tensor.ofInts().scalar(6.1f)","Tensor.of( 3.0 )","Tensor.of(-1.0 )","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.of(3.0 )","Tensor.of(-1.0)","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)","Tensor.ofBytes().scalar( 3 
)","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)"],"device":["CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()"],"target":["String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class"],"lambda":["{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}"],"expected":["'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3]'","'(1):[~3]'","'(1):[~2]'","'(1):[~2]'","'(1):[~6]'","'(1):[~6]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5
]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Mapping tensors works for every device (even if they are not used). [67]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We first make a note of the type we started with.","code":["var originalType = tensor.itemType()"]}, + + {"kind":"when","text":"","code":["when : \"\"\""," We start off by storing the provided tensor on the provided device."," This might be any kind of device like for example an $OpenCLDevice."," Which means the tensor might not be sitting in RAM!"," \"\"\"","tensor.to(device)"]}, + + {"kind":"then","text":"After the tensor is stored on the device, we expect it to be still of the original type.","code":["tensor.itemType == originalType"]}, + + {"kind":"when","text":"\n We call the mapping method which is supposed to create a new tensor of the provided type.\n This procedure is only supported when the tensor is stored in RAM, so when\n the tensor is outsourced (stored on a device), then we expect that the mapping method\n temporarily migrates the tensor back and forth internally...\n ","code":["Tensor result = tensor.mapTo(target, lambda)"]}, + + {"kind":"then","text":"We expect the String representation of the tensor to be as expected!","code":["result.toString() == expected"]}, + + {"kind":"and","text":"We expect the result to have the expected target class!","code":["result.itemType == target"]}, + + {"kind":"and","text":"Lastly, the original tensor used as mapping source should be stored on the original device!","code":["tensor.isOutsourced() == !(device instanceof CPU)","tensor.device == device"]}, + + {"kind":"where","text":"We use the following data to test this mapping for a wide range of types and values!","code":{"tensor":["Tensor.of(3.5)","Tensor.of(3.5)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofBytes().scalar(2.7)","Tensor.ofBytes().scalar(2.7)","Tensor.ofInts().scalar(6.1f)","Tensor.ofInts().scalar(6.1f)","Tensor.of( 3.0 )","Tensor.of(-1.0 )","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.of(3.0 )","Tensor.of(-1.0)","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 
90)","Tensor.ofInts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)"],"device":["CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()"],"target":["String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class"],"lambda":["{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}",
"{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}"],"expected":["'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3]'","'(1):[~3]'","'(1):[~2]'","'(1):[~2]'","'(1):[~6]'","'(1):[~6]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Mapping tensors works for every device (even if they are not used). [68]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We first make a note of the type we started with.","code":["var originalType = tensor.itemType()"]}, + + {"kind":"when","text":"","code":["when : \"\"\""," We start off by storing the provided tensor on the provided device."," This might be any kind of device like for example an $OpenCLDevice."," Which means the tensor might not be sitting in RAM!"," \"\"\"","tensor.to(device)"]}, + + {"kind":"then","text":"After the tensor is stored on the device, we expect it to be still of the original type.","code":["tensor.itemType == originalType"]}, + + {"kind":"when","text":"\n We call the mapping method which is supposed to create a new tensor of the provided type.\n This procedure is only supported when the tensor is stored in RAM, so when\n the tensor is outsourced (stored on a device), then we expect that the mapping method\n temporarily migrates the tensor back and forth internally...\n ","code":["Tensor result = tensor.mapTo(target, lambda)"]}, + + {"kind":"then","text":"We expect the String representation of the tensor to be as expected!","code":["result.toString() == expected"]}, + + {"kind":"and","text":"We expect the result to have the expected target class!","code":["result.itemType == target"]}, + + {"kind":"and","text":"Lastly, the original tensor used as mapping source should be stored on the original device!","code":["tensor.isOutsourced() == !(device instanceof CPU)","tensor.device == device"]}, + + {"kind":"where","text":"We use the following data to test this mapping for a wide range of types and values!","code":{"tensor":["Tensor.of(3.5)","Tensor.of(3.5)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofBytes().scalar(2.7)","Tensor.ofBytes().scalar(2.7)","Tensor.ofInts().scalar(6.1f)","Tensor.ofInts().scalar(6.1f)","Tensor.of( 3.0 )","Tensor.of(-1.0 )","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.of(3.0 )","Tensor.of(-1.0)","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f 
)","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)"],"device":["CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()"],"target":["String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.
class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class"],"lambda":["{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}"],"expected":["'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3]'","'(1):[~3]'","'(1):[~2]'","'(1):[~2]'","'(1):[~6]'","'(1):[~6]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Mapping tensors works for every device (even if they are not used). 
[69]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We first make a note of the type we started with.","code":["var originalType = tensor.itemType()"]}, + + {"kind":"when","text":"","code":["when : \"\"\""," We start off by storing the provided tensor on the provided device."," This might be any kind of device like for example an $OpenCLDevice."," Which means the tensor might not be sitting in RAM!"," \"\"\"","tensor.to(device)"]}, + + {"kind":"then","text":"After the tensor is stored on the device, we expect it to be still of the original type.","code":["tensor.itemType == originalType"]}, + + {"kind":"when","text":"\n We call the mapping method which is supposed to create a new tensor of the provided type.\n This procedure is only supported when the tensor is stored in RAM, so when\n the tensor is outsourced (stored on a device), then we expect that the mapping method\n temporarily migrates the tensor back and forth internally...\n ","code":["Tensor result = tensor.mapTo(target, lambda)"]}, + + {"kind":"then","text":"We expect the String representation of the tensor to be as expected!","code":["result.toString() == expected"]}, + + {"kind":"and","text":"We expect the result to have the expected target class!","code":["result.itemType == target"]}, + + {"kind":"and","text":"Lastly, the original tensor used as mapping source should be stored on the original device!","code":["tensor.isOutsourced() == !(device instanceof CPU)","tensor.device == device"]}, + + {"kind":"where","text":"We use the following data to test this mapping for a wide range of types and values!","code":{"tensor":["Tensor.of(3.5)","Tensor.of(3.5)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofFloats().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofShorts().scalar(3.5f)","Tensor.ofBytes().scalar(2.7)","Tensor.ofBytes().scalar(2.7)","Tensor.ofInts().scalar(6.1f)","Tensor.ofInts().scalar(6.1f)","Tensor.of( 3.0 )","Tensor.of(-1.0 )","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.of(3.0 )","Tensor.of(-1.0)","Tensor.of(0.5)","Tensor.of(0.7)","Tensor.of(0.9)","Tensor.of(3.8)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofFloats().scalar( 3f )","Tensor.ofFloats().scalar(-1f )","Tensor.ofFloats().scalar(0.5f)","Tensor.ofFloats().scalar(0.7f)","Tensor.ofFloats().scalar(0.9f)","Tensor.ofFloats().scalar(3.8f)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofInts().scalar( 3 )","Tensor.ofInts().scalar(-1 )","Tensor.ofInts().scalar( 5 )","Tensor.ofInts().scalar( 70)","Tensor.ofInts().scalar( 90)","Tensor.ofInts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofShorts().scalar( 3 )","Tensor.ofShorts().scalar(-1 )","Tensor.ofShorts().scalar( 5 )","Tensor.ofShorts().scalar( 70)","Tensor.ofShorts().scalar( 90)","Tensor.ofShorts().scalar( 37)","Tensor.ofBytes().scalar( 3 )","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)","Tensor.ofBytes().scalar( 3 
)","Tensor.ofBytes().scalar(-1 )","Tensor.ofBytes().scalar( 5 )","Tensor.ofBytes().scalar( 70)","Tensor.ofBytes().scalar( 90)","Tensor.ofBytes().scalar( 37)"],"device":["CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","Device.get('first')","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()","CPU.get()"],"target":["String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","String.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class","Double.class","Float.class","Integer.class","Long.class","Byte.class","Short.class"],"lambda":["{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{\"~$it\"}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}","{it*it}","{it/2}","{it*10}","{it*5}","{it*2}","{it/2}"],"expected":["'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3.5]'","'(1):[~3]'","'(1):[~3]'","'(1):[~2]'","'(1):[~2]'","'(1):[~6]'","'(1):[~6]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[5]'","'(1):[3]'","'(1):[1]'","'(1):[1]'","'(1):[9.0]'","'(1):[-0.5
]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'","'(1):[9.0]'","'(1):[-0.5]'","'(1):[50]'","'(1):[350]'","'(1):[-76]'","'(1):[18]'"]}} + ], + "problems":{"dataValues":[], "errors":[]} } ], diff --git a/docs/spock/reports/it.Eleven_Lines_NN_System_Spec.json b/docs/spock/reports/it.Eleven_Lines_NN_System_Spec.json index bba1c7bd5..98734156a 100644 --- a/docs/spock/reports/it.Eleven_Lines_NN_System_Spec.json +++ b/docs/spock/reports/it.Eleven_Lines_NN_System_Spec.json @@ -9,21 +9,21 @@ "failures":"0", "errors":"0", "skipped":"0", - "duration":"0.281 seconds" + "duration":"0.491 seconds" }, "headers":[],"tags":{},"see":[], "features":[ { "id":"One can write a simple neural network with custom back-prop in 11 lines of code!", "result":"PASS", - "duration":"0.064 seconds", + "duration":"0.109 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, "blocks":[ {"kind":"given","text":"","code":["var X = Tensor.of(Double, [[0, 0, 1], [0, 1, 1], [1, 0, 1], [1, 1, 1]])","var y = Tensor.of(Double, [[0, 1, 1, 0]]).T","var W1 = Tensor.ofRandom(Double, 3, 4)","var W2 = Tensor.ofRandom(Double, 4, 1)","60.times {"," var l1 = Tensor.of('sig(', X.matMul(W1), ')')"," var l2 = Tensor.of('sig(', l1.matMul(W2), ')')"," var l2_delta = (y - l2) * (l2 * (-l2 + 1))"," var l1_delta = l2_delta.matMul(W2.T) * (l1 * (-l1 + 1))"," W2 += l1.T.matMul(l2_delta)"," W1 += X.T.matMul(l1_delta)","}"]}, - {"kind":"expect","text":"","code":["W1.mut.data.get().collect({it.round 14}) == RESULT_W1.collect({it.round 14})","W2.mut.data.get().collect({it.round 14}) == RESULT_W2.collect({it.round 14})"]} + {"kind":"expect","text":"","code":["W1.mut.data.get().collect({it.round 12}) == RESULT_W1.collect({it.round 12})","W2.mut.data.get().collect({it.round 12}) == RESULT_W2.collect({it.round 12})"]} ], "problems":{"dataValues":[], "errors":[]} }, @@ -31,14 +31,14 @@ { "id":"One can write a simple neural network in less than 11 lines of code!", "result":"PASS", - "duration":"0.078 seconds", + "duration":"0.122 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, "blocks":[ {"kind":"given","text":"","code":["var X = Tensor.of(Double, [[0, 0, 1], [0, 1, 1], [1, 0, 1], [1, 1, 1]])","var y = Tensor.of(Double, [[0, 1, 1, 0]]).T","var W1 = Tensor.ofRandom(Double, 3, 4).setRqsGradient(true)","var W2 = Tensor.ofRandom(Double, 4, 1).setRqsGradient(true)","60.times {"," var l2 = Tensor.of('sig(',Tensor.of('sig(',X.matMul(W1),')').matMul(W2),')')"," l2.backward(y - l2)"," W1.applyGradient(); W2.applyGradient()","}"]}, - {"kind":"expect","text":"","code":["W1.mut.data.get().collect({it.round 14}) == RESULT_W1.collect({it.round 14})","W2.mut.data.get().collect({it.round 14}) == RESULT_W2.collect({it.round 14})"]} + {"kind":"expect","text":"","code":["W1.mut.data.get().collect({it.round 12}) == RESULT_W1.collect({it.round 12})","W2.mut.data.get().collect({it.round 12}) == RESULT_W2.collect({it.round 12})"]} ], "problems":{"dataValues":[], "errors":[]} }, @@ -46,7 +46,7 @@ { "id":"One can write a simple float based neural network in less than 11 lines of java like code!", "result":"PASS", - "duration":"0.067 seconds", + "duration":"0.128 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ 
-61,7 +61,7 @@ { "id":"One can write a simple double based neural network in less than 11 lines of java like code using the \"@\" operator!", "result":"PASS", - "duration":"0.068 seconds", + "duration":"0.125 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -76,7 +76,7 @@ { "id":"The pseudo random number generator works as expected for the weights used in the 11 line NN examples!", "result":"PASS", - "duration":"0", + "duration":"0.001 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, diff --git a/docs/spock/reports/st.Benchmark_System_Test.json b/docs/spock/reports/st.Benchmark_System_Test.json index 6c445f79f..be0203252 100644 --- a/docs/spock/reports/st.Benchmark_System_Test.json +++ b/docs/spock/reports/st.Benchmark_System_Test.json @@ -9,14 +9,14 @@ "failures":"0", "errors":"0", "skipped":"0", - "duration":"0.910 seconds" + "duration":"1.900 seconds" }, "headers":[],"tags":{},"see":[], "features":[ { "id":"Tensor can be constructed by passing List instances.", "result":"PASS", - "duration":"0.150 seconds", + "duration":"0.822 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -39,7 +39,7 @@ { "id":"Test benchmark script and simple tensor constructor.", "result":"PASS", - "duration":"0.758 seconds", + "duration":"1.074 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, diff --git a/docs/spock/reports/st.Broad_System_Test.json b/docs/spock/reports/st.Broad_System_Test.json index 68906cd38..fc89e3439 100644 --- a/docs/spock/reports/st.Broad_System_Test.json +++ b/docs/spock/reports/st.Broad_System_Test.json @@ -4,19 +4,19 @@ "narrative":"", "subjects":[], "statistics":{ - "runs":"1", + "runs":"4", "successRate":"100.0%", "failures":"0", "errors":"0", "skipped":"0", - "duration":"0.033 seconds" + "duration":"0.083 seconds" }, "headers":[],"tags":{},"see":[], "features":[ { "id":"The long broad integration test runs successfully.", "result":"PASS", - "duration":"0.030 seconds", + "duration":"0.040 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -24,6 +24,81 @@ {"kind":"expect","text":"The integration test runs without exceptions or assertion errors.","code":["BroadSystemTest.on() // This is the actual test."]} ], "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"A function with expression \"softplus((I[0]xI[1])*-100)\" can be backpropagated.", + "result":"PASS", + "duration":"0.011 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(true);","Tensor tensor1 = Tensor.of(Shape.of(1, 3), 2d);","Tensor tensor2 = Tensor.of(Double).withShape(2, 1).all(-1.0);","tensor1.setRqsGradient(true);","tensor2.setRqsGradient(true);"]}, + + {"kind":"when","text":"","code":["Tensor result1 = Tensor.of(\"softplus((I[0]xI[1])*-100)\", [tensor1, tensor2]);","Tensor result2 = (Tensor.of(\"i0 x i1\", tensor1, tensor2)*-100).softplus();"]}, + + {"kind":"then","text":"","code":["result1.toString() == \"[2x3]:(200.0, 200.0, 200.0, 200.0, 200.0, 200.0); ->d[2x3]:(-100.0, -100.0, -100.0, -100.0, -100.0, -100.0)\"","result2.toString() == \"[2x3]:(200.0, 200.0, 200.0, 200.0, 200.0, 200.0); ->d[2x3]:(-100.0, -100.0, -100.0, -100.0, -100.0, -100.0)\""]}, + + {"kind":"when","text":"We perform a backwards pass of a gradient of `-0.1`:","code":["result1.backward( -0.1 );"]}, + + {"kind":"then","text":"","code":["tensor1.gradient.get().toString() == \"[1x3]:(-20.0, -20.0, -20.0)\"","tensor2.gradient.get().toString() == 
\"[2x1]:(60.0, 60.0)\""]}, + + {"kind":"when","text":"We perform a backwards pass of a gradient of `-0.1`:","code":["result2.backward( -0.1 );"]}, + + {"kind":"then","text":"","code":["tensor1.gradient.get().toString() == \"[1x3]:(-40.0, -40.0, -40.0)\"","tensor2.gradient.get().toString() == \"[2x1]:(120.0, 120.0)\""]} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"A function with expression \"softplus(tanh(I[0]*I[1]*2)*I[1])\" can be backpropagated.", + "result":"PASS", + "duration":"0.016 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(true);","Tensor tensor1 = Tensor.of(Shape.of(2), 2d);","Tensor tensor2 = Tensor.of(Shape.of(2), 4d);","tensor1.setRqsGradient(true);","tensor2.setRqsGradient(true);"]}, + + {"kind":"when","text":"","code":["Tensor result1 = Tensor.of(\"softplus(tanh(I[0]*I[1]*2)*I[1])\", [tensor1, tensor2]);","Tensor result2 = ((tensor1 * tensor2 * 2).tanh()*tensor2).softplus();"]}, + + {"kind":"then","text":"","code":["result1.toString({it.hasDerivatives=false}) == \"[2]:(4.01815, 4.01815)\"","result2.toString({it.hasDerivatives=false}) == \"[2]:(4.01815, 4.01815)\""]}, + + {"kind":"when","text":"We perform a backwards pass of a gradient of `100`:","code":["result1.backward( 100 );"]}, + + {"kind":"then","text":"","code":["tensor1.gradient.get().toString() == \"[2]:(159.09e-12, 159.09e-12)\"","tensor2.gradient.get().toString() == \"[2]:(98.2014, 98.2014)\""]}, + + {"kind":"when","text":"We perform a backwards pass of a gradient of `100`:","code":["result2.backward( 100 );"]}, + + {"kind":"then","text":"","code":["tensor1.gradient.get().toString() == \"[2]:(318.18e-12, 318.18e-12)\"","tensor2.gradient.get().toString() == \"[2]:(196.403, 196.403)\""]} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"A function with expression \"(-3*(2*(i0*-1)))*(-1*i0)\" can be backpropagated.", + "result":"PASS", + "duration":"0.010 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(true);","Tensor tensor1 = Tensor.of(Shape.of(1), 2d);//-2*4 = 8 | *3 = -24","tensor1.setRqsGradient(true);"]}, + + {"kind":"when","text":"","code":["Tensor result1 = Tensor.of(\"(-3*(2*(i0*-1)))*(-1*i0)\", [tensor1]);","Tensor result2 = ((((tensor1*-1)*2)*-3)*(tensor1*-1));"]}, + + {"kind":"then","text":"","code":["result1.toString({it.hasDerivatives=false}) == \"[1]:(-24.0)\"","result2.toString({it.hasDerivatives=false}) == \"[1]:(-24.0)\""]}, + + {"kind":"when","text":"We perform a backwards pass of a gradient of `2`:","code":["result1.backward( 2 );"]}, + + {"kind":"then","text":"","code":["tensor1.gradient.get().toString() == \"[1]:(-48.0)\""]}, + + {"kind":"when","text":"We perform a backwards pass of a gradient of `2`:","code":["result2.backward( 2 );"]}, + + {"kind":"then","text":"","code":["tensor1.gradient.get().toString() == \"[1]:(-96.0)\""]} + ], + "problems":{"dataValues":[], "errors":[]} } ], diff --git a/docs/spock/reports/st.NN_Concepts_Spec.json b/docs/spock/reports/st.NN_Concepts_Spec.json index e507df792..29ee88929 100644 --- a/docs/spock/reports/st.NN_Concepts_Spec.json +++ b/docs/spock/reports/st.NN_Concepts_Spec.json @@ -1,7 +1,7 @@ { "className":"st.NN_Concepts_Spec", "title":"Examining Neural Network Architecture Snippets", - "narrative":"This specification is intended to showcase some 
basic building blocks of\n various neural network architectures.", + "narrative":"This specification is intended to showcase some basic building blocks of \n various neural network architectures.", "subjects":[], "statistics":{ "runs":"1", @@ -9,14 +9,14 @@ "failures":"0", "errors":"0", "skipped":"0", - "duration":"0.140 seconds" + "duration":"0.201 seconds" }, "headers":[],"tags":{},"see":[], "features":[ { "id":"The attention mechanism (found in the commonly known transformer) demonstrated.", "result":"PASS", - "duration":"0.138 seconds", + "duration":"0.200 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":["\n The attention mechanism is a core component of the transformer architecture and\n most likely the reason why it is so successful in natural language processing.\n\n Here you can see that the query and key weight matrices are trained\n if there is only one input vector.\n "] }, diff --git a/docs/spock/reports/st.Training_NNs_Spec.json b/docs/spock/reports/st.Training_NNs_Spec.json index bd027179a..fb1738bfc 100644 --- a/docs/spock/reports/st.Training_NNs_Spec.json +++ b/docs/spock/reports/st.Training_NNs_Spec.json @@ -1,22 +1,22 @@ { "className":"st.Training_NNs_Spec", "title":"Training a Neural Network Class", - "narrative":"When designing larger neural network architectures, what you would usually do is\n to create a class that represents the whole model (which itself might be composed\n of smaller models).\n\n This class would then represent something that can be executed and then trained.\n This Specification shows how to instantiate, execute and train various\n pre-defined example neural network models.", + "narrative":"When designing larger neural network architectures, what you would usually do is\n to create a class that represents the whole model (which itself might be composed\n of smaller models). \n\n This class would then represent something that can be executed and then trained.\n This Specification shows how to instantiate, execute and train various \n pre-defined example neural network models.", "subjects":[], "statistics":{ - "runs":"3", + "runs":"5", "successRate":"100.0%", "failures":"0", "errors":"0", "skipped":"0", - "duration":"7.071 seconds" + "duration":"12.622 seconds" }, "headers":[],"tags":{},"see":[], "features":[ { "id":"We can run the attention head test model.", "result":"PASS", - "duration":"5.276 seconds", + "duration":"9.452 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":["\n This little test simply executes the `QuasiMultiHeadAttention` model class\n and checks if the loss is decreasing over time.\n You can check out how this is implemented in the `QuasiMultiHeadAttention` class.\n Here you will only see how the training is executed.\n "] }, @@ -35,7 +35,7 @@ { "id":"A simple 3 layer neural network converges.", "result":"PASS", - "duration":"1.331 seconds", + "duration":"2.328 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -50,9 +50,55 @@ }, { - "id":"A very simple 1 layer NN converges.", + "id":"A very simple 1 layer NN converges. 
[0]", "result":"PASS", - "duration":"0.458 seconds", + "duration":"0.144 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["var inputs = Tensor.ofFloats().withShape( 2, 6 ).andFill(-4f..3f)","var weights = Tensor.ofRandom(Float, 6, 1)","var targets = Tensor.of( 0.2f, -0.1f, 0.5f, 1.2f, -0.3f, 0.2f ).reshape( 2, 1 )"]}, + + {"kind":"and","text":"","code":["weights.setRqsGradient( true )","applyOptimizer.accept(weights)"]}, + + {"kind":"and","text":"","code":["var pred","var losses = []"]}, + + {"kind":"when","text":"","code":["100.times {"," pred = inputs.matMul( weights ).tanh()"," var loss = ((pred - targets)**2).sum()"," loss.backward()"," weights.applyGradient()"," losses << loss.item()","}"]}, + + {"kind":"then","text":"","code":["pred.shape == [2, 1]","losses[0] > losses[losses.size()-1]","losses[0] > 2","losses[losses.size()-1] < 0.5"]}, + + {"kind":"where","text":"","code":{"applyOptimizer":["{ it.set(Optimizer.SGD.withLearningRate(0.03)) }","{ it.set(Optimizer.ADAM) }","{ it.set(Optimizer.RMSProp.withLearningRate(0.05)) }"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"A very simple 1 layer NN converges. [1]", + "result":"PASS", + "duration":"0.434 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["var inputs = Tensor.ofFloats().withShape( 2, 6 ).andFill(-4f..3f)","var weights = Tensor.ofRandom(Float, 6, 1)","var targets = Tensor.of( 0.2f, -0.1f, 0.5f, 1.2f, -0.3f, 0.2f ).reshape( 2, 1 )"]}, + + {"kind":"and","text":"","code":["weights.setRqsGradient( true )","applyOptimizer.accept(weights)"]}, + + {"kind":"and","text":"","code":["var pred","var losses = []"]}, + + {"kind":"when","text":"","code":["100.times {"," pred = inputs.matMul( weights ).tanh()"," var loss = ((pred - targets)**2).sum()"," loss.backward()"," weights.applyGradient()"," losses << loss.item()","}"]}, + + {"kind":"then","text":"","code":["pred.shape == [2, 1]","losses[0] > losses[losses.size()-1]","losses[0] > 2","losses[losses.size()-1] < 0.5"]}, + + {"kind":"where","text":"","code":{"applyOptimizer":["{ it.set(Optimizer.SGD.withLearningRate(0.03)) }","{ it.set(Optimizer.ADAM) }","{ it.set(Optimizer.RMSProp.withLearningRate(0.05)) }"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"A very simple 1 layer NN converges. 
[2]", + "result":"PASS", + "duration":"0.255 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, diff --git a/docs/spock/reports/summary.json b/docs/spock/reports/summary.json index 57d616f65..b4161edda 100644 --- a/docs/spock/reports/summary.json +++ b/docs/spock/reports/summary.json @@ -1,10 +1,10 @@ { "project": "Neureka", "version": "0.20.1", - "created": "Thu Jun 29 18:08:38 CEST 2023", + "created": "Mon Dec 16 14:38:28 CET 2024", "statistics":{ "runs":"99", - "passed":"99", + "passed":"96", "failed":"0", "featureFailures":"0", "successRate":"1.0", @@ -86,13 +86,13 @@ "className":"st.Broad_System_Test", "title":"", "narrative":"", - "featureCount":"1", + "featureCount":"4", "failures":"0", "errors":"0", "skipped":"0" , "successRate":"1.0", "duration":"?", - "executedFeatures":[{"id":"The long broad integration test runs successfully.","extraInfo":[]}], + "executedFeatures":[{"id":"A function with expression \"(-3*(2*(i0*-1)))*(-1*i0)\" can be backpropagated.","extraInfo":[]},{"id":"A function with expression \"softplus((I[0]xI[1])*-100)\" can be backpropagated.","extraInfo":[]},{"id":"A function with expression \"softplus(tanh(I[0]*I[1]*2)*I[1])\" can be backpropagated.","extraInfo":[]},{"id":"The long broad integration test runs successfully.","extraInfo":[]}], "ignoredFeatures":[] },{ "className":"st.NN_Concepts_Spec", @@ -377,11 +377,11 @@ "featureCount":"4", "failures":"0", "errors":"0", - "skipped":"0" , + "skipped":"4" , "successRate":"1.0", "duration":"?", - "executedFeatures":[{"id":"Ad hoc compilation produces expected exceptions when duplication is found.","extraInfo":[]},{"id":"Ad hoc compilation produces expected exceptions.","extraInfo":[]},{"id":"An OpenCLDevice will throw an exception when trying to add a tensor whose \"data parent\" is not outsourced.","extraInfo":[]},{"id":"Trying to restore a tensor which is not on a device raises exception.","extraInfo":[]}], - "ignoredFeatures":[] + "executedFeatures":[], + "ignoredFeatures":[{"id":"Ad hoc compilation produces expected exceptions when duplication is found.","extraInfo":[]},{"id":"Ad hoc compilation produces expected exceptions.","extraInfo":[]},{"id":"An OpenCLDevice will throw an exception when trying to add a tensor whose \"data parent\" is not outsourced.","extraInfo":[]},{"id":"Trying to restore a tensor which is not on a device raises exception.","extraInfo":[]}] },{ "className":"ut.device.OpenCLDevice_Spec", "title":"The OpenCLDevice Specification", @@ -389,11 +389,11 @@ "featureCount":"8", "failures":"0", "errors":"0", - "skipped":"0" , + "skipped":"8" , "successRate":"1.0", "duration":"?", - "executedFeatures":[{"id":"Ad hoc compilation produces executable kernel.","extraInfo":[]},{"id":"Ad hoc compilation works for WIP general purpose matrix multiplication.","extraInfo":[]},{"id":"Ad hoc compilation works for custom column major based tiled matrix multiplication.","extraInfo":[]},{"id":"Ad hoc compilation works for custom simple row major based matrix multiplication.","extraInfo":[]},{"id":"Ad hoc matrix multiplication works for multiple of 16 matrices.","extraInfo":[]},{"id":"An OpenCLDevice loads tensors in a provided lambda temporarily.","extraInfo":[]},{"id":"We can get the items of an outsourced tensor as a primitive array.","extraInfo":[]},{"id":"We can take a look at the underlying data array of an outsourced tensor through the unsafe API.","extraInfo":[]}], - "ignoredFeatures":[] + "executedFeatures":[], + "ignoredFeatures":[{"id":"Ad hoc compilation produces executable 
kernel.","extraInfo":[]},{"id":"Ad hoc compilation works for WIP general purpose matrix multiplication.","extraInfo":[]},{"id":"Ad hoc compilation works for custom column major based tiled matrix multiplication.","extraInfo":[]},{"id":"Ad hoc compilation works for custom simple row major based matrix multiplication.","extraInfo":[]},{"id":"Ad hoc matrix multiplication works for multiple of 16 matrices.","extraInfo":[]},{"id":"An OpenCLDevice loads tensors in a provided lambda temporarily.","extraInfo":[]},{"id":"We can get the items of an outsourced tensor as a primitive array.","extraInfo":[]},{"id":"We can take a look at the underlying data array of an outsourced tensor through the unsafe API.","extraInfo":[]}] },{ "className":"ut.device.OpenCL_Spec", "title":"Working with OpenCL", @@ -401,11 +401,11 @@ "featureCount":"5", "failures":"0", "errors":"0", - "skipped":"0" , + "skipped":"5" , "successRate":"1.0", "duration":"?", - "executedFeatures":[{"id":"A given OpenCL context can be disposed!","extraInfo":[]},{"id":"An OpenCLDevice will throw an exception when trying to add a tensor whose \"data parent\" is not outsourced.","extraInfo":[]},{"id":"First found OpenCLDevice will have realistic numeric properties.","extraInfo":[]},{"id":"First found OpenCLDevice will have realistic properties inside summary query.","extraInfo":[]},{"id":"First found OpenCLDevice will have realistic text properties.","extraInfo":[]}], - "ignoredFeatures":[] + "executedFeatures":[], + "ignoredFeatures":[{"id":"A given OpenCL context can be disposed!","extraInfo":[]},{"id":"An OpenCLDevice will throw an exception when trying to add a tensor whose \"data parent\" is not outsourced.","extraInfo":[]},{"id":"First found OpenCLDevice will have realistic numeric properties.","extraInfo":[]},{"id":"First found OpenCLDevice will have realistic properties inside summary query.","extraInfo":[]},{"id":"First found OpenCLDevice will have realistic text properties.","extraInfo":[]}] },{ "className":"ut.device.internal.CLFunctionCompiler_Spec", "title":"Turning functions into kernels.", @@ -413,11 +413,11 @@ "featureCount":"4", "failures":"0", "errors":"0", - "skipped":"0" , + "skipped":"2" , "successRate":"1.0", "duration":"?", - "executedFeatures":[{"id":"The CLFunctionCompiler produces an operation which properly integrates to the backend.","extraInfo":[]},{"id":"The CLFunctionCompiler produces the expected \"ad hoc\" kernel.","extraInfo":[]},{"id":"The OpenCLDevice produces a working optimized Function for doubles.","extraInfo":[]},{"id":"The OpenCLDevice produces a working optimized Function for floats.","extraInfo":[]}], - "ignoredFeatures":[] + "executedFeatures":[{"id":"The CLFunctionCompiler produces an operation which properly integrates to the backend.","extraInfo":[]},{"id":"The CLFunctionCompiler produces the expected \"ad hoc\" kernel.","extraInfo":[]}], + "ignoredFeatures":[{"id":"The OpenCLDevice produces a working optimized Function for doubles.","extraInfo":[]},{"id":"The OpenCLDevice produces a working optimized Function for floats.","extraInfo":[]}] },{ "className":"ut.device.internal.CPU_Kernel_Spec", "title":"", @@ -761,11 +761,11 @@ "featureCount":"6", "failures":"0", "errors":"0", - "skipped":"0" , + "skipped":"1" , "successRate":"1.0", "duration":"?", - "executedFeatures":[{"id":"Backend related library objects adhere to the same toString formatting convention!","extraInfo":[]},{"id":"Every Thread instance has their own Neureka instance.","extraInfo":[]},{"id":"Neureka class instance has expected 
behaviour.","extraInfo":[]},{"id":"Neureka settings class can be locked causing its properties to be immutable.","extraInfo":[]},{"id":"OpenCL related library objects adhere to the same toString formatting convention!","extraInfo":[]},{"id":"Various library objects adhere to the same toString formatting convention!","extraInfo":[]}], - "ignoredFeatures":[] + "executedFeatures":[{"id":"Backend related library objects adhere to the same toString formatting convention!","extraInfo":[]},{"id":"Every Thread instance has their own Neureka instance.","extraInfo":[]},{"id":"Neureka class instance has expected behaviour.","extraInfo":[]},{"id":"Neureka settings class can be locked causing its properties to be immutable.","extraInfo":[]},{"id":"Various library objects adhere to the same toString formatting convention!","extraInfo":[]}], + "ignoredFeatures":[{"id":"OpenCL related library objects adhere to the same toString formatting convention!","extraInfo":[]}] },{ "className":"ut.optimization.ADAM_Spec", "title":"", @@ -881,11 +881,11 @@ "featureCount":"7", "failures":"0", "errors":"0", - "skipped":"0" , + "skipped":"3" , "successRate":"1.0", "duration":"?", - "executedFeatures":[{"id":"Tensor mapping lambdas produce expected tensors.","extraInfo":[]},{"id":"The \"map\" method is a shorter convenience method for mapping to the same type.","extraInfo":[]},{"id":"We can analyse the values of a tensor using various predicate receiving methods","extraInfo":[]},{"id":"We can find both min and max items in a tensor by providing a comparator.","extraInfo":[]},{"id":"We can initialize a tensor using a filler lambda mapping indices to items.","extraInfo":[]},{"id":"We can use the \"filter\" method as a shortcut for \"stream().filter(..)\".","extraInfo":[]},{"id":"We can use the \"flatMap\" method as a shortcut for \"stream().flatMap(..)\".","extraInfo":[]}], - "ignoredFeatures":[] + "executedFeatures":[{"id":"We can find both min and max items in a tensor by providing a comparator.","extraInfo":[]},{"id":"We can initialize a tensor using a filler lambda mapping indices to items.","extraInfo":[]},{"id":"We can use the \"filter\" method as a shortcut for \"stream().filter(..)\".","extraInfo":[]},{"id":"We can use the \"flatMap\" method as a shortcut for \"stream().flatMap(..)\".","extraInfo":[]}], + "ignoredFeatures":[{"id":"Tensor mapping lambdas produce expected tensors.","extraInfo":[]},{"id":"The \"map\" method is a shorter convenience method for mapping to the same type.","extraInfo":[]},{"id":"We can analyse the values of a tensor using various predicate receiving methods","extraInfo":[]}] },{ "className":"ut.tensors.Reshape_Spec", "title":"Tensor Reshaping", @@ -953,11 +953,11 @@ "featureCount":"4", "failures":"0", "errors":"0", - "skipped":"0" , + "skipped":"1" , "successRate":"1.0", "duration":"?", - "executedFeatures":[{"id":"Adding OpenCL device to tensor makes tensor be \"outsourced\" and contain the Device instance as component.","extraInfo":[]},{"id":"Tensors try to migrate themselves to a device that is being added to them as component.","extraInfo":[]},{"id":"The device of a tensor can be accessed via the \"device()\" method.","extraInfo":[]},{"id":"When creating slices of tensors then this should trigger a \"parent - child\" relation noticeable to the device!","extraInfo":[]}], - "ignoredFeatures":[] + "executedFeatures":[{"id":"Tensors try to migrate themselves to a device that is being added to them as component.","extraInfo":[]},{"id":"The device of a tensor can be accessed via the 
\"device()\" method.","extraInfo":[]},{"id":"When creating slices of tensors then this should trigger a \"parent - child\" relation noticeable to the device!","extraInfo":[]}], + "ignoredFeatures":[{"id":"Adding OpenCL device to tensor makes tensor be \"outsourced\" and contain the Device instance as component.","extraInfo":[]}] },{ "className":"ut.tensors.Tensor_Dot_Product_Spec", "title":"Tensor Dot Products", @@ -1097,11 +1097,11 @@ "featureCount":"4", "failures":"0", "errors":"0", - "skipped":"0" , + "skipped":"1" , "successRate":"1.0", "duration":"?", - "executedFeatures":[{"id":"Inline operations cause illegal state exceptions.","extraInfo":[]},{"id":"Inline operations causes version incrementation.","extraInfo":[]},{"id":"Non-inline operations do not cause version incrementation.","extraInfo":[]},{"id":"Storing a tensor on a device should not change the version of a tensor (Even though its data changed technically).","extraInfo":[]}], - "ignoredFeatures":[] + "executedFeatures":[{"id":"Inline operations cause illegal state exceptions.","extraInfo":[]},{"id":"Inline operations causes version incrementation.","extraInfo":[]},{"id":"Non-inline operations do not cause version incrementation.","extraInfo":[]}], + "ignoredFeatures":[{"id":"Storing a tensor on a device should not change the version of a tensor (Even though its data changed technically).","extraInfo":[]}] },{ "className":"ut.tensors.exceptions.Tensor_Delete_Exception_Spec", "title":"", diff --git a/docs/spock/reports/ut.autograd.AD_And_Computation_Graph_Spec.json b/docs/spock/reports/ut.autograd.AD_And_Computation_Graph_Spec.json index 836b443be..b4aa54478 100644 --- a/docs/spock/reports/ut.autograd.AD_And_Computation_Graph_Spec.json +++ b/docs/spock/reports/ut.autograd.AD_And_Computation_Graph_Spec.json @@ -9,14 +9,14 @@ "failures":"0", "errors":"0", "skipped":"0", - "duration":"0.605 seconds" + "duration":"1.288 seconds" }, "headers":[],"tags":{},"see":[], "features":[ { "id":"Reshaping produces expected computation graph and also works with reverse mode AD.", "result":"PASS", - "duration":"0.002 seconds", + "duration":"0.006 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -37,7 +37,7 @@ { "id":"Payloads and derivatives are null after garbage collection.", "result":"PASS", - "duration":"0.601 seconds", + "duration":"1.278 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, diff --git a/docs/spock/reports/ut.autograd.Autograd_Explained.json b/docs/spock/reports/ut.autograd.Autograd_Explained.json index 5bd60c40e..ee1605794 100644 --- a/docs/spock/reports/ut.autograd.Autograd_Explained.json +++ b/docs/spock/reports/ut.autograd.Autograd_Explained.json @@ -1,7 +1,7 @@ { "className":"ut.autograd.Autograd_Explained", "title":"Autograd - Automatic Differentiation", - "narrative":"Central to all neural networks in Neureka is the autograd package.\n The autograd package provides automatic differentiation for all default operations on Tensors.\n Neureka is a define-by-run library, which means that your backpropagation is defined by how\n your code is run, and that every single iteration can be different.\n\n The class neureka.Tensor is the central class of the main package.\n If you set its attribute 'rqsGradient' to True, Neureka starts to track all operations on it.\n When you finish the forward pass of your network\n you can call .backward() and have all the gradients computed\n and distributed to the tensors requiring them automatically.\n\n The gradient for a tensor will be accumulated into a child 
tensor (component) which\n can be accessed via the '.getGradient()' method.\n\n To stop a tensor from tracking history, you can call '.detach()' to detach it from the\n computation history, and to prevent future computation from being tracked.", + "narrative":"Central to all neural networks in Neureka is the autograd package. \n The autograd package provides automatic differentiation for all default operations on Tensors. \n Neureka is a define-by-run library, which means that your backpropagation is defined by how \n your code is run, and that every single iteration can be different. \n\n The class neureka.Tensor is the central class of the main package. \n If you set its attribute 'rqsGradient' to True, Neureka starts to track all operations on it. \n When you finish the forward pass of your network \n you can call .backward() and have all the gradients computed \n and distributed to the tensors requiring them automatically. \n\n The gradient for a tensor will be accumulated into a child tensor (component) which \n can be accessed via the '.getGradient()' method. \n\n To stop a tensor from tracking history, you can call '.detach()' to detach it from the \n computation history, and to prevent future computation from being tracked.", "subjects":["neureka.Tensor","neureka.autograd.GraphNode"], "statistics":{ "runs":"1", @@ -9,14 +9,14 @@ "failures":"0", "errors":"0", "skipped":"0", - "duration":"0.006 seconds" + "duration":"0.011 seconds" }, "headers":["\n There’s one more class which is very important for autograd implementation : the 'GraphNode class'! \n Tensor and GraphNode instances are interconnected and build up an acyclic graph, \n that encodes a complete history of computation. \n Each tensor has a .getGraphNode() attribute that references the GraphNode \n that has created a given Tensor instance. \n (except for Tensor created by the user or created by a \"detached\" Function instance... ). \n\n ","\n If you want to compute the derivatives, you can call .backward() on a Tensor. \n If the given Tensor is a scalar (i.e. it holds one element and has shape \"(1)\"), you do not need to \n specify any arguments to backward(), however if it has more elements, \n you should specify a gradient argument that is a tensor of matching shape. \n "],"tags":{},"see":[], "features":[ { "id":"Simple automatic differentiation and propagation.", "result":"PASS", - "duration":"0.004 seconds", + "duration":"0.008 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":["\n How can I compute gradients with Neureka automatically?\n "] }, diff --git a/docs/spock/reports/ut.autograd.Autograd_Flags_Explained.json b/docs/spock/reports/ut.autograd.Autograd_Flags_Explained.json index e34d77aee..29f490524 100644 --- a/docs/spock/reports/ut.autograd.Autograd_Flags_Explained.json +++ b/docs/spock/reports/ut.autograd.Autograd_Flags_Explained.json @@ -9,14 +9,14 @@ "failures":"0", "errors":"0", "skipped":"0", - "duration":"0.048 seconds" + "duration":"0.149 seconds" }, "headers":["\n Autograd Advanced - Custom Autograd
        \n
        \n Neureka does not necessarily perform autograd eagerly.
        \n If required then auto-differentiation will occur as one would expect
        \n similarly to the way PyTorch's autograd works.
        \n However for many use cases it might make sense to use different variants
        \n of auto-differentiation.
        \n This specification covers precisely these different autograd modes.
        \n
        \n "],"tags":{},"see":[], "features":[ { - "id":"Advanced backpropagation on all AD-Modes [code: y*y*3, whenRsd: false, whenUse: false, doJIT: false, afterBack: .*1.*4\\.5.*, afterUse: .*1.*4\\.5.*, afterRqd: .*1.*4\\.5.*, afterAll: .*1.*4\\.5.*, #0]", + "id":"Advanced backpropagation on all AD-Modes [0]", "result":"PASS", - "duration":"0.008 seconds", + "duration":"0.030 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":["\n What is JIT-Prop and how does it affect autograd ? Let's take a look !
        \n This run covers the feature having the following settings :
        \n
        \n Neureka.instance().settings().autograd().:
        \n isRetainingPendingErrorForJITProp := false
        \n isApplyingGradientWhenTensorIsUsed := false
        \n isApplyingGradientWhenRequested := false
        \n
        \n ...code producing the result : 'y*y*3'
        \n
        \n is-Retaining-Pending-Error-For-JITProp :
        \n
        \n This flag enables an optimization technique which only propagates error values to
        \n gradients if needed by a tensor (the tensor is used again) and otherwise accumulate them
        \n at divergent differentiation paths within the computation graph.
        \n If the flag is set to true
        \n then error values will accumulate at such junction nodes.
        \n This technique however uses more memory but will
        \n improve performance for some networks substantially.
        \n The technique is termed JIT-Propagation.
        \n
        \n
        \n is-Applying-Gradient-When-Tensor-Is-Used :
        \n
        \n Gradients will automatically be applied (or JITed) to tensors as soon as
        \n they are being used for calculation (GraphNode instantiation).
        \n This feature works well with JIT-Propagation.
        \n
        \n
        \n is-Applying-Gradient-When-Requested :
        \n
        \n Gradients will only be applied if requested.
        \n Usually this happens immediately, however
        \n if the flag 'applyGradientWhenTensorIsUsed' is set
        \n to true, then the tensor will only be updated by its
        \n gradient if requested AND the tensor is used for calculation! (GraphNode instantiation).
        \n
        \n Let's take a look : \n "] }, @@ -59,9 +59,9 @@ }, { - "id":"Advanced backpropagation on all AD-Modes [code: y*y*3, whenRsd: true, whenUse: false, doJIT: false, afterBack: .*1.*4\\.5.*, afterUse: .*1.*4\\.5.*, afterRqd: .*5\\.5.*null.*, afterAll: .*5\\.5.*null.*, #1]", + "id":"Advanced backpropagation on all AD-Modes [1]", "result":"PASS", - "duration":"0.005 seconds", + "duration":"0.014 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":["\n What is JIT-Prop and how does it affect autograd ? Let's take a look !
        \n This run covers the feature having the following settings :
        \n
        \n Neureka.instance().settings().autograd().:
        \n isRetainingPendingErrorForJITProp := false
        \n isApplyingGradientWhenTensorIsUsed := false
        \n isApplyingGradientWhenRequested := true
        \n
        \n ...code producing the result : 'y*y*3'
        \n
        \n is-Retaining-Pending-Error-For-JITProp :
        \n
        \n This flag enables an optimization technique which only propagates error values to
        \n gradients if needed by a tensor (the tensor is used again) and otherwise accumulates them <br> 
        \n at divergent differentiation paths within the computation graph. <br> 
        \n If the flag is set to true, <br> 
        \n then error values will accumulate at such junction nodes. <br> 
        \n This technique, however, uses more memory but will <br> 
        \n substantially improve performance for some networks. <br> 
        \n The technique is termed JIT-Propagation.
        \n
        \n
        \n is-Applying-Gradient-When-Tensor-Is-Used :
        \n
        \n Gradients will automatically be applied (or JITed) to tensors as soon as
        \n they are used for calculation (GraphNode instantiation). <br> 
        \n This feature works well with JIT-Propagation.
        \n
        \n
        \n is-Applying-Gradient-When-Requested :
        \n
        \n Gradients will only be applied if requested.
        \n Usually this happens immediately; however, <br> 
        \n if the flag 'applyGradientWhenTensorIsUsed' is set <br> 
        \n to true, then the tensor will only be updated by its <br> 
        \n gradient if requested AND the tensor is used for calculation! (GraphNode instantiation). <br> 
        \n
        \n Let's take a look : \n "] }, @@ -104,9 +104,9 @@ }, { - "id":"Advanced backpropagation on all AD-Modes [code: y*y*3, whenRsd: false, whenUse: true, doJIT: false, afterBack: .*1.*4\\.5.*, afterUse: .*5\\.5.*null.*, afterRqd: .*5\\.5.*null.*, afterAll: .*5\\.5.*null.*, #2]", + "id":"Advanced backpropagation on all AD-Modes [2]", "result":"PASS", - "duration":"0.004 seconds", + "duration":"0.014 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":["\n What is JIT-Prop and how does it affect autograd ? Let's take a look !
        \n This run covers the feature having the following settings :
        \n
        \n Neureka.instance().settings().autograd().:
        \n isRetainingPendingErrorForJITProp := false
        \n isApplyingGradientWhenTensorIsUsed := true
        \n isApplyingGradientWhenRequested := false
        \n
        \n ...code producing the result : 'y*y*3'
        \n
        \n is-Retaining-Pending-Error-For-JITProp :
        \n
        \n This flag enables an optimization technique which only propagates error values to
        \n gradients if needed by a tensor (the tensor is used again) and otherwise accumulates them <br> 
        \n at divergent differentiation paths within the computation graph. <br> 
        \n If the flag is set to true, <br> 
        \n then error values will accumulate at such junction nodes. <br> 
        \n This technique, however, uses more memory but will <br> 
        \n substantially improve performance for some networks. <br> 
        \n The technique is termed JIT-Propagation.
        \n
        \n
        \n is-Applying-Gradient-When-Tensor-Is-Used :
        \n
        \n Gradients will automatically be applied (or JITed) to tensors as soon as
        \n they are used for calculation (GraphNode instantiation). <br> 
        \n This feature works well with JIT-Propagation.
        \n
        \n
        \n is-Applying-Gradient-When-Requested :
        \n
        \n Gradients will only be applied if requested.
        \n Usually this happens immediately; however, <br> 
        \n if the flag 'applyGradientWhenTensorIsUsed' is set <br> 
        \n to true, then the tensor will only be updated by its <br> 
        \n gradient if requested AND the tensor is used for calculation! (GraphNode instantiation). <br> 
        \n
        \n Let's take a look : \n "] }, @@ -149,9 +149,9 @@ }, { - "id":"Advanced backpropagation on all AD-Modes [code: y*y*3, whenRsd: true, whenUse: true, doJIT: false, afterBack: .*1.*4\\.5.*, afterUse: .*1.*4\\.5.*, afterRqd: .*1.*4\\.5.*, afterAll: .*5\\.5.*null.*, #3]", + "id":"Advanced backpropagation on all AD-Modes [3]", "result":"PASS", - "duration":"0.004 seconds", + "duration":"0.017 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":["\n What is JIT-Prop and how does it affect autograd ? Let's take a look !
        \n This run covers the feature having the following settings :
        \n
        \n Neureka.instance().settings().autograd().:
        \n isRetainingPendingErrorForJITProp := false
        \n isApplyingGradientWhenTensorIsUsed := true
        \n isApplyingGradientWhenRequested := true
        \n
        \n ...code producing the result : 'y*y*3'
        \n
        \n is-Retaining-Pending-Error-For-JITProp :
        \n
        \n This flag enables an optimization technique which only propagates error values to
        \n gradients if needed by a tensor (the tensor is used again) and otherwise accumulates them <br> 
        \n at divergent differentiation paths within the computation graph. <br> 
        \n If the flag is set to true, <br> 
        \n then error values will accumulate at such junction nodes. <br> 
        \n This technique, however, uses more memory but will <br> 
        \n substantially improve performance for some networks. <br> 
        \n The technique is termed JIT-Propagation.
        \n
        \n
        \n is-Applying-Gradient-When-Tensor-Is-Used :
        \n
        \n Gradients will automatically be applied (or JITed) to tensors as soon as
        \n they are used for calculation (GraphNode instantiation). <br> 
        \n This feature works well with JIT-Propagation.
        \n
        \n
        \n is-Applying-Gradient-When-Requested :
        \n
        \n Gradients will only be applied if requested.
        \n Usually this happens immediately; however, <br> 
        \n if the flag 'applyGradientWhenTensorIsUsed' is set <br> 
        \n to true, then the tensor will only be updated by its <br> 
        \n gradient if requested AND the tensor is used for calculation! (GraphNode instantiation). <br> 
        \n
        \n Let's take a look : \n "] }, @@ -194,9 +194,9 @@ }, { - "id":"Advanced backpropagation on all AD-Modes [code: y*y*3, whenRsd: false, whenUse: false, doJIT: true, afterBack: .*1.*null.*, afterUse: .*1.*null.*, afterRqd: .*1.*null.*, afterAll: .*1.*null.*, #4]", + "id":"Advanced backpropagation on all AD-Modes [4]", "result":"PASS", - "duration":"0.005 seconds", + "duration":"0.015 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":["\n What is JIT-Prop and how does it affect autograd ? Let's take a look !
        \n This run covers the feature having the following settings :
        \n
        \n Neureka.instance().settings().autograd().:
        \n isRetainingPendingErrorForJITProp := true
        \n isApplyingGradientWhenTensorIsUsed := false
        \n isApplyingGradientWhenRequested := false
        \n
        \n ...code producing the result : 'y*y*3'
        \n
        \n is-Retaining-Pending-Error-For-JITProp :
        \n
        \n This flag enables an optimization technique which only propagates error values to
        \n gradients if needed by a tensor (the tensor is used again) and otherwise accumulates them <br> 
        \n at divergent differentiation paths within the computation graph. <br> 
        \n If the flag is set to true, <br> 
        \n then error values will accumulate at such junction nodes. <br> 
        \n This technique, however, uses more memory but will <br> 
        \n substantially improve performance for some networks. <br> 
        \n The technique is termed JIT-Propagation.
        \n
        \n
        \n is-Applying-Gradient-When-Tensor-Is-Used :
        \n
        \n Gradients will automatically be applied (or JITed) to tensors as soon as
        \n they are used for calculation (GraphNode instantiation). <br> 
        \n This feature works well with JIT-Propagation.
        \n
        \n
        \n is-Applying-Gradient-When-Requested :
        \n
        \n Gradients will only be applied if requested.
        \n Usually this happens immediately; however, <br> 
        \n if the flag 'applyGradientWhenTensorIsUsed' is set <br> 
        \n to true, then the tensor will only be updated by its <br> 
        \n gradient if requested AND the tensor is used for calculation! (GraphNode instantiation). <br> 
        \n
        \n Let's take a look : \n "] }, @@ -239,9 +239,9 @@ }, { - "id":"Advanced backpropagation on all AD-Modes [code: y*y*3, whenRsd: true, whenUse: false, doJIT: true, afterBack: .*1.*null.*, afterUse: .*1.*null.*, afterRqd: .*5\\.5.*null.*, afterAll: .*5\\.5.*null.*, #5]", + "id":"Advanced backpropagation on all AD-Modes [5]", "result":"PASS", - "duration":"0.005 seconds", + "duration":"0.013 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":["\n What is JIT-Prop and how does it affect autograd ? Let's take a look !
        \n This run covers the feature having the following settings :
        \n
        \n Neureka.instance().settings().autograd().:
        \n isRetainingPendingErrorForJITProp := true
        \n isApplyingGradientWhenTensorIsUsed := false
        \n isApplyingGradientWhenRequested := true
        \n
        \n ...code producing the result : 'y*y*3'
        \n
        \n is-Retaining-Pending-Error-For-JITProp :
        \n
        \n This flag enables an optimization technique which only propagates error values to
        \n gradients if needed by a tensor (the tensor is used again) and otherwise accumulates them <br> 
        \n at divergent differentiation paths within the computation graph. <br> 
        \n If the flag is set to true, <br> 
        \n then error values will accumulate at such junction nodes. <br> 
        \n This technique, however, uses more memory but will <br> 
        \n substantially improve performance for some networks. <br> 
        \n The technique is termed JIT-Propagation.
        \n
        \n
        \n is-Applying-Gradient-When-Tensor-Is-Used :
        \n
        \n Gradients will automatically be applied (or JITed) to tensors as soon as
        \n they are used for calculation (GraphNode instantiation). <br> 
        \n This feature works well with JIT-Propagation.
        \n
        \n
        \n is-Applying-Gradient-When-Requested :
        \n
        \n Gradients will only be applied if requested.
        \n Usually this happens immediately; however, <br> 
        \n if the flag 'applyGradientWhenTensorIsUsed' is set <br> 
        \n to true, then the tensor will only be updated by its <br> 
        \n gradient if requested AND the tensor is used for calculation! (GraphNode instantiation). <br> 
        \n
        \n Let's take a look : \n "] }, @@ -284,9 +284,9 @@ }, { - "id":"Advanced backpropagation on all AD-Modes [code: y*y*3, whenRsd: false, whenUse: true, doJIT: true, afterBack: .*1.*null.*, afterUse: .*5\\.5.*null.*, afterRqd: .*5\\.5.*null.*, afterAll: .*5\\.5.*null.*, #6]", + "id":"Advanced backpropagation on all AD-Modes [6]", "result":"PASS", - "duration":"0.004 seconds", + "duration":"0.014 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":["\n What is JIT-Prop and how does it affect autograd ? Let's take a look !
        \n This run covers the feature having the following settings :
        \n
        \n Neureka.instance().settings().autograd().:
        \n isRetainingPendingErrorForJITProp := true
        \n isApplyingGradientWhenTensorIsUsed := true
        \n isApplyingGradientWhenRequested := false
        \n
        \n ...code producing the result : 'y*y*3'
        \n
        \n is-Retaining-Pending-Error-For-JITProp :
        \n
        \n This flag enables an optimization technique which only propagates error values to
        \n gradients if needed by a tensor (the tensor is used again) and otherwise accumulates them <br> 
        \n at divergent differentiation paths within the computation graph. <br> 
        \n If the flag is set to true, <br> 
        \n then error values will accumulate at such junction nodes. <br> 
        \n This technique, however, uses more memory but will <br> 
        \n substantially improve performance for some networks. <br> 
        \n The technique is termed JIT-Propagation.
        \n
        \n
        \n is-Applying-Gradient-When-Tensor-Is-Used :
        \n
        \n Gradients will automatically be applied (or JITed) to tensors as soon as
        \n they are used for calculation (GraphNode instantiation). <br> 
        \n This feature works well with JIT-Propagation.
        \n
        \n
        \n is-Applying-Gradient-When-Requested :
        \n
        \n Gradients will only be applied if requested.
        \n Usually this happens immediately; however, <br> 
        \n if the flag 'applyGradientWhenTensorIsUsed' is set <br> 
        \n to true, then the tensor will only be updated by its <br> 
        \n gradient if requested AND the tensor is used for calculation! (GraphNode instantiation). <br> 
        \n
        \n Let's take a look : \n "] }, @@ -329,9 +329,9 @@ }, { - "id":"Advanced backpropagation on all AD-Modes [code: y*y*3, whenRsd: true, whenUse: true, doJIT: true, afterBack: .*1.*null.*, afterUse: .*1.*null.*, afterRqd: .*1.*null.*, afterAll: .*5\\.5.*null.*, #7]", + "id":"Advanced backpropagation on all AD-Modes [7]", "result":"PASS", - "duration":"0.003 seconds", + "duration":"0.011 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":["\n What is JIT-Prop and how does it affect autograd ? Let's take a look !
        \n This run covers the feature having the following settings :
        \n
        \n Neureka.instance().settings().autograd().:
        \n isRetainingPendingErrorForJITProp := true
        \n isApplyingGradientWhenTensorIsUsed := true
        \n isApplyingGradientWhenRequested := true
        \n
        \n ...code producing the result : 'y*y*3'
        \n
        \n is-Retaining-Pending-Error-For-JITProp :
        \n
        \n This flag enables an optimization technique which only propagates error values to
        \n gradients if needed by a tensor (the tensor is used again) and otherwise accumulates them <br> 
        \n at divergent differentiation paths within the computation graph. <br> 
        \n If the flag is set to true, <br> 
        \n then error values will accumulate at such junction nodes. <br> 
        \n This technique, however, uses more memory but will <br> 
        \n substantially improve performance for some networks. <br> 
        \n The technique is termed JIT-Propagation.
        \n
        \n
        \n is-Applying-Gradient-When-Tensor-Is-Used :
        \n
        \n Gradients will automatically be applied (or JITed) to tensors as soon as
        \n they are used for calculation (GraphNode instantiation). <br> 
        \n This feature works well with JIT-Propagation.
        \n
        \n
        \n is-Applying-Gradient-When-Requested :
        \n
        \n Gradients will only be applied if requested.
        \n Usually this happens immediately; however, <br> 
        \n if the flag 'applyGradientWhenTensorIsUsed' is set <br> 
        \n to true, then the tensor will only be updated by its <br> 
        \n gradient if requested AND the tensor is used for calculation! (GraphNode instantiation). <br> 
        \n
        \n Let's take a look : \n "] }, @@ -376,7 +376,7 @@ { "id":"We can create a shallow copy of a tensor detached from the computation graph.", "result":"PASS", - "duration":"0", + "duration":"0.003 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, diff --git a/docs/spock/reports/ut.autograd.Autograd_NN_Spec.json b/docs/spock/reports/ut.autograd.Autograd_NN_Spec.json index 9cad1c1b5..6adc9a057 100644 --- a/docs/spock/reports/ut.autograd.Autograd_NN_Spec.json +++ b/docs/spock/reports/ut.autograd.Autograd_NN_Spec.json @@ -1,22 +1,22 @@ { "className":"ut.autograd.Autograd_NN_Spec", "title":"Simple Neural Network autograd integration test", - "narrative":"The integration test below has been implemented by using\n the following code and the result it produces as reference :\n https://medium.com/dair-ai/a-simple-neural-network-from-scratch-with-pytorch-and-google-colab-c7f3830618e0\n\n The following seed has been used to assure reproducibility :\n 'torch.manual_seed(503672689411)'", + "narrative":"The integration test below has been implemented by using\n the following code and the result it produces as reference : \n https://medium.com/dair-ai/a-simple-neural-network-from-scratch-with-pytorch-and-google-colab-c7f3830618e0 \n\n The following seed has been used to assure reproducibility :\n 'torch.manual_seed(503672689411)'", "subjects":["neureka.Tensor","neureka.autograd.GraphNode"], "statistics":{ - "runs":"5", + "runs":"7", "successRate":"100.0%", "failures":"0", "errors":"0", "skipped":"0", - "duration":"0.076 seconds" + "duration":"0.148 seconds" }, "headers":["\n\n "],"tags":{},"see":[], "features":[ { "id":"Autograd works in a simple mat-mul based feed forward neural network.", "result":"PASS", - "duration":"0.012 seconds", + "duration":"0.023 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -35,7 +35,7 @@ { "id":"Autograd works in a simple convolutional dot product based feed forward neural network.", "result":"PASS", - "duration":"0.028 seconds", + "duration":"0.052 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -54,7 +54,7 @@ { "id":"Autograd works in a simple convolutional dot product and float based feed forward neural network.", "result":"PASS", - "duration":"0.023 seconds", + "duration":"0.049 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -71,9 +71,38 @@ }, { - "id":"Autograd work for simple matrix multiplications.", + "id":"Autograd work for simple matrix multiplications. 
[0]", + "result":"PASS", + "duration":"0.004 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["var a = Tensor.of([2, 3], -1f..4f).setRqsGradient(true).mut.toType(type)","var b = Tensor.of([3, 1], [-4d, -2d, 0d]).setRqsGradient(true).mut.toType(type)"]}, + + {"kind":"when","text":"","code":["var c = a.matMul(b)"]}, + + {"kind":"then","text":"","code":["c.itemType == type"]}, + + {"kind":"and","text":"","code":["a.toString() == \"(2x3):[\" +"," \"-1.0, 0.0, 1.0, \" +"," \"2.0, 3.0, 4.0\" +"," \"]:g:[null]\""]}, + + {"kind":"and","text":"","code":["b.toString() == \"(3x1):[\" +"," \"-4.0, \" +"," \"-2.0, \" +"," \"0.0\" +"," \"]:g:[null]\""]}, + + {"kind":"and","text":"","code":["def cStr = c.toString()","cStr.contains \"(2x1):[4.0, -14.0]\"","cStr.contains \"->d(3x2):[-1.0, 2.0, 0.0, 3.0, 1.0, 4.0]\"","cStr.contains \"->d(1x3):[-4.0, -2.0, 0.0]\""]}, + + {"kind":"when","text":"","code":["c.backward(Tensor.of(c.shape, [-1d, 1d]).mut.toType(type)) // (2x1):[-1, 1]"]}, + + {"kind":"then","text":"","code":["a.toString() == \"(2x3):[-1.0, 0.0, 1.0, 2.0, 3.0, 4.0]:g:[4.0, 2.0, 0.0, -4.0, -2.0, 0.0]\"","b.toString() == \"(3x1):[-4.0, -2.0, 0.0]:g:[3.0, 3.0, 3.0]\""]}, + + {"kind":"where","text":"","code":{"type":["Double","Float"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Autograd work for simple matrix multiplications. [1]", "result":"PASS", - "duration":"0.002 seconds", + "duration":"0.003 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -100,7 +129,7 @@ }, { - "id":"Autograd works for 2 matrix multiplications in a row.", + "id":"Autograd works for 2 matrix multiplications in a row. [0]", "result":"PASS", "duration":"0.005 seconds", "iterations":{ @@ -126,6 +155,35 @@ {"kind":"where","text":"We test this feature on both the CPU as well as the GPU.","code":{"device":["CPU.get()","Device.get('first gpu')"]}} ], "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Autograd works for 2 matrix multiplications in a row. 
[1]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We set the experimental \"autoConvertToFloat\" flag to true.","code":["Neureka.get().backend().find(CLBackend).ifPresent({ it.settings.autoConvertToFloat=true })"]}, + + {"kind":"and","text":"\n We create 3 tensors with the shapes (2x3), (3x1) and (2x1) for matrix multiplication.\n All of them ought to be stored on the provided device and \n only the first 2 require gradients, whereas the third one does not.\n We use these tensors to mimic 2 linear forward passes in a neural network.\n ","code":["def a = Tensor.of([2, 3], -1d..4d).setRqsGradient(true).to(device)","def b = Tensor.of([3, 1], [-4d, -2d, 0d]).setRqsGradient(true).to(device)","def x = Tensor.of([[0.5d, 0.5d]]).to(device)"]}, + + {"kind":"expect","text":"Initially none of the tensors requiring gradients have any (see \"g:[null]\").","code":["a.toString() == \"(2x3):[\" +"," \"-1.0, 0.0, 1.0, \" +"," \"2.0, 3.0, 4.0\" +"," \"]:g:[null]\"","b.toString() == \"(3x1):[\" +"," \"-4.0, \" +"," \"-2.0, \" +"," \"0.0\" +"," \"]:g:[null]\"","x.toString() == \"(1x2):[0.5, 0.5]\""]}, + + {"kind":"when","text":"We perform 2 matrix multiplications in a row, using all 3 previously created tensors...","code":["var c = a.matMul(b)","var o = x.matMul(c)"]}, + + {"kind":"then","text":"The results from the two matrix multiplications are as expected.","code":["var cStr = c.toString()","cStr.contains \"(2x1):[4.0, -14.0]\"","cStr.contains \"->d(3x2):[-1.0, 2.0, 0.0, 3.0, 1.0, 4.0]\"","cStr.contains \"->d(1x3):[-4.0, -2.0, 0.0]\"","o.toString() == \"(1x1):[-5.0]; ->d(2x1):[0.5, 0.5]\""]}, + + {"kind":"and","text":"We still expect the first 2 tensors to not yet have any gradients (see \"g:[null]\").","code":["a.toString() == \"(2x3):[\" +"," \"-1.0, 0.0, 1.0, \" +"," \"2.0, 3.0, 4.0\" +"," \"]:g:[null]\"","b.toString() == \"(3x1):[\" +"," \"-4.0, \" +"," \"-2.0, \" +"," \"0.0\" +"," \"]:g:[null]\"","x.toString() == \"(1x2):[0.5, 0.5]\""]}, + + {"kind":"when","text":"We perform back-propagation...","code":["o.backward()"]}, + + {"kind":"then","text":"Contrary to before, the 2 matrices now do have the expected gradients automatically generated by the aut-grad system.","code":["a.toString() == \"(2x3):[-1.0, 0.0, 1.0, 2.0, 3.0, 4.0]:g:[-2.0, -1.0, 0.0, -2.0, -1.0, 0.0]\"","b.toString() == \"(3x1):[-4.0, -2.0, 0.0]:g:[0.5, 1.5, 2.5]\""]}, + + {"kind":"where","text":"We test this feature on both the CPU as well as the GPU.","code":{"device":["CPU.get()","Device.get('first gpu')"]}} + ], + "problems":{"dataValues":[], "errors":[]} } ], diff --git a/docs/spock/reports/ut.autograd.Autograd_Tensor_Spec.json b/docs/spock/reports/ut.autograd.Autograd_Tensor_Spec.json index d28adf068..711218a24 100644 --- a/docs/spock/reports/ut.autograd.Autograd_Tensor_Spec.json +++ b/docs/spock/reports/ut.autograd.Autograd_Tensor_Spec.json @@ -4,19 +4,19 @@ "narrative":"", "subjects":[], "statistics":{ - "runs":"3", + "runs":"4", "successRate":"100.0%", "failures":"0", "errors":"0", "skipped":"0", - "duration":"0.015 seconds" + "duration":"0.052 seconds" }, "headers":["\n

        Autograd Tensor Behavior

        \n

        \n Specified below is the behavior of the autograd system.\n

        \n "],"tags":{},"see":[], "features":[ { "id":"Test basic autograd behaviour. (Not on device)", "result":"PASS", - "duration":"0.003 seconds", + "duration":"0.014 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -55,9 +55,74 @@ }, { - "id":"Second-Test \"x-mul\" autograd behaviour. (Not on device)", + "id":"Second-Test \"x-mul\" autograd behaviour. (Not on device) [0]", "result":"PASS", - "duration":"0.007 seconds", + "duration":"0.014 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"Gradient auto apply for tensors in ue is set to false.","code":["Neureka.get().settings().autograd().setIsApplyingGradientWhenTensorIsUsed(false)"]}, + + {"kind":"and","text":"Tensor legacy view is set to true.","code":["Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(true)"]}, + + {"kind":"when","text":"","code":["def x = Tensor.ofDoubles()"," .withShape(3, 3)"," .andFill("," 1.0, 2.0, 5.0,"," -1.0, 4.0, -2.0,"," -2.0, 3.0, 4.0,"," )","def y = Tensor.of("," Shape.of(2, 2),"," new double[]{"," -1, 3,"," 2, 3,"," }).setRqsGradient(true)"]}, + + {"kind":"then","text":"","code":["then : y.toString().contains(\":g:(null)\")"]}, + + {"kind":"when","text":"","code":["when : def z = Tensor.of(\"I0xi1\", x, y)"]}, + + {"kind":"then","text":"","code":["then : z.toString().contains(\"[2x2]:(15.0, 15.0, 18.0, 8.0)\")"]}, + + {"kind":"when","text":"","code":["when : z = Tensor.of(new Object[]{x, \"x\", y})"]}, + + {"kind":"then","text":"","code":["then : z.toString().contains(\"[2x2]:(15.0, 15.0, 18.0, 8.0)\")"]}, + + {"kind":"when","text":"","code":["when : z.backward(Tensor.of(Shape.of(2, 2), 1))"]}, + + {"kind":"then","text":"","code":["then : y.toString().contains(\"[2x2]:(-1.0, 3.0, 2.0, 3.0):g:(6.0, 9.0, 4.0, 9.0)\")"]}, + + {"kind":"when","text":"","code":["x = Tensor.of(Shape.of(3, 3),"," new double[]{"," 1, 2, 5,"," -1, 4, -2,"," -2, 3, 4,"," }"," ).mut.toType(type)","y = Tensor.of(Shape.of(2, 2),"," new double[]{"," -1, 3,"," 2, 3,"," }).setRqsGradient(true).mut.toType(type)"]}, + + {"kind":"then","text":"","code":["then : y.toString().contains(\":g:(null)\")"]}, + + {"kind":"when","text":"","code":["when : z = Tensor.of(\"I0xi1\", y, x)"]}, + + {"kind":"then","text":"","code":["then : z.toString().contains(\"[2x2]:(15.0, 15.0, 18.0, 8.0)\")"]}, + + {"kind":"and","text":"","code":["and : z.itemType == type"]}, + + {"kind":"when","text":"","code":["when : z = Tensor.of(y, \"x\", x)"]}, + + {"kind":"then","text":"","code":["then : z.toString().contains(\"[2x2]:(15.0, 15.0, 18.0, 8.0)\")"]}, + + {"kind":"and","text":"","code":["and : z.itemType == type"]}, + + {"kind":"when","text":"","code":["when : z.backward(Tensor.of(Shape.of(2, 2), 1d))"]}, + + {"kind":"then","text":"","code":["then : y.toString().contains(\"[2x2]:(-1.0, 3.0, 2.0, 3.0):g:(6.0, 9.0, 4.0, 9.0)\")"]}, + + {"kind":"when","text":"","code":["x = Tensor.of(Shape.of(1), 3d).mut.toType(type)","Tensor b = Tensor.of(Shape.of(1), -5d).mut.toType(type)","Tensor w = Tensor.of(Shape.of(1), -2d).mut.toType(type)","z = Tensor.of(\"I0*i1*i2\", x, b, w)"]}, + + {"kind":"then","text":"","code":["then : z.toString().contains(\"[1]:(30.0)\")"]}, + + {"kind":"and","text":"","code":["and : z.itemType == type"]}, + + {"kind":"when","text":"","code":["x = Tensor.of(Shape.of(1), 4d).setRqsGradient(true).mut.toType(type)","b = Tensor.of(Shape.of(1), 0.5d).mut.toType(type)","w = Tensor.of(Shape.of(1), 0.5d).mut.toType(type)","y = Tensor.of(\"(2**i0**i1**i2**2\", x, b, w)"]}, 
+ + {"kind":"then","text":"","code":["y.toString().contains(\"[1]:(9.24238);\")","y.toString().contains(\" ->d[1]:(4.32078)\")"]}, + + {"kind":"and","text":"","code":["y.itemType == type"]}, + + {"kind":"where","text":"","code":{"type":["Double","Float"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Second-Test \"x-mul\" autograd behaviour. (Not on device) [1]", + "result":"PASS", + "duration":"0.011 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -122,7 +187,7 @@ { "id":"A tensor used as derivative within a computation graph will throw exception when trying to deleting it.", "result":"PASS", - "duration":"0.001 seconds", + "duration":"0.006 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, diff --git a/docs/spock/reports/ut.autograd.JITProp_Autograd_Tensor_Spec.json b/docs/spock/reports/ut.autograd.JITProp_Autograd_Tensor_Spec.json index 4b811e70e..a4a9a0f4b 100644 --- a/docs/spock/reports/ut.autograd.JITProp_Autograd_Tensor_Spec.json +++ b/docs/spock/reports/ut.autograd.JITProp_Autograd_Tensor_Spec.json @@ -9,14 +9,14 @@ "failures":"0", "errors":"0", "skipped":"0", - "duration":"0.121 seconds" + "duration":"0.638 seconds" }, "headers":["\n

        Autograd Tensor Integration Tests

        \n

        \n This specification contains tests which\n cover the autograd behavior of tensors.
        \n The classes involved in governing the tested features are\n the Tensor, GraphNode and Function (& implementations) classes.\n

        \n "],"tags":{},"see":[], "features":[ { "id":"Test pending error optimization", "result":"PASS", - "duration":"0.002 seconds", + "duration":"0.005 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -39,7 +39,7 @@ { "id":"Test JIT propagation variant one.", "result":"PASS", - "duration":"0.001 seconds", + "duration":"0.004 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -60,7 +60,7 @@ { "id":"Test JIT propagation variant two.", "result":"PASS", - "duration":"0.001 seconds", + "duration":"0.004 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -81,7 +81,7 @@ { "id":"Gradient auto-apply kicks in when used AD uses JIT prop", "result":"PASS", - "duration":"0.002 seconds", + "duration":"0.009 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -110,7 +110,7 @@ { "id":"Test no preemptive gradient apply when not requested and auto apply and JIT_prop", "result":"PASS", - "duration":"0.106 seconds", + "duration":"0.598 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -147,7 +147,7 @@ { "id":"Test autograd without JIT and auto apply.", "result":"PASS", - "duration":"0.001 seconds", + "duration":"0.004 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -172,7 +172,7 @@ { "id":"Test in-differential and JIT with auto apply", "result":"PASS", - "duration":"0.001 seconds", + "duration":"0.003 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -193,7 +193,7 @@ { "id":"Test no JIT prop when forward AD", "result":"PASS", - "duration":"0.001 seconds", + "duration":"0.003 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, diff --git a/docs/spock/reports/ut.autograd.internal.GraphNode_Instantiation_Exception_Unit_Tests.json b/docs/spock/reports/ut.autograd.internal.GraphNode_Instantiation_Exception_Unit_Tests.json index 47d5d09ec..b16b6ea68 100644 --- a/docs/spock/reports/ut.autograd.internal.GraphNode_Instantiation_Exception_Unit_Tests.json +++ b/docs/spock/reports/ut.autograd.internal.GraphNode_Instantiation_Exception_Unit_Tests.json @@ -9,14 +9,14 @@ "failures":"0", "errors":"0", "skipped":"0", - "duration":"0.071 seconds" + "duration":"0.189 seconds" }, "headers":["\n

        GraphNode Instantiation Tests

        \n

        \n Specified below are strict tests covering the behavior\n of the GraphNode class during instantiation where\n inputs are set up to cause expected exceptions.\n <br> 

        \n "],"tags":{},"see":[], "features":[ { "id":"GraphNode instantiation throws exception because tensors of ExecutionCall do not return GraphNode instances.", "result":"PASS", - "duration":"0.048 seconds", + "duration":"0.152 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -35,7 +35,7 @@ { "id":"GraphNode throws an exception when trying to execute an inline operation on inputs with active autograd.", "result":"PASS", - "duration":"0.020 seconds", + "duration":"0.034 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, diff --git a/docs/spock/reports/ut.autograd.internal.GraphNode_Tensor_Exception_Unit_Tests.json b/docs/spock/reports/ut.autograd.internal.GraphNode_Tensor_Exception_Unit_Tests.json index 51789744b..03a1e452b 100644 --- a/docs/spock/reports/ut.autograd.internal.GraphNode_Tensor_Exception_Unit_Tests.json +++ b/docs/spock/reports/ut.autograd.internal.GraphNode_Tensor_Exception_Unit_Tests.json @@ -9,14 +9,14 @@ "failures":"0", "errors":"0", "skipped":"0", - "duration":"0.004 seconds" + "duration":"0.008 seconds" }, "headers":["\n

        GraphNode Tensor Exceptions

        \n

        \n Specified below are strict tests covering the exception behavior\n of the GraphNode class interacting with Tensor instances.\n

        \n "],"tags":{},"see":[], "features":[ { "id":"A tensor cannot be deleted if it is part of a graph and the tensor is used as derivative.", "result":"PASS", - "duration":"0.002 seconds", + "duration":"0.006 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, diff --git a/docs/spock/reports/ut.backend.Backend_Extension_Spec.json b/docs/spock/reports/ut.backend.Backend_Extension_Spec.json index 03024bcdd..4e0ed7ae2 100644 --- a/docs/spock/reports/ut.backend.Backend_Extension_Spec.json +++ b/docs/spock/reports/ut.backend.Backend_Extension_Spec.json @@ -9,14 +9,14 @@ "failures":"0", "errors":"0", "skipped":"0", - "duration":"0.058 seconds" + "duration":"0.071 seconds" }, "headers":["\n This specification defines the behavior of\n Operation instances and their ability to be extended!
        \n "],"tags":{},"see":[], "features":[ { "id":"Mock operation interacts with FunctionNode (AbstractFunction) instance as expected.", "result":"PASS", - "duration":"0.056 seconds", + "duration":"0.069 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, diff --git a/docs/spock/reports/ut.backend.Backend_MatMul_Extension_Spec.json b/docs/spock/reports/ut.backend.Backend_MatMul_Extension_Spec.json index 41504d5e7..e789bd750 100644 --- a/docs/spock/reports/ut.backend.Backend_MatMul_Extension_Spec.json +++ b/docs/spock/reports/ut.backend.Backend_MatMul_Extension_Spec.json @@ -4,19 +4,19 @@ "narrative":"", "subjects":[], "statistics":{ - "runs":"3", + "runs":"17", "successRate":"100.0%", "failures":"0", "errors":"0", "skipped":"0", - "duration":"0.053 seconds" + "duration":"0.083 seconds" }, "headers":[],"tags":{},"see":[], "features":[ { - "id":"GEMM matrix multiplication reference implementation can be set as custom OperationType and works as expected.", + "id":"GEMM matrix multiplication reference implementation can be set as custom OperationType and works as expected. [0]", "result":"PASS", - "duration":"0.045 seconds", + "duration":"0.060 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -43,7 +43,53 @@ }, { - "id":"Test context mock for opencl reference implementations.", + "id":"GEMM matrix multiplication reference implementation can be set as custom OperationType and works as expected. [1]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["BackendContext oldContext = Neureka.get().backend()","BackendContext testContext = oldContext.clone()"]}, + + {"kind":"when","text":"","code":["def run = testContext.runner()"]}, + + {"kind":"then","text":"","code":["run { testContext == Neureka.get().backend() }"]}, + + {"kind":"when","text":"","code":["Tensor t1 = Tensor.of([row_sze, com_sze], -3d..8d)","Tensor t2 = Tensor.of([com_sze, col_sze], -7d..4d)"," run {"," Neureka.get().backend()"," .addOperation("," Operation"," .builder()"," .identifier('test_function')"," .operator('test_function')"," .arity(-1)"," .isIndexer(false)"," .isOperator(false)"," .isDifferentiable(true)"," .isInline(false)"," .stringifier("," children -> {"," String expression = String.join(\", \", children);"," if (expression.charAt(0) == '(' && expression.charAt(expression.length() - 1) == ')') {"," return \"test_function\" + expression;"," }"," return \"test_function\" + \"(\" + expression + \")\";"," }"," )"," .build()"," .setAlgorithm("," DeviceAlgorithm.withName(\"my_algorithm\")"," .setIsSuitableFor(call -> SuitabilityPredicate.GOOD )"," .setAutogradModeFor(call -> AutoDiffMode.BACKWARD_ONLY )"," .setExecution("," (outerCaller, outerCall) ->"," Result.of(AbstractDeviceAlgorithm.executeFor("," outerCaller, outerCall,"," call -> AbstractDeviceAlgorithm.executeDeviceAlgorithm( call )"," ))"," .withAutoDiff((ADActionSupplier){ Function f, ExecutionCall> adCall, boolean forward ->"," if (forward) throw new IllegalArgumentException(\"Reshape operation does not support forward-AD!\");"," return ADAction.of((t, error) -> Function.of(f.toString(), false).derive(new Tensor[]{error}, 0));"," })"," )"," .setCallPreparation("," call -> {"," Device device = call.getDevice();"," if ( call.input( 0 ) == null ) // Creating a new tensor:"," {"," Shape shp = Shape.of(call.input( 1 ).getNDConf().shape(0), call.input( 2 ).getNDConf().shape(1))"," Tensor output = Tensor.of(shp, 0.0);"," 
output.mut.setIsVirtual(false);"," device.store( output );"," call = call.withInputAt( 0, output );"," }"," return call;"," }"," )"," .setImplementationFor("," CPU.class,"," CPUImplementation"," .withArity(3)"," .andImplementation("," (call) -> {"," Tensor drn = call.input(Number.class, 0)"," Tensor src1 = call.input(Number.class, 1)"," Tensor src2 = call.input(Number.class, 2)"," assert src1.shape(1) == src2.shape(0)",""," //for ( int i=0; i a = Tensor.of(type, Shape.of(M,K), A)","Tensor b = Tensor.of(type, Shape.of(K,N), B)"]}, + + {"kind":"and","text":"We create the data layout type based on the provided string...","code":["var dataLayout = layout == 'ROW' ? NDConfiguration.Layout.ROW_MAJOR : NDConfiguration.Layout.COLUMN_MAJOR"]}, + + {"kind":"and","text":"After that we convert both matrices to the layout!","code":["a.mut.toLayout( dataLayout )","b.mut.toLayout( dataLayout )"]}, + + {"kind":"expect","text":"This should of cause make report that they indeed have this new layout.","code":["a.NDConf.layout == dataLayout","b.NDConf.layout == dataLayout"]}, + + {"kind":"when","text":"We now perform the matrix multiplication with the 2 matrix tensors...","code":["Tensor c = a.matMul(b)"]}, + + {"kind":"then","text":"The result will have the expected (M x N) shape.","code":["c.shape == [M,N]"]}, + + {"kind":"and","text":"It should have the expected value array.","code":["c.items == expectedC"]}, + + {"kind":"where","text":"We use the following scenario parameters:","code":{"layout":["'ROW'","'ROW'","'ROW'","'ROW'","'ROW'","'ROW'","'COL'","'COL'","'COL'","'COL'","'COL'","'COL'"],"type":["Double","Double","Double","Float","Float","Float","Double","Double","Double","Float","Float","Float"],"M":["2","1","2","2","1","2","2","1","2","2","1","2"],"K":["2","2","1","2","2","1","2","2","1","2","2","1"],"N":["2","1","2","2","1","2","2","1","2","2","1","2"],"A":["[4, 3, 2, 1] as double[]","[-2,1] as double[]","[-2,1] as double[]","[4,3,2,1] as float[]","[-2,1] as float[]","[-2,1] as float[]","[4, 3, 2, 1] as double[]","[-2,1] as double[]","[-2,1] as double[]","[4,3,2,1] as float[]","[-2,1] as float[]","[-2,1] as float[]"],"B":["[-0.5, 1.5, 1, -2] as double[]","[-1, -1.5] as double[]","[-1, -1.5] as double[]","[-0.5, 1.5, 1, -2] as float[]","[-1, -1.5] as float[]","[-1, -1.5] as float[]","[-0.5, 1.5, 1, -2] as double[]","[-1, -1.5] as double[]","[-1, -1.5] as double[]","[-0.5, 1.5, 1, -2] as float[]","[-1, -1.5] as float[]","[-1, -1.5] as float[]"],"expectedC":["[1, 0, 0, 1 ] as double[]","[ 0.5 ] as double[]","[ 2.0, 3.0, -1.0, -1.5 ] as double[]","[ 1, 0, 0, 1 ] as float[]","[ 0.5 ] as float[]","[ 2.0, 3.0, -1.0, -1.5 ] as float[]","[1, 0, 0, 1 ] as double[]","[ 0.5 ] as double[]","[ 2.0, 3.0, -1.0, -1.5 ] as double[]","[ 1, 0, 0, 1 ] as float[]","[ 0.5 ] as float[]","[ 2.0, 3.0, -1.0, -1.5 ] as float[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The simple CPU matrix multiplication implementation works as expected. 
[1]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":["\n Matrix multiplication is possible between matrices of various dimensions,\n data types as well as data layouts!\n "] + }, + "blocks":[ + {"kind":"given","text":"We instantiate 2 matrices based on the data from the data table at the end of this method.","code":["Tensor a = Tensor.of(type, Shape.of(M,K), A)","Tensor b = Tensor.of(type, Shape.of(K,N), B)"]}, + + {"kind":"and","text":"We create the data layout type based on the provided string...","code":["var dataLayout = layout == 'ROW' ? NDConfiguration.Layout.ROW_MAJOR : NDConfiguration.Layout.COLUMN_MAJOR"]}, + + {"kind":"and","text":"After that we convert both matrices to the layout!","code":["a.mut.toLayout( dataLayout )","b.mut.toLayout( dataLayout )"]}, + + {"kind":"expect","text":"This should of cause make report that they indeed have this new layout.","code":["a.NDConf.layout == dataLayout","b.NDConf.layout == dataLayout"]}, + + {"kind":"when","text":"We now perform the matrix multiplication with the 2 matrix tensors...","code":["Tensor c = a.matMul(b)"]}, + + {"kind":"then","text":"The result will have the expected (M x N) shape.","code":["c.shape == [M,N]"]}, + + {"kind":"and","text":"It should have the expected value array.","code":["c.items == expectedC"]}, + + {"kind":"where","text":"We use the following scenario parameters:","code":{"layout":["'ROW'","'ROW'","'ROW'","'ROW'","'ROW'","'ROW'","'COL'","'COL'","'COL'","'COL'","'COL'","'COL'"],"type":["Double","Double","Double","Float","Float","Float","Double","Double","Double","Float","Float","Float"],"M":["2","1","2","2","1","2","2","1","2","2","1","2"],"K":["2","2","1","2","2","1","2","2","1","2","2","1"],"N":["2","1","2","2","1","2","2","1","2","2","1","2"],"A":["[4, 3, 2, 1] as double[]","[-2,1] as double[]","[-2,1] as double[]","[4,3,2,1] as float[]","[-2,1] as float[]","[-2,1] as float[]","[4, 3, 2, 1] as double[]","[-2,1] as double[]","[-2,1] as double[]","[4,3,2,1] as float[]","[-2,1] as float[]","[-2,1] as float[]"],"B":["[-0.5, 1.5, 1, -2] as double[]","[-1, -1.5] as double[]","[-1, -1.5] as double[]","[-0.5, 1.5, 1, -2] as float[]","[-1, -1.5] as float[]","[-1, -1.5] as float[]","[-0.5, 1.5, 1, -2] as double[]","[-1, -1.5] as double[]","[-1, -1.5] as double[]","[-0.5, 1.5, 1, -2] as float[]","[-1, -1.5] as float[]","[-1, -1.5] as float[]"],"expectedC":["[1, 0, 0, 1 ] as double[]","[ 0.5 ] as double[]","[ 2.0, 3.0, -1.0, -1.5 ] as double[]","[ 1, 0, 0, 1 ] as float[]","[ 0.5 ] as float[]","[ 2.0, 3.0, -1.0, -1.5 ] as float[]","[1, 0, 0, 1 ] as double[]","[ 0.5 ] as double[]","[ 2.0, 3.0, -1.0, -1.5 ] as double[]","[ 1, 0, 0, 1 ] as float[]","[ 0.5 ] as float[]","[ 2.0, 3.0, -1.0, -1.5 ] as float[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The simple CPU matrix multiplication implementation works as expected. [2]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":["\n Matrix multiplication is possible between matrices of various dimensions,\n data types as well as data layouts!\n "] + }, + "blocks":[ + {"kind":"given","text":"We instantiate 2 matrices based on the data from the data table at the end of this method.","code":["Tensor a = Tensor.of(type, Shape.of(M,K), A)","Tensor b = Tensor.of(type, Shape.of(K,N), B)"]}, + + {"kind":"and","text":"We create the data layout type based on the provided string...","code":["var dataLayout = layout == 'ROW' ? 
NDConfiguration.Layout.ROW_MAJOR : NDConfiguration.Layout.COLUMN_MAJOR"]}, + + {"kind":"and","text":"After that we convert both matrices to the layout!","code":["a.mut.toLayout( dataLayout )","b.mut.toLayout( dataLayout )"]}, + + {"kind":"expect","text":"This should of cause make report that they indeed have this new layout.","code":["a.NDConf.layout == dataLayout","b.NDConf.layout == dataLayout"]}, + + {"kind":"when","text":"We now perform the matrix multiplication with the 2 matrix tensors...","code":["Tensor c = a.matMul(b)"]}, + + {"kind":"then","text":"The result will have the expected (M x N) shape.","code":["c.shape == [M,N]"]}, + + {"kind":"and","text":"It should have the expected value array.","code":["c.items == expectedC"]}, + + {"kind":"where","text":"We use the following scenario parameters:","code":{"layout":["'ROW'","'ROW'","'ROW'","'ROW'","'ROW'","'ROW'","'COL'","'COL'","'COL'","'COL'","'COL'","'COL'"],"type":["Double","Double","Double","Float","Float","Float","Double","Double","Double","Float","Float","Float"],"M":["2","1","2","2","1","2","2","1","2","2","1","2"],"K":["2","2","1","2","2","1","2","2","1","2","2","1"],"N":["2","1","2","2","1","2","2","1","2","2","1","2"],"A":["[4, 3, 2, 1] as double[]","[-2,1] as double[]","[-2,1] as double[]","[4,3,2,1] as float[]","[-2,1] as float[]","[-2,1] as float[]","[4, 3, 2, 1] as double[]","[-2,1] as double[]","[-2,1] as double[]","[4,3,2,1] as float[]","[-2,1] as float[]","[-2,1] as float[]"],"B":["[-0.5, 1.5, 1, -2] as double[]","[-1, -1.5] as double[]","[-1, -1.5] as double[]","[-0.5, 1.5, 1, -2] as float[]","[-1, -1.5] as float[]","[-1, -1.5] as float[]","[-0.5, 1.5, 1, -2] as double[]","[-1, -1.5] as double[]","[-1, -1.5] as double[]","[-0.5, 1.5, 1, -2] as float[]","[-1, -1.5] as float[]","[-1, -1.5] as float[]"],"expectedC":["[1, 0, 0, 1 ] as double[]","[ 0.5 ] as double[]","[ 2.0, 3.0, -1.0, -1.5 ] as double[]","[ 1, 0, 0, 1 ] as float[]","[ 0.5 ] as float[]","[ 2.0, 3.0, -1.0, -1.5 ] as float[]","[1, 0, 0, 1 ] as double[]","[ 0.5 ] as double[]","[ 2.0, 3.0, -1.0, -1.5 ] as double[]","[ 1, 0, 0, 1 ] as float[]","[ 0.5 ] as float[]","[ 2.0, 3.0, -1.0, -1.5 ] as float[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The simple CPU matrix multiplication implementation works as expected. [3]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":["\n Matrix multiplication is possible between matrices of various dimensions,\n data types as well as data layouts!\n "] + }, + "blocks":[ + {"kind":"given","text":"We instantiate 2 matrices based on the data from the data table at the end of this method.","code":["Tensor a = Tensor.of(type, Shape.of(M,K), A)","Tensor b = Tensor.of(type, Shape.of(K,N), B)"]}, + + {"kind":"and","text":"We create the data layout type based on the provided string...","code":["var dataLayout = layout == 'ROW' ? 
NDConfiguration.Layout.ROW_MAJOR : NDConfiguration.Layout.COLUMN_MAJOR"]}, + + {"kind":"and","text":"After that we convert both matrices to the layout!","code":["a.mut.toLayout( dataLayout )","b.mut.toLayout( dataLayout )"]}, + + {"kind":"expect","text":"This should of cause make report that they indeed have this new layout.","code":["a.NDConf.layout == dataLayout","b.NDConf.layout == dataLayout"]}, + + {"kind":"when","text":"We now perform the matrix multiplication with the 2 matrix tensors...","code":["Tensor c = a.matMul(b)"]}, + + {"kind":"then","text":"The result will have the expected (M x N) shape.","code":["c.shape == [M,N]"]}, + + {"kind":"and","text":"It should have the expected value array.","code":["c.items == expectedC"]}, + + {"kind":"where","text":"We use the following scenario parameters:","code":{"layout":["'ROW'","'ROW'","'ROW'","'ROW'","'ROW'","'ROW'","'COL'","'COL'","'COL'","'COL'","'COL'","'COL'"],"type":["Double","Double","Double","Float","Float","Float","Double","Double","Double","Float","Float","Float"],"M":["2","1","2","2","1","2","2","1","2","2","1","2"],"K":["2","2","1","2","2","1","2","2","1","2","2","1"],"N":["2","1","2","2","1","2","2","1","2","2","1","2"],"A":["[4, 3, 2, 1] as double[]","[-2,1] as double[]","[-2,1] as double[]","[4,3,2,1] as float[]","[-2,1] as float[]","[-2,1] as float[]","[4, 3, 2, 1] as double[]","[-2,1] as double[]","[-2,1] as double[]","[4,3,2,1] as float[]","[-2,1] as float[]","[-2,1] as float[]"],"B":["[-0.5, 1.5, 1, -2] as double[]","[-1, -1.5] as double[]","[-1, -1.5] as double[]","[-0.5, 1.5, 1, -2] as float[]","[-1, -1.5] as float[]","[-1, -1.5] as float[]","[-0.5, 1.5, 1, -2] as double[]","[-1, -1.5] as double[]","[-1, -1.5] as double[]","[-0.5, 1.5, 1, -2] as float[]","[-1, -1.5] as float[]","[-1, -1.5] as float[]"],"expectedC":["[1, 0, 0, 1 ] as double[]","[ 0.5 ] as double[]","[ 2.0, 3.0, -1.0, -1.5 ] as double[]","[ 1, 0, 0, 1 ] as float[]","[ 0.5 ] as float[]","[ 2.0, 3.0, -1.0, -1.5 ] as float[]","[1, 0, 0, 1 ] as double[]","[ 0.5 ] as double[]","[ 2.0, 3.0, -1.0, -1.5 ] as double[]","[ 1, 0, 0, 1 ] as float[]","[ 0.5 ] as float[]","[ 2.0, 3.0, -1.0, -1.5 ] as float[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The simple CPU matrix multiplication implementation works as expected. [4]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":["\n Matrix multiplication is possible between matrices of various dimensions,\n data types as well as data layouts!\n "] + }, + "blocks":[ + {"kind":"given","text":"We instantiate 2 matrices based on the data from the data table at the end of this method.","code":["Tensor a = Tensor.of(type, Shape.of(M,K), A)","Tensor b = Tensor.of(type, Shape.of(K,N), B)"]}, + + {"kind":"and","text":"We create the data layout type based on the provided string...","code":["var dataLayout = layout == 'ROW' ? 
NDConfiguration.Layout.ROW_MAJOR : NDConfiguration.Layout.COLUMN_MAJOR"]}, + + {"kind":"and","text":"After that we convert both matrices to the layout!","code":["a.mut.toLayout( dataLayout )","b.mut.toLayout( dataLayout )"]}, + + {"kind":"expect","text":"This should of cause make report that they indeed have this new layout.","code":["a.NDConf.layout == dataLayout","b.NDConf.layout == dataLayout"]}, + + {"kind":"when","text":"We now perform the matrix multiplication with the 2 matrix tensors...","code":["Tensor c = a.matMul(b)"]}, + + {"kind":"then","text":"The result will have the expected (M x N) shape.","code":["c.shape == [M,N]"]}, + + {"kind":"and","text":"It should have the expected value array.","code":["c.items == expectedC"]}, + + {"kind":"where","text":"We use the following scenario parameters:","code":{"layout":["'ROW'","'ROW'","'ROW'","'ROW'","'ROW'","'ROW'","'COL'","'COL'","'COL'","'COL'","'COL'","'COL'"],"type":["Double","Double","Double","Float","Float","Float","Double","Double","Double","Float","Float","Float"],"M":["2","1","2","2","1","2","2","1","2","2","1","2"],"K":["2","2","1","2","2","1","2","2","1","2","2","1"],"N":["2","1","2","2","1","2","2","1","2","2","1","2"],"A":["[4, 3, 2, 1] as double[]","[-2,1] as double[]","[-2,1] as double[]","[4,3,2,1] as float[]","[-2,1] as float[]","[-2,1] as float[]","[4, 3, 2, 1] as double[]","[-2,1] as double[]","[-2,1] as double[]","[4,3,2,1] as float[]","[-2,1] as float[]","[-2,1] as float[]"],"B":["[-0.5, 1.5, 1, -2] as double[]","[-1, -1.5] as double[]","[-1, -1.5] as double[]","[-0.5, 1.5, 1, -2] as float[]","[-1, -1.5] as float[]","[-1, -1.5] as float[]","[-0.5, 1.5, 1, -2] as double[]","[-1, -1.5] as double[]","[-1, -1.5] as double[]","[-0.5, 1.5, 1, -2] as float[]","[-1, -1.5] as float[]","[-1, -1.5] as float[]"],"expectedC":["[1, 0, 0, 1 ] as double[]","[ 0.5 ] as double[]","[ 2.0, 3.0, -1.0, -1.5 ] as double[]","[ 1, 0, 0, 1 ] as float[]","[ 0.5 ] as float[]","[ 2.0, 3.0, -1.0, -1.5 ] as float[]","[1, 0, 0, 1 ] as double[]","[ 0.5 ] as double[]","[ 2.0, 3.0, -1.0, -1.5 ] as double[]","[ 1, 0, 0, 1 ] as float[]","[ 0.5 ] as float[]","[ 2.0, 3.0, -1.0, -1.5 ] as float[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The simple CPU matrix multiplication implementation works as expected. [5]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":["\n Matrix multiplication is possible between matrices of various dimensions,\n data types as well as data layouts!\n "] + }, + "blocks":[ + {"kind":"given","text":"We instantiate 2 matrices based on the data from the data table at the end of this method.","code":["Tensor a = Tensor.of(type, Shape.of(M,K), A)","Tensor b = Tensor.of(type, Shape.of(K,N), B)"]}, + + {"kind":"and","text":"We create the data layout type based on the provided string...","code":["var dataLayout = layout == 'ROW' ? 
NDConfiguration.Layout.ROW_MAJOR : NDConfiguration.Layout.COLUMN_MAJOR"]}, + + {"kind":"and","text":"After that we convert both matrices to the layout!","code":["a.mut.toLayout( dataLayout )","b.mut.toLayout( dataLayout )"]}, + + {"kind":"expect","text":"This should of cause make report that they indeed have this new layout.","code":["a.NDConf.layout == dataLayout","b.NDConf.layout == dataLayout"]}, + + {"kind":"when","text":"We now perform the matrix multiplication with the 2 matrix tensors...","code":["Tensor c = a.matMul(b)"]}, + + {"kind":"then","text":"The result will have the expected (M x N) shape.","code":["c.shape == [M,N]"]}, + + {"kind":"and","text":"It should have the expected value array.","code":["c.items == expectedC"]}, + + {"kind":"where","text":"We use the following scenario parameters:","code":{"layout":["'ROW'","'ROW'","'ROW'","'ROW'","'ROW'","'ROW'","'COL'","'COL'","'COL'","'COL'","'COL'","'COL'"],"type":["Double","Double","Double","Float","Float","Float","Double","Double","Double","Float","Float","Float"],"M":["2","1","2","2","1","2","2","1","2","2","1","2"],"K":["2","2","1","2","2","1","2","2","1","2","2","1"],"N":["2","1","2","2","1","2","2","1","2","2","1","2"],"A":["[4, 3, 2, 1] as double[]","[-2,1] as double[]","[-2,1] as double[]","[4,3,2,1] as float[]","[-2,1] as float[]","[-2,1] as float[]","[4, 3, 2, 1] as double[]","[-2,1] as double[]","[-2,1] as double[]","[4,3,2,1] as float[]","[-2,1] as float[]","[-2,1] as float[]"],"B":["[-0.5, 1.5, 1, -2] as double[]","[-1, -1.5] as double[]","[-1, -1.5] as double[]","[-0.5, 1.5, 1, -2] as float[]","[-1, -1.5] as float[]","[-1, -1.5] as float[]","[-0.5, 1.5, 1, -2] as double[]","[-1, -1.5] as double[]","[-1, -1.5] as double[]","[-0.5, 1.5, 1, -2] as float[]","[-1, -1.5] as float[]","[-1, -1.5] as float[]"],"expectedC":["[1, 0, 0, 1 ] as double[]","[ 0.5 ] as double[]","[ 2.0, 3.0, -1.0, -1.5 ] as double[]","[ 1, 0, 0, 1 ] as float[]","[ 0.5 ] as float[]","[ 2.0, 3.0, -1.0, -1.5 ] as float[]","[1, 0, 0, 1 ] as double[]","[ 0.5 ] as double[]","[ 2.0, 3.0, -1.0, -1.5 ] as double[]","[ 1, 0, 0, 1 ] as float[]","[ 0.5 ] as float[]","[ 2.0, 3.0, -1.0, -1.5 ] as float[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The simple CPU matrix multiplication implementation works as expected. [6]", + "result":"PASS", + "duration":"0.006 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":["\n Matrix multiplication is possible between matrices of various dimensions,\n data types as well as data layouts!\n "] + }, + "blocks":[ + {"kind":"given","text":"We instantiate 2 matrices based on the data from the data table at the end of this method.","code":["Tensor a = Tensor.of(type, Shape.of(M,K), A)","Tensor b = Tensor.of(type, Shape.of(K,N), B)"]}, + + {"kind":"and","text":"We create the data layout type based on the provided string...","code":["var dataLayout = layout == 'ROW' ? 
NDConfiguration.Layout.ROW_MAJOR : NDConfiguration.Layout.COLUMN_MAJOR"]}, + + {"kind":"and","text":"After that we convert both matrices to the layout!","code":["a.mut.toLayout( dataLayout )","b.mut.toLayout( dataLayout )"]}, + + {"kind":"expect","text":"This should of cause make report that they indeed have this new layout.","code":["a.NDConf.layout == dataLayout","b.NDConf.layout == dataLayout"]}, + + {"kind":"when","text":"We now perform the matrix multiplication with the 2 matrix tensors...","code":["Tensor c = a.matMul(b)"]}, + + {"kind":"then","text":"The result will have the expected (M x N) shape.","code":["c.shape == [M,N]"]}, + + {"kind":"and","text":"It should have the expected value array.","code":["c.items == expectedC"]}, + + {"kind":"where","text":"We use the following scenario parameters:","code":{"layout":["'ROW'","'ROW'","'ROW'","'ROW'","'ROW'","'ROW'","'COL'","'COL'","'COL'","'COL'","'COL'","'COL'"],"type":["Double","Double","Double","Float","Float","Float","Double","Double","Double","Float","Float","Float"],"M":["2","1","2","2","1","2","2","1","2","2","1","2"],"K":["2","2","1","2","2","1","2","2","1","2","2","1"],"N":["2","1","2","2","1","2","2","1","2","2","1","2"],"A":["[4, 3, 2, 1] as double[]","[-2,1] as double[]","[-2,1] as double[]","[4,3,2,1] as float[]","[-2,1] as float[]","[-2,1] as float[]","[4, 3, 2, 1] as double[]","[-2,1] as double[]","[-2,1] as double[]","[4,3,2,1] as float[]","[-2,1] as float[]","[-2,1] as float[]"],"B":["[-0.5, 1.5, 1, -2] as double[]","[-1, -1.5] as double[]","[-1, -1.5] as double[]","[-0.5, 1.5, 1, -2] as float[]","[-1, -1.5] as float[]","[-1, -1.5] as float[]","[-0.5, 1.5, 1, -2] as double[]","[-1, -1.5] as double[]","[-1, -1.5] as double[]","[-0.5, 1.5, 1, -2] as float[]","[-1, -1.5] as float[]","[-1, -1.5] as float[]"],"expectedC":["[1, 0, 0, 1 ] as double[]","[ 0.5 ] as double[]","[ 2.0, 3.0, -1.0, -1.5 ] as double[]","[ 1, 0, 0, 1 ] as float[]","[ 0.5 ] as float[]","[ 2.0, 3.0, -1.0, -1.5 ] as float[]","[1, 0, 0, 1 ] as double[]","[ 0.5 ] as double[]","[ 2.0, 3.0, -1.0, -1.5 ] as double[]","[ 1, 0, 0, 1 ] as float[]","[ 0.5 ] as float[]","[ 2.0, 3.0, -1.0, -1.5 ] as float[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The simple CPU matrix multiplication implementation works as expected. [7]", + "result":"PASS", + "duration":"0.002 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":["\n Matrix multiplication is possible between matrices of various dimensions,\n data types as well as data layouts!\n "] + }, + "blocks":[ + {"kind":"given","text":"We instantiate 2 matrices based on the data from the data table at the end of this method.","code":["Tensor a = Tensor.of(type, Shape.of(M,K), A)","Tensor b = Tensor.of(type, Shape.of(K,N), B)"]}, + + {"kind":"and","text":"We create the data layout type based on the provided string...","code":["var dataLayout = layout == 'ROW' ? 
NDConfiguration.Layout.ROW_MAJOR : NDConfiguration.Layout.COLUMN_MAJOR"]}, + + {"kind":"and","text":"After that we convert both matrices to the layout!","code":["a.mut.toLayout( dataLayout )","b.mut.toLayout( dataLayout )"]}, + + {"kind":"expect","text":"This should of cause make report that they indeed have this new layout.","code":["a.NDConf.layout == dataLayout","b.NDConf.layout == dataLayout"]}, + + {"kind":"when","text":"We now perform the matrix multiplication with the 2 matrix tensors...","code":["Tensor c = a.matMul(b)"]}, + + {"kind":"then","text":"The result will have the expected (M x N) shape.","code":["c.shape == [M,N]"]}, + + {"kind":"and","text":"It should have the expected value array.","code":["c.items == expectedC"]}, + + {"kind":"where","text":"We use the following scenario parameters:","code":{"layout":["'ROW'","'ROW'","'ROW'","'ROW'","'ROW'","'ROW'","'COL'","'COL'","'COL'","'COL'","'COL'","'COL'"],"type":["Double","Double","Double","Float","Float","Float","Double","Double","Double","Float","Float","Float"],"M":["2","1","2","2","1","2","2","1","2","2","1","2"],"K":["2","2","1","2","2","1","2","2","1","2","2","1"],"N":["2","1","2","2","1","2","2","1","2","2","1","2"],"A":["[4, 3, 2, 1] as double[]","[-2,1] as double[]","[-2,1] as double[]","[4,3,2,1] as float[]","[-2,1] as float[]","[-2,1] as float[]","[4, 3, 2, 1] as double[]","[-2,1] as double[]","[-2,1] as double[]","[4,3,2,1] as float[]","[-2,1] as float[]","[-2,1] as float[]"],"B":["[-0.5, 1.5, 1, -2] as double[]","[-1, -1.5] as double[]","[-1, -1.5] as double[]","[-0.5, 1.5, 1, -2] as float[]","[-1, -1.5] as float[]","[-1, -1.5] as float[]","[-0.5, 1.5, 1, -2] as double[]","[-1, -1.5] as double[]","[-1, -1.5] as double[]","[-0.5, 1.5, 1, -2] as float[]","[-1, -1.5] as float[]","[-1, -1.5] as float[]"],"expectedC":["[1, 0, 0, 1 ] as double[]","[ 0.5 ] as double[]","[ 2.0, 3.0, -1.0, -1.5 ] as double[]","[ 1, 0, 0, 1 ] as float[]","[ 0.5 ] as float[]","[ 2.0, 3.0, -1.0, -1.5 ] as float[]","[1, 0, 0, 1 ] as double[]","[ 0.5 ] as double[]","[ 2.0, 3.0, -1.0, -1.5 ] as double[]","[ 1, 0, 0, 1 ] as float[]","[ 0.5 ] as float[]","[ 2.0, 3.0, -1.0, -1.5 ] as float[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The simple CPU matrix multiplication implementation works as expected. [8]", + "result":"PASS", + "duration":"0.002 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":["\n Matrix multiplication is possible between matrices of various dimensions,\n data types as well as data layouts!\n "] + }, + "blocks":[ + {"kind":"given","text":"We instantiate 2 matrices based on the data from the data table at the end of this method.","code":["Tensor a = Tensor.of(type, Shape.of(M,K), A)","Tensor b = Tensor.of(type, Shape.of(K,N), B)"]}, + + {"kind":"and","text":"We create the data layout type based on the provided string...","code":["var dataLayout = layout == 'ROW' ? 
NDConfiguration.Layout.ROW_MAJOR : NDConfiguration.Layout.COLUMN_MAJOR"]}, + + {"kind":"and","text":"After that we convert both matrices to the layout!","code":["a.mut.toLayout( dataLayout )","b.mut.toLayout( dataLayout )"]}, + + {"kind":"expect","text":"This should of cause make report that they indeed have this new layout.","code":["a.NDConf.layout == dataLayout","b.NDConf.layout == dataLayout"]}, + + {"kind":"when","text":"We now perform the matrix multiplication with the 2 matrix tensors...","code":["Tensor c = a.matMul(b)"]}, + + {"kind":"then","text":"The result will have the expected (M x N) shape.","code":["c.shape == [M,N]"]}, + + {"kind":"and","text":"It should have the expected value array.","code":["c.items == expectedC"]}, + + {"kind":"where","text":"We use the following scenario parameters:","code":{"layout":["'ROW'","'ROW'","'ROW'","'ROW'","'ROW'","'ROW'","'COL'","'COL'","'COL'","'COL'","'COL'","'COL'"],"type":["Double","Double","Double","Float","Float","Float","Double","Double","Double","Float","Float","Float"],"M":["2","1","2","2","1","2","2","1","2","2","1","2"],"K":["2","2","1","2","2","1","2","2","1","2","2","1"],"N":["2","1","2","2","1","2","2","1","2","2","1","2"],"A":["[4, 3, 2, 1] as double[]","[-2,1] as double[]","[-2,1] as double[]","[4,3,2,1] as float[]","[-2,1] as float[]","[-2,1] as float[]","[4, 3, 2, 1] as double[]","[-2,1] as double[]","[-2,1] as double[]","[4,3,2,1] as float[]","[-2,1] as float[]","[-2,1] as float[]"],"B":["[-0.5, 1.5, 1, -2] as double[]","[-1, -1.5] as double[]","[-1, -1.5] as double[]","[-0.5, 1.5, 1, -2] as float[]","[-1, -1.5] as float[]","[-1, -1.5] as float[]","[-0.5, 1.5, 1, -2] as double[]","[-1, -1.5] as double[]","[-1, -1.5] as double[]","[-0.5, 1.5, 1, -2] as float[]","[-1, -1.5] as float[]","[-1, -1.5] as float[]"],"expectedC":["[1, 0, 0, 1 ] as double[]","[ 0.5 ] as double[]","[ 2.0, 3.0, -1.0, -1.5 ] as double[]","[ 1, 0, 0, 1 ] as float[]","[ 0.5 ] as float[]","[ 2.0, 3.0, -1.0, -1.5 ] as float[]","[1, 0, 0, 1 ] as double[]","[ 0.5 ] as double[]","[ 2.0, 3.0, -1.0, -1.5 ] as double[]","[ 1, 0, 0, 1 ] as float[]","[ 0.5 ] as float[]","[ 2.0, 3.0, -1.0, -1.5 ] as float[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The simple CPU matrix multiplication implementation works as expected. [9]", + "result":"PASS", + "duration":"0.002 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":["\n Matrix multiplication is possible between matrices of various dimensions,\n data types as well as data layouts!\n "] + }, + "blocks":[ + {"kind":"given","text":"We instantiate 2 matrices based on the data from the data table at the end of this method.","code":["Tensor a = Tensor.of(type, Shape.of(M,K), A)","Tensor b = Tensor.of(type, Shape.of(K,N), B)"]}, + + {"kind":"and","text":"We create the data layout type based on the provided string...","code":["var dataLayout = layout == 'ROW' ? 
NDConfiguration.Layout.ROW_MAJOR : NDConfiguration.Layout.COLUMN_MAJOR"]}, + + {"kind":"and","text":"After that we convert both matrices to the layout!","code":["a.mut.toLayout( dataLayout )","b.mut.toLayout( dataLayout )"]}, + + {"kind":"expect","text":"This should of cause make report that they indeed have this new layout.","code":["a.NDConf.layout == dataLayout","b.NDConf.layout == dataLayout"]}, + + {"kind":"when","text":"We now perform the matrix multiplication with the 2 matrix tensors...","code":["Tensor c = a.matMul(b)"]}, + + {"kind":"then","text":"The result will have the expected (M x N) shape.","code":["c.shape == [M,N]"]}, + + {"kind":"and","text":"It should have the expected value array.","code":["c.items == expectedC"]}, + + {"kind":"where","text":"We use the following scenario parameters:","code":{"layout":["'ROW'","'ROW'","'ROW'","'ROW'","'ROW'","'ROW'","'COL'","'COL'","'COL'","'COL'","'COL'","'COL'"],"type":["Double","Double","Double","Float","Float","Float","Double","Double","Double","Float","Float","Float"],"M":["2","1","2","2","1","2","2","1","2","2","1","2"],"K":["2","2","1","2","2","1","2","2","1","2","2","1"],"N":["2","1","2","2","1","2","2","1","2","2","1","2"],"A":["[4, 3, 2, 1] as double[]","[-2,1] as double[]","[-2,1] as double[]","[4,3,2,1] as float[]","[-2,1] as float[]","[-2,1] as float[]","[4, 3, 2, 1] as double[]","[-2,1] as double[]","[-2,1] as double[]","[4,3,2,1] as float[]","[-2,1] as float[]","[-2,1] as float[]"],"B":["[-0.5, 1.5, 1, -2] as double[]","[-1, -1.5] as double[]","[-1, -1.5] as double[]","[-0.5, 1.5, 1, -2] as float[]","[-1, -1.5] as float[]","[-1, -1.5] as float[]","[-0.5, 1.5, 1, -2] as double[]","[-1, -1.5] as double[]","[-1, -1.5] as double[]","[-0.5, 1.5, 1, -2] as float[]","[-1, -1.5] as float[]","[-1, -1.5] as float[]"],"expectedC":["[1, 0, 0, 1 ] as double[]","[ 0.5 ] as double[]","[ 2.0, 3.0, -1.0, -1.5 ] as double[]","[ 1, 0, 0, 1 ] as float[]","[ 0.5 ] as float[]","[ 2.0, 3.0, -1.0, -1.5 ] as float[]","[1, 0, 0, 1 ] as double[]","[ 0.5 ] as double[]","[ 2.0, 3.0, -1.0, -1.5 ] as double[]","[ 1, 0, 0, 1 ] as float[]","[ 0.5 ] as float[]","[ 2.0, 3.0, -1.0, -1.5 ] as float[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The simple CPU matrix multiplication implementation works as expected. [10]", + "result":"PASS", + "duration":"0.002 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":["\n Matrix multiplication is possible between matrices of various dimensions,\n data types as well as data layouts!\n "] + }, + "blocks":[ + {"kind":"given","text":"We instantiate 2 matrices based on the data from the data table at the end of this method.","code":["Tensor a = Tensor.of(type, Shape.of(M,K), A)","Tensor b = Tensor.of(type, Shape.of(K,N), B)"]}, + + {"kind":"and","text":"We create the data layout type based on the provided string...","code":["var dataLayout = layout == 'ROW' ? 
NDConfiguration.Layout.ROW_MAJOR : NDConfiguration.Layout.COLUMN_MAJOR"]}, + + {"kind":"and","text":"After that we convert both matrices to the layout!","code":["a.mut.toLayout( dataLayout )","b.mut.toLayout( dataLayout )"]}, + + {"kind":"expect","text":"This should of cause make report that they indeed have this new layout.","code":["a.NDConf.layout == dataLayout","b.NDConf.layout == dataLayout"]}, + + {"kind":"when","text":"We now perform the matrix multiplication with the 2 matrix tensors...","code":["Tensor c = a.matMul(b)"]}, + + {"kind":"then","text":"The result will have the expected (M x N) shape.","code":["c.shape == [M,N]"]}, + + {"kind":"and","text":"It should have the expected value array.","code":["c.items == expectedC"]}, + + {"kind":"where","text":"We use the following scenario parameters:","code":{"layout":["'ROW'","'ROW'","'ROW'","'ROW'","'ROW'","'ROW'","'COL'","'COL'","'COL'","'COL'","'COL'","'COL'"],"type":["Double","Double","Double","Float","Float","Float","Double","Double","Double","Float","Float","Float"],"M":["2","1","2","2","1","2","2","1","2","2","1","2"],"K":["2","2","1","2","2","1","2","2","1","2","2","1"],"N":["2","1","2","2","1","2","2","1","2","2","1","2"],"A":["[4, 3, 2, 1] as double[]","[-2,1] as double[]","[-2,1] as double[]","[4,3,2,1] as float[]","[-2,1] as float[]","[-2,1] as float[]","[4, 3, 2, 1] as double[]","[-2,1] as double[]","[-2,1] as double[]","[4,3,2,1] as float[]","[-2,1] as float[]","[-2,1] as float[]"],"B":["[-0.5, 1.5, 1, -2] as double[]","[-1, -1.5] as double[]","[-1, -1.5] as double[]","[-0.5, 1.5, 1, -2] as float[]","[-1, -1.5] as float[]","[-1, -1.5] as float[]","[-0.5, 1.5, 1, -2] as double[]","[-1, -1.5] as double[]","[-1, -1.5] as double[]","[-0.5, 1.5, 1, -2] as float[]","[-1, -1.5] as float[]","[-1, -1.5] as float[]"],"expectedC":["[1, 0, 0, 1 ] as double[]","[ 0.5 ] as double[]","[ 2.0, 3.0, -1.0, -1.5 ] as double[]","[ 1, 0, 0, 1 ] as float[]","[ 0.5 ] as float[]","[ 2.0, 3.0, -1.0, -1.5 ] as float[]","[1, 0, 0, 1 ] as double[]","[ 0.5 ] as double[]","[ 2.0, 3.0, -1.0, -1.5 ] as double[]","[ 1, 0, 0, 1 ] as float[]","[ 0.5 ] as float[]","[ 2.0, 3.0, -1.0, -1.5 ] as float[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The simple CPU matrix multiplication implementation works as expected. [11]", + "result":"PASS", + "duration":"0.002 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":["\n Matrix multiplication is possible between matrices of various dimensions,\n data types as well as data layouts!\n "] }, "blocks":[ {"kind":"given","text":"We instantiate 2 matrices based on the data from the data table at the end of this method.","code":["Tensor a = Tensor.of(type, Shape.of(M,K), A)","Tensor b = Tensor.of(type, Shape.of(K,N), B)"]}, diff --git a/docs/spock/reports/ut.backend.core.Backend_Algorithm_AD_Spec.json b/docs/spock/reports/ut.backend.core.Backend_Algorithm_AD_Spec.json index 395401675..02dee9df6 100644 --- a/docs/spock/reports/ut.backend.core.Backend_Algorithm_AD_Spec.json +++ b/docs/spock/reports/ut.backend.core.Backend_Algorithm_AD_Spec.json @@ -4,19 +4,100 @@ "narrative":"", "subjects":[], "statistics":{ - "runs":"4", + "runs":"37", "successRate":"100.0%", "failures":"0", "errors":"0", "skipped":"0", - "duration":"0.046 seconds" + "duration":"0.091 seconds" }, "headers":[],"tags":{},"see":[], "features":[ { - "id":"Operator implementations behave as expected.", + "id":"Operator implementations behave as expected. 
[0]", "result":"PASS", - "duration":"0.001 seconds", + "duration":"0.002 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"The current Neureka instance is being reset.","code":["Neureka.get().reset()"]}, + + {"kind":"and","text":"A mock ExecutionCall.","code":["var call = ExecutionCall.of().running(Mock(Operation)).on(Mock(Device))"]}, + + {"kind":"expect","text":"The algorithm is not null.","code":["alg != null"]}, + + {"kind":"and","text":"The algorithm is a bi-elementwise algorithm.","code":["alg instanceof BiElementwise"]}, + + {"kind":"and","text":"It has a non empty name and string representation.","code":["!alg.name.isEmpty()","!alg.toString().isEmpty()"]}, + + {"kind":"when","text":"A auto diff mode is being created by every algorithm...","code":["AutoDiffMode mode = alg.autoDiffModeFrom( call )"]}, + + {"kind":"then","text":"The agent is configured to perform forward-AD and it contains the derivative generated by the function!","code":["mode != null"]}, + + {"kind":"where","text":"The variable \"imp\" is from a List of OperationType implementations of type \"Operator\".","code":{"alg":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Operator implementations behave as expected. [1]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"The current Neureka instance is being reset.","code":["Neureka.get().reset()"]}, + + {"kind":"and","text":"A mock ExecutionCall.","code":["var call = ExecutionCall.of().running(Mock(Operation)).on(Mock(Device))"]}, + + {"kind":"expect","text":"The algorithm is not null.","code":["alg != null"]}, + + {"kind":"and","text":"The algorithm is a bi-elementwise algorithm.","code":["alg instanceof BiElementwise"]}, + + {"kind":"and","text":"It has a non empty name and string representation.","code":["!alg.name.isEmpty()","!alg.toString().isEmpty()"]}, + + {"kind":"when","text":"A auto diff mode is being created by every algorithm...","code":["AutoDiffMode mode = alg.autoDiffModeFrom( call )"]}, + + {"kind":"then","text":"The agent is configured to perform forward-AD and it contains the derivative generated by the function!","code":["mode != null"]}, + + {"kind":"where","text":"The variable \"imp\" is from a List of OperationType implementations of type \"Operator\".","code":{"alg":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Operator implementations behave as expected. 
[2]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"The current Neureka instance is being reset.","code":["Neureka.get().reset()"]}, + + {"kind":"and","text":"A mock ExecutionCall.","code":["var call = ExecutionCall.of().running(Mock(Operation)).on(Mock(Device))"]}, + + {"kind":"expect","text":"The algorithm is not null.","code":["alg != null"]}, + + {"kind":"and","text":"The algorithm is a bi-elementwise algorithm.","code":["alg instanceof BiElementwise"]}, + + {"kind":"and","text":"It has a non empty name and string representation.","code":["!alg.name.isEmpty()","!alg.toString().isEmpty()"]}, + + {"kind":"when","text":"A auto diff mode is being created by every algorithm...","code":["AutoDiffMode mode = alg.autoDiffModeFrom( call )"]}, + + {"kind":"then","text":"The agent is configured to perform forward-AD and it contains the derivative generated by the function!","code":["mode != null"]}, + + {"kind":"where","text":"The variable \"imp\" is from a List of OperationType implementations of type \"Operator\".","code":{"alg":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Operator implementations behave as expected. [3]", + "result":"PASS", + "duration":"0", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -41,7 +122,7 @@ }, { - "id":"Activation implementations behave as expected.", + "id":"Operator implementations behave as expected. [4]", "result":"PASS", "duration":"0", "iterations":{ @@ -54,6 +135,33 @@ {"kind":"expect","text":"The algorithm is not null.","code":["alg != null"]}, + {"kind":"and","text":"The algorithm is a bi-elementwise algorithm.","code":["alg instanceof BiElementwise"]}, + + {"kind":"and","text":"It has a non empty name and string representation.","code":["!alg.name.isEmpty()","!alg.toString().isEmpty()"]}, + + {"kind":"when","text":"A auto diff mode is being created by every algorithm...","code":["AutoDiffMode mode = alg.autoDiffModeFrom( call )"]}, + + {"kind":"then","text":"The agent is configured to perform forward-AD and it contains the derivative generated by the function!","code":["mode != null"]}, + + {"kind":"where","text":"The variable \"imp\" is from a List of OperationType implementations of type \"Operator\".","code":{"alg":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation implementations behave as expected. [0]", + "result":"PASS", + "duration":"0.002 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"The current Neureka instance is being reset.","code":["Neureka.get().reset()"]}, + + {"kind":"and","text":"A mock ExecutionCall.","code":["var call = ExecutionCall.of().running(Mock(Operation)).on(Mock(Device))"]}, + + {"kind":"expect","text":"The algorithm is not null.","code":["alg != null"]}, + {"kind":"and","text":"The algorithm is a activation algorithm.","code":["alg instanceof ElementwiseAlgorithm"]}, {"kind":"and","text":"It has a non empty name and string representation.","code":["!alg.name.isEmpty()","!alg.toString().isEmpty()"]}, @@ -72,7 +180,7 @@ }, { - "id":"Convolution implementations behave as expected.", + "id":"Activation implementations behave as expected. 
[1]", "result":"PASS", "duration":"0", "iterations":{ @@ -85,7 +193,7 @@ {"kind":"expect","text":"The algorithm is not null.","code":["alg != null"]}, - {"kind":"and","text":"The algorithm is a convolution algorithm.","code":["alg instanceof NDConvolution"]}, + {"kind":"and","text":"The algorithm is a activation algorithm.","code":["alg instanceof ElementwiseAlgorithm"]}, {"kind":"and","text":"It has a non empty name and string representation.","code":["!alg.name.isEmpty()","!alg.toString().isEmpty()"]}, @@ -97,13 +205,912 @@ {"kind":"then","text":"","code":["mode != null"]}, - {"kind":"where","text":"The variable \"imp\" is from a List of Operation implementations of type \"Convolution\".","code":{"alg":[]}} + {"kind":"where","text":"The variable \"imp\" is from a List of OperationType implementations of type \"Activation\".","code":{"alg":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation implementations behave as expected. [2]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"The current Neureka instance is being reset.","code":["Neureka.get().reset()"]}, + + {"kind":"and","text":"A mock ExecutionCall.","code":["var call = ExecutionCall.of().running(Mock(Operation)).on(Mock(Device))"]}, + + {"kind":"expect","text":"The algorithm is not null.","code":["alg != null"]}, + + {"kind":"and","text":"The algorithm is a activation algorithm.","code":["alg instanceof ElementwiseAlgorithm"]}, + + {"kind":"and","text":"It has a non empty name and string representation.","code":["!alg.name.isEmpty()","!alg.toString().isEmpty()"]}, + + {"kind":"when","text":"","code":["var suitability = alg.isSuitableFor(call)"]}, + + {"kind":"then","text":"","code":["0 <= suitability && suitability <= 1"]}, + + {"kind":"when","text":"","code":["var mode = alg.autoDiffModeFrom(call)"]}, + + {"kind":"then","text":"","code":["mode != null"]}, + + {"kind":"where","text":"The variable \"imp\" is from a List of OperationType implementations of type \"Activation\".","code":{"alg":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation implementations behave as expected. [3]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"The current Neureka instance is being reset.","code":["Neureka.get().reset()"]}, + + {"kind":"and","text":"A mock ExecutionCall.","code":["var call = ExecutionCall.of().running(Mock(Operation)).on(Mock(Device))"]}, + + {"kind":"expect","text":"The algorithm is not null.","code":["alg != null"]}, + + {"kind":"and","text":"The algorithm is a activation algorithm.","code":["alg instanceof ElementwiseAlgorithm"]}, + + {"kind":"and","text":"It has a non empty name and string representation.","code":["!alg.name.isEmpty()","!alg.toString().isEmpty()"]}, + + {"kind":"when","text":"","code":["var suitability = alg.isSuitableFor(call)"]}, + + {"kind":"then","text":"","code":["0 <= suitability && suitability <= 1"]}, + + {"kind":"when","text":"","code":["var mode = alg.autoDiffModeFrom(call)"]}, + + {"kind":"then","text":"","code":["mode != null"]}, + + {"kind":"where","text":"The variable \"imp\" is from a List of OperationType implementations of type \"Activation\".","code":{"alg":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation implementations behave as expected. 
[4]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"The current Neureka instance is being reset.","code":["Neureka.get().reset()"]}, + + {"kind":"and","text":"A mock ExecutionCall.","code":["var call = ExecutionCall.of().running(Mock(Operation)).on(Mock(Device))"]}, + + {"kind":"expect","text":"The algorithm is not null.","code":["alg != null"]}, + + {"kind":"and","text":"The algorithm is a activation algorithm.","code":["alg instanceof ElementwiseAlgorithm"]}, + + {"kind":"and","text":"It has a non empty name and string representation.","code":["!alg.name.isEmpty()","!alg.toString().isEmpty()"]}, + + {"kind":"when","text":"","code":["var suitability = alg.isSuitableFor(call)"]}, + + {"kind":"then","text":"","code":["0 <= suitability && suitability <= 1"]}, + + {"kind":"when","text":"","code":["var mode = alg.autoDiffModeFrom(call)"]}, + + {"kind":"then","text":"","code":["mode != null"]}, + + {"kind":"where","text":"The variable \"imp\" is from a List of OperationType implementations of type \"Activation\".","code":{"alg":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation implementations behave as expected. [5]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"The current Neureka instance is being reset.","code":["Neureka.get().reset()"]}, + + {"kind":"and","text":"A mock ExecutionCall.","code":["var call = ExecutionCall.of().running(Mock(Operation)).on(Mock(Device))"]}, + + {"kind":"expect","text":"The algorithm is not null.","code":["alg != null"]}, + + {"kind":"and","text":"The algorithm is a activation algorithm.","code":["alg instanceof ElementwiseAlgorithm"]}, + + {"kind":"and","text":"It has a non empty name and string representation.","code":["!alg.name.isEmpty()","!alg.toString().isEmpty()"]}, + + {"kind":"when","text":"","code":["var suitability = alg.isSuitableFor(call)"]}, + + {"kind":"then","text":"","code":["0 <= suitability && suitability <= 1"]}, + + {"kind":"when","text":"","code":["var mode = alg.autoDiffModeFrom(call)"]}, + + {"kind":"then","text":"","code":["mode != null"]}, + + {"kind":"where","text":"The variable \"imp\" is from a List of OperationType implementations of type \"Activation\".","code":{"alg":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation implementations behave as expected. 
[6]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"The current Neureka instance is being reset.","code":["Neureka.get().reset()"]}, + + {"kind":"and","text":"A mock ExecutionCall.","code":["var call = ExecutionCall.of().running(Mock(Operation)).on(Mock(Device))"]}, + + {"kind":"expect","text":"The algorithm is not null.","code":["alg != null"]}, + + {"kind":"and","text":"The algorithm is a activation algorithm.","code":["alg instanceof ElementwiseAlgorithm"]}, + + {"kind":"and","text":"It has a non empty name and string representation.","code":["!alg.name.isEmpty()","!alg.toString().isEmpty()"]}, + + {"kind":"when","text":"","code":["var suitability = alg.isSuitableFor(call)"]}, + + {"kind":"then","text":"","code":["0 <= suitability && suitability <= 1"]}, + + {"kind":"when","text":"","code":["var mode = alg.autoDiffModeFrom(call)"]}, + + {"kind":"then","text":"","code":["mode != null"]}, + + {"kind":"where","text":"The variable \"imp\" is from a List of OperationType implementations of type \"Activation\".","code":{"alg":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation implementations behave as expected. [7]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"The current Neureka instance is being reset.","code":["Neureka.get().reset()"]}, + + {"kind":"and","text":"A mock ExecutionCall.","code":["var call = ExecutionCall.of().running(Mock(Operation)).on(Mock(Device))"]}, + + {"kind":"expect","text":"The algorithm is not null.","code":["alg != null"]}, + + {"kind":"and","text":"The algorithm is a activation algorithm.","code":["alg instanceof ElementwiseAlgorithm"]}, + + {"kind":"and","text":"It has a non empty name and string representation.","code":["!alg.name.isEmpty()","!alg.toString().isEmpty()"]}, + + {"kind":"when","text":"","code":["var suitability = alg.isSuitableFor(call)"]}, + + {"kind":"then","text":"","code":["0 <= suitability && suitability <= 1"]}, + + {"kind":"when","text":"","code":["var mode = alg.autoDiffModeFrom(call)"]}, + + {"kind":"then","text":"","code":["mode != null"]}, + + {"kind":"where","text":"The variable \"imp\" is from a List of OperationType implementations of type \"Activation\".","code":{"alg":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation implementations behave as expected. 
[8]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"The current Neureka instance is being reset.","code":["Neureka.get().reset()"]}, + + {"kind":"and","text":"A mock ExecutionCall.","code":["var call = ExecutionCall.of().running(Mock(Operation)).on(Mock(Device))"]}, + + {"kind":"expect","text":"The algorithm is not null.","code":["alg != null"]}, + + {"kind":"and","text":"The algorithm is a activation algorithm.","code":["alg instanceof ElementwiseAlgorithm"]}, + + {"kind":"and","text":"It has a non empty name and string representation.","code":["!alg.name.isEmpty()","!alg.toString().isEmpty()"]}, + + {"kind":"when","text":"","code":["var suitability = alg.isSuitableFor(call)"]}, + + {"kind":"then","text":"","code":["0 <= suitability && suitability <= 1"]}, + + {"kind":"when","text":"","code":["var mode = alg.autoDiffModeFrom(call)"]}, + + {"kind":"then","text":"","code":["mode != null"]}, + + {"kind":"where","text":"The variable \"imp\" is from a List of OperationType implementations of type \"Activation\".","code":{"alg":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation implementations behave as expected. [9]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"The current Neureka instance is being reset.","code":["Neureka.get().reset()"]}, + + {"kind":"and","text":"A mock ExecutionCall.","code":["var call = ExecutionCall.of().running(Mock(Operation)).on(Mock(Device))"]}, + + {"kind":"expect","text":"The algorithm is not null.","code":["alg != null"]}, + + {"kind":"and","text":"The algorithm is a activation algorithm.","code":["alg instanceof ElementwiseAlgorithm"]}, + + {"kind":"and","text":"It has a non empty name and string representation.","code":["!alg.name.isEmpty()","!alg.toString().isEmpty()"]}, + + {"kind":"when","text":"","code":["var suitability = alg.isSuitableFor(call)"]}, + + {"kind":"then","text":"","code":["0 <= suitability && suitability <= 1"]}, + + {"kind":"when","text":"","code":["var mode = alg.autoDiffModeFrom(call)"]}, + + {"kind":"then","text":"","code":["mode != null"]}, + + {"kind":"where","text":"The variable \"imp\" is from a List of OperationType implementations of type \"Activation\".","code":{"alg":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation implementations behave as expected. 
[10]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"The current Neureka instance is being reset.","code":["Neureka.get().reset()"]}, + + {"kind":"and","text":"A mock ExecutionCall.","code":["var call = ExecutionCall.of().running(Mock(Operation)).on(Mock(Device))"]}, + + {"kind":"expect","text":"The algorithm is not null.","code":["alg != null"]}, + + {"kind":"and","text":"The algorithm is a activation algorithm.","code":["alg instanceof ElementwiseAlgorithm"]}, + + {"kind":"and","text":"It has a non empty name and string representation.","code":["!alg.name.isEmpty()","!alg.toString().isEmpty()"]}, + + {"kind":"when","text":"","code":["var suitability = alg.isSuitableFor(call)"]}, + + {"kind":"then","text":"","code":["0 <= suitability && suitability <= 1"]}, + + {"kind":"when","text":"","code":["var mode = alg.autoDiffModeFrom(call)"]}, + + {"kind":"then","text":"","code":["mode != null"]}, + + {"kind":"where","text":"The variable \"imp\" is from a List of OperationType implementations of type \"Activation\".","code":{"alg":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation implementations behave as expected. [11]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"The current Neureka instance is being reset.","code":["Neureka.get().reset()"]}, + + {"kind":"and","text":"A mock ExecutionCall.","code":["var call = ExecutionCall.of().running(Mock(Operation)).on(Mock(Device))"]}, + + {"kind":"expect","text":"The algorithm is not null.","code":["alg != null"]}, + + {"kind":"and","text":"The algorithm is a activation algorithm.","code":["alg instanceof ElementwiseAlgorithm"]}, + + {"kind":"and","text":"It has a non empty name and string representation.","code":["!alg.name.isEmpty()","!alg.toString().isEmpty()"]}, + + {"kind":"when","text":"","code":["var suitability = alg.isSuitableFor(call)"]}, + + {"kind":"then","text":"","code":["0 <= suitability && suitability <= 1"]}, + + {"kind":"when","text":"","code":["var mode = alg.autoDiffModeFrom(call)"]}, + + {"kind":"then","text":"","code":["mode != null"]}, + + {"kind":"where","text":"The variable \"imp\" is from a List of OperationType implementations of type \"Activation\".","code":{"alg":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation implementations behave as expected. 
[12]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"The current Neureka instance is being reset.","code":["Neureka.get().reset()"]}, + + {"kind":"and","text":"A mock ExecutionCall.","code":["var call = ExecutionCall.of().running(Mock(Operation)).on(Mock(Device))"]}, + + {"kind":"expect","text":"The algorithm is not null.","code":["alg != null"]}, + + {"kind":"and","text":"The algorithm is a activation algorithm.","code":["alg instanceof ElementwiseAlgorithm"]}, + + {"kind":"and","text":"It has a non empty name and string representation.","code":["!alg.name.isEmpty()","!alg.toString().isEmpty()"]}, + + {"kind":"when","text":"","code":["var suitability = alg.isSuitableFor(call)"]}, + + {"kind":"then","text":"","code":["0 <= suitability && suitability <= 1"]}, + + {"kind":"when","text":"","code":["var mode = alg.autoDiffModeFrom(call)"]}, + + {"kind":"then","text":"","code":["mode != null"]}, + + {"kind":"where","text":"The variable \"imp\" is from a List of OperationType implementations of type \"Activation\".","code":{"alg":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation implementations behave as expected. [13]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"The current Neureka instance is being reset.","code":["Neureka.get().reset()"]}, + + {"kind":"and","text":"A mock ExecutionCall.","code":["var call = ExecutionCall.of().running(Mock(Operation)).on(Mock(Device))"]}, + + {"kind":"expect","text":"The algorithm is not null.","code":["alg != null"]}, + + {"kind":"and","text":"The algorithm is a activation algorithm.","code":["alg instanceof ElementwiseAlgorithm"]}, + + {"kind":"and","text":"It has a non empty name and string representation.","code":["!alg.name.isEmpty()","!alg.toString().isEmpty()"]}, + + {"kind":"when","text":"","code":["var suitability = alg.isSuitableFor(call)"]}, + + {"kind":"then","text":"","code":["0 <= suitability && suitability <= 1"]}, + + {"kind":"when","text":"","code":["var mode = alg.autoDiffModeFrom(call)"]}, + + {"kind":"then","text":"","code":["mode != null"]}, + + {"kind":"where","text":"The variable \"imp\" is from a List of OperationType implementations of type \"Activation\".","code":{"alg":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation implementations behave as expected. 
[14]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"The current Neureka instance is being reset.","code":["Neureka.get().reset()"]}, + + {"kind":"and","text":"A mock ExecutionCall.","code":["var call = ExecutionCall.of().running(Mock(Operation)).on(Mock(Device))"]}, + + {"kind":"expect","text":"The algorithm is not null.","code":["alg != null"]}, + + {"kind":"and","text":"The algorithm is a activation algorithm.","code":["alg instanceof ElementwiseAlgorithm"]}, + + {"kind":"and","text":"It has a non empty name and string representation.","code":["!alg.name.isEmpty()","!alg.toString().isEmpty()"]}, + + {"kind":"when","text":"","code":["var suitability = alg.isSuitableFor(call)"]}, + + {"kind":"then","text":"","code":["0 <= suitability && suitability <= 1"]}, + + {"kind":"when","text":"","code":["var mode = alg.autoDiffModeFrom(call)"]}, + + {"kind":"then","text":"","code":["mode != null"]}, + + {"kind":"where","text":"The variable \"imp\" is from a List of OperationType implementations of type \"Activation\".","code":{"alg":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation implementations behave as expected. [15]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"The current Neureka instance is being reset.","code":["Neureka.get().reset()"]}, + + {"kind":"and","text":"A mock ExecutionCall.","code":["var call = ExecutionCall.of().running(Mock(Operation)).on(Mock(Device))"]}, + + {"kind":"expect","text":"The algorithm is not null.","code":["alg != null"]}, + + {"kind":"and","text":"The algorithm is a activation algorithm.","code":["alg instanceof ElementwiseAlgorithm"]}, + + {"kind":"and","text":"It has a non empty name and string representation.","code":["!alg.name.isEmpty()","!alg.toString().isEmpty()"]}, + + {"kind":"when","text":"","code":["var suitability = alg.isSuitableFor(call)"]}, + + {"kind":"then","text":"","code":["0 <= suitability && suitability <= 1"]}, + + {"kind":"when","text":"","code":["var mode = alg.autoDiffModeFrom(call)"]}, + + {"kind":"then","text":"","code":["mode != null"]}, + + {"kind":"where","text":"The variable \"imp\" is from a List of OperationType implementations of type \"Activation\".","code":{"alg":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation implementations behave as expected. 
[16]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"The current Neureka instance is being reset.","code":["Neureka.get().reset()"]}, + + {"kind":"and","text":"A mock ExecutionCall.","code":["var call = ExecutionCall.of().running(Mock(Operation)).on(Mock(Device))"]}, + + {"kind":"expect","text":"The algorithm is not null.","code":["alg != null"]}, + + {"kind":"and","text":"The algorithm is a activation algorithm.","code":["alg instanceof ElementwiseAlgorithm"]}, + + {"kind":"and","text":"It has a non empty name and string representation.","code":["!alg.name.isEmpty()","!alg.toString().isEmpty()"]}, + + {"kind":"when","text":"","code":["var suitability = alg.isSuitableFor(call)"]}, + + {"kind":"then","text":"","code":["0 <= suitability && suitability <= 1"]}, + + {"kind":"when","text":"","code":["var mode = alg.autoDiffModeFrom(call)"]}, + + {"kind":"then","text":"","code":["mode != null"]}, + + {"kind":"where","text":"The variable \"imp\" is from a List of OperationType implementations of type \"Activation\".","code":{"alg":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation implementations behave as expected. [17]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"The current Neureka instance is being reset.","code":["Neureka.get().reset()"]}, + + {"kind":"and","text":"A mock ExecutionCall.","code":["var call = ExecutionCall.of().running(Mock(Operation)).on(Mock(Device))"]}, + + {"kind":"expect","text":"The algorithm is not null.","code":["alg != null"]}, + + {"kind":"and","text":"The algorithm is a activation algorithm.","code":["alg instanceof ElementwiseAlgorithm"]}, + + {"kind":"and","text":"It has a non empty name and string representation.","code":["!alg.name.isEmpty()","!alg.toString().isEmpty()"]}, + + {"kind":"when","text":"","code":["var suitability = alg.isSuitableFor(call)"]}, + + {"kind":"then","text":"","code":["0 <= suitability && suitability <= 1"]}, + + {"kind":"when","text":"","code":["var mode = alg.autoDiffModeFrom(call)"]}, + + {"kind":"then","text":"","code":["mode != null"]}, + + {"kind":"where","text":"The variable \"imp\" is from a List of OperationType implementations of type \"Activation\".","code":{"alg":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation implementations behave as expected. 
[18]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"The current Neureka instance is being reset.","code":["Neureka.get().reset()"]}, + + {"kind":"and","text":"A mock ExecutionCall.","code":["var call = ExecutionCall.of().running(Mock(Operation)).on(Mock(Device))"]}, + + {"kind":"expect","text":"The algorithm is not null.","code":["alg != null"]}, + + {"kind":"and","text":"The algorithm is a activation algorithm.","code":["alg instanceof ElementwiseAlgorithm"]}, + + {"kind":"and","text":"It has a non empty name and string representation.","code":["!alg.name.isEmpty()","!alg.toString().isEmpty()"]}, + + {"kind":"when","text":"","code":["var suitability = alg.isSuitableFor(call)"]}, + + {"kind":"then","text":"","code":["0 <= suitability && suitability <= 1"]}, + + {"kind":"when","text":"","code":["var mode = alg.autoDiffModeFrom(call)"]}, + + {"kind":"then","text":"","code":["mode != null"]}, + + {"kind":"where","text":"The variable \"imp\" is from a List of OperationType implementations of type \"Activation\".","code":{"alg":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation implementations behave as expected. [19]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"The current Neureka instance is being reset.","code":["Neureka.get().reset()"]}, + + {"kind":"and","text":"A mock ExecutionCall.","code":["var call = ExecutionCall.of().running(Mock(Operation)).on(Mock(Device))"]}, + + {"kind":"expect","text":"The algorithm is not null.","code":["alg != null"]}, + + {"kind":"and","text":"The algorithm is a activation algorithm.","code":["alg instanceof ElementwiseAlgorithm"]}, + + {"kind":"and","text":"It has a non empty name and string representation.","code":["!alg.name.isEmpty()","!alg.toString().isEmpty()"]}, + + {"kind":"when","text":"","code":["var suitability = alg.isSuitableFor(call)"]}, + + {"kind":"then","text":"","code":["0 <= suitability && suitability <= 1"]}, + + {"kind":"when","text":"","code":["var mode = alg.autoDiffModeFrom(call)"]}, + + {"kind":"then","text":"","code":["mode != null"]}, + + {"kind":"where","text":"The variable \"imp\" is from a List of OperationType implementations of type \"Activation\".","code":{"alg":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation implementations behave as expected. 
[20]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"The current Neureka instance is being reset.","code":["Neureka.get().reset()"]}, + + {"kind":"and","text":"A mock ExecutionCall.","code":["var call = ExecutionCall.of().running(Mock(Operation)).on(Mock(Device))"]}, + + {"kind":"expect","text":"The algorithm is not null.","code":["alg != null"]}, + + {"kind":"and","text":"The algorithm is a activation algorithm.","code":["alg instanceof ElementwiseAlgorithm"]}, + + {"kind":"and","text":"It has a non empty name and string representation.","code":["!alg.name.isEmpty()","!alg.toString().isEmpty()"]}, + + {"kind":"when","text":"","code":["var suitability = alg.isSuitableFor(call)"]}, + + {"kind":"then","text":"","code":["0 <= suitability && suitability <= 1"]}, + + {"kind":"when","text":"","code":["var mode = alg.autoDiffModeFrom(call)"]}, + + {"kind":"then","text":"","code":["mode != null"]}, + + {"kind":"where","text":"The variable \"imp\" is from a List of OperationType implementations of type \"Activation\".","code":{"alg":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation implementations behave as expected. [21]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"The current Neureka instance is being reset.","code":["Neureka.get().reset()"]}, + + {"kind":"and","text":"A mock ExecutionCall.","code":["var call = ExecutionCall.of().running(Mock(Operation)).on(Mock(Device))"]}, + + {"kind":"expect","text":"The algorithm is not null.","code":["alg != null"]}, + + {"kind":"and","text":"The algorithm is a activation algorithm.","code":["alg instanceof ElementwiseAlgorithm"]}, + + {"kind":"and","text":"It has a non empty name and string representation.","code":["!alg.name.isEmpty()","!alg.toString().isEmpty()"]}, + + {"kind":"when","text":"","code":["var suitability = alg.isSuitableFor(call)"]}, + + {"kind":"then","text":"","code":["0 <= suitability && suitability <= 1"]}, + + {"kind":"when","text":"","code":["var mode = alg.autoDiffModeFrom(call)"]}, + + {"kind":"then","text":"","code":["mode != null"]}, + + {"kind":"where","text":"The variable \"imp\" is from a List of OperationType implementations of type \"Activation\".","code":{"alg":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation implementations behave as expected. 
[22]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"The current Neureka instance is being reset.","code":["Neureka.get().reset()"]}, + + {"kind":"and","text":"A mock ExecutionCall.","code":["var call = ExecutionCall.of().running(Mock(Operation)).on(Mock(Device))"]}, + + {"kind":"expect","text":"The algorithm is not null.","code":["alg != null"]}, + + {"kind":"and","text":"The algorithm is a activation algorithm.","code":["alg instanceof ElementwiseAlgorithm"]}, + + {"kind":"and","text":"It has a non empty name and string representation.","code":["!alg.name.isEmpty()","!alg.toString().isEmpty()"]}, + + {"kind":"when","text":"","code":["var suitability = alg.isSuitableFor(call)"]}, + + {"kind":"then","text":"","code":["0 <= suitability && suitability <= 1"]}, + + {"kind":"when","text":"","code":["var mode = alg.autoDiffModeFrom(call)"]}, + + {"kind":"then","text":"","code":["mode != null"]}, + + {"kind":"where","text":"The variable \"imp\" is from a List of OperationType implementations of type \"Activation\".","code":{"alg":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation implementations behave as expected. [23]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"The current Neureka instance is being reset.","code":["Neureka.get().reset()"]}, + + {"kind":"and","text":"A mock ExecutionCall.","code":["var call = ExecutionCall.of().running(Mock(Operation)).on(Mock(Device))"]}, + + {"kind":"expect","text":"The algorithm is not null.","code":["alg != null"]}, + + {"kind":"and","text":"The algorithm is a activation algorithm.","code":["alg instanceof ElementwiseAlgorithm"]}, + + {"kind":"and","text":"It has a non empty name and string representation.","code":["!alg.name.isEmpty()","!alg.toString().isEmpty()"]}, + + {"kind":"when","text":"","code":["var suitability = alg.isSuitableFor(call)"]}, + + {"kind":"then","text":"","code":["0 <= suitability && suitability <= 1"]}, + + {"kind":"when","text":"","code":["var mode = alg.autoDiffModeFrom(call)"]}, + + {"kind":"then","text":"","code":["mode != null"]}, + + {"kind":"where","text":"The variable \"imp\" is from a List of OperationType implementations of type \"Activation\".","code":{"alg":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation implementations behave as expected. 
[24]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"The current Neureka instance is being reset.","code":["Neureka.get().reset()"]}, + + {"kind":"and","text":"A mock ExecutionCall.","code":["var call = ExecutionCall.of().running(Mock(Operation)).on(Mock(Device))"]}, + + {"kind":"expect","text":"The algorithm is not null.","code":["alg != null"]}, + + {"kind":"and","text":"The algorithm is a activation algorithm.","code":["alg instanceof ElementwiseAlgorithm"]}, + + {"kind":"and","text":"It has a non empty name and string representation.","code":["!alg.name.isEmpty()","!alg.toString().isEmpty()"]}, + + {"kind":"when","text":"","code":["var suitability = alg.isSuitableFor(call)"]}, + + {"kind":"then","text":"","code":["0 <= suitability && suitability <= 1"]}, + + {"kind":"when","text":"","code":["var mode = alg.autoDiffModeFrom(call)"]}, + + {"kind":"then","text":"","code":["mode != null"]}, + + {"kind":"where","text":"The variable \"imp\" is from a List of OperationType implementations of type \"Activation\".","code":{"alg":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Convolution implementations behave as expected.", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"The current Neureka instance is being reset.","code":["Neureka.get().reset()"]}, + + {"kind":"and","text":"A mock ExecutionCall.","code":["var call = ExecutionCall.of().running(Mock(Operation)).on(Mock(Device))"]}, + + {"kind":"expect","text":"The algorithm is not null.","code":["alg != null"]}, + + {"kind":"and","text":"The algorithm is a convolution algorithm.","code":["alg instanceof NDConvolution"]}, + + {"kind":"and","text":"It has a non empty name and string representation.","code":["!alg.name.isEmpty()","!alg.toString().isEmpty()"]}, + + {"kind":"when","text":"","code":["var suitability = alg.isSuitableFor(call)"]}, + + {"kind":"then","text":"","code":["0 <= suitability && suitability <= 1"]}, + + {"kind":"when","text":"","code":["var mode = alg.autoDiffModeFrom(call)"]}, + + {"kind":"then","text":"","code":["mode != null"]}, + + {"kind":"where","text":"The variable \"imp\" is from a List of Operation implementations of type \"Convolution\".","code":{"alg":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Broadcast implementations have expected properties. 
[0]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We first reset the library settings.","code":["Neureka.get().reset()"]}, + + {"kind":"and","text":"A mock ExecutionCall.","code":["var call = ExecutionCall.of().running(Mock(Operation)).on(Mock(Device))"]}, + + {"kind":"expect","text":"The algorithm is not null.","code":["alg != null"]}, + + {"kind":"and","text":"The algorithm is a broadcast algorithm.","code":["alg instanceof Broadcast"]}, + + {"kind":"and","text":"It has a non empty name and string representation.","code":["!alg.name.isEmpty()","!alg.toString().isEmpty()"]}, + + {"kind":"when","text":"","code":["var suitability = alg.isSuitableFor(call)"]}, + + {"kind":"then","text":"","code":["0 <= suitability && suitability <= 1"]}, + + {"kind":"when","text":"","code":["var mode = alg.autoDiffModeFrom(call)"]}, + + {"kind":"then","text":"","code":["mode != null"]}, + + {"kind":"where","text":"The variable \"imp\" is from a List of OperationType implementations of type \"Convolution\".","code":{"alg":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Broadcast implementations have expected properties. [1]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We first reset the library settings.","code":["Neureka.get().reset()"]}, + + {"kind":"and","text":"A mock ExecutionCall.","code":["var call = ExecutionCall.of().running(Mock(Operation)).on(Mock(Device))"]}, + + {"kind":"expect","text":"The algorithm is not null.","code":["alg != null"]}, + + {"kind":"and","text":"The algorithm is a broadcast algorithm.","code":["alg instanceof Broadcast"]}, + + {"kind":"and","text":"It has a non empty name and string representation.","code":["!alg.name.isEmpty()","!alg.toString().isEmpty()"]}, + + {"kind":"when","text":"","code":["var suitability = alg.isSuitableFor(call)"]}, + + {"kind":"then","text":"","code":["0 <= suitability && suitability <= 1"]}, + + {"kind":"when","text":"","code":["var mode = alg.autoDiffModeFrom(call)"]}, + + {"kind":"then","text":"","code":["mode != null"]}, + + {"kind":"where","text":"The variable \"imp\" is from a List of OperationType implementations of type \"Convolution\".","code":{"alg":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Broadcast implementations have expected properties. 
[2]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We first reset the library settings.","code":["Neureka.get().reset()"]}, + + {"kind":"and","text":"A mock ExecutionCall.","code":["var call = ExecutionCall.of().running(Mock(Operation)).on(Mock(Device))"]}, + + {"kind":"expect","text":"The algorithm is not null.","code":["alg != null"]}, + + {"kind":"and","text":"The algorithm is a broadcast algorithm.","code":["alg instanceof Broadcast"]}, + + {"kind":"and","text":"It has a non empty name and string representation.","code":["!alg.name.isEmpty()","!alg.toString().isEmpty()"]}, + + {"kind":"when","text":"","code":["var suitability = alg.isSuitableFor(call)"]}, + + {"kind":"then","text":"","code":["0 <= suitability && suitability <= 1"]}, + + {"kind":"when","text":"","code":["var mode = alg.autoDiffModeFrom(call)"]}, + + {"kind":"then","text":"","code":["mode != null"]}, + + {"kind":"where","text":"The variable \"imp\" is from a List of OperationType implementations of type \"Convolution\".","code":{"alg":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Broadcast implementations have expected properties. [3]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We first reset the library settings.","code":["Neureka.get().reset()"]}, + + {"kind":"and","text":"A mock ExecutionCall.","code":["var call = ExecutionCall.of().running(Mock(Operation)).on(Mock(Device))"]}, + + {"kind":"expect","text":"The algorithm is not null.","code":["alg != null"]}, + + {"kind":"and","text":"The algorithm is a broadcast algorithm.","code":["alg instanceof Broadcast"]}, + + {"kind":"and","text":"It has a non empty name and string representation.","code":["!alg.name.isEmpty()","!alg.toString().isEmpty()"]}, + + {"kind":"when","text":"","code":["var suitability = alg.isSuitableFor(call)"]}, + + {"kind":"then","text":"","code":["0 <= suitability && suitability <= 1"]}, + + {"kind":"when","text":"","code":["var mode = alg.autoDiffModeFrom(call)"]}, + + {"kind":"then","text":"","code":["mode != null"]}, + + {"kind":"where","text":"The variable \"imp\" is from a List of OperationType implementations of type \"Convolution\".","code":{"alg":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Broadcast implementations have expected properties. 
[4]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We first reset the library settings.","code":["Neureka.get().reset()"]}, + + {"kind":"and","text":"A mock ExecutionCall.","code":["var call = ExecutionCall.of().running(Mock(Operation)).on(Mock(Device))"]}, + + {"kind":"expect","text":"The algorithm is not null.","code":["alg != null"]}, + + {"kind":"and","text":"The algorithm is a broadcast algorithm.","code":["alg instanceof Broadcast"]}, + + {"kind":"and","text":"It has a non empty name and string representation.","code":["!alg.name.isEmpty()","!alg.toString().isEmpty()"]}, + + {"kind":"when","text":"","code":["var suitability = alg.isSuitableFor(call)"]}, + + {"kind":"then","text":"","code":["0 <= suitability && suitability <= 1"]}, + + {"kind":"when","text":"","code":["var mode = alg.autoDiffModeFrom(call)"]}, + + {"kind":"then","text":"","code":["mode != null"]}, + + {"kind":"where","text":"The variable \"imp\" is from a List of OperationType implementations of type \"Convolution\".","code":{"alg":[]}} ], "problems":{"dataValues":[], "errors":[]} }, { - "id":"Broadcast implementations have expected properties.", + "id":"Broadcast implementations have expected properties. [5]", "result":"PASS", "duration":"0", "iterations":{ diff --git a/docs/spock/reports/ut.backend.core.Backend_Algorithm_Implementation_Spec.json b/docs/spock/reports/ut.backend.core.Backend_Algorithm_Implementation_Spec.json index 0b12cd4dd..149807e2e 100644 --- a/docs/spock/reports/ut.backend.core.Backend_Algorithm_Implementation_Spec.json +++ b/docs/spock/reports/ut.backend.core.Backend_Algorithm_Implementation_Spec.json @@ -4,17 +4,17 @@ "narrative":"", "subjects":[], "statistics":{ - "runs":"3", + "runs":"35", "successRate":"100.0%", "failures":"0", "errors":"0", "skipped":"0", - "duration":"0.060 seconds" + "duration":"0.097 seconds" }, "headers":["\n This specification defines the behavior of implementations of the \n Algorithm interface!
        \n "],"tags":{},"see":[], "features":[ { - "id":"Operator implementations have expected Executor instances.", + "id":"Operator implementations have expected Executor instances. [0]", "result":"PASS", "duration":"0", "iterations":{ @@ -31,7 +31,7 @@ }, { - "id":"Activation implementations have expected Executor instances.", + "id":"Operator implementations have expected Executor instances. [1]", "result":"PASS", "duration":"0", "iterations":{ @@ -48,9 +48,561 @@ }, { - "id":"HostExecutors of Operator implementations behave as expected.", + "id":"Operator implementations have expected Executor instances. [2]", "result":"PASS", - "duration":"0.049 seconds", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"when","text":"Host- and CL- executor instance are being fetched...","code":["def hostExecutor = imp.getImplementationFor( CPU.class )","def clExecutor = imp.getImplementationFor( OpenCLDevice.class )"]}, + + {"kind":"then","text":"The variables containing the executor instances are not null.","code":["hostExecutor != null","clExecutor != null || !Neureka.get().canAccessOpenCLDevice()"]}, + + {"kind":"where","text":"The variable \"imp\" is from a List of OperationType implementations of type \"Operator\".","code":{"imp":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Operator implementations have expected Executor instances. [3]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"when","text":"Host- and CL- executor instance are being fetched...","code":["def hostExecutor = imp.getImplementationFor( CPU.class )","def clExecutor = imp.getImplementationFor( OpenCLDevice.class )"]}, + + {"kind":"then","text":"The variables containing the executor instances are not null.","code":["hostExecutor != null","clExecutor != null || !Neureka.get().canAccessOpenCLDevice()"]}, + + {"kind":"where","text":"The variable \"imp\" is from a List of OperationType implementations of type \"Operator\".","code":{"imp":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Operator implementations have expected Executor instances. [4]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"when","text":"Host- and CL- executor instance are being fetched...","code":["def hostExecutor = imp.getImplementationFor( CPU.class )","def clExecutor = imp.getImplementationFor( OpenCLDevice.class )"]}, + + {"kind":"then","text":"The variables containing the executor instances are not null.","code":["hostExecutor != null","clExecutor != null || !Neureka.get().canAccessOpenCLDevice()"]}, + + {"kind":"where","text":"The variable \"imp\" is from a List of OperationType implementations of type \"Operator\".","code":{"imp":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation implementations have expected Executor instances. 
[0]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"when","text":"Host- and CL- executor instance are being fetched...","code":["def hostExecutor = imp.getImplementationFor( CPU.class )","def clExecutor = imp.getImplementationFor( OpenCLDevice.class )"]}, + + {"kind":"then","text":"The variables containing the executor instances are not null.","code":["hostExecutor != null","clExecutor != null || !Neureka.get().canAccessOpenCLDevice()"]}, + + {"kind":"where","text":"The variable \"imp\" is from a List of OperationType implementations of type \"Operator\".","code":{"imp":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation implementations have expected Executor instances. [1]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"when","text":"Host- and CL- executor instance are being fetched...","code":["def hostExecutor = imp.getImplementationFor( CPU.class )","def clExecutor = imp.getImplementationFor( OpenCLDevice.class )"]}, + + {"kind":"then","text":"The variables containing the executor instances are not null.","code":["hostExecutor != null","clExecutor != null || !Neureka.get().canAccessOpenCLDevice()"]}, + + {"kind":"where","text":"The variable \"imp\" is from a List of OperationType implementations of type \"Operator\".","code":{"imp":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation implementations have expected Executor instances. [2]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"when","text":"Host- and CL- executor instance are being fetched...","code":["def hostExecutor = imp.getImplementationFor( CPU.class )","def clExecutor = imp.getImplementationFor( OpenCLDevice.class )"]}, + + {"kind":"then","text":"The variables containing the executor instances are not null.","code":["hostExecutor != null","clExecutor != null || !Neureka.get().canAccessOpenCLDevice()"]}, + + {"kind":"where","text":"The variable \"imp\" is from a List of OperationType implementations of type \"Operator\".","code":{"imp":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation implementations have expected Executor instances. [3]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"when","text":"Host- and CL- executor instance are being fetched...","code":["def hostExecutor = imp.getImplementationFor( CPU.class )","def clExecutor = imp.getImplementationFor( OpenCLDevice.class )"]}, + + {"kind":"then","text":"The variables containing the executor instances are not null.","code":["hostExecutor != null","clExecutor != null || !Neureka.get().canAccessOpenCLDevice()"]}, + + {"kind":"where","text":"The variable \"imp\" is from a List of OperationType implementations of type \"Operator\".","code":{"imp":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation implementations have expected Executor instances. 
[4]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"when","text":"Host- and CL- executor instance are being fetched...","code":["def hostExecutor = imp.getImplementationFor( CPU.class )","def clExecutor = imp.getImplementationFor( OpenCLDevice.class )"]}, + + {"kind":"then","text":"The variables containing the executor instances are not null.","code":["hostExecutor != null","clExecutor != null || !Neureka.get().canAccessOpenCLDevice()"]}, + + {"kind":"where","text":"The variable \"imp\" is from a List of OperationType implementations of type \"Operator\".","code":{"imp":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation implementations have expected Executor instances. [5]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"when","text":"Host- and CL- executor instance are being fetched...","code":["def hostExecutor = imp.getImplementationFor( CPU.class )","def clExecutor = imp.getImplementationFor( OpenCLDevice.class )"]}, + + {"kind":"then","text":"The variables containing the executor instances are not null.","code":["hostExecutor != null","clExecutor != null || !Neureka.get().canAccessOpenCLDevice()"]}, + + {"kind":"where","text":"The variable \"imp\" is from a List of OperationType implementations of type \"Operator\".","code":{"imp":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation implementations have expected Executor instances. [6]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"when","text":"Host- and CL- executor instance are being fetched...","code":["def hostExecutor = imp.getImplementationFor( CPU.class )","def clExecutor = imp.getImplementationFor( OpenCLDevice.class )"]}, + + {"kind":"then","text":"The variables containing the executor instances are not null.","code":["hostExecutor != null","clExecutor != null || !Neureka.get().canAccessOpenCLDevice()"]}, + + {"kind":"where","text":"The variable \"imp\" is from a List of OperationType implementations of type \"Operator\".","code":{"imp":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation implementations have expected Executor instances. [7]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"when","text":"Host- and CL- executor instance are being fetched...","code":["def hostExecutor = imp.getImplementationFor( CPU.class )","def clExecutor = imp.getImplementationFor( OpenCLDevice.class )"]}, + + {"kind":"then","text":"The variables containing the executor instances are not null.","code":["hostExecutor != null","clExecutor != null || !Neureka.get().canAccessOpenCLDevice()"]}, + + {"kind":"where","text":"The variable \"imp\" is from a List of OperationType implementations of type \"Operator\".","code":{"imp":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation implementations have expected Executor instances. 
[8]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"when","text":"Host- and CL- executor instance are being fetched...","code":["def hostExecutor = imp.getImplementationFor( CPU.class )","def clExecutor = imp.getImplementationFor( OpenCLDevice.class )"]}, + + {"kind":"then","text":"The variables containing the executor instances are not null.","code":["hostExecutor != null","clExecutor != null || !Neureka.get().canAccessOpenCLDevice()"]}, + + {"kind":"where","text":"The variable \"imp\" is from a List of OperationType implementations of type \"Operator\".","code":{"imp":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation implementations have expected Executor instances. [9]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"when","text":"Host- and CL- executor instance are being fetched...","code":["def hostExecutor = imp.getImplementationFor( CPU.class )","def clExecutor = imp.getImplementationFor( OpenCLDevice.class )"]}, + + {"kind":"then","text":"The variables containing the executor instances are not null.","code":["hostExecutor != null","clExecutor != null || !Neureka.get().canAccessOpenCLDevice()"]}, + + {"kind":"where","text":"The variable \"imp\" is from a List of OperationType implementations of type \"Operator\".","code":{"imp":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation implementations have expected Executor instances. [10]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"when","text":"Host- and CL- executor instance are being fetched...","code":["def hostExecutor = imp.getImplementationFor( CPU.class )","def clExecutor = imp.getImplementationFor( OpenCLDevice.class )"]}, + + {"kind":"then","text":"The variables containing the executor instances are not null.","code":["hostExecutor != null","clExecutor != null || !Neureka.get().canAccessOpenCLDevice()"]}, + + {"kind":"where","text":"The variable \"imp\" is from a List of OperationType implementations of type \"Operator\".","code":{"imp":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation implementations have expected Executor instances. [11]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"when","text":"Host- and CL- executor instance are being fetched...","code":["def hostExecutor = imp.getImplementationFor( CPU.class )","def clExecutor = imp.getImplementationFor( OpenCLDevice.class )"]}, + + {"kind":"then","text":"The variables containing the executor instances are not null.","code":["hostExecutor != null","clExecutor != null || !Neureka.get().canAccessOpenCLDevice()"]}, + + {"kind":"where","text":"The variable \"imp\" is from a List of OperationType implementations of type \"Operator\".","code":{"imp":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation implementations have expected Executor instances. 
[12]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"when","text":"Host- and CL- executor instance are being fetched...","code":["def hostExecutor = imp.getImplementationFor( CPU.class )","def clExecutor = imp.getImplementationFor( OpenCLDevice.class )"]}, + + {"kind":"then","text":"The variables containing the executor instances are not null.","code":["hostExecutor != null","clExecutor != null || !Neureka.get().canAccessOpenCLDevice()"]}, + + {"kind":"where","text":"The variable \"imp\" is from a List of OperationType implementations of type \"Operator\".","code":{"imp":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation implementations have expected Executor instances. [13]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"when","text":"Host- and CL- executor instance are being fetched...","code":["def hostExecutor = imp.getImplementationFor( CPU.class )","def clExecutor = imp.getImplementationFor( OpenCLDevice.class )"]}, + + {"kind":"then","text":"The variables containing the executor instances are not null.","code":["hostExecutor != null","clExecutor != null || !Neureka.get().canAccessOpenCLDevice()"]}, + + {"kind":"where","text":"The variable \"imp\" is from a List of OperationType implementations of type \"Operator\".","code":{"imp":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation implementations have expected Executor instances. [14]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"when","text":"Host- and CL- executor instance are being fetched...","code":["def hostExecutor = imp.getImplementationFor( CPU.class )","def clExecutor = imp.getImplementationFor( OpenCLDevice.class )"]}, + + {"kind":"then","text":"The variables containing the executor instances are not null.","code":["hostExecutor != null","clExecutor != null || !Neureka.get().canAccessOpenCLDevice()"]}, + + {"kind":"where","text":"The variable \"imp\" is from a List of OperationType implementations of type \"Operator\".","code":{"imp":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation implementations have expected Executor instances. [15]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"when","text":"Host- and CL- executor instance are being fetched...","code":["def hostExecutor = imp.getImplementationFor( CPU.class )","def clExecutor = imp.getImplementationFor( OpenCLDevice.class )"]}, + + {"kind":"then","text":"The variables containing the executor instances are not null.","code":["hostExecutor != null","clExecutor != null || !Neureka.get().canAccessOpenCLDevice()"]}, + + {"kind":"where","text":"The variable \"imp\" is from a List of OperationType implementations of type \"Operator\".","code":{"imp":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation implementations have expected Executor instances. 
[16]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"when","text":"Host- and CL- executor instance are being fetched...","code":["def hostExecutor = imp.getImplementationFor( CPU.class )","def clExecutor = imp.getImplementationFor( OpenCLDevice.class )"]}, + + {"kind":"then","text":"The variables containing the executor instances are not null.","code":["hostExecutor != null","clExecutor != null || !Neureka.get().canAccessOpenCLDevice()"]}, + + {"kind":"where","text":"The variable \"imp\" is from a List of OperationType implementations of type \"Operator\".","code":{"imp":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation implementations have expected Executor instances. [17]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"when","text":"Host- and CL- executor instance are being fetched...","code":["def hostExecutor = imp.getImplementationFor( CPU.class )","def clExecutor = imp.getImplementationFor( OpenCLDevice.class )"]}, + + {"kind":"then","text":"The variables containing the executor instances are not null.","code":["hostExecutor != null","clExecutor != null || !Neureka.get().canAccessOpenCLDevice()"]}, + + {"kind":"where","text":"The variable \"imp\" is from a List of OperationType implementations of type \"Operator\".","code":{"imp":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation implementations have expected Executor instances. [18]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"when","text":"Host- and CL- executor instance are being fetched...","code":["def hostExecutor = imp.getImplementationFor( CPU.class )","def clExecutor = imp.getImplementationFor( OpenCLDevice.class )"]}, + + {"kind":"then","text":"The variables containing the executor instances are not null.","code":["hostExecutor != null","clExecutor != null || !Neureka.get().canAccessOpenCLDevice()"]}, + + {"kind":"where","text":"The variable \"imp\" is from a List of OperationType implementations of type \"Operator\".","code":{"imp":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation implementations have expected Executor instances. [19]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"when","text":"Host- and CL- executor instance are being fetched...","code":["def hostExecutor = imp.getImplementationFor( CPU.class )","def clExecutor = imp.getImplementationFor( OpenCLDevice.class )"]}, + + {"kind":"then","text":"The variables containing the executor instances are not null.","code":["hostExecutor != null","clExecutor != null || !Neureka.get().canAccessOpenCLDevice()"]}, + + {"kind":"where","text":"The variable \"imp\" is from a List of OperationType implementations of type \"Operator\".","code":{"imp":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation implementations have expected Executor instances. 
[20]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"when","text":"Host- and CL- executor instance are being fetched...","code":["def hostExecutor = imp.getImplementationFor( CPU.class )","def clExecutor = imp.getImplementationFor( OpenCLDevice.class )"]}, + + {"kind":"then","text":"The variables containing the executor instances are not null.","code":["hostExecutor != null","clExecutor != null || !Neureka.get().canAccessOpenCLDevice()"]}, + + {"kind":"where","text":"The variable \"imp\" is from a List of OperationType implementations of type \"Operator\".","code":{"imp":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation implementations have expected Executor instances. [21]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"when","text":"Host- and CL- executor instance are being fetched...","code":["def hostExecutor = imp.getImplementationFor( CPU.class )","def clExecutor = imp.getImplementationFor( OpenCLDevice.class )"]}, + + {"kind":"then","text":"The variables containing the executor instances are not null.","code":["hostExecutor != null","clExecutor != null || !Neureka.get().canAccessOpenCLDevice()"]}, + + {"kind":"where","text":"The variable \"imp\" is from a List of OperationType implementations of type \"Operator\".","code":{"imp":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation implementations have expected Executor instances. [22]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"when","text":"Host- and CL- executor instance are being fetched...","code":["def hostExecutor = imp.getImplementationFor( CPU.class )","def clExecutor = imp.getImplementationFor( OpenCLDevice.class )"]}, + + {"kind":"then","text":"The variables containing the executor instances are not null.","code":["hostExecutor != null","clExecutor != null || !Neureka.get().canAccessOpenCLDevice()"]}, + + {"kind":"where","text":"The variable \"imp\" is from a List of OperationType implementations of type \"Operator\".","code":{"imp":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation implementations have expected Executor instances. [23]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"when","text":"Host- and CL- executor instance are being fetched...","code":["def hostExecutor = imp.getImplementationFor( CPU.class )","def clExecutor = imp.getImplementationFor( OpenCLDevice.class )"]}, + + {"kind":"then","text":"The variables containing the executor instances are not null.","code":["hostExecutor != null","clExecutor != null || !Neureka.get().canAccessOpenCLDevice()"]}, + + {"kind":"where","text":"The variable \"imp\" is from a List of OperationType implementations of type \"Operator\".","code":{"imp":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation implementations have expected Executor instances. 
[24]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"when","text":"Host- and CL- executor instance are being fetched...","code":["def hostExecutor = imp.getImplementationFor( CPU.class )","def clExecutor = imp.getImplementationFor( OpenCLDevice.class )"]}, + + {"kind":"then","text":"The variables containing the executor instances are not null.","code":["hostExecutor != null","clExecutor != null || !Neureka.get().canAccessOpenCLDevice()"]}, + + {"kind":"where","text":"The variable \"imp\" is from a List of OperationType implementations of type \"Operator\".","code":{"imp":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"HostExecutors of Operator implementations behave as expected. [0]", + "result":"PASS", + "duration":"0.064 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"Mock instances to simulate an ExecutionCall instance.","code":["var call = Mock( ExecutionCall )","var device = Mock( CPU )","var tensor = Mock( Tensor )","var mutate = Mock(MutateTensor)","var ndConf = Mock(NDConfiguration)","var hostExecutor = imp.getImplementationFor( CPU.class )","var nativeExecutor = Mock( CPU.JVMExecutor )","var dataObj = Mock(Data)"]}, + + {"kind":"when","text":"Host-executor instance is being called...","code":["hostExecutor.run( call )"]}, + + {"kind":"then","text":"The mock objects are being called as expected.","code":["(1.._) * call.arity() >> 3","(0.._) * tensor.getMut() >> mutate","(0.._) * tensor.mut() >> mutate","(1.._) * call.getDevice() >> device","1 * device.getExecutor() >> nativeExecutor","1 * nativeExecutor.threaded( _, _ )","(0.._) * call.inputs() >> new Tensor[]{ tensor, tensor, tensor }","(0.._) * call.input({it >= 0 && it <= 2}) >> tensor","(0.._) * call.input( Number.class, 0 ) >> tensor","(0.._) * call.input(0) >> tensor","(0.._) * call.input( Number.class, 1 ) >> tensor","(1.._) * tensor.size() >> 0","(0.._) * tensor.itemType >> Double","(0.._) * tensor.getDataAs(double[]) >> new double[0]","(0.._) * mutate.data >> dataObj","(0.._) * dataObj.get >> new double[0]","(0.._) * mutate.getDataAs(double[]) >> new double[0]","(0.._) * mutate.getDataForWriting(double[]) >> new double[0]","(1.._) * tensor.getNDConf() >> ndConf","(1.._) * ndConf.isSimple() >> false"]}, + + {"kind":"where","text":"The variable \"imp\" is from a List of OperationType implementations of type \"Operator\".","code":{"imp":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"HostExecutors of Operator implementations behave as expected. 
[1]", + "result":"PASS", + "duration":"0.002 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"Mock instances to simulate an ExecutionCall instance.","code":["var call = Mock( ExecutionCall )","var device = Mock( CPU )","var tensor = Mock( Tensor )","var mutate = Mock(MutateTensor)","var ndConf = Mock(NDConfiguration)","var hostExecutor = imp.getImplementationFor( CPU.class )","var nativeExecutor = Mock( CPU.JVMExecutor )","var dataObj = Mock(Data)"]}, + + {"kind":"when","text":"Host-executor instance is being called...","code":["hostExecutor.run( call )"]}, + + {"kind":"then","text":"The mock objects are being called as expected.","code":["(1.._) * call.arity() >> 3","(0.._) * tensor.getMut() >> mutate","(0.._) * tensor.mut() >> mutate","(1.._) * call.getDevice() >> device","1 * device.getExecutor() >> nativeExecutor","1 * nativeExecutor.threaded( _, _ )","(0.._) * call.inputs() >> new Tensor[]{ tensor, tensor, tensor }","(0.._) * call.input({it >= 0 && it <= 2}) >> tensor","(0.._) * call.input( Number.class, 0 ) >> tensor","(0.._) * call.input(0) >> tensor","(0.._) * call.input( Number.class, 1 ) >> tensor","(1.._) * tensor.size() >> 0","(0.._) * tensor.itemType >> Double","(0.._) * tensor.getDataAs(double[]) >> new double[0]","(0.._) * mutate.data >> dataObj","(0.._) * dataObj.get >> new double[0]","(0.._) * mutate.getDataAs(double[]) >> new double[0]","(0.._) * mutate.getDataForWriting(double[]) >> new double[0]","(1.._) * tensor.getNDConf() >> ndConf","(1.._) * ndConf.isSimple() >> false"]}, + + {"kind":"where","text":"The variable \"imp\" is from a List of OperationType implementations of type \"Operator\".","code":{"imp":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"HostExecutors of Operator implementations behave as expected. 
[2]", + "result":"PASS", + "duration":"0.003 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"Mock instances to simulate an ExecutionCall instance.","code":["var call = Mock( ExecutionCall )","var device = Mock( CPU )","var tensor = Mock( Tensor )","var mutate = Mock(MutateTensor)","var ndConf = Mock(NDConfiguration)","var hostExecutor = imp.getImplementationFor( CPU.class )","var nativeExecutor = Mock( CPU.JVMExecutor )","var dataObj = Mock(Data)"]}, + + {"kind":"when","text":"Host-executor instance is being called...","code":["hostExecutor.run( call )"]}, + + {"kind":"then","text":"The mock objects are being called as expected.","code":["(1.._) * call.arity() >> 3","(0.._) * tensor.getMut() >> mutate","(0.._) * tensor.mut() >> mutate","(1.._) * call.getDevice() >> device","1 * device.getExecutor() >> nativeExecutor","1 * nativeExecutor.threaded( _, _ )","(0.._) * call.inputs() >> new Tensor[]{ tensor, tensor, tensor }","(0.._) * call.input({it >= 0 && it <= 2}) >> tensor","(0.._) * call.input( Number.class, 0 ) >> tensor","(0.._) * call.input(0) >> tensor","(0.._) * call.input( Number.class, 1 ) >> tensor","(1.._) * tensor.size() >> 0","(0.._) * tensor.itemType >> Double","(0.._) * tensor.getDataAs(double[]) >> new double[0]","(0.._) * mutate.data >> dataObj","(0.._) * dataObj.get >> new double[0]","(0.._) * mutate.getDataAs(double[]) >> new double[0]","(0.._) * mutate.getDataForWriting(double[]) >> new double[0]","(1.._) * tensor.getNDConf() >> ndConf","(1.._) * ndConf.isSimple() >> false"]}, + + {"kind":"where","text":"The variable \"imp\" is from a List of OperationType implementations of type \"Operator\".","code":{"imp":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"HostExecutors of Operator implementations behave as expected. 
[3]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"Mock instances to simulate an ExecutionCall instance.","code":["var call = Mock( ExecutionCall )","var device = Mock( CPU )","var tensor = Mock( Tensor )","var mutate = Mock(MutateTensor)","var ndConf = Mock(NDConfiguration)","var hostExecutor = imp.getImplementationFor( CPU.class )","var nativeExecutor = Mock( CPU.JVMExecutor )","var dataObj = Mock(Data)"]}, + + {"kind":"when","text":"Host-executor instance is being called...","code":["hostExecutor.run( call )"]}, + + {"kind":"then","text":"The mock objects are being called as expected.","code":["(1.._) * call.arity() >> 3","(0.._) * tensor.getMut() >> mutate","(0.._) * tensor.mut() >> mutate","(1.._) * call.getDevice() >> device","1 * device.getExecutor() >> nativeExecutor","1 * nativeExecutor.threaded( _, _ )","(0.._) * call.inputs() >> new Tensor[]{ tensor, tensor, tensor }","(0.._) * call.input({it >= 0 && it <= 2}) >> tensor","(0.._) * call.input( Number.class, 0 ) >> tensor","(0.._) * call.input(0) >> tensor","(0.._) * call.input( Number.class, 1 ) >> tensor","(1.._) * tensor.size() >> 0","(0.._) * tensor.itemType >> Double","(0.._) * tensor.getDataAs(double[]) >> new double[0]","(0.._) * mutate.data >> dataObj","(0.._) * dataObj.get >> new double[0]","(0.._) * mutate.getDataAs(double[]) >> new double[0]","(0.._) * mutate.getDataForWriting(double[]) >> new double[0]","(1.._) * tensor.getNDConf() >> ndConf","(1.._) * ndConf.isSimple() >> false"]}, + + {"kind":"where","text":"The variable \"imp\" is from a List of OperationType implementations of type \"Operator\".","code":{"imp":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"HostExecutors of Operator implementations behave as expected. [4]", + "result":"PASS", + "duration":"0.002 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, diff --git a/docs/spock/reports/ut.backend.core.Backend_Functional_Algorithm_Spec.json b/docs/spock/reports/ut.backend.core.Backend_Functional_Algorithm_Spec.json index 65a77338f..fad0da873 100644 --- a/docs/spock/reports/ut.backend.core.Backend_Functional_Algorithm_Spec.json +++ b/docs/spock/reports/ut.backend.core.Backend_Functional_Algorithm_Spec.json @@ -4,19 +4,61 @@ "narrative":"", "subjects":[], "statistics":{ - "runs":"3", + "runs":"7", "successRate":"100.0%", "failures":"0", "errors":"0", "skipped":"0", - "duration":"0.011 seconds" + "duration":"0.019 seconds" }, "headers":[],"tags":{},"see":[], "features":[ { - "id":"A functional algorithm cannot be used if it was not built properly!", + "id":"A functional algorithm cannot be used if it was not built properly! 
[0]", "result":"PASS", - "duration":"0.001 seconds", + "duration":"0.002 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a dumb test algorithm.","code":["def algorithm = new TestAlgorithm()"]}, + + {"kind":"when","text":"We call a method on the algorithm...","code":["caller(algorithm)"]}, + + {"kind":"then","text":"This should throw an illegal state exception, simply because we have not built the algorithm properly!","code":["def exception = thrown(IllegalStateException)"]}, + + {"kind":"and","text":"The exception tells us this:","code":["exception.message == \"Trying use an instance of 'TestAlgorithm' with name 'test_name' which was not fully built!\""]}, + + {"kind":"where","text":"We call the following methods:","code":{"caller":["{ Algorithm it -> it.autoDiffModeFrom(null) }","{ Algorithm it -> it.execute(null, null) }","{ Algorithm it -> it.prepare(null) }",""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"A functional algorithm cannot be used if it was not built properly! [1]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a dumb test algorithm.","code":["def algorithm = new TestAlgorithm()"]}, + + {"kind":"when","text":"We call a method on the algorithm...","code":["caller(algorithm)"]}, + + {"kind":"then","text":"This should throw an illegal state exception, simply because we have not built the algorithm properly!","code":["def exception = thrown(IllegalStateException)"]}, + + {"kind":"and","text":"The exception tells us this:","code":["exception.message == \"Trying use an instance of 'TestAlgorithm' with name 'test_name' which was not fully built!\""]}, + + {"kind":"where","text":"We call the following methods:","code":{"caller":["{ Algorithm it -> it.autoDiffModeFrom(null) }","{ Algorithm it -> it.execute(null, null) }","{ Algorithm it -> it.prepare(null) }",""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"A functional algorithm cannot be used if it was not built properly! [2]", + "result":"PASS", + "duration":"0", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -37,7 +79,7 @@ { "id":"A functional algorithm does not accept null as an answer!", "result":"PASS", - "duration":"0.001 seconds", + "duration":"0.002 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -60,7 +102,34 @@ }, { - "id":"A functional algorithm warns us when modified after it has been built!", + "id":"A functional algorithm warns us when modified after it has been built! 
[0]", + "result":"PASS", + "duration":"0.002 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a dumb test algorithm.","code":["def algorithm = new TestAlgorithm()"]}, + + {"kind":"and","text":"","code":["def oldStream = System.err","System.err = Mock(PrintStream)"]}, + + {"kind":"when","text":"We build it thoroughly...","code":["algorithm"," .setIsSuitableFor(call -> SuitabilityPredicate.EXCELLENT)"," .setAutogradModeFor( call -> AutoDiffMode.BACKWARD_ONLY )"," .setExecution((caller, call ) -> null)"," .setCallPreparation(call -> null)"," .buildFunAlgorithm()"]}, + + {"kind":"then","text":"The algorithm should be usable just fine!","code":["algorithm.isSuitableFor(null) == SuitabilityPredicate.EXCELLENT","algorithm.autoDiffModeFrom(null) == AutoDiffMode.BACKWARD_ONLY","algorithm.execute(null, null) == null","algorithm.prepare(null) == null"]}, + + {"kind":"when","text":"We try to modify the algorithm even if it is already built...","code":["setter(algorithm)"]}, + + {"kind":"then","text":"We will get a warning which wells us that mutating the state of the algorithm is discouraged!","code":["1 * System.err.println("," \"[Test worker] WARN neureka.backend.api.template.algorithms.AbstractFunDeviceAlgorithm - \" +"," \"Implementation '$type.simpleName' in algorithm '$algorithm' was modified! \" +"," \"Please consider only modifying the standard backend state of Neureka for experimental reasons.\""," )"]}, + + {"kind":"cleanup","text":"","code":["System.err = oldStream"]}, + + {"kind":"where","text":"","code":{"type":["ExecutionPreparation.class","SuitabilityPredicate.class","Execution.class"],"setter":["{ TestAlgorithm it -> it.setCallPreparation( call -> null ) }","{ TestAlgorithm it -> it.setIsSuitableFor( call -> SuitabilityPredicate.NOT_GOOD ) }","{ TestAlgorithm it -> it.setExecution( (caller, call) -> Result.of(AbstractDeviceAlgorithm.executeFor(caller, call, c->null)).withAutoDiff( FallbackAlgorithm::ADAction )) }"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"A functional algorithm warns us when modified after it has been built! [1]", "result":"PASS", "duration":"0.001 seconds", "iterations":{ @@ -84,6 +153,33 @@ {"kind":"where","text":"","code":{"type":["ExecutionPreparation.class","SuitabilityPredicate.class","Execution.class"],"setter":["{ TestAlgorithm it -> it.setCallPreparation( call -> null ) }","{ TestAlgorithm it -> it.setIsSuitableFor( call -> SuitabilityPredicate.NOT_GOOD ) }","{ TestAlgorithm it -> it.setExecution( (caller, call) -> Result.of(AbstractDeviceAlgorithm.executeFor(caller, call, c->null)).withAutoDiff( FallbackAlgorithm::ADAction )) }"]}} ], "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"A functional algorithm warns us when modified after it has been built! 
[2]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a dumb test algorithm.","code":["def algorithm = new TestAlgorithm()"]}, + + {"kind":"and","text":"","code":["def oldStream = System.err","System.err = Mock(PrintStream)"]}, + + {"kind":"when","text":"We build it thoroughly...","code":["algorithm"," .setIsSuitableFor(call -> SuitabilityPredicate.EXCELLENT)"," .setAutogradModeFor( call -> AutoDiffMode.BACKWARD_ONLY )"," .setExecution((caller, call ) -> null)"," .setCallPreparation(call -> null)"," .buildFunAlgorithm()"]}, + + {"kind":"then","text":"The algorithm should be usable just fine!","code":["algorithm.isSuitableFor(null) == SuitabilityPredicate.EXCELLENT","algorithm.autoDiffModeFrom(null) == AutoDiffMode.BACKWARD_ONLY","algorithm.execute(null, null) == null","algorithm.prepare(null) == null"]}, + + {"kind":"when","text":"We try to modify the algorithm even if it is already built...","code":["setter(algorithm)"]}, + + {"kind":"then","text":"We will get a warning which wells us that mutating the state of the algorithm is discouraged!","code":["1 * System.err.println("," \"[Test worker] WARN neureka.backend.api.template.algorithms.AbstractFunDeviceAlgorithm - \" +"," \"Implementation '$type.simpleName' in algorithm '$algorithm' was modified! \" +"," \"Please consider only modifying the standard backend state of Neureka for experimental reasons.\""," )"]}, + + {"kind":"cleanup","text":"","code":["System.err = oldStream"]}, + + {"kind":"where","text":"","code":{"type":["ExecutionPreparation.class","SuitabilityPredicate.class","Execution.class"],"setter":["{ TestAlgorithm it -> it.setCallPreparation( call -> null ) }","{ TestAlgorithm it -> it.setIsSuitableFor( call -> SuitabilityPredicate.NOT_GOOD ) }","{ TestAlgorithm it -> it.setExecution( (caller, call) -> Result.of(AbstractDeviceAlgorithm.executeFor(caller, call, c->null)).withAutoDiff( FallbackAlgorithm::ADAction )) }"]}} + ], + "problems":{"dataValues":[], "errors":[]} } ], diff --git a/docs/spock/reports/ut.backend.core.Matrix_Multiplication_Spec.json b/docs/spock/reports/ut.backend.core.Matrix_Multiplication_Spec.json index 84001b5de..60ff542b4 100644 --- a/docs/spock/reports/ut.backend.core.Matrix_Multiplication_Spec.json +++ b/docs/spock/reports/ut.backend.core.Matrix_Multiplication_Spec.json @@ -4,19 +4,228 @@ "narrative":"This specification covers library internal matrix multiplication logic,\n specifically the CPU implementation.\n Do not depend on the API used in this specification as it is subject to change!", "subjects":[], "statistics":{ - "runs":"2", + "runs":"13", "successRate":"100.0%", "failures":"0", "errors":"0", "skipped":"0", - "duration":"1.154 seconds" + "duration":"10.522 seconds" }, "headers":[],"tags":{},"see":[], "features":[ { - "id":"The CPU matrix multiplication implementation works as expected.", + "id":"The CPU matrix multiplication implementation works as expected. 
[0]", "result":"PASS", - "duration":"0.001 seconds", + "duration":"0.004 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We crete an output array and convert it to the targeted array type.","code":["var C = new double[M*N].asType(type)"]}, + + {"kind":"when","text":"We perform the matrix multiplication.","code":["CPUMatMul.execute(true, A.asType(type), B.asType(type), C, M, K, N)"]}, + + {"kind":"then","text":"The result is as expected.","code":["C == expectedC"]}, + + {"kind":"where","text":"The following data arrays and dimensions can be used for the test.","code":{"type":["float[]","float[]","float[]","double[]","double[]","double[]","int[]","int[]","int[]","long[]","long[]","long[]"],"A":["[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]"],"B":["[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]"],"M":["2","1","2","2","1","2","2","1","2","2","1","2"],"K":["2","2","1","2","2","1","2","2","1","2","2","1"],"N":["2","1","2","2","1","2","2","1","2","2","1","2"],"expectedC":["[ 1, 0, 0, 1 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 1, 0, 0, 1 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 3, -2, 1, 0 ]","[ 1 ]","[ 2, 2, -1, -1 ]","[ 3, -2, 1, 0 ]","[ 1 ]","[ 2, 2, -1, -1 ]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The CPU matrix multiplication implementation works as expected. [1]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We crete an output array and convert it to the targeted array type.","code":["var C = new double[M*N].asType(type)"]}, + + {"kind":"when","text":"We perform the matrix multiplication.","code":["CPUMatMul.execute(true, A.asType(type), B.asType(type), C, M, K, N)"]}, + + {"kind":"then","text":"The result is as expected.","code":["C == expectedC"]}, + + {"kind":"where","text":"The following data arrays and dimensions can be used for the test.","code":{"type":["float[]","float[]","float[]","double[]","double[]","double[]","int[]","int[]","int[]","long[]","long[]","long[]"],"A":["[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]"],"B":["[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]"],"M":["2","1","2","2","1","2","2","1","2","2","1","2"],"K":["2","2","1","2","2","1","2","2","1","2","2","1"],"N":["2","1","2","2","1","2","2","1","2","2","1","2"],"expectedC":["[ 1, 0, 0, 1 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 1, 0, 0, 1 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 3, -2, 1, 0 ]","[ 1 ]","[ 2, 2, -1, -1 ]","[ 3, -2, 1, 0 ]","[ 1 ]","[ 2, 2, -1, -1 ]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The CPU matrix multiplication implementation works as expected. 
[2]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We crete an output array and convert it to the targeted array type.","code":["var C = new double[M*N].asType(type)"]}, + + {"kind":"when","text":"We perform the matrix multiplication.","code":["CPUMatMul.execute(true, A.asType(type), B.asType(type), C, M, K, N)"]}, + + {"kind":"then","text":"The result is as expected.","code":["C == expectedC"]}, + + {"kind":"where","text":"The following data arrays and dimensions can be used for the test.","code":{"type":["float[]","float[]","float[]","double[]","double[]","double[]","int[]","int[]","int[]","long[]","long[]","long[]"],"A":["[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]"],"B":["[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]"],"M":["2","1","2","2","1","2","2","1","2","2","1","2"],"K":["2","2","1","2","2","1","2","2","1","2","2","1"],"N":["2","1","2","2","1","2","2","1","2","2","1","2"],"expectedC":["[ 1, 0, 0, 1 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 1, 0, 0, 1 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 3, -2, 1, 0 ]","[ 1 ]","[ 2, 2, -1, -1 ]","[ 3, -2, 1, 0 ]","[ 1 ]","[ 2, 2, -1, -1 ]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The CPU matrix multiplication implementation works as expected. [3]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We crete an output array and convert it to the targeted array type.","code":["var C = new double[M*N].asType(type)"]}, + + {"kind":"when","text":"We perform the matrix multiplication.","code":["CPUMatMul.execute(true, A.asType(type), B.asType(type), C, M, K, N)"]}, + + {"kind":"then","text":"The result is as expected.","code":["C == expectedC"]}, + + {"kind":"where","text":"The following data arrays and dimensions can be used for the test.","code":{"type":["float[]","float[]","float[]","double[]","double[]","double[]","int[]","int[]","int[]","long[]","long[]","long[]"],"A":["[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]"],"B":["[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]"],"M":["2","1","2","2","1","2","2","1","2","2","1","2"],"K":["2","2","1","2","2","1","2","2","1","2","2","1"],"N":["2","1","2","2","1","2","2","1","2","2","1","2"],"expectedC":["[ 1, 0, 0, 1 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 1, 0, 0, 1 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 3, -2, 1, 0 ]","[ 1 ]","[ 2, 2, -1, -1 ]","[ 3, -2, 1, 0 ]","[ 1 ]","[ 2, 2, -1, -1 ]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The CPU matrix multiplication implementation works as expected. 
[4]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We crete an output array and convert it to the targeted array type.","code":["var C = new double[M*N].asType(type)"]}, + + {"kind":"when","text":"We perform the matrix multiplication.","code":["CPUMatMul.execute(true, A.asType(type), B.asType(type), C, M, K, N)"]}, + + {"kind":"then","text":"The result is as expected.","code":["C == expectedC"]}, + + {"kind":"where","text":"The following data arrays and dimensions can be used for the test.","code":{"type":["float[]","float[]","float[]","double[]","double[]","double[]","int[]","int[]","int[]","long[]","long[]","long[]"],"A":["[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]"],"B":["[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]"],"M":["2","1","2","2","1","2","2","1","2","2","1","2"],"K":["2","2","1","2","2","1","2","2","1","2","2","1"],"N":["2","1","2","2","1","2","2","1","2","2","1","2"],"expectedC":["[ 1, 0, 0, 1 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 1, 0, 0, 1 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 3, -2, 1, 0 ]","[ 1 ]","[ 2, 2, -1, -1 ]","[ 3, -2, 1, 0 ]","[ 1 ]","[ 2, 2, -1, -1 ]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The CPU matrix multiplication implementation works as expected. [5]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We crete an output array and convert it to the targeted array type.","code":["var C = new double[M*N].asType(type)"]}, + + {"kind":"when","text":"We perform the matrix multiplication.","code":["CPUMatMul.execute(true, A.asType(type), B.asType(type), C, M, K, N)"]}, + + {"kind":"then","text":"The result is as expected.","code":["C == expectedC"]}, + + {"kind":"where","text":"The following data arrays and dimensions can be used for the test.","code":{"type":["float[]","float[]","float[]","double[]","double[]","double[]","int[]","int[]","int[]","long[]","long[]","long[]"],"A":["[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]"],"B":["[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]"],"M":["2","1","2","2","1","2","2","1","2","2","1","2"],"K":["2","2","1","2","2","1","2","2","1","2","2","1"],"N":["2","1","2","2","1","2","2","1","2","2","1","2"],"expectedC":["[ 1, 0, 0, 1 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 1, 0, 0, 1 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 3, -2, 1, 0 ]","[ 1 ]","[ 2, 2, -1, -1 ]","[ 3, -2, 1, 0 ]","[ 1 ]","[ 2, 2, -1, -1 ]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The CPU matrix multiplication implementation works as expected. 
[6]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We crete an output array and convert it to the targeted array type.","code":["var C = new double[M*N].asType(type)"]}, + + {"kind":"when","text":"We perform the matrix multiplication.","code":["CPUMatMul.execute(true, A.asType(type), B.asType(type), C, M, K, N)"]}, + + {"kind":"then","text":"The result is as expected.","code":["C == expectedC"]}, + + {"kind":"where","text":"The following data arrays and dimensions can be used for the test.","code":{"type":["float[]","float[]","float[]","double[]","double[]","double[]","int[]","int[]","int[]","long[]","long[]","long[]"],"A":["[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]"],"B":["[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]"],"M":["2","1","2","2","1","2","2","1","2","2","1","2"],"K":["2","2","1","2","2","1","2","2","1","2","2","1"],"N":["2","1","2","2","1","2","2","1","2","2","1","2"],"expectedC":["[ 1, 0, 0, 1 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 1, 0, 0, 1 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 3, -2, 1, 0 ]","[ 1 ]","[ 2, 2, -1, -1 ]","[ 3, -2, 1, 0 ]","[ 1 ]","[ 2, 2, -1, -1 ]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The CPU matrix multiplication implementation works as expected. [7]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We crete an output array and convert it to the targeted array type.","code":["var C = new double[M*N].asType(type)"]}, + + {"kind":"when","text":"We perform the matrix multiplication.","code":["CPUMatMul.execute(true, A.asType(type), B.asType(type), C, M, K, N)"]}, + + {"kind":"then","text":"The result is as expected.","code":["C == expectedC"]}, + + {"kind":"where","text":"The following data arrays and dimensions can be used for the test.","code":{"type":["float[]","float[]","float[]","double[]","double[]","double[]","int[]","int[]","int[]","long[]","long[]","long[]"],"A":["[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]"],"B":["[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]"],"M":["2","1","2","2","1","2","2","1","2","2","1","2"],"K":["2","2","1","2","2","1","2","2","1","2","2","1"],"N":["2","1","2","2","1","2","2","1","2","2","1","2"],"expectedC":["[ 1, 0, 0, 1 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 1, 0, 0, 1 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 3, -2, 1, 0 ]","[ 1 ]","[ 2, 2, -1, -1 ]","[ 3, -2, 1, 0 ]","[ 1 ]","[ 2, 2, -1, -1 ]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The CPU matrix multiplication implementation works as expected. 
[8]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We crete an output array and convert it to the targeted array type.","code":["var C = new double[M*N].asType(type)"]}, + + {"kind":"when","text":"We perform the matrix multiplication.","code":["CPUMatMul.execute(true, A.asType(type), B.asType(type), C, M, K, N)"]}, + + {"kind":"then","text":"The result is as expected.","code":["C == expectedC"]}, + + {"kind":"where","text":"The following data arrays and dimensions can be used for the test.","code":{"type":["float[]","float[]","float[]","double[]","double[]","double[]","int[]","int[]","int[]","long[]","long[]","long[]"],"A":["[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]"],"B":["[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]"],"M":["2","1","2","2","1","2","2","1","2","2","1","2"],"K":["2","2","1","2","2","1","2","2","1","2","2","1"],"N":["2","1","2","2","1","2","2","1","2","2","1","2"],"expectedC":["[ 1, 0, 0, 1 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 1, 0, 0, 1 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 3, -2, 1, 0 ]","[ 1 ]","[ 2, 2, -1, -1 ]","[ 3, -2, 1, 0 ]","[ 1 ]","[ 2, 2, -1, -1 ]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The CPU matrix multiplication implementation works as expected. [9]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We crete an output array and convert it to the targeted array type.","code":["var C = new double[M*N].asType(type)"]}, + + {"kind":"when","text":"We perform the matrix multiplication.","code":["CPUMatMul.execute(true, A.asType(type), B.asType(type), C, M, K, N)"]}, + + {"kind":"then","text":"The result is as expected.","code":["C == expectedC"]}, + + {"kind":"where","text":"The following data arrays and dimensions can be used for the test.","code":{"type":["float[]","float[]","float[]","double[]","double[]","double[]","int[]","int[]","int[]","long[]","long[]","long[]"],"A":["[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]"],"B":["[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]"],"M":["2","1","2","2","1","2","2","1","2","2","1","2"],"K":["2","2","1","2","2","1","2","2","1","2","2","1"],"N":["2","1","2","2","1","2","2","1","2","2","1","2"],"expectedC":["[ 1, 0, 0, 1 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 1, 0, 0, 1 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 3, -2, 1, 0 ]","[ 1 ]","[ 2, 2, -1, -1 ]","[ 3, -2, 1, 0 ]","[ 1 ]","[ 2, 2, -1, -1 ]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The CPU matrix multiplication implementation works as expected. 
[10]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We crete an output array and convert it to the targeted array type.","code":["var C = new double[M*N].asType(type)"]}, + + {"kind":"when","text":"We perform the matrix multiplication.","code":["CPUMatMul.execute(true, A.asType(type), B.asType(type), C, M, K, N)"]}, + + {"kind":"then","text":"The result is as expected.","code":["C == expectedC"]}, + + {"kind":"where","text":"The following data arrays and dimensions can be used for the test.","code":{"type":["float[]","float[]","float[]","double[]","double[]","double[]","int[]","int[]","int[]","long[]","long[]","long[]"],"A":["[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]"],"B":["[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]"],"M":["2","1","2","2","1","2","2","1","2","2","1","2"],"K":["2","2","1","2","2","1","2","2","1","2","2","1"],"N":["2","1","2","2","1","2","2","1","2","2","1","2"],"expectedC":["[ 1, 0, 0, 1 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 1, 0, 0, 1 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 3, -2, 1, 0 ]","[ 1 ]","[ 2, 2, -1, -1 ]","[ 3, -2, 1, 0 ]","[ 1 ]","[ 2, 2, -1, -1 ]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The CPU matrix multiplication implementation works as expected. [11]", + "result":"PASS", + "duration":"0", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -35,7 +244,7 @@ { "id":"The internal matrix multiplication test script runs!", "result":"PASS", - "duration":"1.145 seconds", + "duration":"10.509 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, diff --git a/docs/spock/reports/ut.backend.core.OpenCL_Backend_Spec.json b/docs/spock/reports/ut.backend.core.OpenCL_Backend_Spec.json index 043c6c6be..0da159151 100644 --- a/docs/spock/reports/ut.backend.core.OpenCL_Backend_Spec.json +++ b/docs/spock/reports/ut.backend.core.OpenCL_Backend_Spec.json @@ -9,14 +9,14 @@ "failures":"0", "errors":"0", "skipped":"0", - "duration":"0.006 seconds" + "duration":"0.021 seconds" }, "headers":[],"tags":{},"see":[], "features":[ { "id":"The OpenCL backend context can load implementations.", "result":"PASS", - "duration":"0.005 seconds", + "duration":"0.019 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, diff --git a/docs/spock/reports/ut.backend.core.Randomization_Spec.json b/docs/spock/reports/ut.backend.core.Randomization_Spec.json index f3b22e315..ef5570160 100644 --- a/docs/spock/reports/ut.backend.core.Randomization_Spec.json +++ b/docs/spock/reports/ut.backend.core.Randomization_Spec.json @@ -4,17 +4,17 @@ "narrative":"", "subjects":["neureka.backend.main.implementations.elementwise.CPURandomization"], "statistics":{ - "runs":"3", + "runs":"10", "successRate":"100.0%", "failures":"0", "errors":"0", "skipped":"0", - "duration":"0.018 seconds" + "duration":"0.039 seconds" }, "headers":[],"tags":{},"see":[], "features":[ { - "id":"We can make slices of tensors random.", + "id":"We can make slices of tensors random. [0]", "result":"PASS", "duration":"0.005 seconds", "iterations":{ @@ -35,12 +35,159 @@ }, { - "id":"Randomization is in essence the same algorithm as JDKs \"Random\".", + "id":"We can make slices of tensors random. 
[1]", "result":"PASS", "duration":"0.002 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, + "blocks":[ + {"kind":"given","text":"","code":["var r = Function.of(\"random(I[0])\")"]}, + + {"kind":"and","text":"","code":["var t = Tensor.of(values)[1..<(values.length-1)] //"]}, + + {"kind":"when","text":"","code":["r.with(Arg.Seed.of(73)).call(t)"]}, + + {"kind":"then","text":"","code":["t.items == expected"]}, + + {"kind":"where","text":"","code":{"values":["[0,0,0,0,0] as int[]","[0,0,0,0,0] as byte[]","[0,0,0,0,0] as short[]","[0,0,0,0,0] as long[]","[0,0,0,0,0] as float[]","[0,0,0,0,0] as double[]","[0,0,0,0,0] as char[]","[0,0,0,0,0] as boolean[]"],"expected":["[1682321148, -442781155, 1450818241] as int[]","[-4, 29, -63] as byte[]","[12028, -19939, -17727] as short[]","[7225514313620035527, -1901730580994377074, 6231216898071663791] as long[]","[1.0493218, 0.96351904, 1.3803823] as float[]","[1.0493217368660326, 0.9635190511757348, 1.3803823236577413] as double[]","[12028, 45597, 47809] as char[]","[false, true, false] as boolean[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can make slices of tensors random. [2]", + "result":"PASS", + "duration":"0.002 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["var r = Function.of(\"random(I[0])\")"]}, + + {"kind":"and","text":"","code":["var t = Tensor.of(values)[1..<(values.length-1)] //"]}, + + {"kind":"when","text":"","code":["r.with(Arg.Seed.of(73)).call(t)"]}, + + {"kind":"then","text":"","code":["t.items == expected"]}, + + {"kind":"where","text":"","code":{"values":["[0,0,0,0,0] as int[]","[0,0,0,0,0] as byte[]","[0,0,0,0,0] as short[]","[0,0,0,0,0] as long[]","[0,0,0,0,0] as float[]","[0,0,0,0,0] as double[]","[0,0,0,0,0] as char[]","[0,0,0,0,0] as boolean[]"],"expected":["[1682321148, -442781155, 1450818241] as int[]","[-4, 29, -63] as byte[]","[12028, -19939, -17727] as short[]","[7225514313620035527, -1901730580994377074, 6231216898071663791] as long[]","[1.0493218, 0.96351904, 1.3803823] as float[]","[1.0493217368660326, 0.9635190511757348, 1.3803823236577413] as double[]","[12028, 45597, 47809] as char[]","[false, true, false] as boolean[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can make slices of tensors random. [3]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["var r = Function.of(\"random(I[0])\")"]}, + + {"kind":"and","text":"","code":["var t = Tensor.of(values)[1..<(values.length-1)] //"]}, + + {"kind":"when","text":"","code":["r.with(Arg.Seed.of(73)).call(t)"]}, + + {"kind":"then","text":"","code":["t.items == expected"]}, + + {"kind":"where","text":"","code":{"values":["[0,0,0,0,0] as int[]","[0,0,0,0,0] as byte[]","[0,0,0,0,0] as short[]","[0,0,0,0,0] as long[]","[0,0,0,0,0] as float[]","[0,0,0,0,0] as double[]","[0,0,0,0,0] as char[]","[0,0,0,0,0] as boolean[]"],"expected":["[1682321148, -442781155, 1450818241] as int[]","[-4, 29, -63] as byte[]","[12028, -19939, -17727] as short[]","[7225514313620035527, -1901730580994377074, 6231216898071663791] as long[]","[1.0493218, 0.96351904, 1.3803823] as float[]","[1.0493217368660326, 0.9635190511757348, 1.3803823236577413] as double[]","[12028, 45597, 47809] as char[]","[false, true, false] as boolean[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can make slices of tensors random. 
[4]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["var r = Function.of(\"random(I[0])\")"]}, + + {"kind":"and","text":"","code":["var t = Tensor.of(values)[1..<(values.length-1)] //"]}, + + {"kind":"when","text":"","code":["r.with(Arg.Seed.of(73)).call(t)"]}, + + {"kind":"then","text":"","code":["t.items == expected"]}, + + {"kind":"where","text":"","code":{"values":["[0,0,0,0,0] as int[]","[0,0,0,0,0] as byte[]","[0,0,0,0,0] as short[]","[0,0,0,0,0] as long[]","[0,0,0,0,0] as float[]","[0,0,0,0,0] as double[]","[0,0,0,0,0] as char[]","[0,0,0,0,0] as boolean[]"],"expected":["[1682321148, -442781155, 1450818241] as int[]","[-4, 29, -63] as byte[]","[12028, -19939, -17727] as short[]","[7225514313620035527, -1901730580994377074, 6231216898071663791] as long[]","[1.0493218, 0.96351904, 1.3803823] as float[]","[1.0493217368660326, 0.9635190511757348, 1.3803823236577413] as double[]","[12028, 45597, 47809] as char[]","[false, true, false] as boolean[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can make slices of tensors random. [5]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["var r = Function.of(\"random(I[0])\")"]}, + + {"kind":"and","text":"","code":["var t = Tensor.of(values)[1..<(values.length-1)] //"]}, + + {"kind":"when","text":"","code":["r.with(Arg.Seed.of(73)).call(t)"]}, + + {"kind":"then","text":"","code":["t.items == expected"]}, + + {"kind":"where","text":"","code":{"values":["[0,0,0,0,0] as int[]","[0,0,0,0,0] as byte[]","[0,0,0,0,0] as short[]","[0,0,0,0,0] as long[]","[0,0,0,0,0] as float[]","[0,0,0,0,0] as double[]","[0,0,0,0,0] as char[]","[0,0,0,0,0] as boolean[]"],"expected":["[1682321148, -442781155, 1450818241] as int[]","[-4, 29, -63] as byte[]","[12028, -19939, -17727] as short[]","[7225514313620035527, -1901730580994377074, 6231216898071663791] as long[]","[1.0493218, 0.96351904, 1.3803823] as float[]","[1.0493217368660326, 0.9635190511757348, 1.3803823236577413] as double[]","[12028, 45597, 47809] as char[]","[false, true, false] as boolean[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can make slices of tensors random. [6]", + "result":"PASS", + "duration":"0.002 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["var r = Function.of(\"random(I[0])\")"]}, + + {"kind":"and","text":"","code":["var t = Tensor.of(values)[1..<(values.length-1)] //"]}, + + {"kind":"when","text":"","code":["r.with(Arg.Seed.of(73)).call(t)"]}, + + {"kind":"then","text":"","code":["t.items == expected"]}, + + {"kind":"where","text":"","code":{"values":["[0,0,0,0,0] as int[]","[0,0,0,0,0] as byte[]","[0,0,0,0,0] as short[]","[0,0,0,0,0] as long[]","[0,0,0,0,0] as float[]","[0,0,0,0,0] as double[]","[0,0,0,0,0] as char[]","[0,0,0,0,0] as boolean[]"],"expected":["[1682321148, -442781155, 1450818241] as int[]","[-4, 29, -63] as byte[]","[12028, -19939, -17727] as short[]","[7225514313620035527, -1901730580994377074, 6231216898071663791] as long[]","[1.0493218, 0.96351904, 1.3803823] as float[]","[1.0493217368660326, 0.9635190511757348, 1.3803823236577413] as double[]","[12028, 45597, 47809] as char[]","[false, true, false] as boolean[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can make slices of tensors random. 
[7]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["var r = Function.of(\"random(I[0])\")"]}, + + {"kind":"and","text":"","code":["var t = Tensor.of(values)[1..<(values.length-1)] //"]}, + + {"kind":"when","text":"","code":["r.with(Arg.Seed.of(73)).call(t)"]}, + + {"kind":"then","text":"","code":["t.items == expected"]}, + + {"kind":"where","text":"","code":{"values":["[0,0,0,0,0] as int[]","[0,0,0,0,0] as byte[]","[0,0,0,0,0] as short[]","[0,0,0,0,0] as long[]","[0,0,0,0,0] as float[]","[0,0,0,0,0] as double[]","[0,0,0,0,0] as char[]","[0,0,0,0,0] as boolean[]"],"expected":["[1682321148, -442781155, 1450818241] as int[]","[-4, 29, -63] as byte[]","[12028, -19939, -17727] as short[]","[7225514313620035527, -1901730580994377074, 6231216898071663791] as long[]","[1.0493218, 0.96351904, 1.3803823] as float[]","[1.0493217368660326, 0.9635190511757348, 1.3803823236577413] as double[]","[12028, 45597, 47809] as char[]","[false, true, false] as boolean[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Randomization is in essence the same algorithm as JDKs \"Random\".", + "result":"PASS", + "duration":"0.004 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, "blocks":[ {"kind":"given","text":"","code":["var random = new Random()","var seed = CPURandomization.initialScramble(666_42_666)","var r2 = [0,0] as double[]"]}, @@ -56,7 +203,7 @@ { "id":"The Randomization class can fill various types of arrays with pseudo random numbers.", "result":"PASS", - "duration":"0.003 seconds", + "duration":"0.009 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, diff --git a/docs/spock/reports/ut.device.CPU_Spec.json b/docs/spock/reports/ut.device.CPU_Spec.json index b308ba9a5..6532a46c5 100644 --- a/docs/spock/reports/ut.device.CPU_Spec.json +++ b/docs/spock/reports/ut.device.CPU_Spec.json @@ -1,7 +1,7 @@ { "className":"ut.device.CPU_Spec", "title":"The CPU device, an API for CPU based execution", - "narrative":"The CPU class, one of many implementations of the Device interface,\n is simply supposed to be an API for dispatching threaded workloads onto the CPU.\n Contrary to other types of device, the CPU will host tensor data by default, simply\n because the tensors will be stored in RAM if no device was specified.", + "narrative":"The CPU class, one of many implementations of the Device interface, \n is simply supposed to be an API for dispatching threaded workloads onto the CPU.\n Contrary to other types of device, the CPU will host tensor data by default, simply\n because the tensors will be stored in RAM if no device was specified.", "subjects":["neureka.devices.host.CPU","neureka.devices.Device"], "statistics":{ "runs":"4", @@ -9,14 +9,14 @@ "failures":"0", "errors":"0", "skipped":"0", - "duration":"0.704 seconds" + "duration":"5.707 seconds" }, "headers":[" \n

        \n The thread pool of the class neureka.devices.host.CPU executor becomes\n more active when receiving larger workloads which\n benefit from parallelization. \n

        \n "],"tags":{},"see":[], "features":[ { "id":"Thread pool executes given workload in parallel", "result":"PASS", - "duration":"0.504 seconds", + "duration":"0.002 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":["\n Warning! This test is flaky simply because it relies on the behaviour of threads\n which may or may not behave as expected. \n "] }, @@ -39,7 +39,7 @@ { "id":"CPU knows the current number of available processor cores!", "result":"PASS", - "duration":"0", + "duration":"0.001 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -52,7 +52,7 @@ { "id":"The CPU exposes a non null API for executing workloads in parallel.", "result":"PASS", - "duration":"0", + "duration":"0.001 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -67,7 +67,7 @@ { "id":"The CPU device will keep track of the amount of tensors it stores.", "result":"PASS", - "duration":"0.181 seconds", + "duration":"5.697 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, diff --git a/docs/spock/reports/ut.device.Cross_Device_IO_Spec.json b/docs/spock/reports/ut.device.Cross_Device_IO_Spec.json index 68d316820..619846156 100644 --- a/docs/spock/reports/ut.device.Cross_Device_IO_Spec.json +++ b/docs/spock/reports/ut.device.Cross_Device_IO_Spec.json @@ -4,17 +4,98 @@ "narrative":"Tensors should not manage their states\n themselves, simply because the type and location\n of the data is dependent on the device onto which they are stored.\n This specification tests of various device implementations\n enable reading to or writing from the tensors they store.", "subjects":["neureka.devices.Device","neureka.Tensor"], "statistics":{ - "runs":"2", + "runs":"15", "successRate":"100.0%", "failures":"0", "errors":"0", "skipped":"0", - "duration":"0.015 seconds" + "duration":"0.072 seconds" }, "headers":[],"tags":{},"see":[], "features":[ { - "id":"We can use the access device API to read from a tensor.", + "id":"We can use the access device API to read from a tensor. 
[0]", + "result":"PASS", + "duration":"0.007 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We fetch the required device instance from its interface.","code":["var device = Device.get(deviceType)"]}, + + {"kind":"and","text":"We fetch the array type of the tested data type!","code":["var arrayType = DataType.of(type).dataArrayType()"]}, + + {"kind":"and","text":"A tensor filled with 4 values which we are going to store on the previously fetched device.","code":["var t = Tensor.of(type).withShape(4).andFill(fill).to(device)"]}, + + {"kind":"and","text":"A slice from the above tensor.","code":["var s = t[1..2]"]}, + + {"kind":"expect","text":"The device reports that both tensors have the same data array size.","code":["device.access(t).dataSize == 4","device.access(s).dataSize == 4"]}, + + {"kind":"and","text":"Reading the underlying tensor data from the device will yield the expected result.","code":["device.access(t).readAll(false) == expected","device.access(s).readAll(false) == expected","device.access(t).readAll(true) == expected","device.access(s).readAll(true) == expected"]}, + + {"kind":"and","text":"This is also true for when reading at a particular index.","code":["device.access(t).readArray(arrayType, 1, 1) == [expected[1]]","device.access(s).readArray(arrayType, 1, 1) == [expected[1]]","device.access(t).readArray(arrayType, 2, 1) == [expected[2]]","device.access(s).readArray(arrayType, 2, 1) == [expected[2]]"]}, + + {"kind":"where","text":"The parameters in the above code can have the following states:","code":{"deviceType":["'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'GPU'","'CPU'"],"type":["Integer","Short","Long","Byte","Double","Float","Float","Character"],"fill":["[2, 1]","[2,7,8]","[6,2,6]","[6,2,7]","[3.4, 3]","[5.7,-1]","[5.7,-1]","['6','a']"],"expected":["[2, 1, 2, 1]","[2,7,8,2]","[6,2,6,6]","[6,2,7,6]","[3.4d, 3.0d, 3.4d, 3.0d]","[5.7f, -1.0f, 5.7f, -1.0f]","[5.7f, -1.0f, 5.7f, -1.0f]","['6','a','6','a']"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can use the access device API to read from a tensor. 
[1]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We fetch the required device instance from its interface.","code":["var device = Device.get(deviceType)"]}, + + {"kind":"and","text":"We fetch the array type of the tested data type!","code":["var arrayType = DataType.of(type).dataArrayType()"]}, + + {"kind":"and","text":"A tensor filled with 4 values which we are going to store on the previously fetched device.","code":["var t = Tensor.of(type).withShape(4).andFill(fill).to(device)"]}, + + {"kind":"and","text":"A slice from the above tensor.","code":["var s = t[1..2]"]}, + + {"kind":"expect","text":"The device reports that both tensors have the same data array size.","code":["device.access(t).dataSize == 4","device.access(s).dataSize == 4"]}, + + {"kind":"and","text":"Reading the underlying tensor data from the device will yield the expected result.","code":["device.access(t).readAll(false) == expected","device.access(s).readAll(false) == expected","device.access(t).readAll(true) == expected","device.access(s).readAll(true) == expected"]}, + + {"kind":"and","text":"This is also true for when reading at a particular index.","code":["device.access(t).readArray(arrayType, 1, 1) == [expected[1]]","device.access(s).readArray(arrayType, 1, 1) == [expected[1]]","device.access(t).readArray(arrayType, 2, 1) == [expected[2]]","device.access(s).readArray(arrayType, 2, 1) == [expected[2]]"]}, + + {"kind":"where","text":"The parameters in the above code can have the following states:","code":{"deviceType":["'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'GPU'","'CPU'"],"type":["Integer","Short","Long","Byte","Double","Float","Float","Character"],"fill":["[2, 1]","[2,7,8]","[6,2,6]","[6,2,7]","[3.4, 3]","[5.7,-1]","[5.7,-1]","['6','a']"],"expected":["[2, 1, 2, 1]","[2,7,8,2]","[6,2,6,6]","[6,2,7,6]","[3.4d, 3.0d, 3.4d, 3.0d]","[5.7f, -1.0f, 5.7f, -1.0f]","[5.7f, -1.0f, 5.7f, -1.0f]","['6','a','6','a']"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can use the access device API to read from a tensor. 
[2]", + "result":"PASS", + "duration":"0.002 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We fetch the required device instance from its interface.","code":["var device = Device.get(deviceType)"]}, + + {"kind":"and","text":"We fetch the array type of the tested data type!","code":["var arrayType = DataType.of(type).dataArrayType()"]}, + + {"kind":"and","text":"A tensor filled with 4 values which we are going to store on the previously fetched device.","code":["var t = Tensor.of(type).withShape(4).andFill(fill).to(device)"]}, + + {"kind":"and","text":"A slice from the above tensor.","code":["var s = t[1..2]"]}, + + {"kind":"expect","text":"The device reports that both tensors have the same data array size.","code":["device.access(t).dataSize == 4","device.access(s).dataSize == 4"]}, + + {"kind":"and","text":"Reading the underlying tensor data from the device will yield the expected result.","code":["device.access(t).readAll(false) == expected","device.access(s).readAll(false) == expected","device.access(t).readAll(true) == expected","device.access(s).readAll(true) == expected"]}, + + {"kind":"and","text":"This is also true for when reading at a particular index.","code":["device.access(t).readArray(arrayType, 1, 1) == [expected[1]]","device.access(s).readArray(arrayType, 1, 1) == [expected[1]]","device.access(t).readArray(arrayType, 2, 1) == [expected[2]]","device.access(s).readArray(arrayType, 2, 1) == [expected[2]]"]}, + + {"kind":"where","text":"The parameters in the above code can have the following states:","code":{"deviceType":["'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'GPU'","'CPU'"],"type":["Integer","Short","Long","Byte","Double","Float","Float","Character"],"fill":["[2, 1]","[2,7,8]","[6,2,6]","[6,2,7]","[3.4, 3]","[5.7,-1]","[5.7,-1]","['6','a']"],"expected":["[2, 1, 2, 1]","[2,7,8,2]","[6,2,6,6]","[6,2,7,6]","[3.4d, 3.0d, 3.4d, 3.0d]","[5.7f, -1.0f, 5.7f, -1.0f]","[5.7f, -1.0f, 5.7f, -1.0f]","['6','a','6','a']"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can use the access device API to read from a tensor. [3]", "result":"PASS", "duration":"0.002 seconds", "iterations":{ @@ -41,7 +122,240 @@ }, { - "id":"We can use the access device API to write to a tensor", + "id":"We can use the access device API to read from a tensor. 
[4]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We fetch the required device instance from its interface.","code":["var device = Device.get(deviceType)"]}, + + {"kind":"and","text":"We fetch the array type of the tested data type!","code":["var arrayType = DataType.of(type).dataArrayType()"]}, + + {"kind":"and","text":"A tensor filled with 4 values which we are going to store on the previously fetched device.","code":["var t = Tensor.of(type).withShape(4).andFill(fill).to(device)"]}, + + {"kind":"and","text":"A slice from the above tensor.","code":["var s = t[1..2]"]}, + + {"kind":"expect","text":"The device reports that both tensors have the same data array size.","code":["device.access(t).dataSize == 4","device.access(s).dataSize == 4"]}, + + {"kind":"and","text":"Reading the underlying tensor data from the device will yield the expected result.","code":["device.access(t).readAll(false) == expected","device.access(s).readAll(false) == expected","device.access(t).readAll(true) == expected","device.access(s).readAll(true) == expected"]}, + + {"kind":"and","text":"This is also true for when reading at a particular index.","code":["device.access(t).readArray(arrayType, 1, 1) == [expected[1]]","device.access(s).readArray(arrayType, 1, 1) == [expected[1]]","device.access(t).readArray(arrayType, 2, 1) == [expected[2]]","device.access(s).readArray(arrayType, 2, 1) == [expected[2]]"]}, + + {"kind":"where","text":"The parameters in the above code can have the following states:","code":{"deviceType":["'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'GPU'","'CPU'"],"type":["Integer","Short","Long","Byte","Double","Float","Float","Character"],"fill":["[2, 1]","[2,7,8]","[6,2,6]","[6,2,7]","[3.4, 3]","[5.7,-1]","[5.7,-1]","['6','a']"],"expected":["[2, 1, 2, 1]","[2,7,8,2]","[6,2,6,6]","[6,2,7,6]","[3.4d, 3.0d, 3.4d, 3.0d]","[5.7f, -1.0f, 5.7f, -1.0f]","[5.7f, -1.0f, 5.7f, -1.0f]","['6','a','6','a']"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can use the access device API to read from a tensor. 
[5]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We fetch the required device instance from its interface.","code":["var device = Device.get(deviceType)"]}, + + {"kind":"and","text":"We fetch the array type of the tested data type!","code":["var arrayType = DataType.of(type).dataArrayType()"]}, + + {"kind":"and","text":"A tensor filled with 4 values which we are going to store on the previously fetched device.","code":["var t = Tensor.of(type).withShape(4).andFill(fill).to(device)"]}, + + {"kind":"and","text":"A slice from the above tensor.","code":["var s = t[1..2]"]}, + + {"kind":"expect","text":"The device reports that both tensors have the same data array size.","code":["device.access(t).dataSize == 4","device.access(s).dataSize == 4"]}, + + {"kind":"and","text":"Reading the underlying tensor data from the device will yield the expected result.","code":["device.access(t).readAll(false) == expected","device.access(s).readAll(false) == expected","device.access(t).readAll(true) == expected","device.access(s).readAll(true) == expected"]}, + + {"kind":"and","text":"This is also true for when reading at a particular index.","code":["device.access(t).readArray(arrayType, 1, 1) == [expected[1]]","device.access(s).readArray(arrayType, 1, 1) == [expected[1]]","device.access(t).readArray(arrayType, 2, 1) == [expected[2]]","device.access(s).readArray(arrayType, 2, 1) == [expected[2]]"]}, + + {"kind":"where","text":"The parameters in the above code can have the following states:","code":{"deviceType":["'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'GPU'","'CPU'"],"type":["Integer","Short","Long","Byte","Double","Float","Float","Character"],"fill":["[2, 1]","[2,7,8]","[6,2,6]","[6,2,7]","[3.4, 3]","[5.7,-1]","[5.7,-1]","['6','a']"],"expected":["[2, 1, 2, 1]","[2,7,8,2]","[6,2,6,6]","[6,2,7,6]","[3.4d, 3.0d, 3.4d, 3.0d]","[5.7f, -1.0f, 5.7f, -1.0f]","[5.7f, -1.0f, 5.7f, -1.0f]","['6','a','6','a']"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can use the access device API to read from a tensor. 
[6]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We fetch the required device instance from its interface.","code":["var device = Device.get(deviceType)"]}, + + {"kind":"and","text":"We fetch the array type of the tested data type!","code":["var arrayType = DataType.of(type).dataArrayType()"]}, + + {"kind":"and","text":"A tensor filled with 4 values which we are going to store on the previously fetched device.","code":["var t = Tensor.of(type).withShape(4).andFill(fill).to(device)"]}, + + {"kind":"and","text":"A slice from the above tensor.","code":["var s = t[1..2]"]}, + + {"kind":"expect","text":"The device reports that both tensors have the same data array size.","code":["device.access(t).dataSize == 4","device.access(s).dataSize == 4"]}, + + {"kind":"and","text":"Reading the underlying tensor data from the device will yield the expected result.","code":["device.access(t).readAll(false) == expected","device.access(s).readAll(false) == expected","device.access(t).readAll(true) == expected","device.access(s).readAll(true) == expected"]}, + + {"kind":"and","text":"This is also true for when reading at a particular index.","code":["device.access(t).readArray(arrayType, 1, 1) == [expected[1]]","device.access(s).readArray(arrayType, 1, 1) == [expected[1]]","device.access(t).readArray(arrayType, 2, 1) == [expected[2]]","device.access(s).readArray(arrayType, 2, 1) == [expected[2]]"]}, + + {"kind":"where","text":"The parameters in the above code can have the following states:","code":{"deviceType":["'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'GPU'","'CPU'"],"type":["Integer","Short","Long","Byte","Double","Float","Float","Character"],"fill":["[2, 1]","[2,7,8]","[6,2,6]","[6,2,7]","[3.4, 3]","[5.7,-1]","[5.7,-1]","['6','a']"],"expected":["[2, 1, 2, 1]","[2,7,8,2]","[6,2,6,6]","[6,2,7,6]","[3.4d, 3.0d, 3.4d, 3.0d]","[5.7f, -1.0f, 5.7f, -1.0f]","[5.7f, -1.0f, 5.7f, -1.0f]","['6','a','6','a']"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can use the access device API to read from a tensor. 
[7]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We fetch the required device instance from its interface.","code":["var device = Device.get(deviceType)"]}, + + {"kind":"and","text":"We fetch the array type of the tested data type!","code":["var arrayType = DataType.of(type).dataArrayType()"]}, + + {"kind":"and","text":"A tensor filled with 4 values which we are going to store on the previously fetched device.","code":["var t = Tensor.of(type).withShape(4).andFill(fill).to(device)"]}, + + {"kind":"and","text":"A slice from the above tensor.","code":["var s = t[1..2]"]}, + + {"kind":"expect","text":"The device reports that both tensors have the same data array size.","code":["device.access(t).dataSize == 4","device.access(s).dataSize == 4"]}, + + {"kind":"and","text":"Reading the underlying tensor data from the device will yield the expected result.","code":["device.access(t).readAll(false) == expected","device.access(s).readAll(false) == expected","device.access(t).readAll(true) == expected","device.access(s).readAll(true) == expected"]}, + + {"kind":"and","text":"This is also true for when reading at a particular index.","code":["device.access(t).readArray(arrayType, 1, 1) == [expected[1]]","device.access(s).readArray(arrayType, 1, 1) == [expected[1]]","device.access(t).readArray(arrayType, 2, 1) == [expected[2]]","device.access(s).readArray(arrayType, 2, 1) == [expected[2]]"]}, + + {"kind":"where","text":"The parameters in the above code can have the following states:","code":{"deviceType":["'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'GPU'","'CPU'"],"type":["Integer","Short","Long","Byte","Double","Float","Float","Character"],"fill":["[2, 1]","[2,7,8]","[6,2,6]","[6,2,7]","[3.4, 3]","[5.7,-1]","[5.7,-1]","['6','a']"],"expected":["[2, 1, 2, 1]","[2,7,8,2]","[6,2,6,6]","[6,2,7,6]","[3.4d, 3.0d, 3.4d, 3.0d]","[5.7f, -1.0f, 5.7f, -1.0f]","[5.7f, -1.0f, 5.7f, -1.0f]","['6','a','6','a']"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can use the access device API to write to a tensor [0]", + "result":"PASS", + "duration":"0.008 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We fetch the required device instance from its interface.","code":["var device = Device.get(deviceType)"]}, + + {"kind":"and","text":"We fetch the array type of the tested data type!","code":["var arrayType = DataType.of(type).dataArrayType()"]}, + + {"kind":"and","text":"A tensor filled with 4 values which we are going to store on the previously fetched device.","code":["var t = Tensor.of(type).withShape(4).andFill(fill).to(device)"]}, + + {"kind":"and","text":"A slice from the above tensor.","code":["var s = t[1..2]"]}, + + {"kind":"when","text":"We write some test data into different ranges within the 2 tensors.","code":["device.access(t).writeFrom(write, 0).intoRange(0,2)","device.access(s).writeFrom(write, 0).intoRange(2,4)"]}, + + {"kind":"then","text":"Reading the previously written data should yield the expected result.","code":["device.access(t).readArray(arrayType, 0, 2) == [expected[0],expected[1]]","device.access(s).readArray(arrayType, 2, 2) == [expected[2],expected[3]]"]}, + + {"kind":"where","text":"The parameters in the above code can have the following states:","code":{"deviceType":["'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'GPU'"],"type":["Integer","Short","Long","Byte","Double","Float","Float"],"fill":["[2, 
1]","[2,7,8]","[6,2,6]","[6,2,7]","[3.4, 3]","[5.7,-1]","[5.7,-1]"],"write":["[5]","[7]","[1]","[8]","[3]","[4]","[8]"],"expected":["[5, 1, 5, 1]","[7,7,7,2]","[1,2,1,6]","[8,2,8,6]","[3d, 3.0d, 3d, 3.0d]","[4f, -1.0f, 4f, -1.0f]","[8f, -1.0f, 8f, -1.0f]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can use the access device API to write to a tensor [1]", + "result":"PASS", + "duration":"0.002 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We fetch the required device instance from its interface.","code":["var device = Device.get(deviceType)"]}, + + {"kind":"and","text":"We fetch the array type of the tested data type!","code":["var arrayType = DataType.of(type).dataArrayType()"]}, + + {"kind":"and","text":"A tensor filled with 4 values which we are going to store on the previously fetched device.","code":["var t = Tensor.of(type).withShape(4).andFill(fill).to(device)"]}, + + {"kind":"and","text":"A slice from the above tensor.","code":["var s = t[1..2]"]}, + + {"kind":"when","text":"We write some test data into different ranges within the 2 tensors.","code":["device.access(t).writeFrom(write, 0).intoRange(0,2)","device.access(s).writeFrom(write, 0).intoRange(2,4)"]}, + + {"kind":"then","text":"Reading the previously written data should yield the expected result.","code":["device.access(t).readArray(arrayType, 0, 2) == [expected[0],expected[1]]","device.access(s).readArray(arrayType, 2, 2) == [expected[2],expected[3]]"]}, + + {"kind":"where","text":"The parameters in the above code can have the following states:","code":{"deviceType":["'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'GPU'"],"type":["Integer","Short","Long","Byte","Double","Float","Float"],"fill":["[2, 1]","[2,7,8]","[6,2,6]","[6,2,7]","[3.4, 3]","[5.7,-1]","[5.7,-1]"],"write":["[5]","[7]","[1]","[8]","[3]","[4]","[8]"],"expected":["[5, 1, 5, 1]","[7,7,7,2]","[1,2,1,6]","[8,2,8,6]","[3d, 3.0d, 3d, 3.0d]","[4f, -1.0f, 4f, -1.0f]","[8f, -1.0f, 8f, -1.0f]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can use the access device API to write to a tensor [2]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We fetch the required device instance from its interface.","code":["var device = Device.get(deviceType)"]}, + + {"kind":"and","text":"We fetch the array type of the tested data type!","code":["var arrayType = DataType.of(type).dataArrayType()"]}, + + {"kind":"and","text":"A tensor filled with 4 values which we are going to store on the previously fetched device.","code":["var t = Tensor.of(type).withShape(4).andFill(fill).to(device)"]}, + + {"kind":"and","text":"A slice from the above tensor.","code":["var s = t[1..2]"]}, + + {"kind":"when","text":"We write some test data into different ranges within the 2 tensors.","code":["device.access(t).writeFrom(write, 0).intoRange(0,2)","device.access(s).writeFrom(write, 0).intoRange(2,4)"]}, + + {"kind":"then","text":"Reading the previously written data should yield the expected result.","code":["device.access(t).readArray(arrayType, 0, 2) == [expected[0],expected[1]]","device.access(s).readArray(arrayType, 2, 2) == [expected[2],expected[3]]"]}, + + {"kind":"where","text":"The parameters in the above code can have the following 
states:","code":{"deviceType":["'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'GPU'"],"type":["Integer","Short","Long","Byte","Double","Float","Float"],"fill":["[2, 1]","[2,7,8]","[6,2,6]","[6,2,7]","[3.4, 3]","[5.7,-1]","[5.7,-1]"],"write":["[5]","[7]","[1]","[8]","[3]","[4]","[8]"],"expected":["[5, 1, 5, 1]","[7,7,7,2]","[1,2,1,6]","[8,2,8,6]","[3d, 3.0d, 3d, 3.0d]","[4f, -1.0f, 4f, -1.0f]","[8f, -1.0f, 8f, -1.0f]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can use the access device API to write to a tensor [3]", + "result":"PASS", + "duration":"0.002 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We fetch the required device instance from its interface.","code":["var device = Device.get(deviceType)"]}, + + {"kind":"and","text":"We fetch the array type of the tested data type!","code":["var arrayType = DataType.of(type).dataArrayType()"]}, + + {"kind":"and","text":"A tensor filled with 4 values which we are going to store on the previously fetched device.","code":["var t = Tensor.of(type).withShape(4).andFill(fill).to(device)"]}, + + {"kind":"and","text":"A slice from the above tensor.","code":["var s = t[1..2]"]}, + + {"kind":"when","text":"We write some test data into different ranges within the 2 tensors.","code":["device.access(t).writeFrom(write, 0).intoRange(0,2)","device.access(s).writeFrom(write, 0).intoRange(2,4)"]}, + + {"kind":"then","text":"Reading the previously written data should yield the expected result.","code":["device.access(t).readArray(arrayType, 0, 2) == [expected[0],expected[1]]","device.access(s).readArray(arrayType, 2, 2) == [expected[2],expected[3]]"]}, + + {"kind":"where","text":"The parameters in the above code can have the following states:","code":{"deviceType":["'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'GPU'"],"type":["Integer","Short","Long","Byte","Double","Float","Float"],"fill":["[2, 1]","[2,7,8]","[6,2,6]","[6,2,7]","[3.4, 3]","[5.7,-1]","[5.7,-1]"],"write":["[5]","[7]","[1]","[8]","[3]","[4]","[8]"],"expected":["[5, 1, 5, 1]","[7,7,7,2]","[1,2,1,6]","[8,2,8,6]","[3d, 3.0d, 3d, 3.0d]","[4f, -1.0f, 4f, -1.0f]","[8f, -1.0f, 8f, -1.0f]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can use the access device API to write to a tensor [4]", + "result":"PASS", + "duration":"0.004 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We fetch the required device instance from its interface.","code":["var device = Device.get(deviceType)"]}, + + {"kind":"and","text":"We fetch the array type of the tested data type!","code":["var arrayType = DataType.of(type).dataArrayType()"]}, + + {"kind":"and","text":"A tensor filled with 4 values which we are going to store on the previously fetched device.","code":["var t = Tensor.of(type).withShape(4).andFill(fill).to(device)"]}, + + {"kind":"and","text":"A slice from the above tensor.","code":["var s = t[1..2]"]}, + + {"kind":"when","text":"We write some test data into different ranges within the 2 tensors.","code":["device.access(t).writeFrom(write, 0).intoRange(0,2)","device.access(s).writeFrom(write, 0).intoRange(2,4)"]}, + + {"kind":"then","text":"Reading the previously written data should yield the expected result.","code":["device.access(t).readArray(arrayType, 0, 2) == [expected[0],expected[1]]","device.access(s).readArray(arrayType, 2, 2) == [expected[2],expected[3]]"]}, + + {"kind":"where","text":"The parameters in the above code can 
have the following states:","code":{"deviceType":["'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'GPU'"],"type":["Integer","Short","Long","Byte","Double","Float","Float"],"fill":["[2, 1]","[2,7,8]","[6,2,6]","[6,2,7]","[3.4, 3]","[5.7,-1]","[5.7,-1]"],"write":["[5]","[7]","[1]","[8]","[3]","[4]","[8]"],"expected":["[5, 1, 5, 1]","[7,7,7,2]","[1,2,1,6]","[8,2,8,6]","[3d, 3.0d, 3d, 3.0d]","[4f, -1.0f, 4f, -1.0f]","[8f, -1.0f, 8f, -1.0f]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can use the access device API to write to a tensor [5]", "result":"PASS", "duration":"0.001 seconds", "iterations":{ @@ -63,6 +377,31 @@ {"kind":"where","text":"The parameters in the above code can have the following states:","code":{"deviceType":["'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'GPU'"],"type":["Integer","Short","Long","Byte","Double","Float","Float"],"fill":["[2, 1]","[2,7,8]","[6,2,6]","[6,2,7]","[3.4, 3]","[5.7,-1]","[5.7,-1]"],"write":["[5]","[7]","[1]","[8]","[3]","[4]","[8]"],"expected":["[5, 1, 5, 1]","[7,7,7,2]","[1,2,1,6]","[8,2,8,6]","[3d, 3.0d, 3d, 3.0d]","[4f, -1.0f, 4f, -1.0f]","[8f, -1.0f, 8f, -1.0f]"]}} ], "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can use the access device API to write to a tensor [6]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We fetch the required device instance from its interface.","code":["var device = Device.get(deviceType)"]}, + + {"kind":"and","text":"We fetch the array type of the tested data type!","code":["var arrayType = DataType.of(type).dataArrayType()"]}, + + {"kind":"and","text":"A tensor filled with 4 values which we are going to store on the previously fetched device.","code":["var t = Tensor.of(type).withShape(4).andFill(fill).to(device)"]}, + + {"kind":"and","text":"A slice from the above tensor.","code":["var s = t[1..2]"]}, + + {"kind":"when","text":"We write some test data into different ranges within the 2 tensors.","code":["device.access(t).writeFrom(write, 0).intoRange(0,2)","device.access(s).writeFrom(write, 0).intoRange(2,4)"]}, + + {"kind":"then","text":"Reading the previously written data should yield the expected result.","code":["device.access(t).readArray(arrayType, 0, 2) == [expected[0],expected[1]]","device.access(s).readArray(arrayType, 2, 2) == [expected[2],expected[3]]"]}, + + {"kind":"where","text":"The parameters in the above code can have the following states:","code":{"deviceType":["'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'GPU'"],"type":["Integer","Short","Long","Byte","Double","Float","Float"],"fill":["[2, 1]","[2,7,8]","[6,2,6]","[6,2,7]","[3.4, 3]","[5.7,-1]","[5.7,-1]"],"write":["[5]","[7]","[1]","[8]","[3]","[4]","[8]"],"expected":["[5, 1, 5, 1]","[7,7,7,2]","[1,2,1,6]","[8,2,8,6]","[3d, 3.0d, 3d, 3.0d]","[4f, -1.0f, 4f, -1.0f]","[8f, -1.0f, 8f, -1.0f]"]}} + ], + "problems":{"dataValues":[], "errors":[]} } ], diff --git a/docs/spock/reports/ut.device.Cross_Device_Type_Spec.json b/docs/spock/reports/ut.device.Cross_Device_Type_Spec.json index b42332870..0f7cbcc4d 100644 --- a/docs/spock/reports/ut.device.Cross_Device_Type_Spec.json +++ b/docs/spock/reports/ut.device.Cross_Device_Type_Spec.json @@ -1,24 +1,24 @@ { "className":"ut.device.Cross_Device_Type_Spec", "title":"Finding Device Types", - "narrative":"Neureka introduces a the concept of a `Device` which is an interface\n that represents a computational device used for executing tensor / nd-array operations on 
them.\n The `Device` interface is implemented by various classes which represent\n different types of accelerator hardware such as `CPUs`, `GPUs`, `TPUs`, `FPGAs`, etc.\n These various `Device` types can not be instantiated directly because they model\n the concrete and finite hardware that is available on any given system Neureka is running on.\n This means that they are usually instantiated lazily upon access request or\n upfront by the library backend (usually a backend extension built fo a specific device).\n In order to find these instances embedded in the library backend the `Device` interface\n exposes various static methods which can be used to find a device instance by name or type.", + "narrative":"Neureka introduces a the concept of a `Device` which is an interface\n that represents a computational device used for executing tensor / nd-array operations on them.\n The `Device` interface is implemented by various classes which represent\n different types of accelerator hardware such as `CPUs`, `GPUs`, `TPUs`, `FPGAs`, etc.\n These various `Device` types can not be instantiated directly because they model \n the concrete and finite hardware that is available on any given system Neureka is running on.\n This means that they are usually instantiated lazily upon access request or \n upfront by the library backend (usually a backend extension built fo a specific device).\n In order to find these instances embedded in the library backend the `Device` interface\n exposes various static methods which can be used to find a device instance by name or type.", "subjects":["neureka.devices.Device"], "statistics":{ - "runs":"10", + "runs":"53", "successRate":"100.0%", "failures":"0", "errors":"0", "skipped":"1", - "duration":"0.834 seconds" + "duration":"6.152 seconds" }, "headers":["\n Specified below is the behaviour of the factory methods in the\n Device interface as well as its various implementations \n which should adhere to a certain set of common behaviours.\n "],"tags":{},"see":[], "features":[ { - "id":"We can find Device implementations or null by passing search keys to the \"get\" method.", + "id":"We can find Device implementations or null by passing search keys to the \"get\" method. 
[0]", "result":"PASS", - "duration":"0.012 seconds", + "duration":"0.004 seconds", "iterations":{ - "tags":{},"see":[],"extraInfo":["\n Note that the examples specified in the table below\n may not be the same on every system because the devices\n returned by the \"get\" method are dependent on the current \n system and the available hardware.\n ","\n Note that the examples specified in the table below\n may not be the same on every system because the devices\n returned by the \"get\" method are dependent on the current \n system and the available hardware.\n ","\n Note that the examples specified in the table below\n may not be the same on every system because the devices\n returned by the \"get\" method are dependent on the current \n system and the available hardware.\n ","\n Note that the examples specified in the table below\n may not be the same on every system because the devices\n returned by the \"get\" method are dependent on the current \n system and the available hardware.\n ","\n Note that the examples specified in the table below\n may not be the same on every system because the devices\n returned by the \"get\" method are dependent on the current \n system and the available hardware.\n ","\n Note that the examples specified in the table below\n may not be the same on every system because the devices\n returned by the \"get\" method are dependent on the current \n system and the available hardware.\n ","\n Note that the examples specified in the table below\n may not be the same on every system because the devices\n returned by the \"get\" method are dependent on the current \n system and the available hardware.\n ","\n Note that the examples specified in the table below\n may not be the same on every system because the devices\n returned by the \"get\" method are dependent on the current \n system and the available hardware.\n ","\n Note that the examples specified in the table below\n may not be the same on every system because the devices\n returned by the \"get\" method are dependent on the current \n system and the available hardware.\n ","\n Note that the examples specified in the table below\n may not be the same on every system because the devices\n returned by the \"get\" method are dependent on the current \n system and the available hardware.\n ","\n Note that the examples specified in the table below\n may not be the same on every system because the devices\n returned by the \"get\" method are dependent on the current \n system and the available hardware.\n ","\n Note that the examples specified in the table below\n may not be the same on every system because the devices\n returned by the \"get\" method are dependent on the current \n system and the available hardware.\n "] + "tags":{},"see":[],"extraInfo":["\n Note that the examples specified in the table below\n may not be the same on every system because the devices\n returned by the \"get\" method are dependent on the current \n system and the available hardware.\n "] }, "blocks":[ {"kind":"when","text":"We pass a query key word to the \"get\" method...","code":["var device = Device.get(query)"]}, @@ -33,64 +33,857 @@ }, { - "id":"We can query the backend for devices by specifying both the requested type and a key word.", + "id":"We can find Device implementations or null by passing search keys to the \"get\" method. 
[1]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":["\n Note that the examples specified in the table below\n may not be the same on every system because the devices\n returned by the \"get\" method are dependent on the current \n system and the available hardware.\n "] + }, + "blocks":[ + {"kind":"when","text":"We pass a query key word to the \"get\" method...","code":["var device = Device.get(query)"]}, + + {"kind":"then","text":"...the result should be a non-null device (if our query key matches something).","code":["device != null"]}, + + {"kind":"and","text":"The resulting Device variable has the expected type (CPU, OpenCLDevice, ...).","code":["device.class == type"]}, + + {"kind":"where","text":"The query key words and the expected device types returned.","code":{"query":["\"cPu\"","\"jVm\"","\"natiVe\"","\"Threaded\"","\"first CPU\"","\"openCl\"","\"first gpu\"","\"nvidia or amd or intel\"","\"rocm or cuda or amd or nvidia\"","\"nvidia amd or intel\"","\"cudarocm gpu\"","\"first\""],"type":["CPU","CPU","CPU","CPU","CPU","OpenCLDevice","OpenCLDevice","OpenCLDevice","OpenCLDevice","OpenCLDevice","OpenCLDevice","OpenCLDevice"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can find Device implementations or null by passing search keys to the \"get\" method. [2]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":["\n Note that the examples specified in the table below\n may not be the same on every system because the devices\n returned by the \"get\" method are dependent on the current \n system and the available hardware.\n "] + }, + "blocks":[ + {"kind":"when","text":"We pass a query key word to the \"get\" method...","code":["var device = Device.get(query)"]}, + + {"kind":"then","text":"...the result should be a non-null device (if our query key matches something).","code":["device != null"]}, + + {"kind":"and","text":"The resulting Device variable has the expected type (CPU, OpenCLDevice, ...).","code":["device.class == type"]}, + + {"kind":"where","text":"The query key words and the expected device types returned.","code":{"query":["\"cPu\"","\"jVm\"","\"natiVe\"","\"Threaded\"","\"first CPU\"","\"openCl\"","\"first gpu\"","\"nvidia or amd or intel\"","\"rocm or cuda or amd or nvidia\"","\"nvidia amd or intel\"","\"cudarocm gpu\"","\"first\""],"type":["CPU","CPU","CPU","CPU","CPU","OpenCLDevice","OpenCLDevice","OpenCLDevice","OpenCLDevice","OpenCLDevice","OpenCLDevice","OpenCLDevice"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can find Device implementations or null by passing search keys to the \"get\" method. 
[3]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":["\n Note that the examples specified in the table below\n may not be the same on every system because the devices\n returned by the \"get\" method are dependent on the current \n system and the available hardware.\n "] + }, + "blocks":[ + {"kind":"when","text":"We pass a query key word to the \"get\" method...","code":["var device = Device.get(query)"]}, + + {"kind":"then","text":"...the result should be a non-null device (if our query key matches something).","code":["device != null"]}, + + {"kind":"and","text":"The resulting Device variable has the expected type (CPU, OpenCLDevice, ...).","code":["device.class == type"]}, + + {"kind":"where","text":"The query key words and the expected device types returned.","code":{"query":["\"cPu\"","\"jVm\"","\"natiVe\"","\"Threaded\"","\"first CPU\"","\"openCl\"","\"first gpu\"","\"nvidia or amd or intel\"","\"rocm or cuda or amd or nvidia\"","\"nvidia amd or intel\"","\"cudarocm gpu\"","\"first\""],"type":["CPU","CPU","CPU","CPU","CPU","OpenCLDevice","OpenCLDevice","OpenCLDevice","OpenCLDevice","OpenCLDevice","OpenCLDevice","OpenCLDevice"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can find Device implementations or null by passing search keys to the \"get\" method. [4]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":["\n Note that the examples specified in the table below\n may not be the same on every system because the devices\n returned by the \"get\" method are dependent on the current \n system and the available hardware.\n "] + }, + "blocks":[ + {"kind":"when","text":"We pass a query key word to the \"get\" method...","code":["var device = Device.get(query)"]}, + + {"kind":"then","text":"...the result should be a non-null device (if our query key matches something).","code":["device != null"]}, + + {"kind":"and","text":"The resulting Device variable has the expected type (CPU, OpenCLDevice, ...).","code":["device.class == type"]}, + + {"kind":"where","text":"The query key words and the expected device types returned.","code":{"query":["\"cPu\"","\"jVm\"","\"natiVe\"","\"Threaded\"","\"first CPU\"","\"openCl\"","\"first gpu\"","\"nvidia or amd or intel\"","\"rocm or cuda or amd or nvidia\"","\"nvidia amd or intel\"","\"cudarocm gpu\"","\"first\""],"type":["CPU","CPU","CPU","CPU","CPU","OpenCLDevice","OpenCLDevice","OpenCLDevice","OpenCLDevice","OpenCLDevice","OpenCLDevice","OpenCLDevice"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can find Device implementations or null by passing search keys to the \"get\" method. 
[5]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"when","text":"We pass a query key word to the \"get\" method...","code":["var device = Device.get(query)"]}, + + {"kind":"then","text":"...the result should be a non-null device (if our query key matches something).","code":["device != null"]}, + + {"kind":"and","text":"The resulting Device variable has the expected type (CPU, OpenCLDevice, ...).","code":["device.class == type"]}, + + {"kind":"where","text":"The query key words and the expected device types returned.","code":{"query":["\"cPu\"","\"jVm\"","\"natiVe\"","\"Threaded\"","\"first CPU\"","\"openCl\"","\"first gpu\"","\"nvidia or amd or intel\"","\"rocm or cuda or amd or nvidia\"","\"nvidia amd or intel\"","\"cudarocm gpu\"","\"first\""],"type":["CPU","CPU","CPU","CPU","CPU","OpenCLDevice","OpenCLDevice","OpenCLDevice","OpenCLDevice","OpenCLDevice","OpenCLDevice","OpenCLDevice"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can find Device implementations or null by passing search keys to the \"get\" method. [6]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"when","text":"We pass a query key word to the \"get\" method...","code":["var device = Device.get(query)"]}, + + {"kind":"then","text":"...the result should be a non-null device (if our query key matches something).","code":["device != null"]}, + + {"kind":"and","text":"The resulting Device variable has the expected type (CPU, OpenCLDevice, ...).","code":["device.class == type"]}, + + {"kind":"where","text":"The query key words and the expected device types returned.","code":{"query":["\"cPu\"","\"jVm\"","\"natiVe\"","\"Threaded\"","\"first CPU\"","\"openCl\"","\"first gpu\"","\"nvidia or amd or intel\"","\"rocm or cuda or amd or nvidia\"","\"nvidia amd or intel\"","\"cudarocm gpu\"","\"first\""],"type":["CPU","CPU","CPU","CPU","CPU","OpenCLDevice","OpenCLDevice","OpenCLDevice","OpenCLDevice","OpenCLDevice","OpenCLDevice","OpenCLDevice"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can find Device implementations or null by passing search keys to the \"get\" method. [7]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"when","text":"We pass a query key word to the \"get\" method...","code":["var device = Device.get(query)"]}, + + {"kind":"then","text":"...the result should be a non-null device (if our query key matches something).","code":["device != null"]}, + + {"kind":"and","text":"The resulting Device variable has the expected type (CPU, OpenCLDevice, ...).","code":["device.class == type"]}, + + {"kind":"where","text":"The query key words and the expected device types returned.","code":{"query":["\"cPu\"","\"jVm\"","\"natiVe\"","\"Threaded\"","\"first CPU\"","\"openCl\"","\"first gpu\"","\"nvidia or amd or intel\"","\"rocm or cuda or amd or nvidia\"","\"nvidia amd or intel\"","\"cudarocm gpu\"","\"first\""],"type":["CPU","CPU","CPU","CPU","CPU","OpenCLDevice","OpenCLDevice","OpenCLDevice","OpenCLDevice","OpenCLDevice","OpenCLDevice","OpenCLDevice"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can find Device implementations or null by passing search keys to the \"get\" method. 
[8]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"when","text":"We pass a query key word to the \"get\" method...","code":["var device = Device.get(query)"]}, + + {"kind":"then","text":"...the result should be a non-null device (if our query key matches something).","code":["device != null"]}, + + {"kind":"and","text":"The resulting Device variable has the expected type (CPU, OpenCLDevice, ...).","code":["device.class == type"]}, + + {"kind":"where","text":"The query key words and the expected device types returned.","code":{"query":["\"cPu\"","\"jVm\"","\"natiVe\"","\"Threaded\"","\"first CPU\"","\"openCl\"","\"first gpu\"","\"nvidia or amd or intel\"","\"rocm or cuda or amd or nvidia\"","\"nvidia amd or intel\"","\"cudarocm gpu\"","\"first\""],"type":["CPU","CPU","CPU","CPU","CPU","OpenCLDevice","OpenCLDevice","OpenCLDevice","OpenCLDevice","OpenCLDevice","OpenCLDevice","OpenCLDevice"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can find Device implementations or null by passing search keys to the \"get\" method. [9]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"when","text":"We pass a query key word to the \"get\" method...","code":["var device = Device.get(query)"]}, + + {"kind":"then","text":"...the result should be a non-null device (if our query key matches something).","code":["device != null"]}, + + {"kind":"and","text":"The resulting Device variable has the expected type (CPU, OpenCLDevice, ...).","code":["device.class == type"]}, + + {"kind":"where","text":"The query key words and the expected device types returned.","code":{"query":["\"cPu\"","\"jVm\"","\"natiVe\"","\"Threaded\"","\"first CPU\"","\"openCl\"","\"first gpu\"","\"nvidia or amd or intel\"","\"rocm or cuda or amd or nvidia\"","\"nvidia amd or intel\"","\"cudarocm gpu\"","\"first\""],"type":["CPU","CPU","CPU","CPU","CPU","OpenCLDevice","OpenCLDevice","OpenCLDevice","OpenCLDevice","OpenCLDevice","OpenCLDevice","OpenCLDevice"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can find Device implementations or null by passing search keys to the \"get\" method. [10]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"when","text":"We pass a query key word to the \"get\" method...","code":["var device = Device.get(query)"]}, + + {"kind":"then","text":"...the result should be a non-null device (if our query key matches something).","code":["device != null"]}, + + {"kind":"and","text":"The resulting Device variable has the expected type (CPU, OpenCLDevice, ...).","code":["device.class == type"]}, + + {"kind":"where","text":"The query key words and the expected device types returned.","code":{"query":["\"cPu\"","\"jVm\"","\"natiVe\"","\"Threaded\"","\"first CPU\"","\"openCl\"","\"first gpu\"","\"nvidia or amd or intel\"","\"rocm or cuda or amd or nvidia\"","\"nvidia amd or intel\"","\"cudarocm gpu\"","\"first\""],"type":["CPU","CPU","CPU","CPU","CPU","OpenCLDevice","OpenCLDevice","OpenCLDevice","OpenCLDevice","OpenCLDevice","OpenCLDevice","OpenCLDevice"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can find Device implementations or null by passing search keys to the \"get\" method. 
[11]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"when","text":"We pass a query key word to the \"get\" method...","code":["var device = Device.get(query)"]}, + + {"kind":"then","text":"...the result should be a non-null device (if our query key matches something).","code":["device != null"]}, + + {"kind":"and","text":"The resulting Device variable has the expected type (CPU, OpenCLDevice, ...).","code":["device.class == type"]}, + + {"kind":"where","text":"The query key words and the expected device types returned.","code":{"query":["\"cPu\"","\"jVm\"","\"natiVe\"","\"Threaded\"","\"first CPU\"","\"openCl\"","\"first gpu\"","\"nvidia or amd or intel\"","\"rocm or cuda or amd or nvidia\"","\"nvidia amd or intel\"","\"cudarocm gpu\"","\"first\""],"type":["CPU","CPU","CPU","CPU","CPU","OpenCLDevice","OpenCLDevice","OpenCLDevice","OpenCLDevice","OpenCLDevice","OpenCLDevice","OpenCLDevice"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can query the backend for devices by specifying both the requested type and a key word. [0]", + "result":"PASS", + "duration":"0.002 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":["\n Note that the examples specified in the table below\n may not be the same on every system because the devices\n returned by the \"get\" and \"find\" methods are dependent on the current \n system and the available hardware that Neureka encounters.\n "] + }, + "blocks":[ + {"kind":"expect","text":"Querying for a device using a device type and key works as expected.","code":["Device.get(type, key) === expected"]}, + + {"kind":"and","text":"We can use the \"find\" method if we want the result to be wrapped in a nice and safe Optional instance.","code":["!Device.find(type, key).isPresent() && expected == null || Device.find(type, key).get() === expected"]}, + + {"kind":"where","text":"The we can use the following device type, key and expected device instance.","code":{"type":["Device","Device","Device","DummyDevice","DummyDevice","DummyDevice","DummyDevice"],"key":["'cpu'","'jvm'","'processor'","'first'","'any'","'cpu'","'gpu'"],"expected":["CPU.get()","CPU.get()","CPU.get()","null","null","null","null"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can query the backend for devices by specifying both the requested type and a key word. 
[1]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":["\n Note that the examples specified in the table below\n may not be the same on every system because the devices\n returned by the \"get\" and \"find\" methods are dependent on the current \n system and the available hardware that Neureka encounters.\n "] + }, + "blocks":[ + {"kind":"expect","text":"Querying for a device using a device type and key works as expected.","code":["Device.get(type, key) === expected"]}, + + {"kind":"and","text":"We can use the \"find\" method if we want the result to be wrapped in a nice and safe Optional instance.","code":["!Device.find(type, key).isPresent() && expected == null || Device.find(type, key).get() === expected"]}, + + {"kind":"where","text":"The we can use the following device type, key and expected device instance.","code":{"type":["Device","Device","Device","DummyDevice","DummyDevice","DummyDevice","DummyDevice"],"key":["'cpu'","'jvm'","'processor'","'first'","'any'","'cpu'","'gpu'"],"expected":["CPU.get()","CPU.get()","CPU.get()","null","null","null","null"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can query the backend for devices by specifying both the requested type and a key word. [2]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":["\n Note that the examples specified in the table below\n may not be the same on every system because the devices\n returned by the \"get\" and \"find\" methods are dependent on the current \n system and the available hardware that Neureka encounters.\n "] + }, + "blocks":[ + {"kind":"expect","text":"Querying for a device using a device type and key works as expected.","code":["Device.get(type, key) === expected"]}, + + {"kind":"and","text":"We can use the \"find\" method if we want the result to be wrapped in a nice and safe Optional instance.","code":["!Device.find(type, key).isPresent() && expected == null || Device.find(type, key).get() === expected"]}, + + {"kind":"where","text":"The we can use the following device type, key and expected device instance.","code":{"type":["Device","Device","Device","DummyDevice","DummyDevice","DummyDevice","DummyDevice"],"key":["'cpu'","'jvm'","'processor'","'first'","'any'","'cpu'","'gpu'"],"expected":["CPU.get()","CPU.get()","CPU.get()","null","null","null","null"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can query the backend for devices by specifying both the requested type and a key word. 
[3]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":["\n Note that the examples specified in the table below\n may not be the same on every system because the devices\n returned by the \"get\" and \"find\" methods are dependent on the current \n system and the available hardware that Neureka encounters.\n "] + }, + "blocks":[ + {"kind":"expect","text":"Querying for a device using a device type and key works as expected.","code":["Device.get(type, key) === expected"]}, + + {"kind":"and","text":"We can use the \"find\" method if we want the result to be wrapped in a nice and safe Optional instance.","code":["!Device.find(type, key).isPresent() && expected == null || Device.find(type, key).get() === expected"]}, + + {"kind":"where","text":"The we can use the following device type, key and expected device instance.","code":{"type":["Device","Device","Device","DummyDevice","DummyDevice","DummyDevice","DummyDevice"],"key":["'cpu'","'jvm'","'processor'","'first'","'any'","'cpu'","'gpu'"],"expected":["CPU.get()","CPU.get()","CPU.get()","null","null","null","null"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can query the backend for devices by specifying both the requested type and a key word. [4]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":["\n Note that the examples specified in the table below\n may not be the same on every system because the devices\n returned by the \"get\" and \"find\" methods are dependent on the current \n system and the available hardware that Neureka encounters.\n "] + }, + "blocks":[ + {"kind":"expect","text":"Querying for a device using a device type and key works as expected.","code":["Device.get(type, key) === expected"]}, + + {"kind":"and","text":"We can use the \"find\" method if we want the result to be wrapped in a nice and safe Optional instance.","code":["!Device.find(type, key).isPresent() && expected == null || Device.find(type, key).get() === expected"]}, + + {"kind":"where","text":"The we can use the following device type, key and expected device instance.","code":{"type":["Device","Device","Device","DummyDevice","DummyDevice","DummyDevice","DummyDevice"],"key":["'cpu'","'jvm'","'processor'","'first'","'any'","'cpu'","'gpu'"],"expected":["CPU.get()","CPU.get()","CPU.get()","null","null","null","null"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can query the backend for devices by specifying both the requested type and a key word. 
[5]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":["\n Note that the examples specified in the table below\n may not be the same on every system because the devices\n returned by the \"get\" and \"find\" methods are dependent on the current \n system and the available hardware that Neureka encounters.\n "] + }, + "blocks":[ + {"kind":"expect","text":"Querying for a device using a device type and key works as expected.","code":["Device.get(type, key) === expected"]}, + + {"kind":"and","text":"We can use the \"find\" method if we want the result to be wrapped in a nice and safe Optional instance.","code":["!Device.find(type, key).isPresent() && expected == null || Device.find(type, key).get() === expected"]}, + + {"kind":"where","text":"The we can use the following device type, key and expected device instance.","code":{"type":["Device","Device","Device","DummyDevice","DummyDevice","DummyDevice","DummyDevice"],"key":["'cpu'","'jvm'","'processor'","'first'","'any'","'cpu'","'gpu'"],"expected":["CPU.get()","CPU.get()","CPU.get()","null","null","null","null"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can query the backend for devices by specifying both the requested type and a key word. [6]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":["\n Note that the examples specified in the table below\n may not be the same on every system because the devices\n returned by the \"get\" and \"find\" methods are dependent on the current \n system and the available hardware that Neureka encounters.\n "] + }, + "blocks":[ + {"kind":"expect","text":"Querying for a device using a device type and key works as expected.","code":["Device.get(type, key) === expected"]}, + + {"kind":"and","text":"We can use the \"find\" method if we want the result to be wrapped in a nice and safe Optional instance.","code":["!Device.find(type, key).isPresent() && expected == null || Device.find(type, key).get() === expected"]}, + + {"kind":"where","text":"The we can use the following device type, key and expected device instance.","code":{"type":["Device","Device","Device","DummyDevice","DummyDevice","DummyDevice","DummyDevice"],"key":["'cpu'","'jvm'","'processor'","'first'","'any'","'cpu'","'gpu'"],"expected":["CPU.get()","CPU.get()","CPU.get()","null","null","null","null"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"In total there are 3 different types of methods for finding device instances. 
[0]", + "result":"PASS", + "duration":"0.003 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"The \"get\" method returns a device instance or null.","code":["Device.get(key) === expected"]}, + + {"kind":"and","text":"The \"any\" method returns a device instance or the \"CPU device\", which is the library default device.","code":["Device.any(key) === expected || Device.any(key) === CPU.get()"]}, + + {"kind":"and","text":"The \"find\" method returns a device instance wrapped in an Optional instance.","code":["!Device.find(key).isPresent() && expected == null || Device.find(key).get() === expected"]}, + + {"kind":"where","text":"The we can use the following search key and expected device instance.","code":{"key":["'cpu'","'central processing'","'e9rt56hqfwe5f0'","'5638135dh90978'","'banana device'","'cupcake'"],"expected":["CPU.get()","CPU.get()","null","null","null","null"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"In total there are 3 different types of methods for finding device instances. [1]", + "result":"PASS", + "duration":"0.002 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"The \"get\" method returns a device instance or null.","code":["Device.get(key) === expected"]}, + + {"kind":"and","text":"The \"any\" method returns a device instance or the \"CPU device\", which is the library default device.","code":["Device.any(key) === expected || Device.any(key) === CPU.get()"]}, + + {"kind":"and","text":"The \"find\" method returns a device instance wrapped in an Optional instance.","code":["!Device.find(key).isPresent() && expected == null || Device.find(key).get() === expected"]}, + + {"kind":"where","text":"The we can use the following search key and expected device instance.","code":{"key":["'cpu'","'central processing'","'e9rt56hqfwe5f0'","'5638135dh90978'","'banana device'","'cupcake'"],"expected":["CPU.get()","CPU.get()","null","null","null","null"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"In total there are 3 different types of methods for finding device instances. [2]", + "result":"PASS", + "duration":"0.002 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"The \"get\" method returns a device instance or null.","code":["Device.get(key) === expected"]}, + + {"kind":"and","text":"The \"any\" method returns a device instance or the \"CPU device\", which is the library default device.","code":["Device.any(key) === expected || Device.any(key) === CPU.get()"]}, + + {"kind":"and","text":"The \"find\" method returns a device instance wrapped in an Optional instance.","code":["!Device.find(key).isPresent() && expected == null || Device.find(key).get() === expected"]}, + + {"kind":"where","text":"The we can use the following search key and expected device instance.","code":{"key":["'cpu'","'central processing'","'e9rt56hqfwe5f0'","'5638135dh90978'","'banana device'","'cupcake'"],"expected":["CPU.get()","CPU.get()","null","null","null","null"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"In total there are 3 different types of methods for finding device instances. 
[3]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"The \"get\" method returns a device instance or null.","code":["Device.get(key) === expected"]}, + + {"kind":"and","text":"The \"any\" method returns a device instance or the \"CPU device\", which is the library default device.","code":["Device.any(key) === expected || Device.any(key) === CPU.get()"]}, + + {"kind":"and","text":"The \"find\" method returns a device instance wrapped in an Optional instance.","code":["!Device.find(key).isPresent() && expected == null || Device.find(key).get() === expected"]}, + + {"kind":"where","text":"The we can use the following search key and expected device instance.","code":{"key":["'cpu'","'central processing'","'e9rt56hqfwe5f0'","'5638135dh90978'","'banana device'","'cupcake'"],"expected":["CPU.get()","CPU.get()","null","null","null","null"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"In total there are 3 different types of methods for finding device instances. [4]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"The \"get\" method returns a device instance or null.","code":["Device.get(key) === expected"]}, + + {"kind":"and","text":"The \"any\" method returns a device instance or the \"CPU device\", which is the library default device.","code":["Device.any(key) === expected || Device.any(key) === CPU.get()"]}, + + {"kind":"and","text":"The \"find\" method returns a device instance wrapped in an Optional instance.","code":["!Device.find(key).isPresent() && expected == null || Device.find(key).get() === expected"]}, + + {"kind":"where","text":"The we can use the following search key and expected device instance.","code":{"key":["'cpu'","'central processing'","'e9rt56hqfwe5f0'","'5638135dh90978'","'banana device'","'cupcake'"],"expected":["CPU.get()","CPU.get()","null","null","null","null"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"In total there are 3 different types of methods for finding device instances. [5]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"The \"get\" method returns a device instance or null.","code":["Device.get(key) === expected"]}, + + {"kind":"and","text":"The \"any\" method returns a device instance or the \"CPU device\", which is the library default device.","code":["Device.any(key) === expected || Device.any(key) === CPU.get()"]}, + + {"kind":"and","text":"The \"find\" method returns a device instance wrapped in an Optional instance.","code":["!Device.find(key).isPresent() && expected == null || Device.find(key).get() === expected"]}, + + {"kind":"where","text":"The we can use the following search key and expected device instance.","code":{"key":["'cpu'","'central processing'","'e9rt56hqfwe5f0'","'5638135dh90978'","'banana device'","'cupcake'"],"expected":["CPU.get()","CPU.get()","null","null","null","null"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Passing a numeric array to a tensor should modify its contents! 
[0]", + "result":"PASS", + "duration":"0.004 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":["\n Here we demonstrate that a tensor located on a non-CPU `Device` will be\n updated when passing a float or double array, even if it its data is stored\n on the GPU or somewhere else!\n "] + }, + "blocks":[ + {"kind":"given","text":"A 2D tensor which we transfer to a device using the \"to\" method...","code":["Tensor t = Tensor.of(Shape.of(3, 2), new double[]{2, 4, -5, 8, 3, -2}).to(device)"]}, + + {"kind":"when","text":"A numeric array is passed to said tensor...","code":["t.mut.setItems(data1)","t.mut.setItems(data2)"]}, + + {"kind":"then","text":"The tensor (as String) contains the expected String.","code":["t.toString().contains(expected)"]}, + + {"kind":"where","text":"The following data is being used :","code":{"device":["Device.get(\"cpu\")","Device.get(\"cpu\")","Device.get(\"cpu\")","Device.get(\"cpu\")","Device.get(\"cpu\")","Device.get(\"openCL\")","Device.get(\"openCL\")","Device.get(\"openCL\")","Device.get(\"openCL\")","Device.get(\"openCL\")"],"data1":["new float[0]","new float[]{2, 3, 4, 5, 6}","new float[]{3, 5, 6}","new double[]{9, 4, 7, -12}","new float[]{22, 24, 35, 80}","new float[0]","new float[]{2, 3, 4, 5, 6}","new float[]{3, 5, 6}","new double[]{9, 4, 7, -12}","new float[]{22, 24, 35, 80}"],"data2":["new float[0]","new float[]{1, 1, 1, 1}","new float[]{4, 2, 3}","new double[]{-5, -2, 1}","new double[]{-1, -1, -1}","new float[0]","new float[]{1, 1, 1, 1, 1}","new float[]{4, 2, 3}","new double[]{-5, -2, 1}","new double[]{-1, -1, -1}"],"expected":["\"(3x2):[2.0, 4.0, -5.0, 8.0, 3.0, -2.0]\"","\"(3x2):[1.0, 1.0, 1.0, 1.0, 6.0, -2.0]\"","\"(3x2):[4.0, 2.0, 3.0, 8.0, 3.0, -2.0]\"","\"(3x2):[-5.0, -2.0, 1.0, -12.0, 3.0, -2.0]\"","\"(3x2):[-1.0, -1.0, -1.0, 80.0, 3.0, -2.0]\"","\"(3x2):[2.0, 4.0, -5.0, 8.0, 3.0, -2.0]\"","\"(3x2):[1.0, 1.0, 1.0, 1.0, 1.0, -2.0]\"","\"(3x2):[4.0, 2.0, 3.0, 8.0, 3.0, -2.0]\"","\"(3x2):[-5.0, -2.0, 1.0, -12.0, 3.0, -2.0]\"","\"(3x2):[-1.0, -1.0, -1.0, 80.0, 3.0, -2.0]\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Passing a numeric array to a tensor should modify its contents! 
[1]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":["\n Here we demonstrate that a tensor located on a non-CPU `Device` will be\n updated when passing a float or double array, even if it its data is stored\n on the GPU or somewhere else!\n "] + }, + "blocks":[ + {"kind":"given","text":"A 2D tensor which we transfer to a device using the \"to\" method...","code":["Tensor t = Tensor.of(Shape.of(3, 2), new double[]{2, 4, -5, 8, 3, -2}).to(device)"]}, + + {"kind":"when","text":"A numeric array is passed to said tensor...","code":["t.mut.setItems(data1)","t.mut.setItems(data2)"]}, + + {"kind":"then","text":"The tensor (as String) contains the expected String.","code":["t.toString().contains(expected)"]}, + + {"kind":"where","text":"The following data is being used :","code":{"device":["Device.get(\"cpu\")","Device.get(\"cpu\")","Device.get(\"cpu\")","Device.get(\"cpu\")","Device.get(\"cpu\")","Device.get(\"openCL\")","Device.get(\"openCL\")","Device.get(\"openCL\")","Device.get(\"openCL\")","Device.get(\"openCL\")"],"data1":["new float[0]","new float[]{2, 3, 4, 5, 6}","new float[]{3, 5, 6}","new double[]{9, 4, 7, -12}","new float[]{22, 24, 35, 80}","new float[0]","new float[]{2, 3, 4, 5, 6}","new float[]{3, 5, 6}","new double[]{9, 4, 7, -12}","new float[]{22, 24, 35, 80}"],"data2":["new float[0]","new float[]{1, 1, 1, 1}","new float[]{4, 2, 3}","new double[]{-5, -2, 1}","new double[]{-1, -1, -1}","new float[0]","new float[]{1, 1, 1, 1, 1}","new float[]{4, 2, 3}","new double[]{-5, -2, 1}","new double[]{-1, -1, -1}"],"expected":["\"(3x2):[2.0, 4.0, -5.0, 8.0, 3.0, -2.0]\"","\"(3x2):[1.0, 1.0, 1.0, 1.0, 6.0, -2.0]\"","\"(3x2):[4.0, 2.0, 3.0, 8.0, 3.0, -2.0]\"","\"(3x2):[-5.0, -2.0, 1.0, -12.0, 3.0, -2.0]\"","\"(3x2):[-1.0, -1.0, -1.0, 80.0, 3.0, -2.0]\"","\"(3x2):[2.0, 4.0, -5.0, 8.0, 3.0, -2.0]\"","\"(3x2):[1.0, 1.0, 1.0, 1.0, 1.0, -2.0]\"","\"(3x2):[4.0, 2.0, 3.0, 8.0, 3.0, -2.0]\"","\"(3x2):[-5.0, -2.0, 1.0, -12.0, 3.0, -2.0]\"","\"(3x2):[-1.0, -1.0, -1.0, 80.0, 3.0, -2.0]\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Passing a numeric array to a tensor should modify its contents! 
[2]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":["\n Here we demonstrate that a tensor located on a non-CPU `Device` will be\n updated when passing a float or double array, even if it its data is stored\n on the GPU or somewhere else!\n "] + }, + "blocks":[ + {"kind":"given","text":"A 2D tensor which we transfer to a device using the \"to\" method...","code":["Tensor t = Tensor.of(Shape.of(3, 2), new double[]{2, 4, -5, 8, 3, -2}).to(device)"]}, + + {"kind":"when","text":"A numeric array is passed to said tensor...","code":["t.mut.setItems(data1)","t.mut.setItems(data2)"]}, + + {"kind":"then","text":"The tensor (as String) contains the expected String.","code":["t.toString().contains(expected)"]}, + + {"kind":"where","text":"The following data is being used :","code":{"device":["Device.get(\"cpu\")","Device.get(\"cpu\")","Device.get(\"cpu\")","Device.get(\"cpu\")","Device.get(\"cpu\")","Device.get(\"openCL\")","Device.get(\"openCL\")","Device.get(\"openCL\")","Device.get(\"openCL\")","Device.get(\"openCL\")"],"data1":["new float[0]","new float[]{2, 3, 4, 5, 6}","new float[]{3, 5, 6}","new double[]{9, 4, 7, -12}","new float[]{22, 24, 35, 80}","new float[0]","new float[]{2, 3, 4, 5, 6}","new float[]{3, 5, 6}","new double[]{9, 4, 7, -12}","new float[]{22, 24, 35, 80}"],"data2":["new float[0]","new float[]{1, 1, 1, 1}","new float[]{4, 2, 3}","new double[]{-5, -2, 1}","new double[]{-1, -1, -1}","new float[0]","new float[]{1, 1, 1, 1, 1}","new float[]{4, 2, 3}","new double[]{-5, -2, 1}","new double[]{-1, -1, -1}"],"expected":["\"(3x2):[2.0, 4.0, -5.0, 8.0, 3.0, -2.0]\"","\"(3x2):[1.0, 1.0, 1.0, 1.0, 6.0, -2.0]\"","\"(3x2):[4.0, 2.0, 3.0, 8.0, 3.0, -2.0]\"","\"(3x2):[-5.0, -2.0, 1.0, -12.0, 3.0, -2.0]\"","\"(3x2):[-1.0, -1.0, -1.0, 80.0, 3.0, -2.0]\"","\"(3x2):[2.0, 4.0, -5.0, 8.0, 3.0, -2.0]\"","\"(3x2):[1.0, 1.0, 1.0, 1.0, 1.0, -2.0]\"","\"(3x2):[4.0, 2.0, 3.0, 8.0, 3.0, -2.0]\"","\"(3x2):[-5.0, -2.0, 1.0, -12.0, 3.0, -2.0]\"","\"(3x2):[-1.0, -1.0, -1.0, 80.0, 3.0, -2.0]\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Passing a numeric array to a tensor should modify its contents! 
[3]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":["\n Here we demonstrate that a tensor located on a non-CPU `Device` will be\n updated when passing a float or double array, even if it its data is stored\n on the GPU or somewhere else!\n "] + }, + "blocks":[ + {"kind":"given","text":"A 2D tensor which we transfer to a device using the \"to\" method...","code":["Tensor t = Tensor.of(Shape.of(3, 2), new double[]{2, 4, -5, 8, 3, -2}).to(device)"]}, + + {"kind":"when","text":"A numeric array is passed to said tensor...","code":["t.mut.setItems(data1)","t.mut.setItems(data2)"]}, + + {"kind":"then","text":"The tensor (as String) contains the expected String.","code":["t.toString().contains(expected)"]}, + + {"kind":"where","text":"The following data is being used :","code":{"device":["Device.get(\"cpu\")","Device.get(\"cpu\")","Device.get(\"cpu\")","Device.get(\"cpu\")","Device.get(\"cpu\")","Device.get(\"openCL\")","Device.get(\"openCL\")","Device.get(\"openCL\")","Device.get(\"openCL\")","Device.get(\"openCL\")"],"data1":["new float[0]","new float[]{2, 3, 4, 5, 6}","new float[]{3, 5, 6}","new double[]{9, 4, 7, -12}","new float[]{22, 24, 35, 80}","new float[0]","new float[]{2, 3, 4, 5, 6}","new float[]{3, 5, 6}","new double[]{9, 4, 7, -12}","new float[]{22, 24, 35, 80}"],"data2":["new float[0]","new float[]{1, 1, 1, 1}","new float[]{4, 2, 3}","new double[]{-5, -2, 1}","new double[]{-1, -1, -1}","new float[0]","new float[]{1, 1, 1, 1, 1}","new float[]{4, 2, 3}","new double[]{-5, -2, 1}","new double[]{-1, -1, -1}"],"expected":["\"(3x2):[2.0, 4.0, -5.0, 8.0, 3.0, -2.0]\"","\"(3x2):[1.0, 1.0, 1.0, 1.0, 6.0, -2.0]\"","\"(3x2):[4.0, 2.0, 3.0, 8.0, 3.0, -2.0]\"","\"(3x2):[-5.0, -2.0, 1.0, -12.0, 3.0, -2.0]\"","\"(3x2):[-1.0, -1.0, -1.0, 80.0, 3.0, -2.0]\"","\"(3x2):[2.0, 4.0, -5.0, 8.0, 3.0, -2.0]\"","\"(3x2):[1.0, 1.0, 1.0, 1.0, 1.0, -2.0]\"","\"(3x2):[4.0, 2.0, 3.0, 8.0, 3.0, -2.0]\"","\"(3x2):[-5.0, -2.0, 1.0, -12.0, 3.0, -2.0]\"","\"(3x2):[-1.0, -1.0, -1.0, 80.0, 3.0, -2.0]\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Passing a numeric array to a tensor should modify its contents! 
[4]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":["\n Here we demonstrate that a tensor located on a non-CPU `Device` will be\n updated when passing a float or double array, even if it its data is stored\n on the GPU or somewhere else!\n "] + }, + "blocks":[ + {"kind":"given","text":"A 2D tensor which we transfer to a device using the \"to\" method...","code":["Tensor t = Tensor.of(Shape.of(3, 2), new double[]{2, 4, -5, 8, 3, -2}).to(device)"]}, + + {"kind":"when","text":"A numeric array is passed to said tensor...","code":["t.mut.setItems(data1)","t.mut.setItems(data2)"]}, + + {"kind":"then","text":"The tensor (as String) contains the expected String.","code":["t.toString().contains(expected)"]}, + + {"kind":"where","text":"The following data is being used :","code":{"device":["Device.get(\"cpu\")","Device.get(\"cpu\")","Device.get(\"cpu\")","Device.get(\"cpu\")","Device.get(\"cpu\")","Device.get(\"openCL\")","Device.get(\"openCL\")","Device.get(\"openCL\")","Device.get(\"openCL\")","Device.get(\"openCL\")"],"data1":["new float[0]","new float[]{2, 3, 4, 5, 6}","new float[]{3, 5, 6}","new double[]{9, 4, 7, -12}","new float[]{22, 24, 35, 80}","new float[0]","new float[]{2, 3, 4, 5, 6}","new float[]{3, 5, 6}","new double[]{9, 4, 7, -12}","new float[]{22, 24, 35, 80}"],"data2":["new float[0]","new float[]{1, 1, 1, 1}","new float[]{4, 2, 3}","new double[]{-5, -2, 1}","new double[]{-1, -1, -1}","new float[0]","new float[]{1, 1, 1, 1, 1}","new float[]{4, 2, 3}","new double[]{-5, -2, 1}","new double[]{-1, -1, -1}"],"expected":["\"(3x2):[2.0, 4.0, -5.0, 8.0, 3.0, -2.0]\"","\"(3x2):[1.0, 1.0, 1.0, 1.0, 6.0, -2.0]\"","\"(3x2):[4.0, 2.0, 3.0, 8.0, 3.0, -2.0]\"","\"(3x2):[-5.0, -2.0, 1.0, -12.0, 3.0, -2.0]\"","\"(3x2):[-1.0, -1.0, -1.0, 80.0, 3.0, -2.0]\"","\"(3x2):[2.0, 4.0, -5.0, 8.0, 3.0, -2.0]\"","\"(3x2):[1.0, 1.0, 1.0, 1.0, 1.0, -2.0]\"","\"(3x2):[4.0, 2.0, 3.0, 8.0, 3.0, -2.0]\"","\"(3x2):[-5.0, -2.0, 1.0, -12.0, 3.0, -2.0]\"","\"(3x2):[-1.0, -1.0, -1.0, 80.0, 3.0, -2.0]\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Passing a numeric array to a tensor should modify its contents! 
[5]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A 2D tensor which we transfer to a device using the \"to\" method...","code":["Tensor t = Tensor.of(Shape.of(3, 2), new double[]{2, 4, -5, 8, 3, -2}).to(device)"]}, + + {"kind":"when","text":"A numeric array is passed to said tensor...","code":["t.mut.setItems(data1)","t.mut.setItems(data2)"]}, + + {"kind":"then","text":"The tensor (as String) contains the expected String.","code":["t.toString().contains(expected)"]}, + + {"kind":"where","text":"The following data is being used :","code":{"device":["Device.get(\"cpu\")","Device.get(\"cpu\")","Device.get(\"cpu\")","Device.get(\"cpu\")","Device.get(\"cpu\")","Device.get(\"openCL\")","Device.get(\"openCL\")","Device.get(\"openCL\")","Device.get(\"openCL\")","Device.get(\"openCL\")"],"data1":["new float[0]","new float[]{2, 3, 4, 5, 6}","new float[]{3, 5, 6}","new double[]{9, 4, 7, -12}","new float[]{22, 24, 35, 80}","new float[0]","new float[]{2, 3, 4, 5, 6}","new float[]{3, 5, 6}","new double[]{9, 4, 7, -12}","new float[]{22, 24, 35, 80}"],"data2":["new float[0]","new float[]{1, 1, 1, 1}","new float[]{4, 2, 3}","new double[]{-5, -2, 1}","new double[]{-1, -1, -1}","new float[0]","new float[]{1, 1, 1, 1, 1}","new float[]{4, 2, 3}","new double[]{-5, -2, 1}","new double[]{-1, -1, -1}"],"expected":["\"(3x2):[2.0, 4.0, -5.0, 8.0, 3.0, -2.0]\"","\"(3x2):[1.0, 1.0, 1.0, 1.0, 6.0, -2.0]\"","\"(3x2):[4.0, 2.0, 3.0, 8.0, 3.0, -2.0]\"","\"(3x2):[-5.0, -2.0, 1.0, -12.0, 3.0, -2.0]\"","\"(3x2):[-1.0, -1.0, -1.0, 80.0, 3.0, -2.0]\"","\"(3x2):[2.0, 4.0, -5.0, 8.0, 3.0, -2.0]\"","\"(3x2):[1.0, 1.0, 1.0, 1.0, 1.0, -2.0]\"","\"(3x2):[4.0, 2.0, 3.0, 8.0, 3.0, -2.0]\"","\"(3x2):[-5.0, -2.0, 1.0, -12.0, 3.0, -2.0]\"","\"(3x2):[-1.0, -1.0, -1.0, 80.0, 3.0, -2.0]\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Passing a numeric array to a tensor should modify its contents! 
[6]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A 2D tensor which we transfer to a device using the \"to\" method...","code":["Tensor t = Tensor.of(Shape.of(3, 2), new double[]{2, 4, -5, 8, 3, -2}).to(device)"]}, + + {"kind":"when","text":"A numeric array is passed to said tensor...","code":["t.mut.setItems(data1)","t.mut.setItems(data2)"]}, + + {"kind":"then","text":"The tensor (as String) contains the expected String.","code":["t.toString().contains(expected)"]}, + + {"kind":"where","text":"The following data is being used :","code":{"device":["Device.get(\"cpu\")","Device.get(\"cpu\")","Device.get(\"cpu\")","Device.get(\"cpu\")","Device.get(\"cpu\")","Device.get(\"openCL\")","Device.get(\"openCL\")","Device.get(\"openCL\")","Device.get(\"openCL\")","Device.get(\"openCL\")"],"data1":["new float[0]","new float[]{2, 3, 4, 5, 6}","new float[]{3, 5, 6}","new double[]{9, 4, 7, -12}","new float[]{22, 24, 35, 80}","new float[0]","new float[]{2, 3, 4, 5, 6}","new float[]{3, 5, 6}","new double[]{9, 4, 7, -12}","new float[]{22, 24, 35, 80}"],"data2":["new float[0]","new float[]{1, 1, 1, 1}","new float[]{4, 2, 3}","new double[]{-5, -2, 1}","new double[]{-1, -1, -1}","new float[0]","new float[]{1, 1, 1, 1, 1}","new float[]{4, 2, 3}","new double[]{-5, -2, 1}","new double[]{-1, -1, -1}"],"expected":["\"(3x2):[2.0, 4.0, -5.0, 8.0, 3.0, -2.0]\"","\"(3x2):[1.0, 1.0, 1.0, 1.0, 6.0, -2.0]\"","\"(3x2):[4.0, 2.0, 3.0, 8.0, 3.0, -2.0]\"","\"(3x2):[-5.0, -2.0, 1.0, -12.0, 3.0, -2.0]\"","\"(3x2):[-1.0, -1.0, -1.0, 80.0, 3.0, -2.0]\"","\"(3x2):[2.0, 4.0, -5.0, 8.0, 3.0, -2.0]\"","\"(3x2):[1.0, 1.0, 1.0, 1.0, 1.0, -2.0]\"","\"(3x2):[4.0, 2.0, 3.0, 8.0, 3.0, -2.0]\"","\"(3x2):[-5.0, -2.0, 1.0, -12.0, 3.0, -2.0]\"","\"(3x2):[-1.0, -1.0, -1.0, 80.0, 3.0, -2.0]\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Passing a numeric array to a tensor should modify its contents! 
[7]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A 2D tensor which we transfer to a device using the \"to\" method...","code":["Tensor t = Tensor.of(Shape.of(3, 2), new double[]{2, 4, -5, 8, 3, -2}).to(device)"]}, + + {"kind":"when","text":"A numeric array is passed to said tensor...","code":["t.mut.setItems(data1)","t.mut.setItems(data2)"]}, + + {"kind":"then","text":"The tensor (as String) contains the expected String.","code":["t.toString().contains(expected)"]}, + + {"kind":"where","text":"The following data is being used :","code":{"device":["Device.get(\"cpu\")","Device.get(\"cpu\")","Device.get(\"cpu\")","Device.get(\"cpu\")","Device.get(\"cpu\")","Device.get(\"openCL\")","Device.get(\"openCL\")","Device.get(\"openCL\")","Device.get(\"openCL\")","Device.get(\"openCL\")"],"data1":["new float[0]","new float[]{2, 3, 4, 5, 6}","new float[]{3, 5, 6}","new double[]{9, 4, 7, -12}","new float[]{22, 24, 35, 80}","new float[0]","new float[]{2, 3, 4, 5, 6}","new float[]{3, 5, 6}","new double[]{9, 4, 7, -12}","new float[]{22, 24, 35, 80}"],"data2":["new float[0]","new float[]{1, 1, 1, 1}","new float[]{4, 2, 3}","new double[]{-5, -2, 1}","new double[]{-1, -1, -1}","new float[0]","new float[]{1, 1, 1, 1, 1}","new float[]{4, 2, 3}","new double[]{-5, -2, 1}","new double[]{-1, -1, -1}"],"expected":["\"(3x2):[2.0, 4.0, -5.0, 8.0, 3.0, -2.0]\"","\"(3x2):[1.0, 1.0, 1.0, 1.0, 6.0, -2.0]\"","\"(3x2):[4.0, 2.0, 3.0, 8.0, 3.0, -2.0]\"","\"(3x2):[-5.0, -2.0, 1.0, -12.0, 3.0, -2.0]\"","\"(3x2):[-1.0, -1.0, -1.0, 80.0, 3.0, -2.0]\"","\"(3x2):[2.0, 4.0, -5.0, 8.0, 3.0, -2.0]\"","\"(3x2):[1.0, 1.0, 1.0, 1.0, 1.0, -2.0]\"","\"(3x2):[4.0, 2.0, 3.0, 8.0, 3.0, -2.0]\"","\"(3x2):[-5.0, -2.0, 1.0, -12.0, 3.0, -2.0]\"","\"(3x2):[-1.0, -1.0, -1.0, 80.0, 3.0, -2.0]\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Passing a numeric array to a tensor should modify its contents! 
[8]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A 2D tensor which we transfer to a device using the \"to\" method...","code":["Tensor t = Tensor.of(Shape.of(3, 2), new double[]{2, 4, -5, 8, 3, -2}).to(device)"]}, + + {"kind":"when","text":"A numeric array is passed to said tensor...","code":["t.mut.setItems(data1)","t.mut.setItems(data2)"]}, + + {"kind":"then","text":"The tensor (as String) contains the expected String.","code":["t.toString().contains(expected)"]}, + + {"kind":"where","text":"The following data is being used :","code":{"device":["Device.get(\"cpu\")","Device.get(\"cpu\")","Device.get(\"cpu\")","Device.get(\"cpu\")","Device.get(\"cpu\")","Device.get(\"openCL\")","Device.get(\"openCL\")","Device.get(\"openCL\")","Device.get(\"openCL\")","Device.get(\"openCL\")"],"data1":["new float[0]","new float[]{2, 3, 4, 5, 6}","new float[]{3, 5, 6}","new double[]{9, 4, 7, -12}","new float[]{22, 24, 35, 80}","new float[0]","new float[]{2, 3, 4, 5, 6}","new float[]{3, 5, 6}","new double[]{9, 4, 7, -12}","new float[]{22, 24, 35, 80}"],"data2":["new float[0]","new float[]{1, 1, 1, 1}","new float[]{4, 2, 3}","new double[]{-5, -2, 1}","new double[]{-1, -1, -1}","new float[0]","new float[]{1, 1, 1, 1, 1}","new float[]{4, 2, 3}","new double[]{-5, -2, 1}","new double[]{-1, -1, -1}"],"expected":["\"(3x2):[2.0, 4.0, -5.0, 8.0, 3.0, -2.0]\"","\"(3x2):[1.0, 1.0, 1.0, 1.0, 6.0, -2.0]\"","\"(3x2):[4.0, 2.0, 3.0, 8.0, 3.0, -2.0]\"","\"(3x2):[-5.0, -2.0, 1.0, -12.0, 3.0, -2.0]\"","\"(3x2):[-1.0, -1.0, -1.0, 80.0, 3.0, -2.0]\"","\"(3x2):[2.0, 4.0, -5.0, 8.0, 3.0, -2.0]\"","\"(3x2):[1.0, 1.0, 1.0, 1.0, 1.0, -2.0]\"","\"(3x2):[4.0, 2.0, 3.0, 8.0, 3.0, -2.0]\"","\"(3x2):[-5.0, -2.0, 1.0, -12.0, 3.0, -2.0]\"","\"(3x2):[-1.0, -1.0, -1.0, 80.0, 3.0, -2.0]\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Passing a numeric array to a tensor should modify its contents! 
[9]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A 2D tensor which we transfer to a device using the \"to\" method...","code":["Tensor t = Tensor.of(Shape.of(3, 2), new double[]{2, 4, -5, 8, 3, -2}).to(device)"]}, + + {"kind":"when","text":"A numeric array is passed to said tensor...","code":["t.mut.setItems(data1)","t.mut.setItems(data2)"]}, + + {"kind":"then","text":"The tensor (as String) contains the expected String.","code":["t.toString().contains(expected)"]}, + + {"kind":"where","text":"The following data is being used :","code":{"device":["Device.get(\"cpu\")","Device.get(\"cpu\")","Device.get(\"cpu\")","Device.get(\"cpu\")","Device.get(\"cpu\")","Device.get(\"openCL\")","Device.get(\"openCL\")","Device.get(\"openCL\")","Device.get(\"openCL\")","Device.get(\"openCL\")"],"data1":["new float[0]","new float[]{2, 3, 4, 5, 6}","new float[]{3, 5, 6}","new double[]{9, 4, 7, -12}","new float[]{22, 24, 35, 80}","new float[0]","new float[]{2, 3, 4, 5, 6}","new float[]{3, 5, 6}","new double[]{9, 4, 7, -12}","new float[]{22, 24, 35, 80}"],"data2":["new float[0]","new float[]{1, 1, 1, 1}","new float[]{4, 2, 3}","new double[]{-5, -2, 1}","new double[]{-1, -1, -1}","new float[0]","new float[]{1, 1, 1, 1, 1}","new float[]{4, 2, 3}","new double[]{-5, -2, 1}","new double[]{-1, -1, -1}"],"expected":["\"(3x2):[2.0, 4.0, -5.0, 8.0, 3.0, -2.0]\"","\"(3x2):[1.0, 1.0, 1.0, 1.0, 6.0, -2.0]\"","\"(3x2):[4.0, 2.0, 3.0, 8.0, 3.0, -2.0]\"","\"(3x2):[-5.0, -2.0, 1.0, -12.0, 3.0, -2.0]\"","\"(3x2):[-1.0, -1.0, -1.0, 80.0, 3.0, -2.0]\"","\"(3x2):[2.0, 4.0, -5.0, 8.0, 3.0, -2.0]\"","\"(3x2):[1.0, 1.0, 1.0, 1.0, 1.0, -2.0]\"","\"(3x2):[4.0, 2.0, 3.0, 8.0, 3.0, -2.0]\"","\"(3x2):[-5.0, -2.0, 1.0, -12.0, 3.0, -2.0]\"","\"(3x2):[-1.0, -1.0, -1.0, 80.0, 3.0, -2.0]\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Devices expose an API for accessing (reading and writing) the data of a tensor. [0]", + "result":"PASS", + "duration":"0.009 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"Because in some environments OpenCL might not be available, the test will be stopped!","code":["if ( device == null ) return"]}, + + {"kind":"when","text":"A 2D tensor is being instantiated by passing the given shape and data...","code":["Tensor t = Tensor.of(Shape.of(shape), data).to(device)"]}, + + {"kind":"then","text":"The tensor values (as List) are as expected.","code":["Arrays.equals(t.getItemsAs(double[].class), DataConverter.get().convert(expected,double[].class))"]}, + + {"kind":"when","text":"The same underlying data is being queried by calling the device...","code":["var result = (0..> device"]}, + + {"kind":"when","text":"The call is being passed to the execution utility method ..","code":["AbstractDeviceAlgorithm.prepareAndExecute( call, AbstractDeviceAlgorithm::executeDeviceAlgorithm )"]}, + + {"kind":"then","text":"...the implementation is being accessed in order to access the mocked lambda...","code":["(1.._) * call.getAlgorithm() >> implementation"]}, + + {"kind":"and","text":"The tensor array is being accessed to check for null. 
(For exception throwing)","code":["1 * call.inputs() >> new Tensor[]{ Mock(Tensor), null }"]}, + + {"kind":"and","text":"The expected exception is being thrown alongside a descriptive message.","code":["def exception = thrown(IllegalArgumentException)","exception.message == \"Device arguments may not be null!\\n\" +"," \"One or more tensor arguments within the given ExecutionCall instance is null.\""]}, + + {"kind":"where","text":"The following Device instances are being tested :","code":{"device":["CPU.get()","Device.get(\"openCL\")"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Execution calls containing null arguments will cause an exception to be thrown in device instances. [1]", "result":"PASS", - "duration":"0.294 seconds", + "duration":"0.001 seconds", "iterations":{ - "tags":{},"see":[],"extraInfo":["\n Every argument within an ExecutionCall instance has a purpose. Null is not permissible.\n ","\n Every argument within an ExecutionCall instance has a purpose. Null is not permissible.\n "] + "tags":{},"see":[],"extraInfo":[] }, "blocks":[ {"kind":"given","text":"A mocked ExecutionCall with mocked algorithm...","code":["var call = Mock(ExecutionCall)","var implementation = Mock(Algorithm)"]}, @@ -142,7 +960,7 @@ { "id":"Devices store tensors which can also be restored.", "result":"PASS", - "duration":"0.005 seconds", + "duration":"0.029 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":["\n A Device implementation keeps track of the number of tensors it \"owns\",\n which you can see below. This is basically just reference counting.\n\n Also note that in this example we are making some exceptions for the CPU device\n simply because it is the default device on which all tensors are being stored\n if no other device is specified.\n "] }, @@ -171,11 +989,40 @@ }, { - "id":"Devices store slices which can also be restored just like any other tensor.", + "id":"Devices store slices which can also be restored just like any other tensor. [0]", + "result":"PASS", + "duration":"0.004 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":["\n Note that in this example we are making some exceptions for the CPU device\n simply because it is the default device on which all tensors are being stored\n if no other device is specified.\n "] + }, + "blocks":[ + {"kind":"given","text":"The given device is available and Neureka is being reset.","code":["if ( device == null ) return"]}, + + {"kind":"and","text":"Two tensors which will be transferred later on...","code":["int initialNumber = device.numberOfStored()","Tensor a = Tensor.of([2, 3], \";)\")","Tensor b = a[1, 0..2]"]}, + + {"kind":"expect","text":"The given device is initially empty.","code":["device.isEmpty() == ( device.numberOfStored() == 0 )","!device.has( a ) || device instanceof CPU","!device.has( b ) || device instanceof CPU"]}, + + {"kind":"when","text":"The the first tensor is being passed to the device...","code":["device.store( a )"]}, + + {"kind":"then","text":"...tensor \"a\" is now on the device.","code":["!device.isEmpty()","device.numberOfStored() == initialNumber + ( device instanceof CPU ? 2 : 1 )","device.has( a )"]}, + + {"kind":"and","text":"","code":["!device.has( b ) || device instanceof CPU"]}, + + {"kind":"when","text":"","code":["device.free( a )"]}, + + {"kind":"then","text":"...the device is empty again.","code":["device.isEmpty() == (( initialNumber == 0 ) && !( device instanceof CPU ))","device.numberOfStored() == initialNumber + ( device instanceof CPU ? 
1 : 0 )","!device.has( a ) || device instanceof CPU","!device.has( b ) || device instanceof CPU"]}, + + {"kind":"where","text":"The following Device instances are being tested :","code":{"device":["CPU.get()","Device.get( \"openCL\" )"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Devices store slices which can also be restored just like any other tensor. [1]", "result":"PASS", - "duration":"0.007 seconds", + "duration":"0", "iterations":{ - "tags":{},"see":[],"extraInfo":["\n Note that in this example we are making some exceptions for the CPU device\n simply because it is the default device on which all tensors are being stored\n if no other device is specified.\n ","\n Note that in this example we are making some exceptions for the CPU device\n simply because it is the default device on which all tensors are being stored\n if no other device is specified.\n "] + "tags":{},"see":[],"extraInfo":[] }, "blocks":[ {"kind":"given","text":"The given device is available and Neureka is being reset.","code":["if ( device == null ) return"]}, @@ -200,9 +1047,48 @@ }, { - "id":"A device will keep track of the amount of tensors and data objects it stores.", + "id":"A device will keep track of the amount of tensors and data objects it stores. [0]", + "result":"PASS", + "duration":"5.953 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We note the initial amount of tensors stored on the device.","code":["System.gc()","Sleep.until(5_000, 100, {CPU.get().numberOfStored() == 0})","int initial = device.numberOfStored()","int initialDataObjects = device.numberOfDataObjects()"]}, + + {"kind":"when","text":"We first create a data object...","code":["var data = Data.of( 42, 73, 11, 7 )"]}, + + {"kind":"then","text":"The CPU should not have stored any tensors yet.","code":["device.numberOfStored() == initial"]}, + + {"kind":"when","text":"We create a tensor from the data object...","code":["var t = Tensor.of( Shape.of(2, 2), data ).to(device)"]}, + + {"kind":"then","text":"The device should know about the existence of a new tensor.","code":["device.numberOfStored() == initial + 1"]}, + + {"kind":"and","text":"The number of data objects stored on the device should also be increased.","code":["device.numberOfDataObjects() == initialDataObjects + 1"]}, + + {"kind":"when","text":"We create a new tensor from the first one...","code":["var t2 = t * 2"]}, + + {"kind":"then","text":"The device should know about the existence of a new tensor as well as the data objects.","code":["device.numberOfStored() == initial + 2","device.numberOfDataObjects() == initialDataObjects + 2"]}, + + {"kind":"when","text":"We however create a new reshaped version of the first tensor...","code":["var t3 = t.reshape( 4 )"]}, + + {"kind":"then","text":"The device should also know about the existence of a new tensor, but not a new data object.","code":["device.numberOfStored() == initial + 3","device.numberOfDataObjects() == initialDataObjects + 2"]}, + + {"kind":"when","text":"We delete the references to the tensors, and then give the GC some time to do its job...","code":["t = null","t2 = null","t3 = null","System.gc()","Thread.sleep( 128 )","Sleep.until(1028, {device.numberOfStored() == initial})"]}, + + {"kind":"then","text":"The device should have forgotten about the tensors.","code":["device.numberOfStored() == initial"]}, + + {"kind":"and","text":"The device should have forgotten about the data objects as well.","code":["device.numberOfDataObjects() == 
initialDataObjects"]}, + + {"kind":"where","text":"The following Device instances are being tested :","code":{"device":["CPU.get()","Device.get( \"openCL\" )"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"A device will keep track of the amount of tensors and data objects it stores. [1]", "result":"PASS", - "duration":"0.374 seconds", + "duration":"0.001 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -272,11 +1158,42 @@ }, { - "id":"Virtual tensors stay virtual when outsourced.", + "id":"Virtual tensors stay virtual when outsourced. [0]", "result":"PASS", - "duration":"0.006 seconds", + "duration":"0.004 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":["\n Note: A virtual tensor is a tensor which is not yet fully initialized\n in the sense that the data array is not yet allocated according to the \n tensors size (number of elements they hold).\n This is the case for tensor which are filled homogeneously with a single value,\n like for example an all 0 tensor. \n "] + }, + "blocks":[ + {"kind":"given","text":"We create a homogeneously filled tensor, which is therefor \"virtual\".","code":["var t = Tensor.ofFloats().withShape(4,3).all(-0.54f)"]}, + + {"kind":"and","text":"We also get a device for testing...","code":["var device = Device.get(deviceType)"]}, + + {"kind":"expect","text":"We expect that the tensor is virtual, meaning its underlying data array stores only a single value...","code":["t.isVirtual()"]}, + + {"kind":"when","text":"We send the tensor to the device...","code":["t.to(device)"]}, + + {"kind":"then","text":"This should cause it to be \"outsourced\", (except dor a CPU device of course).","code":["t.isOutsourced() != ( device instanceof CPU )"]}, + + {"kind":"and","text":"...we expect the tensor to stay virtual on the device!","code":["t.isVirtual()"]}, + + {"kind":"when","text":"We restore the device...","code":["device.restore(t)"]}, + + {"kind":"then","text":"The tensor should no longer be outsourced.","code":["!t.isOutsourced()"]}, + + {"kind":"and","text":"It should still be virtual!","code":["t.isVirtual()"]}, + + {"kind":"where","text":"We test on the following devices:","code":{"deviceType":["'CPU'","'GPU'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Virtual tensors stay virtual when outsourced. [1]", + "result":"PASS", + "duration":"0.001 seconds", "iterations":{ - "tags":{},"see":[],"extraInfo":["\n Note: A virtual tensor is a tensor which is not yet fully initialized\n in the sense that the data array is not yet allocated according to the \n tensors size (number of elements they hold).\n This is the case for tensor which are filled homogeneously with a single value,\n like for example an all 0 tensor. \n ","\n Note: A virtual tensor is a tensor which is not yet fully initialized\n in the sense that the data array is not yet allocated according to the \n tensors size (number of elements they hold).\n This is the case for tensor which are filled homogeneously with a single value,\n like for example an all 0 tensor. 
\n "] + "tags":{},"see":[],"extraInfo":[] }, "blocks":[ {"kind":"given","text":"We create a homogeneously filled tensor, which is therefor \"virtual\".","code":["var t = Tensor.ofFloats().withShape(4,3).all(-0.54f)"]}, diff --git a/docs/spock/reports/ut.device.FileDevice_Spec.json b/docs/spock/reports/ut.device.FileDevice_Spec.json index 348c95aa3..740726677 100644 --- a/docs/spock/reports/ut.device.FileDevice_Spec.json +++ b/docs/spock/reports/ut.device.FileDevice_Spec.json @@ -1,22 +1,22 @@ { "className":"ut.device.FileDevice_Spec", "title":"FileDevice, Storing Tensors in Files", - "narrative":"The `FileDevice` class, one of many implementations of the `Device` interface,\n represents a file directory which can store and load tensors as files (`idx`, `jpg`, `png`...).", + "narrative":"The `FileDevice` class, one of many implementations of the `Device` interface, \n represents a file directory which can store and load tensors as files (`idx`, `jpg`, `png`...).", "subjects":["neureka.devices.file.FileDevice","neureka.devices.Device"], "statistics":{ - "runs":"4", + "runs":"12", "successRate":"100.0%", "failures":"0", "errors":"0", "skipped":"0", - "duration":"0.048 seconds" + "duration":"0.161 seconds" }, "headers":["\n This specification covers the behavior of the `FileDevice`\n class, which enables the persistence of tensor data. \n "],"tags":{},"see":[], "features":[ { "id":"A file device stores tensors in idx files by default.", "result":"PASS", - "duration":"0.001 seconds", + "duration":"0.007 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -43,9 +43,9 @@ }, { - "id":"A file device stores tensors in various file formats.", + "id":"A file device stores tensors in various file formats. [0]", "result":"PASS", - "duration":"0.025 seconds", + "duration":"0.009 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -78,12 +78,152 @@ }, { - "id":"The file device can load known files in a directory.", + "id":"A file device stores tensors in various file formats. 
[1]", + "result":"PASS", + "duration":"0.062 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A new tensor is being created for testing.","code":["var a = Tensor.of( Shape.of(shape), -8d..8d )"]}, + + {"kind":"and","text":"A String representation of the shape.","code":["var shapeStr = String.join('x',(shape as List).collect {String.valueOf(it)})"]}, + + {"kind":"and","text":"A file device instance is being accessed for a given path.","code":["var device = FileDevice.at( path )"]}, + + {"kind":"expect","text":"Initially the device does not store our newly created tensor.","code":["!device.contains(a)"]}, + + {"kind":"when","text":"Tensor \"a\" is being stored in the device...","code":["if ( filename != null ) device.store( a, filename ) else device.store( a )"]}, + + {"kind":"then","text":"The expected file is being created at the given path.","code":["new File( path + '/' + filename ).exists()"," ||","new File( path ).listFiles().any {"," it.name.startsWith('tensor_' + shapeStr + '_f64_') && it.name.endsWith('.idx')","}"]}, + + {"kind":"and","text":"Tensor \"a\" does no longer have a value (stored in RAM).","code":["a.mut.data.getOrNull() == null"]}, + + {"kind":"and","text":"The tensor is now of the expected data-type.","code":["a.dataType == DataType.of( dataTypeClass )"]}, + + {"kind":"and","text":"The device contains a \"FileHandle\" instances of the expected type.","code":["device.fileHandleOf( a ).class == fileHandleClass"]}, + + {"kind":"when","text":"Freeing the tensor...","code":["device.free( a )"]}, + + {"kind":"then","text":"The file will be deleted!","code":["!new File( path + '/' + filename ).exists()","!new File( path ).listFiles().any {it.name.startsWith('tensor_' + shapeStr + '_f64_') }"]}, + + {"kind":"where","text":"The following parameters are being used:","code":{"path":["\"build/test-can\"","\"build/test-can\"","\"build/test-can\"","\"build/test-can\"","\"build/test-can\""],"filename":["\"tensor_2x4x3_.idx\"","\"tensor_2x4x3_.jpg\"","\"tensor_5x3x4_.png\"","null","\"tensor_4x3_.csv\""],"shape":["[2,4,3]","[2,4,3]","[5,3,4]","[2,4,3]","[4,3]"],"fileHandleClass":["IDXHandle.class","JPEGHandle.class","PNGHandle.class","IDXHandle.class","CSVHandle.class"],"dataTypeClass":["F64.class","UI8.class","UI8.class","F64.class","String.class"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"A file device stores tensors in various file formats. 
[2]", "result":"PASS", "duration":"0.006 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, + "blocks":[ + {"kind":"given","text":"A new tensor is being created for testing.","code":["var a = Tensor.of( Shape.of(shape), -8d..8d )"]}, + + {"kind":"and","text":"A String representation of the shape.","code":["var shapeStr = String.join('x',(shape as List).collect {String.valueOf(it)})"]}, + + {"kind":"and","text":"A file device instance is being accessed for a given path.","code":["var device = FileDevice.at( path )"]}, + + {"kind":"expect","text":"Initially the device does not store our newly created tensor.","code":["!device.contains(a)"]}, + + {"kind":"when","text":"Tensor \"a\" is being stored in the device...","code":["if ( filename != null ) device.store( a, filename ) else device.store( a )"]}, + + {"kind":"then","text":"The expected file is being created at the given path.","code":["new File( path + '/' + filename ).exists()"," ||","new File( path ).listFiles().any {"," it.name.startsWith('tensor_' + shapeStr + '_f64_') && it.name.endsWith('.idx')","}"]}, + + {"kind":"and","text":"Tensor \"a\" does no longer have a value (stored in RAM).","code":["a.mut.data.getOrNull() == null"]}, + + {"kind":"and","text":"The tensor is now of the expected data-type.","code":["a.dataType == DataType.of( dataTypeClass )"]}, + + {"kind":"and","text":"The device contains a \"FileHandle\" instances of the expected type.","code":["device.fileHandleOf( a ).class == fileHandleClass"]}, + + {"kind":"when","text":"Freeing the tensor...","code":["device.free( a )"]}, + + {"kind":"then","text":"The file will be deleted!","code":["!new File( path + '/' + filename ).exists()","!new File( path ).listFiles().any {it.name.startsWith('tensor_' + shapeStr + '_f64_') }"]}, + + {"kind":"where","text":"The following parameters are being used:","code":{"path":["\"build/test-can\"","\"build/test-can\"","\"build/test-can\"","\"build/test-can\"","\"build/test-can\""],"filename":["\"tensor_2x4x3_.idx\"","\"tensor_2x4x3_.jpg\"","\"tensor_5x3x4_.png\"","null","\"tensor_4x3_.csv\""],"shape":["[2,4,3]","[2,4,3]","[5,3,4]","[2,4,3]","[4,3]"],"fileHandleClass":["IDXHandle.class","JPEGHandle.class","PNGHandle.class","IDXHandle.class","CSVHandle.class"],"dataTypeClass":["F64.class","UI8.class","UI8.class","F64.class","String.class"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"A file device stores tensors in various file formats. 
[3]", + "result":"PASS", + "duration":"0.003 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A new tensor is being created for testing.","code":["var a = Tensor.of( Shape.of(shape), -8d..8d )"]}, + + {"kind":"and","text":"A String representation of the shape.","code":["var shapeStr = String.join('x',(shape as List).collect {String.valueOf(it)})"]}, + + {"kind":"and","text":"A file device instance is being accessed for a given path.","code":["var device = FileDevice.at( path )"]}, + + {"kind":"expect","text":"Initially the device does not store our newly created tensor.","code":["!device.contains(a)"]}, + + {"kind":"when","text":"Tensor \"a\" is being stored in the device...","code":["if ( filename != null ) device.store( a, filename ) else device.store( a )"]}, + + {"kind":"then","text":"The expected file is being created at the given path.","code":["new File( path + '/' + filename ).exists()"," ||","new File( path ).listFiles().any {"," it.name.startsWith('tensor_' + shapeStr + '_f64_') && it.name.endsWith('.idx')","}"]}, + + {"kind":"and","text":"Tensor \"a\" does no longer have a value (stored in RAM).","code":["a.mut.data.getOrNull() == null"]}, + + {"kind":"and","text":"The tensor is now of the expected data-type.","code":["a.dataType == DataType.of( dataTypeClass )"]}, + + {"kind":"and","text":"The device contains a \"FileHandle\" instances of the expected type.","code":["device.fileHandleOf( a ).class == fileHandleClass"]}, + + {"kind":"when","text":"Freeing the tensor...","code":["device.free( a )"]}, + + {"kind":"then","text":"The file will be deleted!","code":["!new File( path + '/' + filename ).exists()","!new File( path ).listFiles().any {it.name.startsWith('tensor_' + shapeStr + '_f64_') }"]}, + + {"kind":"where","text":"The following parameters are being used:","code":{"path":["\"build/test-can\"","\"build/test-can\"","\"build/test-can\"","\"build/test-can\"","\"build/test-can\""],"filename":["\"tensor_2x4x3_.idx\"","\"tensor_2x4x3_.jpg\"","\"tensor_5x3x4_.png\"","null","\"tensor_4x3_.csv\""],"shape":["[2,4,3]","[2,4,3]","[5,3,4]","[2,4,3]","[4,3]"],"fileHandleClass":["IDXHandle.class","JPEGHandle.class","PNGHandle.class","IDXHandle.class","CSVHandle.class"],"dataTypeClass":["F64.class","UI8.class","UI8.class","F64.class","String.class"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"A file device stores tensors in various file formats. 
[4]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A new tensor is being created for testing.","code":["var a = Tensor.of( Shape.of(shape), -8d..8d )"]}, + + {"kind":"and","text":"A String representation of the shape.","code":["var shapeStr = String.join('x',(shape as List).collect {String.valueOf(it)})"]}, + + {"kind":"and","text":"A file device instance is being accessed for a given path.","code":["var device = FileDevice.at( path )"]}, + + {"kind":"expect","text":"Initially the device does not store our newly created tensor.","code":["!device.contains(a)"]}, + + {"kind":"when","text":"Tensor \"a\" is being stored in the device...","code":["if ( filename != null ) device.store( a, filename ) else device.store( a )"]}, + + {"kind":"then","text":"The expected file is being created at the given path.","code":["new File( path + '/' + filename ).exists()"," ||","new File( path ).listFiles().any {"," it.name.startsWith('tensor_' + shapeStr + '_f64_') && it.name.endsWith('.idx')","}"]}, + + {"kind":"and","text":"Tensor \"a\" does no longer have a value (stored in RAM).","code":["a.mut.data.getOrNull() == null"]}, + + {"kind":"and","text":"The tensor is now of the expected data-type.","code":["a.dataType == DataType.of( dataTypeClass )"]}, + + {"kind":"and","text":"The device contains a \"FileHandle\" instances of the expected type.","code":["device.fileHandleOf( a ).class == fileHandleClass"]}, + + {"kind":"when","text":"Freeing the tensor...","code":["device.free( a )"]}, + + {"kind":"then","text":"The file will be deleted!","code":["!new File( path + '/' + filename ).exists()","!new File( path ).listFiles().any {it.name.startsWith('tensor_' + shapeStr + '_f64_') }"]}, + + {"kind":"where","text":"The following parameters are being used:","code":{"path":["\"build/test-can\"","\"build/test-can\"","\"build/test-can\"","\"build/test-can\"","\"build/test-can\""],"filename":["\"tensor_2x4x3_.idx\"","\"tensor_2x4x3_.jpg\"","\"tensor_5x3x4_.png\"","null","\"tensor_4x3_.csv\""],"shape":["[2,4,3]","[2,4,3]","[5,3,4]","[2,4,3]","[4,3]"],"fileHandleClass":["IDXHandle.class","JPEGHandle.class","PNGHandle.class","IDXHandle.class","CSVHandle.class"],"dataTypeClass":["F64.class","UI8.class","UI8.class","F64.class","String.class"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The file device can load known files in a directory.", + "result":"PASS", + "duration":"0.017 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, "blocks":[ {"kind":"given","text":"A file device instance is being accessed for a simple test path.","code":["def device = FileDevice.at( 'build/resources/test/csv' )"]}, @@ -101,9 +241,141 @@ }, { - "id":"A tensor loaded from a file device can be loaded again.", + "id":"A tensor loaded from a file device can be loaded again. 
[0]", + "result":"PASS", + "duration":"0.004 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A file device instance is being accessed for a simple test path.","code":["def device = FileDevice.at( 'build/resources/test/idx2' )"]}, + + {"kind":"and","text":"We create a simple tensor which we want to save.","code":["var t = Tensor.of(shape, data)"]}, + + {"kind":"expect","text":"","code":["device.directory == 'build/resources/test/idx2'","t.getDevice() === CPU.get()","device.loadable.toSet() == [].toSet() // If this fails: consider deleting the build folder!!","device.loaded == []"]}, + + {"kind":"when","text":"We save the tensor to the device.","code":["device.store( t, 'my-tensor-file.idx' )"]}, + + {"kind":"then","text":"The device contains the expected tensor.","code":["device.has( t )","t.isOutsourced()","t.getDevice() === device","device.loadable.toSet() == [].toSet() // If this fails: consider deleting the build folder!!","device.loaded == []"]}, + + {"kind":"when","text":"We load the tensor from the device.","code":["device.restore( t )"]}, + + {"kind":"then","text":"The tensor is restored correctly.","code":["!device.has( t )","!t.isOutsourced()","t.getDevice() === CPU.get()","device.loadable.toSet() == [].toSet() // If this fails: consider deleting the build folder!!","device.loaded == []"]}, + + {"kind":"when","text":"We store the tensor again.","code":["device.store( t, 'my-tensor-file.idx' )"]}, + + {"kind":"then","text":"The tensor is stored correctly.","code":["device.has( t )","device.loadable.toSet() == [].toSet() // If this fails: consider deleting the build folder!!","device.loaded == []"]}, + + {"kind":"cleanup","text":"We delete the file again.","code":["new File( 'build/resources/test/idx2/my-tensor-file.idx' ).delete()"]}, + + {"kind":"where","text":"We use the following shapes and data arrays:","code":{"shape":["Shape.of(2, 3)","Shape.of(3, 2)","Shape.of(4)","Shape.of(5)","Shape.of(2)"],"data":["Data.of( 1, 2, 3, 4, 5, 6 )","Data.of( -1, -2, -3, -4, -5, -6 )","Data.of( 1.1, 2.2, 3.3, 4.4 )","Data.of( 0.3f, 0.4f, 0.5f, 0.6f, 0.7f )","Data.of( 42L, -7L )"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"A tensor loaded from a file device can be loaded again. 
[1]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A file device instance is being accessed for a simple test path.","code":["def device = FileDevice.at( 'build/resources/test/idx2' )"]}, + + {"kind":"and","text":"We create a simple tensor which we want to save.","code":["var t = Tensor.of(shape, data)"]}, + + {"kind":"expect","text":"","code":["device.directory == 'build/resources/test/idx2'","t.getDevice() === CPU.get()","device.loadable.toSet() == [].toSet() // If this fails: consider deleting the build folder!!","device.loaded == []"]}, + + {"kind":"when","text":"We save the tensor to the device.","code":["device.store( t, 'my-tensor-file.idx' )"]}, + + {"kind":"then","text":"The device contains the expected tensor.","code":["device.has( t )","t.isOutsourced()","t.getDevice() === device","device.loadable.toSet() == [].toSet() // If this fails: consider deleting the build folder!!","device.loaded == []"]}, + + {"kind":"when","text":"We load the tensor from the device.","code":["device.restore( t )"]}, + + {"kind":"then","text":"The tensor is restored correctly.","code":["!device.has( t )","!t.isOutsourced()","t.getDevice() === CPU.get()","device.loadable.toSet() == [].toSet() // If this fails: consider deleting the build folder!!","device.loaded == []"]}, + + {"kind":"when","text":"We store the tensor again.","code":["device.store( t, 'my-tensor-file.idx' )"]}, + + {"kind":"then","text":"The tensor is stored correctly.","code":["device.has( t )","device.loadable.toSet() == [].toSet() // If this fails: consider deleting the build folder!!","device.loaded == []"]}, + + {"kind":"cleanup","text":"We delete the file again.","code":["new File( 'build/resources/test/idx2/my-tensor-file.idx' ).delete()"]}, + + {"kind":"where","text":"We use the following shapes and data arrays:","code":{"shape":["Shape.of(2, 3)","Shape.of(3, 2)","Shape.of(4)","Shape.of(5)","Shape.of(2)"],"data":["Data.of( 1, 2, 3, 4, 5, 6 )","Data.of( -1, -2, -3, -4, -5, -6 )","Data.of( 1.1, 2.2, 3.3, 4.4 )","Data.of( 0.3f, 0.4f, 0.5f, 0.6f, 0.7f )","Data.of( 42L, -7L )"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"A tensor loaded from a file device can be loaded again. 
[2]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A file device instance is being accessed for a simple test path.","code":["def device = FileDevice.at( 'build/resources/test/idx2' )"]}, + + {"kind":"and","text":"We create a simple tensor which we want to save.","code":["var t = Tensor.of(shape, data)"]}, + + {"kind":"expect","text":"","code":["device.directory == 'build/resources/test/idx2'","t.getDevice() === CPU.get()","device.loadable.toSet() == [].toSet() // If this fails: consider deleting the build folder!!","device.loaded == []"]}, + + {"kind":"when","text":"We save the tensor to the device.","code":["device.store( t, 'my-tensor-file.idx' )"]}, + + {"kind":"then","text":"The device contains the expected tensor.","code":["device.has( t )","t.isOutsourced()","t.getDevice() === device","device.loadable.toSet() == [].toSet() // If this fails: consider deleting the build folder!!","device.loaded == []"]}, + + {"kind":"when","text":"We load the tensor from the device.","code":["device.restore( t )"]}, + + {"kind":"then","text":"The tensor is restored correctly.","code":["!device.has( t )","!t.isOutsourced()","t.getDevice() === CPU.get()","device.loadable.toSet() == [].toSet() // If this fails: consider deleting the build folder!!","device.loaded == []"]}, + + {"kind":"when","text":"We store the tensor again.","code":["device.store( t, 'my-tensor-file.idx' )"]}, + + {"kind":"then","text":"The tensor is stored correctly.","code":["device.has( t )","device.loadable.toSet() == [].toSet() // If this fails: consider deleting the build folder!!","device.loaded == []"]}, + + {"kind":"cleanup","text":"We delete the file again.","code":["new File( 'build/resources/test/idx2/my-tensor-file.idx' ).delete()"]}, + + {"kind":"where","text":"We use the following shapes and data arrays:","code":{"shape":["Shape.of(2, 3)","Shape.of(3, 2)","Shape.of(4)","Shape.of(5)","Shape.of(2)"],"data":["Data.of( 1, 2, 3, 4, 5, 6 )","Data.of( -1, -2, -3, -4, -5, -6 )","Data.of( 1.1, 2.2, 3.3, 4.4 )","Data.of( 0.3f, 0.4f, 0.5f, 0.6f, 0.7f )","Data.of( 42L, -7L )"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"A tensor loaded from a file device can be loaded again. 
[3]", "result":"PASS", - "duration":"0", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A file device instance is being accessed for a simple test path.","code":["def device = FileDevice.at( 'build/resources/test/idx2' )"]}, + + {"kind":"and","text":"We create a simple tensor which we want to save.","code":["var t = Tensor.of(shape, data)"]}, + + {"kind":"expect","text":"","code":["device.directory == 'build/resources/test/idx2'","t.getDevice() === CPU.get()","device.loadable.toSet() == [].toSet() // If this fails: consider deleting the build folder!!","device.loaded == []"]}, + + {"kind":"when","text":"We save the tensor to the device.","code":["device.store( t, 'my-tensor-file.idx' )"]}, + + {"kind":"then","text":"The device contains the expected tensor.","code":["device.has( t )","t.isOutsourced()","t.getDevice() === device","device.loadable.toSet() == [].toSet() // If this fails: consider deleting the build folder!!","device.loaded == []"]}, + + {"kind":"when","text":"We load the tensor from the device.","code":["device.restore( t )"]}, + + {"kind":"then","text":"The tensor is restored correctly.","code":["!device.has( t )","!t.isOutsourced()","t.getDevice() === CPU.get()","device.loadable.toSet() == [].toSet() // If this fails: consider deleting the build folder!!","device.loaded == []"]}, + + {"kind":"when","text":"We store the tensor again.","code":["device.store( t, 'my-tensor-file.idx' )"]}, + + {"kind":"then","text":"The tensor is stored correctly.","code":["device.has( t )","device.loadable.toSet() == [].toSet() // If this fails: consider deleting the build folder!!","device.loaded == []"]}, + + {"kind":"cleanup","text":"We delete the file again.","code":["new File( 'build/resources/test/idx2/my-tensor-file.idx' ).delete()"]}, + + {"kind":"where","text":"We use the following shapes and data arrays:","code":{"shape":["Shape.of(2, 3)","Shape.of(3, 2)","Shape.of(4)","Shape.of(5)","Shape.of(2)"],"data":["Data.of( 1, 2, 3, 4, 5, 6 )","Data.of( -1, -2, -3, -4, -5, -6 )","Data.of( 1.1, 2.2, 3.3, 4.4 )","Data.of( 0.3f, 0.4f, 0.5f, 0.6f, 0.7f )","Data.of( 42L, -7L )"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"A tensor loaded from a file device can be loaded again. [4]", + "result":"PASS", + "duration":"0.001 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, diff --git a/docs/spock/reports/ut.device.OpenCLDevice_Exception_Spec.json b/docs/spock/reports/ut.device.OpenCLDevice_Exception_Spec.json index 2455c9edf..f9515ccae 100644 --- a/docs/spock/reports/ut.device.OpenCLDevice_Exception_Spec.json +++ b/docs/spock/reports/ut.device.OpenCLDevice_Exception_Spec.json @@ -1,21 +1,21 @@ { "className":"ut.device.OpenCLDevice_Exception_Spec", "title":"OpenCLDevice Exception Handling", - "narrative":"The OpenCLDevice class, one of many implementations of the Device interface,\n represents physical OpenCL devices.\n This specification defines how instances of this class deal with exceptional information.", + "narrative":"The OpenCLDevice class, one of many implementations of the Device interface, \n represents physical OpenCL devices.\n This specification defines how instances of this class deal with exceptional information.", "subjects":[], "statistics":{ - "runs":"4", + "runs":"0", "successRate":"100.0%", "failures":"0", "errors":"0", - "skipped":"0", - "duration":"0.079 seconds" + "skipped":"4", + "duration":"0.006 seconds" }, "headers":["\n

        \n It is important that an OpenCLDevice gives insightful error messages\n when encountering exceptional situations.\n

        \n "],"tags":{},"see":[], "features":[ { "id":"An OpenCLDevice will throw an exception when trying to add a tensor whose \"data parent\" is not outsourced.", - "result":"PASS", + "result":"IGNORED", "duration":"0", "iterations":{ "tags":{},"see":[],"extraInfo":[] @@ -38,8 +38,8 @@ { "id":"Ad hoc compilation produces expected exceptions.", - "result":"PASS", - "duration":"0.039 seconds", + "result":"IGNORED", + "duration":"0", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -59,8 +59,8 @@ { "id":"Ad hoc compilation produces expected exceptions when duplication is found.", - "result":"PASS", - "duration":"0.036 seconds", + "result":"IGNORED", + "duration":"0", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -84,7 +84,7 @@ { "id":"Trying to restore a tensor which is not on a device raises exception.", - "result":"PASS", + "result":"IGNORED", "duration":"0", "iterations":{ "tags":{},"see":[],"extraInfo":[] diff --git a/docs/spock/reports/ut.device.OpenCLDevice_Spec.json b/docs/spock/reports/ut.device.OpenCLDevice_Spec.json index d09cbc0fb..1700a270e 100644 --- a/docs/spock/reports/ut.device.OpenCLDevice_Spec.json +++ b/docs/spock/reports/ut.device.OpenCLDevice_Spec.json @@ -4,19 +4,19 @@ "narrative":"Tensors need devices for execution!\n By default tensors use the `CPU` device, but sometimes we want to\n use something more suitable for large amounts of data and a high degree of parallelization.\n This is were the `OpenCLDevice` comes into play!\n It is a `Device` implementation built on top of the JOCL library, a thin OpenCL API.\n We expect the `OpenCLDevice` to store tensors as well as being able to read and write\n data from and to stored tensors.\n Also, an `OpenCLDevice` should allows us to compile OpenCL kernel code on the fly...", "subjects":[], "statistics":{ - "runs":"8", + "runs":"0", "successRate":"100.0%", "failures":"0", "errors":"0", - "skipped":"0", - "duration":"2.217 seconds" + "skipped":"8", + "duration":"0.006 seconds" }, "headers":["\n Specified below are strict tests covering the behavior\n of the OpenCLDevice when hosting tensors and executing \n operations on them.\n "],"tags":{},"see":[], "features":[ { "id":"An OpenCLDevice loads tensors in a provided lambda temporarily.", - "result":"PASS", - "duration":"0.002 seconds", + "result":"IGNORED", + "duration":"0", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -40,8 +40,8 @@ { "id":"We can get the items of an outsourced tensor as a primitive array.", - "result":"PASS", - "duration":"0.001 seconds", + "result":"IGNORED", + "duration":"0", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -65,7 +65,7 @@ { "id":"We can take a look at the underlying data array of an outsourced tensor through the unsafe API.", - "result":"PASS", + "result":"IGNORED", "duration":"0", "iterations":{ "tags":{},"see":[],"extraInfo":[] @@ -86,8 +86,8 @@ { "id":"Ad hoc compilation produces executable kernel.", - "result":"PASS", - "duration":"0.040 seconds", + "result":"IGNORED", + "duration":"0", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -111,8 +111,8 @@ { "id":"Ad hoc compilation works for WIP general purpose matrix multiplication.", - "result":"PASS", - "duration":"0.298 seconds", + "result":"IGNORED", + "duration":"0", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -138,8 +138,8 @@ { "id":"Ad hoc matrix multiplication works for multiple of 16 matrices.", - "result":"PASS", - "duration":"0.188 seconds", + "result":"IGNORED", + "duration":"0", "iterations":{ "tags":{},"see":[],"extraInfo":[] 
}, @@ -165,8 +165,8 @@ { "id":"Ad hoc compilation works for custom simple row major based matrix multiplication.", - "result":"PASS", - "duration":"0.328 seconds", + "result":"IGNORED", + "duration":"0", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -192,8 +192,8 @@ { "id":"Ad hoc compilation works for custom column major based tiled matrix multiplication.", - "result":"PASS", - "duration":"1.334 seconds", + "result":"IGNORED", + "duration":"0", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, diff --git a/docs/spock/reports/ut.device.OpenCL_Spec.json b/docs/spock/reports/ut.device.OpenCL_Spec.json index 5d6428738..ed8302fbd 100644 --- a/docs/spock/reports/ut.device.OpenCL_Spec.json +++ b/docs/spock/reports/ut.device.OpenCL_Spec.json @@ -4,19 +4,19 @@ "narrative":"Neureka models the OpenCL API through various types of classes.\n The most fundamental of these is the `OpenCLDevice` class which\n represents a single device with OpenCL support.\n Besides that, there is also the `OpenCLContext` class which\n represents a OpenCL contexts, platforms and multiple devices on said platforms...", "subjects":["neureka.backend.ocl.CLBackend","neureka.devices.opencl.OpenCLDevice","neureka.devices.opencl.utility.DeviceQuery","neureka.devices.Device"], "statistics":{ - "runs":"5", + "runs":"0", "successRate":"100.0%", "failures":"0", "errors":"0", - "skipped":"0", - "duration":"3.009 seconds" + "skipped":"5", + "duration":"0.001 seconds" }, "headers":[],"tags":{},"see":[], "features":[ { "id":"First found OpenCLDevice will have realistic properties inside summary query.", - "result":"PASS", - "duration":"0.001 seconds", + "result":"IGNORED", + "duration":"0", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -30,7 +30,7 @@ { "id":"First found OpenCLDevice will have realistic numeric properties.", - "result":"PASS", + "result":"IGNORED", "duration":"0", "iterations":{ "tags":{},"see":[],"extraInfo":[] @@ -45,7 +45,7 @@ { "id":"First found OpenCLDevice will have realistic text properties.", - "result":"PASS", + "result":"IGNORED", "duration":"0", "iterations":{ "tags":{},"see":[],"extraInfo":[] @@ -60,8 +60,8 @@ { "id":"An OpenCLDevice will throw an exception when trying to add a tensor whose \"data parent\" is not outsourced.", - "result":"PASS", - "duration":"0.020 seconds", + "result":"IGNORED", + "duration":"0", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -89,8 +89,8 @@ { "id":"A given OpenCL context can be disposed!", - "result":"PASS", - "duration":"2.984 seconds", + "result":"IGNORED", + "duration":"0", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, diff --git a/docs/spock/reports/ut.device.internal.CLFunctionCompiler_Spec.json b/docs/spock/reports/ut.device.internal.CLFunctionCompiler_Spec.json index acb9de068..bd332708a 100644 --- a/docs/spock/reports/ut.device.internal.CLFunctionCompiler_Spec.json +++ b/docs/spock/reports/ut.device.internal.CLFunctionCompiler_Spec.json @@ -4,19 +4,19 @@ "narrative":"Neureka parses mathematical expressions into an AST representation\n hidden behind the Function interface...\n This feature does not exist without reason, we can use\n this abstract syntax tree to compile to OpenCL kernels\n for optimal execution speed!", "subjects":[], "statistics":{ - "runs":"4", + "runs":"2", "successRate":"100.0%", "failures":"0", "errors":"0", - "skipped":"0", - "duration":"0.170 seconds" + "skipped":"2", + "duration":"0.206 seconds" }, "headers":["\n Specified below are strict tests for covering the ability of \n OpenCL devices to be able produce 
optimized functions given\n a normal function instance created from a String...\n "],"tags":{},"see":[], "features":[ { "id":"The OpenCLDevice produces a working optimized Function for doubles.", - "result":"PASS", - "duration":"0.046 seconds", + "result":"IGNORED", + "duration":"0", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -42,8 +42,8 @@ { "id":"The OpenCLDevice produces a working optimized Function for floats.", - "result":"PASS", - "duration":"0.044 seconds", + "result":"IGNORED", + "duration":"0", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -70,7 +70,7 @@ { "id":"The CLFunctionCompiler produces an operation which properly integrates to the backend.", "result":"PASS", - "duration":"0.056 seconds", + "duration":"0.151 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -129,7 +129,7 @@ { "id":"The CLFunctionCompiler produces the expected \"ad hoc\" kernel.", "result":"PASS", - "duration":"0.021 seconds", + "duration":"0.051 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, diff --git a/docs/spock/reports/ut.device.internal.CPU_Kernel_Spec.json b/docs/spock/reports/ut.device.internal.CPU_Kernel_Spec.json index 3ab554c47..dfdc02d73 100644 --- a/docs/spock/reports/ut.device.internal.CPU_Kernel_Spec.json +++ b/docs/spock/reports/ut.device.internal.CPU_Kernel_Spec.json @@ -4,19 +4,19 @@ "narrative":"", "subjects":["neureka.backend.main.operations.other.internal.CPUReduce"], "statistics":{ - "runs":"2", + "runs":"20", "successRate":"100.0%", "failures":"0", "errors":"0", "skipped":"0", - "duration":"0.027 seconds" + "duration":"0.083 seconds" }, "headers":[],"tags":{},"see":[], "features":[ { - "id":"The Reduce implementation for the CPU has realistic behaviour", + "id":"The Reduce implementation for the CPU has realistic behaviour [0]", "result":"PASS", - "duration":"0.008 seconds", + "duration":"0.014 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -33,7 +33,349 @@ }, { - "id":"The Sum implementation for the CPU has realistic behaviour", + "id":"The Reduce implementation for the CPU has realistic behaviour [1]", + "result":"PASS", + "duration":"0.002 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["var seed = dataType.getSimpleName().hashCode() + reduceType.name().hashCode()","var a = Tensor.of(dataType)"," .withShape(19, 7)"," .andWhere({ i, _ -> ((seed+31**(i+13))%301)-151})","var call = ExecutionCall.of(a).running(Mock(Operation)).on(CPU.get())"]}, + + {"kind":"when","text":"","code":["var index = new CPUReduce(reduceType).run( call )","var result = a.item(index.item() as int)"]}, + + {"kind":"then","text":"","code":["result == expected"]}, + + {"kind":"where","text":"","code":{"reduceType":["CPUReduce.Type.MIN","CPUReduce.Type.MAX","CPUReduce.Type.MIN","CPUReduce.Type.MAX","CPUReduce.Type.MIN","CPUReduce.Type.MAX","CPUReduce.Type.MIN","CPUReduce.Type.MAX","CPUReduce.Type.MIN","CPUReduce.Type.MAX","CPUReduce.Type.MIN","CPUReduce.Type.MAX"],"dataType":["Float","Float","Double","Double","Integer","Integer","Long","Long","Short","Short","Byte","Byte"],"expected":["-148.0","141.0","-143.0","149.0","-121","148","-146","147","-148","146","-127","124"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The Reduce implementation for the CPU has realistic behaviour [2]", + "result":"PASS", + "duration":"0.003 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["var seed = 
dataType.getSimpleName().hashCode() + reduceType.name().hashCode()","var a = Tensor.of(dataType)"," .withShape(19, 7)"," .andWhere({ i, _ -> ((seed+31**(i+13))%301)-151})","var call = ExecutionCall.of(a).running(Mock(Operation)).on(CPU.get())"]}, + + {"kind":"when","text":"","code":["var index = new CPUReduce(reduceType).run( call )","var result = a.item(index.item() as int)"]}, + + {"kind":"then","text":"","code":["result == expected"]}, + + {"kind":"where","text":"","code":{"reduceType":["CPUReduce.Type.MIN","CPUReduce.Type.MAX","CPUReduce.Type.MIN","CPUReduce.Type.MAX","CPUReduce.Type.MIN","CPUReduce.Type.MAX","CPUReduce.Type.MIN","CPUReduce.Type.MAX","CPUReduce.Type.MIN","CPUReduce.Type.MAX","CPUReduce.Type.MIN","CPUReduce.Type.MAX"],"dataType":["Float","Float","Double","Double","Integer","Integer","Long","Long","Short","Short","Byte","Byte"],"expected":["-148.0","141.0","-143.0","149.0","-121","148","-146","147","-148","146","-127","124"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The Reduce implementation for the CPU has realistic behaviour [3]", + "result":"PASS", + "duration":"0.002 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["var seed = dataType.getSimpleName().hashCode() + reduceType.name().hashCode()","var a = Tensor.of(dataType)"," .withShape(19, 7)"," .andWhere({ i, _ -> ((seed+31**(i+13))%301)-151})","var call = ExecutionCall.of(a).running(Mock(Operation)).on(CPU.get())"]}, + + {"kind":"when","text":"","code":["var index = new CPUReduce(reduceType).run( call )","var result = a.item(index.item() as int)"]}, + + {"kind":"then","text":"","code":["result == expected"]}, + + {"kind":"where","text":"","code":{"reduceType":["CPUReduce.Type.MIN","CPUReduce.Type.MAX","CPUReduce.Type.MIN","CPUReduce.Type.MAX","CPUReduce.Type.MIN","CPUReduce.Type.MAX","CPUReduce.Type.MIN","CPUReduce.Type.MAX","CPUReduce.Type.MIN","CPUReduce.Type.MAX","CPUReduce.Type.MIN","CPUReduce.Type.MAX"],"dataType":["Float","Float","Double","Double","Integer","Integer","Long","Long","Short","Short","Byte","Byte"],"expected":["-148.0","141.0","-143.0","149.0","-121","148","-146","147","-148","146","-127","124"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The Reduce implementation for the CPU has realistic behaviour [4]", + "result":"PASS", + "duration":"0.004 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["var seed = dataType.getSimpleName().hashCode() + reduceType.name().hashCode()","var a = Tensor.of(dataType)"," .withShape(19, 7)"," .andWhere({ i, _ -> ((seed+31**(i+13))%301)-151})","var call = ExecutionCall.of(a).running(Mock(Operation)).on(CPU.get())"]}, + + {"kind":"when","text":"","code":["var index = new CPUReduce(reduceType).run( call )","var result = a.item(index.item() as int)"]}, + + {"kind":"then","text":"","code":["result == expected"]}, + + {"kind":"where","text":"","code":{"reduceType":["CPUReduce.Type.MIN","CPUReduce.Type.MAX","CPUReduce.Type.MIN","CPUReduce.Type.MAX","CPUReduce.Type.MIN","CPUReduce.Type.MAX","CPUReduce.Type.MIN","CPUReduce.Type.MAX","CPUReduce.Type.MIN","CPUReduce.Type.MAX","CPUReduce.Type.MIN","CPUReduce.Type.MAX"],"dataType":["Float","Float","Double","Double","Integer","Integer","Long","Long","Short","Short","Byte","Byte"],"expected":["-148.0","141.0","-143.0","149.0","-121","148","-146","147","-148","146","-127","124"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + 
"id":"The Reduce implementation for the CPU has realistic behaviour [5]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["var seed = dataType.getSimpleName().hashCode() + reduceType.name().hashCode()","var a = Tensor.of(dataType)"," .withShape(19, 7)"," .andWhere({ i, _ -> ((seed+31**(i+13))%301)-151})","var call = ExecutionCall.of(a).running(Mock(Operation)).on(CPU.get())"]}, + + {"kind":"when","text":"","code":["var index = new CPUReduce(reduceType).run( call )","var result = a.item(index.item() as int)"]}, + + {"kind":"then","text":"","code":["result == expected"]}, + + {"kind":"where","text":"","code":{"reduceType":["CPUReduce.Type.MIN","CPUReduce.Type.MAX","CPUReduce.Type.MIN","CPUReduce.Type.MAX","CPUReduce.Type.MIN","CPUReduce.Type.MAX","CPUReduce.Type.MIN","CPUReduce.Type.MAX","CPUReduce.Type.MIN","CPUReduce.Type.MAX","CPUReduce.Type.MIN","CPUReduce.Type.MAX"],"dataType":["Float","Float","Double","Double","Integer","Integer","Long","Long","Short","Short","Byte","Byte"],"expected":["-148.0","141.0","-143.0","149.0","-121","148","-146","147","-148","146","-127","124"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The Reduce implementation for the CPU has realistic behaviour [6]", + "result":"PASS", + "duration":"0.002 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["var seed = dataType.getSimpleName().hashCode() + reduceType.name().hashCode()","var a = Tensor.of(dataType)"," .withShape(19, 7)"," .andWhere({ i, _ -> ((seed+31**(i+13))%301)-151})","var call = ExecutionCall.of(a).running(Mock(Operation)).on(CPU.get())"]}, + + {"kind":"when","text":"","code":["var index = new CPUReduce(reduceType).run( call )","var result = a.item(index.item() as int)"]}, + + {"kind":"then","text":"","code":["result == expected"]}, + + {"kind":"where","text":"","code":{"reduceType":["CPUReduce.Type.MIN","CPUReduce.Type.MAX","CPUReduce.Type.MIN","CPUReduce.Type.MAX","CPUReduce.Type.MIN","CPUReduce.Type.MAX","CPUReduce.Type.MIN","CPUReduce.Type.MAX","CPUReduce.Type.MIN","CPUReduce.Type.MAX","CPUReduce.Type.MIN","CPUReduce.Type.MAX"],"dataType":["Float","Float","Double","Double","Integer","Integer","Long","Long","Short","Short","Byte","Byte"],"expected":["-148.0","141.0","-143.0","149.0","-121","148","-146","147","-148","146","-127","124"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The Reduce implementation for the CPU has realistic behaviour [7]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["var seed = dataType.getSimpleName().hashCode() + reduceType.name().hashCode()","var a = Tensor.of(dataType)"," .withShape(19, 7)"," .andWhere({ i, _ -> ((seed+31**(i+13))%301)-151})","var call = ExecutionCall.of(a).running(Mock(Operation)).on(CPU.get())"]}, + + {"kind":"when","text":"","code":["var index = new CPUReduce(reduceType).run( call )","var result = a.item(index.item() as int)"]}, + + {"kind":"then","text":"","code":["result == expected"]}, + + 
{"kind":"where","text":"","code":{"reduceType":["CPUReduce.Type.MIN","CPUReduce.Type.MAX","CPUReduce.Type.MIN","CPUReduce.Type.MAX","CPUReduce.Type.MIN","CPUReduce.Type.MAX","CPUReduce.Type.MIN","CPUReduce.Type.MAX","CPUReduce.Type.MIN","CPUReduce.Type.MAX","CPUReduce.Type.MIN","CPUReduce.Type.MAX"],"dataType":["Float","Float","Double","Double","Integer","Integer","Long","Long","Short","Short","Byte","Byte"],"expected":["-148.0","141.0","-143.0","149.0","-121","148","-146","147","-148","146","-127","124"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The Reduce implementation for the CPU has realistic behaviour [8]", + "result":"PASS", + "duration":"0.002 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["var seed = dataType.getSimpleName().hashCode() + reduceType.name().hashCode()","var a = Tensor.of(dataType)"," .withShape(19, 7)"," .andWhere({ i, _ -> ((seed+31**(i+13))%301)-151})","var call = ExecutionCall.of(a).running(Mock(Operation)).on(CPU.get())"]}, + + {"kind":"when","text":"","code":["var index = new CPUReduce(reduceType).run( call )","var result = a.item(index.item() as int)"]}, + + {"kind":"then","text":"","code":["result == expected"]}, + + {"kind":"where","text":"","code":{"reduceType":["CPUReduce.Type.MIN","CPUReduce.Type.MAX","CPUReduce.Type.MIN","CPUReduce.Type.MAX","CPUReduce.Type.MIN","CPUReduce.Type.MAX","CPUReduce.Type.MIN","CPUReduce.Type.MAX","CPUReduce.Type.MIN","CPUReduce.Type.MAX","CPUReduce.Type.MIN","CPUReduce.Type.MAX"],"dataType":["Float","Float","Double","Double","Integer","Integer","Long","Long","Short","Short","Byte","Byte"],"expected":["-148.0","141.0","-143.0","149.0","-121","148","-146","147","-148","146","-127","124"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The Reduce implementation for the CPU has realistic behaviour [9]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["var seed = dataType.getSimpleName().hashCode() + reduceType.name().hashCode()","var a = Tensor.of(dataType)"," .withShape(19, 7)"," .andWhere({ i, _ -> ((seed+31**(i+13))%301)-151})","var call = ExecutionCall.of(a).running(Mock(Operation)).on(CPU.get())"]}, + + {"kind":"when","text":"","code":["var index = new CPUReduce(reduceType).run( call )","var result = a.item(index.item() as int)"]}, + + {"kind":"then","text":"","code":["result == expected"]}, + + {"kind":"where","text":"","code":{"reduceType":["CPUReduce.Type.MIN","CPUReduce.Type.MAX","CPUReduce.Type.MIN","CPUReduce.Type.MAX","CPUReduce.Type.MIN","CPUReduce.Type.MAX","CPUReduce.Type.MIN","CPUReduce.Type.MAX","CPUReduce.Type.MIN","CPUReduce.Type.MAX","CPUReduce.Type.MIN","CPUReduce.Type.MAX"],"dataType":["Float","Float","Double","Double","Integer","Integer","Long","Long","Short","Short","Byte","Byte"],"expected":["-148.0","141.0","-143.0","149.0","-121","148","-146","147","-148","146","-127","124"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The Reduce implementation for the CPU has realistic behaviour [10]", + "result":"PASS", + "duration":"0.002 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["var seed = dataType.getSimpleName().hashCode() + reduceType.name().hashCode()","var a = Tensor.of(dataType)"," .withShape(19, 7)"," .andWhere({ i, _ -> ((seed+31**(i+13))%301)-151})","var call = 
ExecutionCall.of(a).running(Mock(Operation)).on(CPU.get())"]}, + + {"kind":"when","text":"","code":["var index = new CPUReduce(reduceType).run( call )","var result = a.item(index.item() as int)"]}, + + {"kind":"then","text":"","code":["result == expected"]}, + + {"kind":"where","text":"","code":{"reduceType":["CPUReduce.Type.MIN","CPUReduce.Type.MAX","CPUReduce.Type.MIN","CPUReduce.Type.MAX","CPUReduce.Type.MIN","CPUReduce.Type.MAX","CPUReduce.Type.MIN","CPUReduce.Type.MAX","CPUReduce.Type.MIN","CPUReduce.Type.MAX","CPUReduce.Type.MIN","CPUReduce.Type.MAX"],"dataType":["Float","Float","Double","Double","Integer","Integer","Long","Long","Short","Short","Byte","Byte"],"expected":["-148.0","141.0","-143.0","149.0","-121","148","-146","147","-148","146","-127","124"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The Reduce implementation for the CPU has realistic behaviour [11]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["var seed = dataType.getSimpleName().hashCode() + reduceType.name().hashCode()","var a = Tensor.of(dataType)"," .withShape(19, 7)"," .andWhere({ i, _ -> ((seed+31**(i+13))%301)-151})","var call = ExecutionCall.of(a).running(Mock(Operation)).on(CPU.get())"]}, + + {"kind":"when","text":"","code":["var index = new CPUReduce(reduceType).run( call )","var result = a.item(index.item() as int)"]}, + + {"kind":"then","text":"","code":["result == expected"]}, + + {"kind":"where","text":"","code":{"reduceType":["CPUReduce.Type.MIN","CPUReduce.Type.MAX","CPUReduce.Type.MIN","CPUReduce.Type.MAX","CPUReduce.Type.MIN","CPUReduce.Type.MAX","CPUReduce.Type.MIN","CPUReduce.Type.MAX","CPUReduce.Type.MIN","CPUReduce.Type.MAX","CPUReduce.Type.MIN","CPUReduce.Type.MAX"],"dataType":["Float","Float","Double","Double","Integer","Integer","Long","Long","Short","Short","Byte","Byte"],"expected":["-148.0","141.0","-143.0","149.0","-121","148","-146","147","-148","146","-127","124"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The Sum implementation for the CPU has realistic behaviour [0]", + "result":"PASS", + "duration":"0.009 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["var seed = dataType.getSimpleName().hashCode()","var a = Tensor.of(dataType)"," .withShape(19, 7)"," .andWhere({ i, _ -> ((seed+31**(i+13))%301)-151})","var call = ExecutionCall.of(a).running(Mock(Operation)).on(CPU.get())"]}, + + {"kind":"when","text":"","code":["var result = new CPUSum().run( call )"]}, + + {"kind":"then","text":"","code":["result.item() == expected","result.item() == result.items.stream().reduce(0, (x, y) -> x + y)"]}, + + {"kind":"where","text":"","code":{"dataType":["Float","Double","Integer","Long","Short","Byte","BigInteger","BigDecimal"],"expected":["-1222.0","-1026.0","-2251","-2083","1018","34","3930","-1502.0"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The Sum implementation for the CPU has realistic behaviour [1]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["var seed = dataType.getSimpleName().hashCode()","var a = Tensor.of(dataType)"," .withShape(19, 7)"," .andWhere({ i, _ -> ((seed+31**(i+13))%301)-151})","var call = ExecutionCall.of(a).running(Mock(Operation)).on(CPU.get())"]}, + + {"kind":"when","text":"","code":["var result = new 
CPUSum().run( call )"]}, + + {"kind":"then","text":"","code":["result.item() == expected","result.item() == result.items.stream().reduce(0, (x, y) -> x + y)"]}, + + {"kind":"where","text":"","code":{"dataType":["Float","Double","Integer","Long","Short","Byte","BigInteger","BigDecimal"],"expected":["-1222.0","-1026.0","-2251","-2083","1018","34","3930","-1502.0"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The Sum implementation for the CPU has realistic behaviour [2]", + "result":"PASS", + "duration":"0.002 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["var seed = dataType.getSimpleName().hashCode()","var a = Tensor.of(dataType)"," .withShape(19, 7)"," .andWhere({ i, _ -> ((seed+31**(i+13))%301)-151})","var call = ExecutionCall.of(a).running(Mock(Operation)).on(CPU.get())"]}, + + {"kind":"when","text":"","code":["var result = new CPUSum().run( call )"]}, + + {"kind":"then","text":"","code":["result.item() == expected","result.item() == result.items.stream().reduce(0, (x, y) -> x + y)"]}, + + {"kind":"where","text":"","code":{"dataType":["Float","Double","Integer","Long","Short","Byte","BigInteger","BigDecimal"],"expected":["-1222.0","-1026.0","-2251","-2083","1018","34","3930","-1502.0"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The Sum implementation for the CPU has realistic behaviour [3]", + "result":"PASS", + "duration":"0.002 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["var seed = dataType.getSimpleName().hashCode()","var a = Tensor.of(dataType)"," .withShape(19, 7)"," .andWhere({ i, _ -> ((seed+31**(i+13))%301)-151})","var call = ExecutionCall.of(a).running(Mock(Operation)).on(CPU.get())"]}, + + {"kind":"when","text":"","code":["var result = new CPUSum().run( call )"]}, + + {"kind":"then","text":"","code":["result.item() == expected","result.item() == result.items.stream().reduce(0, (x, y) -> x + y)"]}, + + {"kind":"where","text":"","code":{"dataType":["Float","Double","Integer","Long","Short","Byte","BigInteger","BigDecimal"],"expected":["-1222.0","-1026.0","-2251","-2083","1018","34","3930","-1502.0"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The Sum implementation for the CPU has realistic behaviour [4]", + "result":"PASS", + "duration":"0.002 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["var seed = dataType.getSimpleName().hashCode()","var a = Tensor.of(dataType)"," .withShape(19, 7)"," .andWhere({ i, _ -> ((seed+31**(i+13))%301)-151})","var call = ExecutionCall.of(a).running(Mock(Operation)).on(CPU.get())"]}, + + {"kind":"when","text":"","code":["var result = new CPUSum().run( call )"]}, + + {"kind":"then","text":"","code":["result.item() == expected","result.item() == result.items.stream().reduce(0, (x, y) -> x + y)"]}, + + {"kind":"where","text":"","code":{"dataType":["Float","Double","Integer","Long","Short","Byte","BigInteger","BigDecimal"],"expected":["-1222.0","-1026.0","-2251","-2083","1018","34","3930","-1502.0"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The Sum implementation for the CPU has realistic behaviour [5]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["var seed = dataType.getSimpleName().hashCode()","var a = Tensor.of(dataType)"," 
.withShape(19, 7)"," .andWhere({ i, _ -> ((seed+31**(i+13))%301)-151})","var call = ExecutionCall.of(a).running(Mock(Operation)).on(CPU.get())"]}, + + {"kind":"when","text":"","code":["var result = new CPUSum().run( call )"]}, + + {"kind":"then","text":"","code":["result.item() == expected","result.item() == result.items.stream().reduce(0, (x, y) -> x + y)"]}, + + {"kind":"where","text":"","code":{"dataType":["Float","Double","Integer","Long","Short","Byte","BigInteger","BigDecimal"],"expected":["-1222.0","-1026.0","-2251","-2083","1018","34","3930","-1502.0"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The Sum implementation for the CPU has realistic behaviour [6]", + "result":"PASS", + "duration":"0.002 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["var seed = dataType.getSimpleName().hashCode()","var a = Tensor.of(dataType)"," .withShape(19, 7)"," .andWhere({ i, _ -> ((seed+31**(i+13))%301)-151})","var call = ExecutionCall.of(a).running(Mock(Operation)).on(CPU.get())"]}, + + {"kind":"when","text":"","code":["var result = new CPUSum().run( call )"]}, + + {"kind":"then","text":"","code":["result.item() == expected","result.item() == result.items.stream().reduce(0, (x, y) -> x + y)"]}, + + {"kind":"where","text":"","code":{"dataType":["Float","Double","Integer","Long","Short","Byte","BigInteger","BigDecimal"],"expected":["-1222.0","-1026.0","-2251","-2083","1018","34","3930","-1502.0"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The Sum implementation for the CPU has realistic behaviour [7]", "result":"PASS", "duration":"0.002 seconds", "iterations":{ diff --git a/docs/spock/reports/ut.device.internal.OpenCL_Data_Spec.json b/docs/spock/reports/ut.device.internal.OpenCL_Data_Spec.json index 46c70e76a..10bcbec32 100644 --- a/docs/spock/reports/ut.device.internal.OpenCL_Data_Spec.json +++ b/docs/spock/reports/ut.device.internal.OpenCL_Data_Spec.json @@ -4,17 +4,278 @@ "narrative":"", "subjects":[], "statistics":{ - "runs":"2", + "runs":"39", "successRate":"100.0%", "failures":"0", "errors":"0", "skipped":"0", - "duration":"0.019 seconds" + "duration":"0.089 seconds" }, "headers":[],"tags":{},"see":[], "features":[ { - "id":"The OpenCLDevice specific Data class represents JVM data for OpenCL.", + "id":"The OpenCLDevice specific Data class represents JVM data for OpenCL. 
[0]", + "result":"PASS", + "duration":"0.013 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We make sure that any data will not automatically be converted to floats!","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = false }"]}, + + {"kind":"and","text":"We create 2 different data objects, a full and a partial/sliced array.","code":["var full = JVMData.of(data)","var slice = JVMData.of(data, size, start)"]}, + + {"kind":"and","text":"An expected array based on the previous slice indices!","code":["var expected2 = expected[start..(start+size-1)]"]}, + + {"kind":"expect","text":"Both data objects report the expected array types!","code":["full.array.class == expectedType","slice.array.class == expectedType"]}, + + {"kind":"and","text":"Also they report the expected data array size.","code":["full.length == expected.size()","slice.length == expected2.size()"]}, + + {"kind":"and","text":"","code":["full.array !== slice.array","full.array == expected","(data instanceof Number && slice.array == [data] ) || slice.array == expected2"]}, + + {"kind":"and","text":"They produce OpenCL specific pointer objects.","code":["full.pointer != null","slice.pointer != null"]}, + + {"kind":"cleanup","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = true }"]}, + + {"kind":"where","text":"","code":{"data":["[2, 3, 6] as float[]","[8,-2,5,-1] as float[]","4 as Float","[2, 3, 6] as double[]","[8,-2,5,-1] as double[]","4 as Double","[2, 3, 6] as int[]","[8,-2,5,-1] as int[]","4 as Integer","[2, 3, 6] as short[]","[8,-2,5,-1] as short[]","4 as Short","[2, 3, 6] as long[]","[8,-2,5,-1] as long[]","4 as Long","[2, 3, 6] as byte[]","[8,-2,5,-1] as byte[]","4 as Byte"],"start":["1","1","0","1","1","0","1","1","0","1","1","0","1","1","0","1","1","0"],"size":["2","3","1","2","3","1","2","3","1","2","3","1","2","3","1","2","3","1"],"expectedType":["float[]","float[]","float[]","double[]","double[]","double[]","int[]","int[]","int[]","short[]","short[]","short[]","long[]","long[]","long[]","byte[]","byte[]","byte[]"],"expected":["[2, 3, 6]","[8,-2,5,-1]","[4]","[2, 3, 6]","[8,-2,5,-1]","[4]","[2, 3, 6]","[8,-2,5,-1]","[4]","[2, 3, 6]","[8,-2,5,-1]","[4]","[2, 3, 6]","[8,-2,5,-1]","[4]","[2, 3, 6]","[8,-2,5,-1]","[4]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The OpenCLDevice specific Data class represents JVM data for OpenCL. 
[1]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We make sure that any data will not automatically be converted to floats!","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = false }"]}, + + {"kind":"and","text":"We create 2 different data objects, a full and a partial/sliced array.","code":["var full = JVMData.of(data)","var slice = JVMData.of(data, size, start)"]}, + + {"kind":"and","text":"An expected array based on the previous slice indices!","code":["var expected2 = expected[start..(start+size-1)]"]}, + + {"kind":"expect","text":"Both data objects report the expected array types!","code":["full.array.class == expectedType","slice.array.class == expectedType"]}, + + {"kind":"and","text":"Also they report the expected data array size.","code":["full.length == expected.size()","slice.length == expected2.size()"]}, + + {"kind":"and","text":"","code":["full.array !== slice.array","full.array == expected","(data instanceof Number && slice.array == [data] ) || slice.array == expected2"]}, + + {"kind":"and","text":"They produce OpenCL specific pointer objects.","code":["full.pointer != null","slice.pointer != null"]}, + + {"kind":"cleanup","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = true }"]}, + + {"kind":"where","text":"","code":{"data":["[2, 3, 6] as float[]","[8,-2,5,-1] as float[]","4 as Float","[2, 3, 6] as double[]","[8,-2,5,-1] as double[]","4 as Double","[2, 3, 6] as int[]","[8,-2,5,-1] as int[]","4 as Integer","[2, 3, 6] as short[]","[8,-2,5,-1] as short[]","4 as Short","[2, 3, 6] as long[]","[8,-2,5,-1] as long[]","4 as Long","[2, 3, 6] as byte[]","[8,-2,5,-1] as byte[]","4 as Byte"],"start":["1","1","0","1","1","0","1","1","0","1","1","0","1","1","0","1","1","0"],"size":["2","3","1","2","3","1","2","3","1","2","3","1","2","3","1","2","3","1"],"expectedType":["float[]","float[]","float[]","double[]","double[]","double[]","int[]","int[]","int[]","short[]","short[]","short[]","long[]","long[]","long[]","byte[]","byte[]","byte[]"],"expected":["[2, 3, 6]","[8,-2,5,-1]","[4]","[2, 3, 6]","[8,-2,5,-1]","[4]","[2, 3, 6]","[8,-2,5,-1]","[4]","[2, 3, 6]","[8,-2,5,-1]","[4]","[2, 3, 6]","[8,-2,5,-1]","[4]","[2, 3, 6]","[8,-2,5,-1]","[4]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The OpenCLDevice specific Data class represents JVM data for OpenCL. 
[2]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We make sure that any data will not automatically be converted to floats!","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = false }"]}, + + {"kind":"and","text":"We create 2 different data objects, a full and a partial/sliced array.","code":["var full = JVMData.of(data)","var slice = JVMData.of(data, size, start)"]}, + + {"kind":"and","text":"An expected array based on the previous slice indices!","code":["var expected2 = expected[start..(start+size-1)]"]}, + + {"kind":"expect","text":"Both data objects report the expected array types!","code":["full.array.class == expectedType","slice.array.class == expectedType"]}, + + {"kind":"and","text":"Also they report the expected data array size.","code":["full.length == expected.size()","slice.length == expected2.size()"]}, + + {"kind":"and","text":"","code":["full.array !== slice.array","full.array == expected","(data instanceof Number && slice.array == [data] ) || slice.array == expected2"]}, + + {"kind":"and","text":"They produce OpenCL specific pointer objects.","code":["full.pointer != null","slice.pointer != null"]}, + + {"kind":"cleanup","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = true }"]}, + + {"kind":"where","text":"","code":{"data":["[2, 3, 6] as float[]","[8,-2,5,-1] as float[]","4 as Float","[2, 3, 6] as double[]","[8,-2,5,-1] as double[]","4 as Double","[2, 3, 6] as int[]","[8,-2,5,-1] as int[]","4 as Integer","[2, 3, 6] as short[]","[8,-2,5,-1] as short[]","4 as Short","[2, 3, 6] as long[]","[8,-2,5,-1] as long[]","4 as Long","[2, 3, 6] as byte[]","[8,-2,5,-1] as byte[]","4 as Byte"],"start":["1","1","0","1","1","0","1","1","0","1","1","0","1","1","0","1","1","0"],"size":["2","3","1","2","3","1","2","3","1","2","3","1","2","3","1","2","3","1"],"expectedType":["float[]","float[]","float[]","double[]","double[]","double[]","int[]","int[]","int[]","short[]","short[]","short[]","long[]","long[]","long[]","byte[]","byte[]","byte[]"],"expected":["[2, 3, 6]","[8,-2,5,-1]","[4]","[2, 3, 6]","[8,-2,5,-1]","[4]","[2, 3, 6]","[8,-2,5,-1]","[4]","[2, 3, 6]","[8,-2,5,-1]","[4]","[2, 3, 6]","[8,-2,5,-1]","[4]","[2, 3, 6]","[8,-2,5,-1]","[4]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The OpenCLDevice specific Data class represents JVM data for OpenCL. 
[3]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We make sure that any data will not automatically be converted to floats!","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = false }"]}, + + {"kind":"and","text":"We create 2 different data objects, a full and a partial/sliced array.","code":["var full = JVMData.of(data)","var slice = JVMData.of(data, size, start)"]}, + + {"kind":"and","text":"An expected array based on the previous slice indices!","code":["var expected2 = expected[start..(start+size-1)]"]}, + + {"kind":"expect","text":"Both data objects report the expected array types!","code":["full.array.class == expectedType","slice.array.class == expectedType"]}, + + {"kind":"and","text":"Also they report the expected data array size.","code":["full.length == expected.size()","slice.length == expected2.size()"]}, + + {"kind":"and","text":"","code":["full.array !== slice.array","full.array == expected","(data instanceof Number && slice.array == [data] ) || slice.array == expected2"]}, + + {"kind":"and","text":"They produce OpenCL specific pointer objects.","code":["full.pointer != null","slice.pointer != null"]}, + + {"kind":"cleanup","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = true }"]}, + + {"kind":"where","text":"","code":{"data":["[2, 3, 6] as float[]","[8,-2,5,-1] as float[]","4 as Float","[2, 3, 6] as double[]","[8,-2,5,-1] as double[]","4 as Double","[2, 3, 6] as int[]","[8,-2,5,-1] as int[]","4 as Integer","[2, 3, 6] as short[]","[8,-2,5,-1] as short[]","4 as Short","[2, 3, 6] as long[]","[8,-2,5,-1] as long[]","4 as Long","[2, 3, 6] as byte[]","[8,-2,5,-1] as byte[]","4 as Byte"],"start":["1","1","0","1","1","0","1","1","0","1","1","0","1","1","0","1","1","0"],"size":["2","3","1","2","3","1","2","3","1","2","3","1","2","3","1","2","3","1"],"expectedType":["float[]","float[]","float[]","double[]","double[]","double[]","int[]","int[]","int[]","short[]","short[]","short[]","long[]","long[]","long[]","byte[]","byte[]","byte[]"],"expected":["[2, 3, 6]","[8,-2,5,-1]","[4]","[2, 3, 6]","[8,-2,5,-1]","[4]","[2, 3, 6]","[8,-2,5,-1]","[4]","[2, 3, 6]","[8,-2,5,-1]","[4]","[2, 3, 6]","[8,-2,5,-1]","[4]","[2, 3, 6]","[8,-2,5,-1]","[4]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The OpenCLDevice specific Data class represents JVM data for OpenCL. 
[4]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We make sure that any data will not automatically be converted to floats!","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = false }"]}, + + {"kind":"and","text":"We create 2 different data objects, a full and a partial/sliced array.","code":["var full = JVMData.of(data)","var slice = JVMData.of(data, size, start)"]}, + + {"kind":"and","text":"An expected array based on the previous slice indices!","code":["var expected2 = expected[start..(start+size-1)]"]}, + + {"kind":"expect","text":"Both data objects report the expected array types!","code":["full.array.class == expectedType","slice.array.class == expectedType"]}, + + {"kind":"and","text":"Also they report the expected data array size.","code":["full.length == expected.size()","slice.length == expected2.size()"]}, + + {"kind":"and","text":"","code":["full.array !== slice.array","full.array == expected","(data instanceof Number && slice.array == [data] ) || slice.array == expected2"]}, + + {"kind":"and","text":"They produce OpenCL specific pointer objects.","code":["full.pointer != null","slice.pointer != null"]}, + + {"kind":"cleanup","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = true }"]}, + + {"kind":"where","text":"","code":{"data":["[2, 3, 6] as float[]","[8,-2,5,-1] as float[]","4 as Float","[2, 3, 6] as double[]","[8,-2,5,-1] as double[]","4 as Double","[2, 3, 6] as int[]","[8,-2,5,-1] as int[]","4 as Integer","[2, 3, 6] as short[]","[8,-2,5,-1] as short[]","4 as Short","[2, 3, 6] as long[]","[8,-2,5,-1] as long[]","4 as Long","[2, 3, 6] as byte[]","[8,-2,5,-1] as byte[]","4 as Byte"],"start":["1","1","0","1","1","0","1","1","0","1","1","0","1","1","0","1","1","0"],"size":["2","3","1","2","3","1","2","3","1","2","3","1","2","3","1","2","3","1"],"expectedType":["float[]","float[]","float[]","double[]","double[]","double[]","int[]","int[]","int[]","short[]","short[]","short[]","long[]","long[]","long[]","byte[]","byte[]","byte[]"],"expected":["[2, 3, 6]","[8,-2,5,-1]","[4]","[2, 3, 6]","[8,-2,5,-1]","[4]","[2, 3, 6]","[8,-2,5,-1]","[4]","[2, 3, 6]","[8,-2,5,-1]","[4]","[2, 3, 6]","[8,-2,5,-1]","[4]","[2, 3, 6]","[8,-2,5,-1]","[4]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The OpenCLDevice specific Data class represents JVM data for OpenCL. 
[5]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We make sure that any data will not automatically be converted to floats!","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = false }"]}, + + {"kind":"and","text":"We create 2 different data objects, a full and a partial/sliced array.","code":["var full = JVMData.of(data)","var slice = JVMData.of(data, size, start)"]}, + + {"kind":"and","text":"An expected array based on the previous slice indices!","code":["var expected2 = expected[start..(start+size-1)]"]}, + + {"kind":"expect","text":"Both data objects report the expected array types!","code":["full.array.class == expectedType","slice.array.class == expectedType"]}, + + {"kind":"and","text":"Also they report the expected data array size.","code":["full.length == expected.size()","slice.length == expected2.size()"]}, + + {"kind":"and","text":"","code":["full.array !== slice.array","full.array == expected","(data instanceof Number && slice.array == [data] ) || slice.array == expected2"]}, + + {"kind":"and","text":"They produce OpenCL specific pointer objects.","code":["full.pointer != null","slice.pointer != null"]}, + + {"kind":"cleanup","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = true }"]}, + + {"kind":"where","text":"","code":{"data":["[2, 3, 6] as float[]","[8,-2,5,-1] as float[]","4 as Float","[2, 3, 6] as double[]","[8,-2,5,-1] as double[]","4 as Double","[2, 3, 6] as int[]","[8,-2,5,-1] as int[]","4 as Integer","[2, 3, 6] as short[]","[8,-2,5,-1] as short[]","4 as Short","[2, 3, 6] as long[]","[8,-2,5,-1] as long[]","4 as Long","[2, 3, 6] as byte[]","[8,-2,5,-1] as byte[]","4 as Byte"],"start":["1","1","0","1","1","0","1","1","0","1","1","0","1","1","0","1","1","0"],"size":["2","3","1","2","3","1","2","3","1","2","3","1","2","3","1","2","3","1"],"expectedType":["float[]","float[]","float[]","double[]","double[]","double[]","int[]","int[]","int[]","short[]","short[]","short[]","long[]","long[]","long[]","byte[]","byte[]","byte[]"],"expected":["[2, 3, 6]","[8,-2,5,-1]","[4]","[2, 3, 6]","[8,-2,5,-1]","[4]","[2, 3, 6]","[8,-2,5,-1]","[4]","[2, 3, 6]","[8,-2,5,-1]","[4]","[2, 3, 6]","[8,-2,5,-1]","[4]","[2, 3, 6]","[8,-2,5,-1]","[4]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The OpenCLDevice specific Data class represents JVM data for OpenCL. 
[6]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We make sure that any data will not automatically be converted to floats!","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = false }"]}, + + {"kind":"and","text":"We create 2 different data objects, a full and a partial/sliced array.","code":["var full = JVMData.of(data)","var slice = JVMData.of(data, size, start)"]}, + + {"kind":"and","text":"An expected array based on the previous slice indices!","code":["var expected2 = expected[start..(start+size-1)]"]}, + + {"kind":"expect","text":"Both data objects report the expected array types!","code":["full.array.class == expectedType","slice.array.class == expectedType"]}, + + {"kind":"and","text":"Also they report the expected data array size.","code":["full.length == expected.size()","slice.length == expected2.size()"]}, + + {"kind":"and","text":"","code":["full.array !== slice.array","full.array == expected","(data instanceof Number && slice.array == [data] ) || slice.array == expected2"]}, + + {"kind":"and","text":"They produce OpenCL specific pointer objects.","code":["full.pointer != null","slice.pointer != null"]}, + + {"kind":"cleanup","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = true }"]}, + + {"kind":"where","text":"","code":{"data":["[2, 3, 6] as float[]","[8,-2,5,-1] as float[]","4 as Float","[2, 3, 6] as double[]","[8,-2,5,-1] as double[]","4 as Double","[2, 3, 6] as int[]","[8,-2,5,-1] as int[]","4 as Integer","[2, 3, 6] as short[]","[8,-2,5,-1] as short[]","4 as Short","[2, 3, 6] as long[]","[8,-2,5,-1] as long[]","4 as Long","[2, 3, 6] as byte[]","[8,-2,5,-1] as byte[]","4 as Byte"],"start":["1","1","0","1","1","0","1","1","0","1","1","0","1","1","0","1","1","0"],"size":["2","3","1","2","3","1","2","3","1","2","3","1","2","3","1","2","3","1"],"expectedType":["float[]","float[]","float[]","double[]","double[]","double[]","int[]","int[]","int[]","short[]","short[]","short[]","long[]","long[]","long[]","byte[]","byte[]","byte[]"],"expected":["[2, 3, 6]","[8,-2,5,-1]","[4]","[2, 3, 6]","[8,-2,5,-1]","[4]","[2, 3, 6]","[8,-2,5,-1]","[4]","[2, 3, 6]","[8,-2,5,-1]","[4]","[2, 3, 6]","[8,-2,5,-1]","[4]","[2, 3, 6]","[8,-2,5,-1]","[4]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The OpenCLDevice specific Data class represents JVM data for OpenCL. 
[7]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We make sure that any data will not automatically be converted to floats!","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = false }"]}, + + {"kind":"and","text":"We create 2 different data objects, a full and a partial/sliced array.","code":["var full = JVMData.of(data)","var slice = JVMData.of(data, size, start)"]}, + + {"kind":"and","text":"An expected array based on the previous slice indices!","code":["var expected2 = expected[start..(start+size-1)]"]}, + + {"kind":"expect","text":"Both data objects report the expected array types!","code":["full.array.class == expectedType","slice.array.class == expectedType"]}, + + {"kind":"and","text":"Also they report the expected data array size.","code":["full.length == expected.size()","slice.length == expected2.size()"]}, + + {"kind":"and","text":"","code":["full.array !== slice.array","full.array == expected","(data instanceof Number && slice.array == [data] ) || slice.array == expected2"]}, + + {"kind":"and","text":"They produce OpenCL specific pointer objects.","code":["full.pointer != null","slice.pointer != null"]}, + + {"kind":"cleanup","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = true }"]}, + + {"kind":"where","text":"","code":{"data":["[2, 3, 6] as float[]","[8,-2,5,-1] as float[]","4 as Float","[2, 3, 6] as double[]","[8,-2,5,-1] as double[]","4 as Double","[2, 3, 6] as int[]","[8,-2,5,-1] as int[]","4 as Integer","[2, 3, 6] as short[]","[8,-2,5,-1] as short[]","4 as Short","[2, 3, 6] as long[]","[8,-2,5,-1] as long[]","4 as Long","[2, 3, 6] as byte[]","[8,-2,5,-1] as byte[]","4 as Byte"],"start":["1","1","0","1","1","0","1","1","0","1","1","0","1","1","0","1","1","0"],"size":["2","3","1","2","3","1","2","3","1","2","3","1","2","3","1","2","3","1"],"expectedType":["float[]","float[]","float[]","double[]","double[]","double[]","int[]","int[]","int[]","short[]","short[]","short[]","long[]","long[]","long[]","byte[]","byte[]","byte[]"],"expected":["[2, 3, 6]","[8,-2,5,-1]","[4]","[2, 3, 6]","[8,-2,5,-1]","[4]","[2, 3, 6]","[8,-2,5,-1]","[4]","[2, 3, 6]","[8,-2,5,-1]","[4]","[2, 3, 6]","[8,-2,5,-1]","[4]","[2, 3, 6]","[8,-2,5,-1]","[4]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The OpenCLDevice specific Data class represents JVM data for OpenCL. 
[8]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We make sure that any data will not automatically be converted to floats!","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = false }"]}, + + {"kind":"and","text":"We create 2 different data objects, a full and a partial/sliced array.","code":["var full = JVMData.of(data)","var slice = JVMData.of(data, size, start)"]}, + + {"kind":"and","text":"An expected array based on the previous slice indices!","code":["var expected2 = expected[start..(start+size-1)]"]}, + + {"kind":"expect","text":"Both data objects report the expected array types!","code":["full.array.class == expectedType","slice.array.class == expectedType"]}, + + {"kind":"and","text":"Also they report the expected data array size.","code":["full.length == expected.size()","slice.length == expected2.size()"]}, + + {"kind":"and","text":"","code":["full.array !== slice.array","full.array == expected","(data instanceof Number && slice.array == [data] ) || slice.array == expected2"]}, + + {"kind":"and","text":"They produce OpenCL specific pointer objects.","code":["full.pointer != null","slice.pointer != null"]}, + + {"kind":"cleanup","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = true }"]}, + + {"kind":"where","text":"","code":{"data":["[2, 3, 6] as float[]","[8,-2,5,-1] as float[]","4 as Float","[2, 3, 6] as double[]","[8,-2,5,-1] as double[]","4 as Double","[2, 3, 6] as int[]","[8,-2,5,-1] as int[]","4 as Integer","[2, 3, 6] as short[]","[8,-2,5,-1] as short[]","4 as Short","[2, 3, 6] as long[]","[8,-2,5,-1] as long[]","4 as Long","[2, 3, 6] as byte[]","[8,-2,5,-1] as byte[]","4 as Byte"],"start":["1","1","0","1","1","0","1","1","0","1","1","0","1","1","0","1","1","0"],"size":["2","3","1","2","3","1","2","3","1","2","3","1","2","3","1","2","3","1"],"expectedType":["float[]","float[]","float[]","double[]","double[]","double[]","int[]","int[]","int[]","short[]","short[]","short[]","long[]","long[]","long[]","byte[]","byte[]","byte[]"],"expected":["[2, 3, 6]","[8,-2,5,-1]","[4]","[2, 3, 6]","[8,-2,5,-1]","[4]","[2, 3, 6]","[8,-2,5,-1]","[4]","[2, 3, 6]","[8,-2,5,-1]","[4]","[2, 3, 6]","[8,-2,5,-1]","[4]","[2, 3, 6]","[8,-2,5,-1]","[4]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The OpenCLDevice specific Data class represents JVM data for OpenCL. [9]", "result":"PASS", "duration":"0.001 seconds", "iterations":{ @@ -43,9 +304,781 @@ }, { - "id":"The \"Data\" class can represent various OpenCL data types.", + "id":"The OpenCLDevice specific Data class represents JVM data for OpenCL. 
[10]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We make sure that any data will not automatically be converted to floats!","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = false }"]}, + + {"kind":"and","text":"We create 2 different data objects, a full and a partial/sliced array.","code":["var full = JVMData.of(data)","var slice = JVMData.of(data, size, start)"]}, + + {"kind":"and","text":"An expected array based on the previous slice indices!","code":["var expected2 = expected[start..(start+size-1)]"]}, + + {"kind":"expect","text":"Both data objects report the expected array types!","code":["full.array.class == expectedType","slice.array.class == expectedType"]}, + + {"kind":"and","text":"Also they report the expected data array size.","code":["full.length == expected.size()","slice.length == expected2.size()"]}, + + {"kind":"and","text":"","code":["full.array !== slice.array","full.array == expected","(data instanceof Number && slice.array == [data] ) || slice.array == expected2"]}, + + {"kind":"and","text":"They produce OpenCL specific pointer objects.","code":["full.pointer != null","slice.pointer != null"]}, + + {"kind":"cleanup","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = true }"]}, + + {"kind":"where","text":"","code":{"data":["[2, 3, 6] as float[]","[8,-2,5,-1] as float[]","4 as Float","[2, 3, 6] as double[]","[8,-2,5,-1] as double[]","4 as Double","[2, 3, 6] as int[]","[8,-2,5,-1] as int[]","4 as Integer","[2, 3, 6] as short[]","[8,-2,5,-1] as short[]","4 as Short","[2, 3, 6] as long[]","[8,-2,5,-1] as long[]","4 as Long","[2, 3, 6] as byte[]","[8,-2,5,-1] as byte[]","4 as Byte"],"start":["1","1","0","1","1","0","1","1","0","1","1","0","1","1","0","1","1","0"],"size":["2","3","1","2","3","1","2","3","1","2","3","1","2","3","1","2","3","1"],"expectedType":["float[]","float[]","float[]","double[]","double[]","double[]","int[]","int[]","int[]","short[]","short[]","short[]","long[]","long[]","long[]","byte[]","byte[]","byte[]"],"expected":["[2, 3, 6]","[8,-2,5,-1]","[4]","[2, 3, 6]","[8,-2,5,-1]","[4]","[2, 3, 6]","[8,-2,5,-1]","[4]","[2, 3, 6]","[8,-2,5,-1]","[4]","[2, 3, 6]","[8,-2,5,-1]","[4]","[2, 3, 6]","[8,-2,5,-1]","[4]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The OpenCLDevice specific Data class represents JVM data for OpenCL. 
[11]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We make sure that any data will not automatically be converted to floats!","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = false }"]}, + + {"kind":"and","text":"We create 2 different data objects, a full and a partial/sliced array.","code":["var full = JVMData.of(data)","var slice = JVMData.of(data, size, start)"]}, + + {"kind":"and","text":"An expected array based on the previous slice indices!","code":["var expected2 = expected[start..(start+size-1)]"]}, + + {"kind":"expect","text":"Both data objects report the expected array types!","code":["full.array.class == expectedType","slice.array.class == expectedType"]}, + + {"kind":"and","text":"Also they report the expected data array size.","code":["full.length == expected.size()","slice.length == expected2.size()"]}, + + {"kind":"and","text":"","code":["full.array !== slice.array","full.array == expected","(data instanceof Number && slice.array == [data] ) || slice.array == expected2"]}, + + {"kind":"and","text":"They produce OpenCL specific pointer objects.","code":["full.pointer != null","slice.pointer != null"]}, + + {"kind":"cleanup","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = true }"]}, + + {"kind":"where","text":"","code":{"data":["[2, 3, 6] as float[]","[8,-2,5,-1] as float[]","4 as Float","[2, 3, 6] as double[]","[8,-2,5,-1] as double[]","4 as Double","[2, 3, 6] as int[]","[8,-2,5,-1] as int[]","4 as Integer","[2, 3, 6] as short[]","[8,-2,5,-1] as short[]","4 as Short","[2, 3, 6] as long[]","[8,-2,5,-1] as long[]","4 as Long","[2, 3, 6] as byte[]","[8,-2,5,-1] as byte[]","4 as Byte"],"start":["1","1","0","1","1","0","1","1","0","1","1","0","1","1","0","1","1","0"],"size":["2","3","1","2","3","1","2","3","1","2","3","1","2","3","1","2","3","1"],"expectedType":["float[]","float[]","float[]","double[]","double[]","double[]","int[]","int[]","int[]","short[]","short[]","short[]","long[]","long[]","long[]","byte[]","byte[]","byte[]"],"expected":["[2, 3, 6]","[8,-2,5,-1]","[4]","[2, 3, 6]","[8,-2,5,-1]","[4]","[2, 3, 6]","[8,-2,5,-1]","[4]","[2, 3, 6]","[8,-2,5,-1]","[4]","[2, 3, 6]","[8,-2,5,-1]","[4]","[2, 3, 6]","[8,-2,5,-1]","[4]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The OpenCLDevice specific Data class represents JVM data for OpenCL. 
[12]", + "result":"PASS", + "duration":"0.004 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We make sure that any data will not automatically be converted to floats!","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = false }"]}, + + {"kind":"and","text":"We create 2 different data objects, a full and a partial/sliced array.","code":["var full = JVMData.of(data)","var slice = JVMData.of(data, size, start)"]}, + + {"kind":"and","text":"An expected array based on the previous slice indices!","code":["var expected2 = expected[start..(start+size-1)]"]}, + + {"kind":"expect","text":"Both data objects report the expected array types!","code":["full.array.class == expectedType","slice.array.class == expectedType"]}, + + {"kind":"and","text":"Also they report the expected data array size.","code":["full.length == expected.size()","slice.length == expected2.size()"]}, + + {"kind":"and","text":"","code":["full.array !== slice.array","full.array == expected","(data instanceof Number && slice.array == [data] ) || slice.array == expected2"]}, + + {"kind":"and","text":"They produce OpenCL specific pointer objects.","code":["full.pointer != null","slice.pointer != null"]}, + + {"kind":"cleanup","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = true }"]}, + + {"kind":"where","text":"","code":{"data":["[2, 3, 6] as float[]","[8,-2,5,-1] as float[]","4 as Float","[2, 3, 6] as double[]","[8,-2,5,-1] as double[]","4 as Double","[2, 3, 6] as int[]","[8,-2,5,-1] as int[]","4 as Integer","[2, 3, 6] as short[]","[8,-2,5,-1] as short[]","4 as Short","[2, 3, 6] as long[]","[8,-2,5,-1] as long[]","4 as Long","[2, 3, 6] as byte[]","[8,-2,5,-1] as byte[]","4 as Byte"],"start":["1","1","0","1","1","0","1","1","0","1","1","0","1","1","0","1","1","0"],"size":["2","3","1","2","3","1","2","3","1","2","3","1","2","3","1","2","3","1"],"expectedType":["float[]","float[]","float[]","double[]","double[]","double[]","int[]","int[]","int[]","short[]","short[]","short[]","long[]","long[]","long[]","byte[]","byte[]","byte[]"],"expected":["[2, 3, 6]","[8,-2,5,-1]","[4]","[2, 3, 6]","[8,-2,5,-1]","[4]","[2, 3, 6]","[8,-2,5,-1]","[4]","[2, 3, 6]","[8,-2,5,-1]","[4]","[2, 3, 6]","[8,-2,5,-1]","[4]","[2, 3, 6]","[8,-2,5,-1]","[4]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The OpenCLDevice specific Data class represents JVM data for OpenCL. 
[13]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We make sure that any data will not automatically be converted to floats!","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = false }"]}, + + {"kind":"and","text":"We create 2 different data objects, a full and a partial/sliced array.","code":["var full = JVMData.of(data)","var slice = JVMData.of(data, size, start)"]}, + + {"kind":"and","text":"An expected array based on the previous slice indices!","code":["var expected2 = expected[start..(start+size-1)]"]}, + + {"kind":"expect","text":"Both data objects report the expected array types!","code":["full.array.class == expectedType","slice.array.class == expectedType"]}, + + {"kind":"and","text":"Also they report the expected data array size.","code":["full.length == expected.size()","slice.length == expected2.size()"]}, + + {"kind":"and","text":"","code":["full.array !== slice.array","full.array == expected","(data instanceof Number && slice.array == [data] ) || slice.array == expected2"]}, + + {"kind":"and","text":"They produce OpenCL specific pointer objects.","code":["full.pointer != null","slice.pointer != null"]}, + + {"kind":"cleanup","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = true }"]}, + + {"kind":"where","text":"","code":{"data":["[2, 3, 6] as float[]","[8,-2,5,-1] as float[]","4 as Float","[2, 3, 6] as double[]","[8,-2,5,-1] as double[]","4 as Double","[2, 3, 6] as int[]","[8,-2,5,-1] as int[]","4 as Integer","[2, 3, 6] as short[]","[8,-2,5,-1] as short[]","4 as Short","[2, 3, 6] as long[]","[8,-2,5,-1] as long[]","4 as Long","[2, 3, 6] as byte[]","[8,-2,5,-1] as byte[]","4 as Byte"],"start":["1","1","0","1","1","0","1","1","0","1","1","0","1","1","0","1","1","0"],"size":["2","3","1","2","3","1","2","3","1","2","3","1","2","3","1","2","3","1"],"expectedType":["float[]","float[]","float[]","double[]","double[]","double[]","int[]","int[]","int[]","short[]","short[]","short[]","long[]","long[]","long[]","byte[]","byte[]","byte[]"],"expected":["[2, 3, 6]","[8,-2,5,-1]","[4]","[2, 3, 6]","[8,-2,5,-1]","[4]","[2, 3, 6]","[8,-2,5,-1]","[4]","[2, 3, 6]","[8,-2,5,-1]","[4]","[2, 3, 6]","[8,-2,5,-1]","[4]","[2, 3, 6]","[8,-2,5,-1]","[4]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The OpenCLDevice specific Data class represents JVM data for OpenCL. 
[14]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We make sure that any data will not automatically be converted to floats!","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = false }"]}, + + {"kind":"and","text":"We create 2 different data objects, a full and a partial/sliced array.","code":["var full = JVMData.of(data)","var slice = JVMData.of(data, size, start)"]}, + + {"kind":"and","text":"An expected array based on the previous slice indices!","code":["var expected2 = expected[start..(start+size-1)]"]}, + + {"kind":"expect","text":"Both data objects report the expected array types!","code":["full.array.class == expectedType","slice.array.class == expectedType"]}, + + {"kind":"and","text":"Also they report the expected data array size.","code":["full.length == expected.size()","slice.length == expected2.size()"]}, + + {"kind":"and","text":"","code":["full.array !== slice.array","full.array == expected","(data instanceof Number && slice.array == [data] ) || slice.array == expected2"]}, + + {"kind":"and","text":"They produce OpenCL specific pointer objects.","code":["full.pointer != null","slice.pointer != null"]}, + + {"kind":"cleanup","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = true }"]}, + + {"kind":"where","text":"","code":{"data":["[2, 3, 6] as float[]","[8,-2,5,-1] as float[]","4 as Float","[2, 3, 6] as double[]","[8,-2,5,-1] as double[]","4 as Double","[2, 3, 6] as int[]","[8,-2,5,-1] as int[]","4 as Integer","[2, 3, 6] as short[]","[8,-2,5,-1] as short[]","4 as Short","[2, 3, 6] as long[]","[8,-2,5,-1] as long[]","4 as Long","[2, 3, 6] as byte[]","[8,-2,5,-1] as byte[]","4 as Byte"],"start":["1","1","0","1","1","0","1","1","0","1","1","0","1","1","0","1","1","0"],"size":["2","3","1","2","3","1","2","3","1","2","3","1","2","3","1","2","3","1"],"expectedType":["float[]","float[]","float[]","double[]","double[]","double[]","int[]","int[]","int[]","short[]","short[]","short[]","long[]","long[]","long[]","byte[]","byte[]","byte[]"],"expected":["[2, 3, 6]","[8,-2,5,-1]","[4]","[2, 3, 6]","[8,-2,5,-1]","[4]","[2, 3, 6]","[8,-2,5,-1]","[4]","[2, 3, 6]","[8,-2,5,-1]","[4]","[2, 3, 6]","[8,-2,5,-1]","[4]","[2, 3, 6]","[8,-2,5,-1]","[4]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The OpenCLDevice specific Data class represents JVM data for OpenCL. 
[15]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We make sure that any data will not automatically be converted to floats!","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = false }"]}, + + {"kind":"and","text":"We create 2 different data objects, a full and a partial/sliced array.","code":["var full = JVMData.of(data)","var slice = JVMData.of(data, size, start)"]}, + + {"kind":"and","text":"An expected array based on the previous slice indices!","code":["var expected2 = expected[start..(start+size-1)]"]}, + + {"kind":"expect","text":"Both data objects report the expected array types!","code":["full.array.class == expectedType","slice.array.class == expectedType"]}, + + {"kind":"and","text":"Also they report the expected data array size.","code":["full.length == expected.size()","slice.length == expected2.size()"]}, + + {"kind":"and","text":"","code":["full.array !== slice.array","full.array == expected","(data instanceof Number && slice.array == [data] ) || slice.array == expected2"]}, + + {"kind":"and","text":"They produce OpenCL specific pointer objects.","code":["full.pointer != null","slice.pointer != null"]}, + + {"kind":"cleanup","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = true }"]}, + + {"kind":"where","text":"","code":{"data":["[2, 3, 6] as float[]","[8,-2,5,-1] as float[]","4 as Float","[2, 3, 6] as double[]","[8,-2,5,-1] as double[]","4 as Double","[2, 3, 6] as int[]","[8,-2,5,-1] as int[]","4 as Integer","[2, 3, 6] as short[]","[8,-2,5,-1] as short[]","4 as Short","[2, 3, 6] as long[]","[8,-2,5,-1] as long[]","4 as Long","[2, 3, 6] as byte[]","[8,-2,5,-1] as byte[]","4 as Byte"],"start":["1","1","0","1","1","0","1","1","0","1","1","0","1","1","0","1","1","0"],"size":["2","3","1","2","3","1","2","3","1","2","3","1","2","3","1","2","3","1"],"expectedType":["float[]","float[]","float[]","double[]","double[]","double[]","int[]","int[]","int[]","short[]","short[]","short[]","long[]","long[]","long[]","byte[]","byte[]","byte[]"],"expected":["[2, 3, 6]","[8,-2,5,-1]","[4]","[2, 3, 6]","[8,-2,5,-1]","[4]","[2, 3, 6]","[8,-2,5,-1]","[4]","[2, 3, 6]","[8,-2,5,-1]","[4]","[2, 3, 6]","[8,-2,5,-1]","[4]","[2, 3, 6]","[8,-2,5,-1]","[4]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The OpenCLDevice specific Data class represents JVM data for OpenCL. 
[16]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We make sure that any data will not automatically be converted to floats!","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = false }"]}, + + {"kind":"and","text":"We create 2 different data objects, a full and a partial/sliced array.","code":["var full = JVMData.of(data)","var slice = JVMData.of(data, size, start)"]}, + + {"kind":"and","text":"An expected array based on the previous slice indices!","code":["var expected2 = expected[start..(start+size-1)]"]}, + + {"kind":"expect","text":"Both data objects report the expected array types!","code":["full.array.class == expectedType","slice.array.class == expectedType"]}, + + {"kind":"and","text":"Also they report the expected data array size.","code":["full.length == expected.size()","slice.length == expected2.size()"]}, + + {"kind":"and","text":"","code":["full.array !== slice.array","full.array == expected","(data instanceof Number && slice.array == [data] ) || slice.array == expected2"]}, + + {"kind":"and","text":"They produce OpenCL specific pointer objects.","code":["full.pointer != null","slice.pointer != null"]}, + + {"kind":"cleanup","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = true }"]}, + + {"kind":"where","text":"","code":{"data":["[2, 3, 6] as float[]","[8,-2,5,-1] as float[]","4 as Float","[2, 3, 6] as double[]","[8,-2,5,-1] as double[]","4 as Double","[2, 3, 6] as int[]","[8,-2,5,-1] as int[]","4 as Integer","[2, 3, 6] as short[]","[8,-2,5,-1] as short[]","4 as Short","[2, 3, 6] as long[]","[8,-2,5,-1] as long[]","4 as Long","[2, 3, 6] as byte[]","[8,-2,5,-1] as byte[]","4 as Byte"],"start":["1","1","0","1","1","0","1","1","0","1","1","0","1","1","0","1","1","0"],"size":["2","3","1","2","3","1","2","3","1","2","3","1","2","3","1","2","3","1"],"expectedType":["float[]","float[]","float[]","double[]","double[]","double[]","int[]","int[]","int[]","short[]","short[]","short[]","long[]","long[]","long[]","byte[]","byte[]","byte[]"],"expected":["[2, 3, 6]","[8,-2,5,-1]","[4]","[2, 3, 6]","[8,-2,5,-1]","[4]","[2, 3, 6]","[8,-2,5,-1]","[4]","[2, 3, 6]","[8,-2,5,-1]","[4]","[2, 3, 6]","[8,-2,5,-1]","[4]","[2, 3, 6]","[8,-2,5,-1]","[4]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The OpenCLDevice specific Data class represents JVM data for OpenCL. 
[17]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We make sure that any data will not automatically be converted to floats!","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = false }"]}, + + {"kind":"and","text":"We create 2 different data objects, a full and a partial/sliced array.","code":["var full = JVMData.of(data)","var slice = JVMData.of(data, size, start)"]}, + + {"kind":"and","text":"An expected array based on the previous slice indices!","code":["var expected2 = expected[start..(start+size-1)]"]}, + + {"kind":"expect","text":"Both data objects report the expected array types!","code":["full.array.class == expectedType","slice.array.class == expectedType"]}, + + {"kind":"and","text":"Also they report the expected data array size.","code":["full.length == expected.size()","slice.length == expected2.size()"]}, + + {"kind":"and","text":"","code":["full.array !== slice.array","full.array == expected","(data instanceof Number && slice.array == [data] ) || slice.array == expected2"]}, + + {"kind":"and","text":"They produce OpenCL specific pointer objects.","code":["full.pointer != null","slice.pointer != null"]}, + + {"kind":"cleanup","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = true }"]}, + + {"kind":"where","text":"","code":{"data":["[2, 3, 6] as float[]","[8,-2,5,-1] as float[]","4 as Float","[2, 3, 6] as double[]","[8,-2,5,-1] as double[]","4 as Double","[2, 3, 6] as int[]","[8,-2,5,-1] as int[]","4 as Integer","[2, 3, 6] as short[]","[8,-2,5,-1] as short[]","4 as Short","[2, 3, 6] as long[]","[8,-2,5,-1] as long[]","4 as Long","[2, 3, 6] as byte[]","[8,-2,5,-1] as byte[]","4 as Byte"],"start":["1","1","0","1","1","0","1","1","0","1","1","0","1","1","0","1","1","0"],"size":["2","3","1","2","3","1","2","3","1","2","3","1","2","3","1","2","3","1"],"expectedType":["float[]","float[]","float[]","double[]","double[]","double[]","int[]","int[]","int[]","short[]","short[]","short[]","long[]","long[]","long[]","byte[]","byte[]","byte[]"],"expected":["[2, 3, 6]","[8,-2,5,-1]","[4]","[2, 3, 6]","[8,-2,5,-1]","[4]","[2, 3, 6]","[8,-2,5,-1]","[4]","[2, 3, 6]","[8,-2,5,-1]","[4]","[2, 3, 6]","[8,-2,5,-1]","[4]","[2, 3, 6]","[8,-2,5,-1]","[4]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The \"Data\" class can represent various OpenCL data types. 
[0]", + "result":"PASS", + "duration":"0.011 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["array = array.asType(arrayType)","var data1 = JVMData.of(array, size, offset)","var data2 = JVMData.of(type, size)","var data3 = JVMData.of(array)"]}, + + {"kind":"expect","text":"","code":["data1.array == array[offset..(offset+size-1)].asType(arrayType)","data2.array == new int[size].asType(arrayType)","data3.array == array"]}, + + {"kind":"and","text":"","code":["data1.length == size","data2.length == size","data3.length == array.length"]}, + + {"kind":"and","text":"","code":["data1.itemSize == itemSize","data2.itemSize == itemSize","data3.itemSize == itemSize"]}, + + {"kind":"and","text":"","code":["data1.pointer != null","data2.pointer != null","data3.pointer != null"]}, + + {"kind":"and","text":"","code":["data1.type.name() == targetType","data2.type.name() == targetType","data3.type.name() == targetType"]}, + + {"kind":"and","text":"","code":["data1.array == (0..data1.getElementAt((int)it)})","data2.array == (0..data2.getElementAt((int)it)})","data3.array == (0..data3.getElementAt((int)it)})"]}, + + {"kind":"where","text":"","code":{"array":["[1, 2]","[8, 5, 2]","[42]","[1,2,3,4,5]","[1, 2]","[8, 5, 2]","[42]","[1,2,3,4,5]","[1, 2]","[8, 5, 2]","[42]","[1,2,3,4,5]","[1, 2]","[8, 5, 2]","[42]","[1,2,3,4,5]","[0.3, -0.9]","[2, 0.5, 8]","[0.3, -0.9]","[2, 0.5, 8]","[0.6, 3, 0.2]"],"arrayType":["int[]","int[]","int[]","int[]","byte[]","byte[]","byte[]","byte[]","long[]","long[]","long[]","long[]","short[]","short[]","short[]","short[]","float[]","float[]","double[]","double[]","double[]"],"type":["Integer","Integer","Integer","Integer","Byte","Byte","Byte","Byte","Long","Long","Long","Long","Short","Short","Short","Short","Float","Float","Double","Double","Double"],"itemSize":["4","4","4","4","1","1","1","1","8","8","8","8","2","2","2","2","4","4","8","8","8"],"size":["1","2","1","3","1","2","1","3","1","2","1","3","1","2","1","3","2","2","2","2","1"],"offset":["1","0","0","2","1","0","0","2","1","0","0","2","1","0","0","2","0","0","0","0","1"],"targetType":["'I32'","'I32'","'I32'","'I32'","'I8'","'I8'","'I8'","'I8'","'I64'","'I64'","'I64'","'I64'","'I16'","'I16'","'I16'","'I16'","'F32'","'F32'","'F64'","'F64'","'F64'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The \"Data\" class can represent various OpenCL data types. 
[1]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["array = array.asType(arrayType)","var data1 = JVMData.of(array, size, offset)","var data2 = JVMData.of(type, size)","var data3 = JVMData.of(array)"]}, + + {"kind":"expect","text":"","code":["data1.array == array[offset..(offset+size-1)].asType(arrayType)","data2.array == new int[size].asType(arrayType)","data3.array == array"]}, + + {"kind":"and","text":"","code":["data1.length == size","data2.length == size","data3.length == array.length"]}, + + {"kind":"and","text":"","code":["data1.itemSize == itemSize","data2.itemSize == itemSize","data3.itemSize == itemSize"]}, + + {"kind":"and","text":"","code":["data1.pointer != null","data2.pointer != null","data3.pointer != null"]}, + + {"kind":"and","text":"","code":["data1.type.name() == targetType","data2.type.name() == targetType","data3.type.name() == targetType"]}, + + {"kind":"and","text":"","code":["data1.array == (0..data1.getElementAt((int)it)})","data2.array == (0..data2.getElementAt((int)it)})","data3.array == (0..data3.getElementAt((int)it)})"]}, + + {"kind":"where","text":"","code":{"array":["[1, 2]","[8, 5, 2]","[42]","[1,2,3,4,5]","[1, 2]","[8, 5, 2]","[42]","[1,2,3,4,5]","[1, 2]","[8, 5, 2]","[42]","[1,2,3,4,5]","[1, 2]","[8, 5, 2]","[42]","[1,2,3,4,5]","[0.3, -0.9]","[2, 0.5, 8]","[0.3, -0.9]","[2, 0.5, 8]","[0.6, 3, 0.2]"],"arrayType":["int[]","int[]","int[]","int[]","byte[]","byte[]","byte[]","byte[]","long[]","long[]","long[]","long[]","short[]","short[]","short[]","short[]","float[]","float[]","double[]","double[]","double[]"],"type":["Integer","Integer","Integer","Integer","Byte","Byte","Byte","Byte","Long","Long","Long","Long","Short","Short","Short","Short","Float","Float","Double","Double","Double"],"itemSize":["4","4","4","4","1","1","1","1","8","8","8","8","2","2","2","2","4","4","8","8","8"],"size":["1","2","1","3","1","2","1","3","1","2","1","3","1","2","1","3","2","2","2","2","1"],"offset":["1","0","0","2","1","0","0","2","1","0","0","2","1","0","0","2","0","0","0","0","1"],"targetType":["'I32'","'I32'","'I32'","'I32'","'I8'","'I8'","'I8'","'I8'","'I64'","'I64'","'I64'","'I64'","'I16'","'I16'","'I16'","'I16'","'F32'","'F32'","'F64'","'F64'","'F64'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The \"Data\" class can represent various OpenCL data types. 
[2]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["array = array.asType(arrayType)","var data1 = JVMData.of(array, size, offset)","var data2 = JVMData.of(type, size)","var data3 = JVMData.of(array)"]}, + + {"kind":"expect","text":"","code":["data1.array == array[offset..(offset+size-1)].asType(arrayType)","data2.array == new int[size].asType(arrayType)","data3.array == array"]}, + + {"kind":"and","text":"","code":["data1.length == size","data2.length == size","data3.length == array.length"]}, + + {"kind":"and","text":"","code":["data1.itemSize == itemSize","data2.itemSize == itemSize","data3.itemSize == itemSize"]}, + + {"kind":"and","text":"","code":["data1.pointer != null","data2.pointer != null","data3.pointer != null"]}, + + {"kind":"and","text":"","code":["data1.type.name() == targetType","data2.type.name() == targetType","data3.type.name() == targetType"]}, + + {"kind":"and","text":"","code":["data1.array == (0..data1.getElementAt((int)it)})","data2.array == (0..data2.getElementAt((int)it)})","data3.array == (0..data3.getElementAt((int)it)})"]}, + + {"kind":"where","text":"","code":{"array":["[1, 2]","[8, 5, 2]","[42]","[1,2,3,4,5]","[1, 2]","[8, 5, 2]","[42]","[1,2,3,4,5]","[1, 2]","[8, 5, 2]","[42]","[1,2,3,4,5]","[1, 2]","[8, 5, 2]","[42]","[1,2,3,4,5]","[0.3, -0.9]","[2, 0.5, 8]","[0.3, -0.9]","[2, 0.5, 8]","[0.6, 3, 0.2]"],"arrayType":["int[]","int[]","int[]","int[]","byte[]","byte[]","byte[]","byte[]","long[]","long[]","long[]","long[]","short[]","short[]","short[]","short[]","float[]","float[]","double[]","double[]","double[]"],"type":["Integer","Integer","Integer","Integer","Byte","Byte","Byte","Byte","Long","Long","Long","Long","Short","Short","Short","Short","Float","Float","Double","Double","Double"],"itemSize":["4","4","4","4","1","1","1","1","8","8","8","8","2","2","2","2","4","4","8","8","8"],"size":["1","2","1","3","1","2","1","3","1","2","1","3","1","2","1","3","2","2","2","2","1"],"offset":["1","0","0","2","1","0","0","2","1","0","0","2","1","0","0","2","0","0","0","0","1"],"targetType":["'I32'","'I32'","'I32'","'I32'","'I8'","'I8'","'I8'","'I8'","'I64'","'I64'","'I64'","'I64'","'I16'","'I16'","'I16'","'I16'","'F32'","'F32'","'F64'","'F64'","'F64'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The \"Data\" class can represent various OpenCL data types. 
[3]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["array = array.asType(arrayType)","var data1 = JVMData.of(array, size, offset)","var data2 = JVMData.of(type, size)","var data3 = JVMData.of(array)"]}, + + {"kind":"expect","text":"","code":["data1.array == array[offset..(offset+size-1)].asType(arrayType)","data2.array == new int[size].asType(arrayType)","data3.array == array"]}, + + {"kind":"and","text":"","code":["data1.length == size","data2.length == size","data3.length == array.length"]}, + + {"kind":"and","text":"","code":["data1.itemSize == itemSize","data2.itemSize == itemSize","data3.itemSize == itemSize"]}, + + {"kind":"and","text":"","code":["data1.pointer != null","data2.pointer != null","data3.pointer != null"]}, + + {"kind":"and","text":"","code":["data1.type.name() == targetType","data2.type.name() == targetType","data3.type.name() == targetType"]}, + + {"kind":"and","text":"","code":["data1.array == (0..data1.getElementAt((int)it)})","data2.array == (0..data2.getElementAt((int)it)})","data3.array == (0..data3.getElementAt((int)it)})"]}, + + {"kind":"where","text":"","code":{"array":["[1, 2]","[8, 5, 2]","[42]","[1,2,3,4,5]","[1, 2]","[8, 5, 2]","[42]","[1,2,3,4,5]","[1, 2]","[8, 5, 2]","[42]","[1,2,3,4,5]","[1, 2]","[8, 5, 2]","[42]","[1,2,3,4,5]","[0.3, -0.9]","[2, 0.5, 8]","[0.3, -0.9]","[2, 0.5, 8]","[0.6, 3, 0.2]"],"arrayType":["int[]","int[]","int[]","int[]","byte[]","byte[]","byte[]","byte[]","long[]","long[]","long[]","long[]","short[]","short[]","short[]","short[]","float[]","float[]","double[]","double[]","double[]"],"type":["Integer","Integer","Integer","Integer","Byte","Byte","Byte","Byte","Long","Long","Long","Long","Short","Short","Short","Short","Float","Float","Double","Double","Double"],"itemSize":["4","4","4","4","1","1","1","1","8","8","8","8","2","2","2","2","4","4","8","8","8"],"size":["1","2","1","3","1","2","1","3","1","2","1","3","1","2","1","3","2","2","2","2","1"],"offset":["1","0","0","2","1","0","0","2","1","0","0","2","1","0","0","2","0","0","0","0","1"],"targetType":["'I32'","'I32'","'I32'","'I32'","'I8'","'I8'","'I8'","'I8'","'I64'","'I64'","'I64'","'I64'","'I16'","'I16'","'I16'","'I16'","'F32'","'F32'","'F64'","'F64'","'F64'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The \"Data\" class can represent various OpenCL data types. 
[4]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["array = array.asType(arrayType)","var data1 = JVMData.of(array, size, offset)","var data2 = JVMData.of(type, size)","var data3 = JVMData.of(array)"]}, + + {"kind":"expect","text":"","code":["data1.array == array[offset..(offset+size-1)].asType(arrayType)","data2.array == new int[size].asType(arrayType)","data3.array == array"]}, + + {"kind":"and","text":"","code":["data1.length == size","data2.length == size","data3.length == array.length"]}, + + {"kind":"and","text":"","code":["data1.itemSize == itemSize","data2.itemSize == itemSize","data3.itemSize == itemSize"]}, + + {"kind":"and","text":"","code":["data1.pointer != null","data2.pointer != null","data3.pointer != null"]}, + + {"kind":"and","text":"","code":["data1.type.name() == targetType","data2.type.name() == targetType","data3.type.name() == targetType"]}, + + {"kind":"and","text":"","code":["data1.array == (0..data1.getElementAt((int)it)})","data2.array == (0..data2.getElementAt((int)it)})","data3.array == (0..data3.getElementAt((int)it)})"]}, + + {"kind":"where","text":"","code":{"array":["[1, 2]","[8, 5, 2]","[42]","[1,2,3,4,5]","[1, 2]","[8, 5, 2]","[42]","[1,2,3,4,5]","[1, 2]","[8, 5, 2]","[42]","[1,2,3,4,5]","[1, 2]","[8, 5, 2]","[42]","[1,2,3,4,5]","[0.3, -0.9]","[2, 0.5, 8]","[0.3, -0.9]","[2, 0.5, 8]","[0.6, 3, 0.2]"],"arrayType":["int[]","int[]","int[]","int[]","byte[]","byte[]","byte[]","byte[]","long[]","long[]","long[]","long[]","short[]","short[]","short[]","short[]","float[]","float[]","double[]","double[]","double[]"],"type":["Integer","Integer","Integer","Integer","Byte","Byte","Byte","Byte","Long","Long","Long","Long","Short","Short","Short","Short","Float","Float","Double","Double","Double"],"itemSize":["4","4","4","4","1","1","1","1","8","8","8","8","2","2","2","2","4","4","8","8","8"],"size":["1","2","1","3","1","2","1","3","1","2","1","3","1","2","1","3","2","2","2","2","1"],"offset":["1","0","0","2","1","0","0","2","1","0","0","2","1","0","0","2","0","0","0","0","1"],"targetType":["'I32'","'I32'","'I32'","'I32'","'I8'","'I8'","'I8'","'I8'","'I64'","'I64'","'I64'","'I64'","'I16'","'I16'","'I16'","'I16'","'F32'","'F32'","'F64'","'F64'","'F64'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The \"Data\" class can represent various OpenCL data types. 
[5]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["array = array.asType(arrayType)","var data1 = JVMData.of(array, size, offset)","var data2 = JVMData.of(type, size)","var data3 = JVMData.of(array)"]}, + + {"kind":"expect","text":"","code":["data1.array == array[offset..(offset+size-1)].asType(arrayType)","data2.array == new int[size].asType(arrayType)","data3.array == array"]}, + + {"kind":"and","text":"","code":["data1.length == size","data2.length == size","data3.length == array.length"]}, + + {"kind":"and","text":"","code":["data1.itemSize == itemSize","data2.itemSize == itemSize","data3.itemSize == itemSize"]}, + + {"kind":"and","text":"","code":["data1.pointer != null","data2.pointer != null","data3.pointer != null"]}, + + {"kind":"and","text":"","code":["data1.type.name() == targetType","data2.type.name() == targetType","data3.type.name() == targetType"]}, + + {"kind":"and","text":"","code":["data1.array == (0..data1.getElementAt((int)it)})","data2.array == (0..data2.getElementAt((int)it)})","data3.array == (0..data3.getElementAt((int)it)})"]}, + + {"kind":"where","text":"","code":{"array":["[1, 2]","[8, 5, 2]","[42]","[1,2,3,4,5]","[1, 2]","[8, 5, 2]","[42]","[1,2,3,4,5]","[1, 2]","[8, 5, 2]","[42]","[1,2,3,4,5]","[1, 2]","[8, 5, 2]","[42]","[1,2,3,4,5]","[0.3, -0.9]","[2, 0.5, 8]","[0.3, -0.9]","[2, 0.5, 8]","[0.6, 3, 0.2]"],"arrayType":["int[]","int[]","int[]","int[]","byte[]","byte[]","byte[]","byte[]","long[]","long[]","long[]","long[]","short[]","short[]","short[]","short[]","float[]","float[]","double[]","double[]","double[]"],"type":["Integer","Integer","Integer","Integer","Byte","Byte","Byte","Byte","Long","Long","Long","Long","Short","Short","Short","Short","Float","Float","Double","Double","Double"],"itemSize":["4","4","4","4","1","1","1","1","8","8","8","8","2","2","2","2","4","4","8","8","8"],"size":["1","2","1","3","1","2","1","3","1","2","1","3","1","2","1","3","2","2","2","2","1"],"offset":["1","0","0","2","1","0","0","2","1","0","0","2","1","0","0","2","0","0","0","0","1"],"targetType":["'I32'","'I32'","'I32'","'I32'","'I8'","'I8'","'I8'","'I8'","'I64'","'I64'","'I64'","'I64'","'I16'","'I16'","'I16'","'I16'","'F32'","'F32'","'F64'","'F64'","'F64'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The \"Data\" class can represent various OpenCL data types. 
[6]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["array = array.asType(arrayType)","var data1 = JVMData.of(array, size, offset)","var data2 = JVMData.of(type, size)","var data3 = JVMData.of(array)"]}, + + {"kind":"expect","text":"","code":["data1.array == array[offset..(offset+size-1)].asType(arrayType)","data2.array == new int[size].asType(arrayType)","data3.array == array"]}, + + {"kind":"and","text":"","code":["data1.length == size","data2.length == size","data3.length == array.length"]}, + + {"kind":"and","text":"","code":["data1.itemSize == itemSize","data2.itemSize == itemSize","data3.itemSize == itemSize"]}, + + {"kind":"and","text":"","code":["data1.pointer != null","data2.pointer != null","data3.pointer != null"]}, + + {"kind":"and","text":"","code":["data1.type.name() == targetType","data2.type.name() == targetType","data3.type.name() == targetType"]}, + + {"kind":"and","text":"","code":["data1.array == (0..data1.getElementAt((int)it)})","data2.array == (0..data2.getElementAt((int)it)})","data3.array == (0..data3.getElementAt((int)it)})"]}, + + {"kind":"where","text":"","code":{"array":["[1, 2]","[8, 5, 2]","[42]","[1,2,3,4,5]","[1, 2]","[8, 5, 2]","[42]","[1,2,3,4,5]","[1, 2]","[8, 5, 2]","[42]","[1,2,3,4,5]","[1, 2]","[8, 5, 2]","[42]","[1,2,3,4,5]","[0.3, -0.9]","[2, 0.5, 8]","[0.3, -0.9]","[2, 0.5, 8]","[0.6, 3, 0.2]"],"arrayType":["int[]","int[]","int[]","int[]","byte[]","byte[]","byte[]","byte[]","long[]","long[]","long[]","long[]","short[]","short[]","short[]","short[]","float[]","float[]","double[]","double[]","double[]"],"type":["Integer","Integer","Integer","Integer","Byte","Byte","Byte","Byte","Long","Long","Long","Long","Short","Short","Short","Short","Float","Float","Double","Double","Double"],"itemSize":["4","4","4","4","1","1","1","1","8","8","8","8","2","2","2","2","4","4","8","8","8"],"size":["1","2","1","3","1","2","1","3","1","2","1","3","1","2","1","3","2","2","2","2","1"],"offset":["1","0","0","2","1","0","0","2","1","0","0","2","1","0","0","2","0","0","0","0","1"],"targetType":["'I32'","'I32'","'I32'","'I32'","'I8'","'I8'","'I8'","'I8'","'I64'","'I64'","'I64'","'I64'","'I16'","'I16'","'I16'","'I16'","'F32'","'F32'","'F64'","'F64'","'F64'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The \"Data\" class can represent various OpenCL data types. 
[7]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["array = array.asType(arrayType)","var data1 = JVMData.of(array, size, offset)","var data2 = JVMData.of(type, size)","var data3 = JVMData.of(array)"]}, + + {"kind":"expect","text":"","code":["data1.array == array[offset..(offset+size-1)].asType(arrayType)","data2.array == new int[size].asType(arrayType)","data3.array == array"]}, + + {"kind":"and","text":"","code":["data1.length == size","data2.length == size","data3.length == array.length"]}, + + {"kind":"and","text":"","code":["data1.itemSize == itemSize","data2.itemSize == itemSize","data3.itemSize == itemSize"]}, + + {"kind":"and","text":"","code":["data1.pointer != null","data2.pointer != null","data3.pointer != null"]}, + + {"kind":"and","text":"","code":["data1.type.name() == targetType","data2.type.name() == targetType","data3.type.name() == targetType"]}, + + {"kind":"and","text":"","code":["data1.array == (0..data1.getElementAt((int)it)})","data2.array == (0..data2.getElementAt((int)it)})","data3.array == (0..data3.getElementAt((int)it)})"]}, + + {"kind":"where","text":"","code":{"array":["[1, 2]","[8, 5, 2]","[42]","[1,2,3,4,5]","[1, 2]","[8, 5, 2]","[42]","[1,2,3,4,5]","[1, 2]","[8, 5, 2]","[42]","[1,2,3,4,5]","[1, 2]","[8, 5, 2]","[42]","[1,2,3,4,5]","[0.3, -0.9]","[2, 0.5, 8]","[0.3, -0.9]","[2, 0.5, 8]","[0.6, 3, 0.2]"],"arrayType":["int[]","int[]","int[]","int[]","byte[]","byte[]","byte[]","byte[]","long[]","long[]","long[]","long[]","short[]","short[]","short[]","short[]","float[]","float[]","double[]","double[]","double[]"],"type":["Integer","Integer","Integer","Integer","Byte","Byte","Byte","Byte","Long","Long","Long","Long","Short","Short","Short","Short","Float","Float","Double","Double","Double"],"itemSize":["4","4","4","4","1","1","1","1","8","8","8","8","2","2","2","2","4","4","8","8","8"],"size":["1","2","1","3","1","2","1","3","1","2","1","3","1","2","1","3","2","2","2","2","1"],"offset":["1","0","0","2","1","0","0","2","1","0","0","2","1","0","0","2","0","0","0","0","1"],"targetType":["'I32'","'I32'","'I32'","'I32'","'I8'","'I8'","'I8'","'I8'","'I64'","'I64'","'I64'","'I64'","'I16'","'I16'","'I16'","'I16'","'F32'","'F32'","'F64'","'F64'","'F64'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The \"Data\" class can represent various OpenCL data types. 
[8]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["array = array.asType(arrayType)","var data1 = JVMData.of(array, size, offset)","var data2 = JVMData.of(type, size)","var data3 = JVMData.of(array)"]}, + + {"kind":"expect","text":"","code":["data1.array == array[offset..(offset+size-1)].asType(arrayType)","data2.array == new int[size].asType(arrayType)","data3.array == array"]}, + + {"kind":"and","text":"","code":["data1.length == size","data2.length == size","data3.length == array.length"]}, + + {"kind":"and","text":"","code":["data1.itemSize == itemSize","data2.itemSize == itemSize","data3.itemSize == itemSize"]}, + + {"kind":"and","text":"","code":["data1.pointer != null","data2.pointer != null","data3.pointer != null"]}, + + {"kind":"and","text":"","code":["data1.type.name() == targetType","data2.type.name() == targetType","data3.type.name() == targetType"]}, + + {"kind":"and","text":"","code":["data1.array == (0..data1.getElementAt((int)it)})","data2.array == (0..data2.getElementAt((int)it)})","data3.array == (0..data3.getElementAt((int)it)})"]}, + + {"kind":"where","text":"","code":{"array":["[1, 2]","[8, 5, 2]","[42]","[1,2,3,4,5]","[1, 2]","[8, 5, 2]","[42]","[1,2,3,4,5]","[1, 2]","[8, 5, 2]","[42]","[1,2,3,4,5]","[1, 2]","[8, 5, 2]","[42]","[1,2,3,4,5]","[0.3, -0.9]","[2, 0.5, 8]","[0.3, -0.9]","[2, 0.5, 8]","[0.6, 3, 0.2]"],"arrayType":["int[]","int[]","int[]","int[]","byte[]","byte[]","byte[]","byte[]","long[]","long[]","long[]","long[]","short[]","short[]","short[]","short[]","float[]","float[]","double[]","double[]","double[]"],"type":["Integer","Integer","Integer","Integer","Byte","Byte","Byte","Byte","Long","Long","Long","Long","Short","Short","Short","Short","Float","Float","Double","Double","Double"],"itemSize":["4","4","4","4","1","1","1","1","8","8","8","8","2","2","2","2","4","4","8","8","8"],"size":["1","2","1","3","1","2","1","3","1","2","1","3","1","2","1","3","2","2","2","2","1"],"offset":["1","0","0","2","1","0","0","2","1","0","0","2","1","0","0","2","0","0","0","0","1"],"targetType":["'I32'","'I32'","'I32'","'I32'","'I8'","'I8'","'I8'","'I8'","'I64'","'I64'","'I64'","'I64'","'I16'","'I16'","'I16'","'I16'","'F32'","'F32'","'F64'","'F64'","'F64'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The \"Data\" class can represent various OpenCL data types. 
[9]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["array = array.asType(arrayType)","var data1 = JVMData.of(array, size, offset)","var data2 = JVMData.of(type, size)","var data3 = JVMData.of(array)"]}, + + {"kind":"expect","text":"","code":["data1.array == array[offset..(offset+size-1)].asType(arrayType)","data2.array == new int[size].asType(arrayType)","data3.array == array"]}, + + {"kind":"and","text":"","code":["data1.length == size","data2.length == size","data3.length == array.length"]}, + + {"kind":"and","text":"","code":["data1.itemSize == itemSize","data2.itemSize == itemSize","data3.itemSize == itemSize"]}, + + {"kind":"and","text":"","code":["data1.pointer != null","data2.pointer != null","data3.pointer != null"]}, + + {"kind":"and","text":"","code":["data1.type.name() == targetType","data2.type.name() == targetType","data3.type.name() == targetType"]}, + + {"kind":"and","text":"","code":["data1.array == (0..data1.getElementAt((int)it)})","data2.array == (0..data2.getElementAt((int)it)})","data3.array == (0..data3.getElementAt((int)it)})"]}, + + {"kind":"where","text":"","code":{"array":["[1, 2]","[8, 5, 2]","[42]","[1,2,3,4,5]","[1, 2]","[8, 5, 2]","[42]","[1,2,3,4,5]","[1, 2]","[8, 5, 2]","[42]","[1,2,3,4,5]","[1, 2]","[8, 5, 2]","[42]","[1,2,3,4,5]","[0.3, -0.9]","[2, 0.5, 8]","[0.3, -0.9]","[2, 0.5, 8]","[0.6, 3, 0.2]"],"arrayType":["int[]","int[]","int[]","int[]","byte[]","byte[]","byte[]","byte[]","long[]","long[]","long[]","long[]","short[]","short[]","short[]","short[]","float[]","float[]","double[]","double[]","double[]"],"type":["Integer","Integer","Integer","Integer","Byte","Byte","Byte","Byte","Long","Long","Long","Long","Short","Short","Short","Short","Float","Float","Double","Double","Double"],"itemSize":["4","4","4","4","1","1","1","1","8","8","8","8","2","2","2","2","4","4","8","8","8"],"size":["1","2","1","3","1","2","1","3","1","2","1","3","1","2","1","3","2","2","2","2","1"],"offset":["1","0","0","2","1","0","0","2","1","0","0","2","1","0","0","2","0","0","0","0","1"],"targetType":["'I32'","'I32'","'I32'","'I32'","'I8'","'I8'","'I8'","'I8'","'I64'","'I64'","'I64'","'I64'","'I16'","'I16'","'I16'","'I16'","'F32'","'F32'","'F64'","'F64'","'F64'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The \"Data\" class can represent various OpenCL data types. 
[10]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["array = array.asType(arrayType)","var data1 = JVMData.of(array, size, offset)","var data2 = JVMData.of(type, size)","var data3 = JVMData.of(array)"]}, + + {"kind":"expect","text":"","code":["data1.array == array[offset..(offset+size-1)].asType(arrayType)","data2.array == new int[size].asType(arrayType)","data3.array == array"]}, + + {"kind":"and","text":"","code":["data1.length == size","data2.length == size","data3.length == array.length"]}, + + {"kind":"and","text":"","code":["data1.itemSize == itemSize","data2.itemSize == itemSize","data3.itemSize == itemSize"]}, + + {"kind":"and","text":"","code":["data1.pointer != null","data2.pointer != null","data3.pointer != null"]}, + + {"kind":"and","text":"","code":["data1.type.name() == targetType","data2.type.name() == targetType","data3.type.name() == targetType"]}, + + {"kind":"and","text":"","code":["data1.array == (0..data1.getElementAt((int)it)})","data2.array == (0..data2.getElementAt((int)it)})","data3.array == (0..data3.getElementAt((int)it)})"]}, + + {"kind":"where","text":"","code":{"array":["[1, 2]","[8, 5, 2]","[42]","[1,2,3,4,5]","[1, 2]","[8, 5, 2]","[42]","[1,2,3,4,5]","[1, 2]","[8, 5, 2]","[42]","[1,2,3,4,5]","[1, 2]","[8, 5, 2]","[42]","[1,2,3,4,5]","[0.3, -0.9]","[2, 0.5, 8]","[0.3, -0.9]","[2, 0.5, 8]","[0.6, 3, 0.2]"],"arrayType":["int[]","int[]","int[]","int[]","byte[]","byte[]","byte[]","byte[]","long[]","long[]","long[]","long[]","short[]","short[]","short[]","short[]","float[]","float[]","double[]","double[]","double[]"],"type":["Integer","Integer","Integer","Integer","Byte","Byte","Byte","Byte","Long","Long","Long","Long","Short","Short","Short","Short","Float","Float","Double","Double","Double"],"itemSize":["4","4","4","4","1","1","1","1","8","8","8","8","2","2","2","2","4","4","8","8","8"],"size":["1","2","1","3","1","2","1","3","1","2","1","3","1","2","1","3","2","2","2","2","1"],"offset":["1","0","0","2","1","0","0","2","1","0","0","2","1","0","0","2","0","0","0","0","1"],"targetType":["'I32'","'I32'","'I32'","'I32'","'I8'","'I8'","'I8'","'I8'","'I64'","'I64'","'I64'","'I64'","'I16'","'I16'","'I16'","'I16'","'F32'","'F32'","'F64'","'F64'","'F64'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The \"Data\" class can represent various OpenCL data types. 
[11]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["array = array.asType(arrayType)","var data1 = JVMData.of(array, size, offset)","var data2 = JVMData.of(type, size)","var data3 = JVMData.of(array)"]}, + + {"kind":"expect","text":"","code":["data1.array == array[offset..(offset+size-1)].asType(arrayType)","data2.array == new int[size].asType(arrayType)","data3.array == array"]}, + + {"kind":"and","text":"","code":["data1.length == size","data2.length == size","data3.length == array.length"]}, + + {"kind":"and","text":"","code":["data1.itemSize == itemSize","data2.itemSize == itemSize","data3.itemSize == itemSize"]}, + + {"kind":"and","text":"","code":["data1.pointer != null","data2.pointer != null","data3.pointer != null"]}, + + {"kind":"and","text":"","code":["data1.type.name() == targetType","data2.type.name() == targetType","data3.type.name() == targetType"]}, + + {"kind":"and","text":"","code":["data1.array == (0..data1.getElementAt((int)it)})","data2.array == (0..data2.getElementAt((int)it)})","data3.array == (0..data3.getElementAt((int)it)})"]}, + + {"kind":"where","text":"","code":{"array":["[1, 2]","[8, 5, 2]","[42]","[1,2,3,4,5]","[1, 2]","[8, 5, 2]","[42]","[1,2,3,4,5]","[1, 2]","[8, 5, 2]","[42]","[1,2,3,4,5]","[1, 2]","[8, 5, 2]","[42]","[1,2,3,4,5]","[0.3, -0.9]","[2, 0.5, 8]","[0.3, -0.9]","[2, 0.5, 8]","[0.6, 3, 0.2]"],"arrayType":["int[]","int[]","int[]","int[]","byte[]","byte[]","byte[]","byte[]","long[]","long[]","long[]","long[]","short[]","short[]","short[]","short[]","float[]","float[]","double[]","double[]","double[]"],"type":["Integer","Integer","Integer","Integer","Byte","Byte","Byte","Byte","Long","Long","Long","Long","Short","Short","Short","Short","Float","Float","Double","Double","Double"],"itemSize":["4","4","4","4","1","1","1","1","8","8","8","8","2","2","2","2","4","4","8","8","8"],"size":["1","2","1","3","1","2","1","3","1","2","1","3","1","2","1","3","2","2","2","2","1"],"offset":["1","0","0","2","1","0","0","2","1","0","0","2","1","0","0","2","0","0","0","0","1"],"targetType":["'I32'","'I32'","'I32'","'I32'","'I8'","'I8'","'I8'","'I8'","'I64'","'I64'","'I64'","'I64'","'I16'","'I16'","'I16'","'I16'","'F32'","'F32'","'F64'","'F64'","'F64'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The \"Data\" class can represent various OpenCL data types. 
[12]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["array = array.asType(arrayType)","var data1 = JVMData.of(array, size, offset)","var data2 = JVMData.of(type, size)","var data3 = JVMData.of(array)"]}, + + {"kind":"expect","text":"","code":["data1.array == array[offset..(offset+size-1)].asType(arrayType)","data2.array == new int[size].asType(arrayType)","data3.array == array"]}, + + {"kind":"and","text":"","code":["data1.length == size","data2.length == size","data3.length == array.length"]}, + + {"kind":"and","text":"","code":["data1.itemSize == itemSize","data2.itemSize == itemSize","data3.itemSize == itemSize"]}, + + {"kind":"and","text":"","code":["data1.pointer != null","data2.pointer != null","data3.pointer != null"]}, + + {"kind":"and","text":"","code":["data1.type.name() == targetType","data2.type.name() == targetType","data3.type.name() == targetType"]}, + + {"kind":"and","text":"","code":["data1.array == (0..data1.getElementAt((int)it)})","data2.array == (0..data2.getElementAt((int)it)})","data3.array == (0..data3.getElementAt((int)it)})"]}, + + {"kind":"where","text":"","code":{"array":["[1, 2]","[8, 5, 2]","[42]","[1,2,3,4,5]","[1, 2]","[8, 5, 2]","[42]","[1,2,3,4,5]","[1, 2]","[8, 5, 2]","[42]","[1,2,3,4,5]","[1, 2]","[8, 5, 2]","[42]","[1,2,3,4,5]","[0.3, -0.9]","[2, 0.5, 8]","[0.3, -0.9]","[2, 0.5, 8]","[0.6, 3, 0.2]"],"arrayType":["int[]","int[]","int[]","int[]","byte[]","byte[]","byte[]","byte[]","long[]","long[]","long[]","long[]","short[]","short[]","short[]","short[]","float[]","float[]","double[]","double[]","double[]"],"type":["Integer","Integer","Integer","Integer","Byte","Byte","Byte","Byte","Long","Long","Long","Long","Short","Short","Short","Short","Float","Float","Double","Double","Double"],"itemSize":["4","4","4","4","1","1","1","1","8","8","8","8","2","2","2","2","4","4","8","8","8"],"size":["1","2","1","3","1","2","1","3","1","2","1","3","1","2","1","3","2","2","2","2","1"],"offset":["1","0","0","2","1","0","0","2","1","0","0","2","1","0","0","2","0","0","0","0","1"],"targetType":["'I32'","'I32'","'I32'","'I32'","'I8'","'I8'","'I8'","'I8'","'I64'","'I64'","'I64'","'I64'","'I16'","'I16'","'I16'","'I16'","'F32'","'F32'","'F64'","'F64'","'F64'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The \"Data\" class can represent various OpenCL data types. 
[13]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["array = array.asType(arrayType)","var data1 = JVMData.of(array, size, offset)","var data2 = JVMData.of(type, size)","var data3 = JVMData.of(array)"]}, + + {"kind":"expect","text":"","code":["data1.array == array[offset..(offset+size-1)].asType(arrayType)","data2.array == new int[size].asType(arrayType)","data3.array == array"]}, + + {"kind":"and","text":"","code":["data1.length == size","data2.length == size","data3.length == array.length"]}, + + {"kind":"and","text":"","code":["data1.itemSize == itemSize","data2.itemSize == itemSize","data3.itemSize == itemSize"]}, + + {"kind":"and","text":"","code":["data1.pointer != null","data2.pointer != null","data3.pointer != null"]}, + + {"kind":"and","text":"","code":["data1.type.name() == targetType","data2.type.name() == targetType","data3.type.name() == targetType"]}, + + {"kind":"and","text":"","code":["data1.array == (0..data1.getElementAt((int)it)})","data2.array == (0..data2.getElementAt((int)it)})","data3.array == (0..data3.getElementAt((int)it)})"]}, + + {"kind":"where","text":"","code":{"array":["[1, 2]","[8, 5, 2]","[42]","[1,2,3,4,5]","[1, 2]","[8, 5, 2]","[42]","[1,2,3,4,5]","[1, 2]","[8, 5, 2]","[42]","[1,2,3,4,5]","[1, 2]","[8, 5, 2]","[42]","[1,2,3,4,5]","[0.3, -0.9]","[2, 0.5, 8]","[0.3, -0.9]","[2, 0.5, 8]","[0.6, 3, 0.2]"],"arrayType":["int[]","int[]","int[]","int[]","byte[]","byte[]","byte[]","byte[]","long[]","long[]","long[]","long[]","short[]","short[]","short[]","short[]","float[]","float[]","double[]","double[]","double[]"],"type":["Integer","Integer","Integer","Integer","Byte","Byte","Byte","Byte","Long","Long","Long","Long","Short","Short","Short","Short","Float","Float","Double","Double","Double"],"itemSize":["4","4","4","4","1","1","1","1","8","8","8","8","2","2","2","2","4","4","8","8","8"],"size":["1","2","1","3","1","2","1","3","1","2","1","3","1","2","1","3","2","2","2","2","1"],"offset":["1","0","0","2","1","0","0","2","1","0","0","2","1","0","0","2","0","0","0","0","1"],"targetType":["'I32'","'I32'","'I32'","'I32'","'I8'","'I8'","'I8'","'I8'","'I64'","'I64'","'I64'","'I64'","'I16'","'I16'","'I16'","'I16'","'F32'","'F32'","'F64'","'F64'","'F64'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The \"Data\" class can represent various OpenCL data types. 
[14]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["array = array.asType(arrayType)","var data1 = JVMData.of(array, size, offset)","var data2 = JVMData.of(type, size)","var data3 = JVMData.of(array)"]}, + + {"kind":"expect","text":"","code":["data1.array == array[offset..(offset+size-1)].asType(arrayType)","data2.array == new int[size].asType(arrayType)","data3.array == array"]}, + + {"kind":"and","text":"","code":["data1.length == size","data2.length == size","data3.length == array.length"]}, + + {"kind":"and","text":"","code":["data1.itemSize == itemSize","data2.itemSize == itemSize","data3.itemSize == itemSize"]}, + + {"kind":"and","text":"","code":["data1.pointer != null","data2.pointer != null","data3.pointer != null"]}, + + {"kind":"and","text":"","code":["data1.type.name() == targetType","data2.type.name() == targetType","data3.type.name() == targetType"]}, + + {"kind":"and","text":"","code":["data1.array == (0..data1.getElementAt((int)it)})","data2.array == (0..data2.getElementAt((int)it)})","data3.array == (0..data3.getElementAt((int)it)})"]}, + + {"kind":"where","text":"","code":{"array":["[1, 2]","[8, 5, 2]","[42]","[1,2,3,4,5]","[1, 2]","[8, 5, 2]","[42]","[1,2,3,4,5]","[1, 2]","[8, 5, 2]","[42]","[1,2,3,4,5]","[1, 2]","[8, 5, 2]","[42]","[1,2,3,4,5]","[0.3, -0.9]","[2, 0.5, 8]","[0.3, -0.9]","[2, 0.5, 8]","[0.6, 3, 0.2]"],"arrayType":["int[]","int[]","int[]","int[]","byte[]","byte[]","byte[]","byte[]","long[]","long[]","long[]","long[]","short[]","short[]","short[]","short[]","float[]","float[]","double[]","double[]","double[]"],"type":["Integer","Integer","Integer","Integer","Byte","Byte","Byte","Byte","Long","Long","Long","Long","Short","Short","Short","Short","Float","Float","Double","Double","Double"],"itemSize":["4","4","4","4","1","1","1","1","8","8","8","8","2","2","2","2","4","4","8","8","8"],"size":["1","2","1","3","1","2","1","3","1","2","1","3","1","2","1","3","2","2","2","2","1"],"offset":["1","0","0","2","1","0","0","2","1","0","0","2","1","0","0","2","0","0","0","0","1"],"targetType":["'I32'","'I32'","'I32'","'I32'","'I8'","'I8'","'I8'","'I8'","'I64'","'I64'","'I64'","'I64'","'I16'","'I16'","'I16'","'I16'","'F32'","'F32'","'F64'","'F64'","'F64'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The \"Data\" class can represent various OpenCL data types. 
[15]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["array = array.asType(arrayType)","var data1 = JVMData.of(array, size, offset)","var data2 = JVMData.of(type, size)","var data3 = JVMData.of(array)"]}, + + {"kind":"expect","text":"","code":["data1.array == array[offset..(offset+size-1)].asType(arrayType)","data2.array == new int[size].asType(arrayType)","data3.array == array"]}, + + {"kind":"and","text":"","code":["data1.length == size","data2.length == size","data3.length == array.length"]}, + + {"kind":"and","text":"","code":["data1.itemSize == itemSize","data2.itemSize == itemSize","data3.itemSize == itemSize"]}, + + {"kind":"and","text":"","code":["data1.pointer != null","data2.pointer != null","data3.pointer != null"]}, + + {"kind":"and","text":"","code":["data1.type.name() == targetType","data2.type.name() == targetType","data3.type.name() == targetType"]}, + + {"kind":"and","text":"","code":["data1.array == (0..data1.getElementAt((int)it)})","data2.array == (0..data2.getElementAt((int)it)})","data3.array == (0..data3.getElementAt((int)it)})"]}, + + {"kind":"where","text":"","code":{"array":["[1, 2]","[8, 5, 2]","[42]","[1,2,3,4,5]","[1, 2]","[8, 5, 2]","[42]","[1,2,3,4,5]","[1, 2]","[8, 5, 2]","[42]","[1,2,3,4,5]","[1, 2]","[8, 5, 2]","[42]","[1,2,3,4,5]","[0.3, -0.9]","[2, 0.5, 8]","[0.3, -0.9]","[2, 0.5, 8]","[0.6, 3, 0.2]"],"arrayType":["int[]","int[]","int[]","int[]","byte[]","byte[]","byte[]","byte[]","long[]","long[]","long[]","long[]","short[]","short[]","short[]","short[]","float[]","float[]","double[]","double[]","double[]"],"type":["Integer","Integer","Integer","Integer","Byte","Byte","Byte","Byte","Long","Long","Long","Long","Short","Short","Short","Short","Float","Float","Double","Double","Double"],"itemSize":["4","4","4","4","1","1","1","1","8","8","8","8","2","2","2","2","4","4","8","8","8"],"size":["1","2","1","3","1","2","1","3","1","2","1","3","1","2","1","3","2","2","2","2","1"],"offset":["1","0","0","2","1","0","0","2","1","0","0","2","1","0","0","2","0","0","0","0","1"],"targetType":["'I32'","'I32'","'I32'","'I32'","'I8'","'I8'","'I8'","'I8'","'I64'","'I64'","'I64'","'I64'","'I16'","'I16'","'I16'","'I16'","'F32'","'F32'","'F64'","'F64'","'F64'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The \"Data\" class can represent various OpenCL data types. 
[16]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["array = array.asType(arrayType)","var data1 = JVMData.of(array, size, offset)","var data2 = JVMData.of(type, size)","var data3 = JVMData.of(array)"]}, + + {"kind":"expect","text":"","code":["data1.array == array[offset..(offset+size-1)].asType(arrayType)","data2.array == new int[size].asType(arrayType)","data3.array == array"]}, + + {"kind":"and","text":"","code":["data1.length == size","data2.length == size","data3.length == array.length"]}, + + {"kind":"and","text":"","code":["data1.itemSize == itemSize","data2.itemSize == itemSize","data3.itemSize == itemSize"]}, + + {"kind":"and","text":"","code":["data1.pointer != null","data2.pointer != null","data3.pointer != null"]}, + + {"kind":"and","text":"","code":["data1.type.name() == targetType","data2.type.name() == targetType","data3.type.name() == targetType"]}, + + {"kind":"and","text":"","code":["data1.array == (0..data1.getElementAt((int)it)})","data2.array == (0..data2.getElementAt((int)it)})","data3.array == (0..data3.getElementAt((int)it)})"]}, + + {"kind":"where","text":"","code":{"array":["[1, 2]","[8, 5, 2]","[42]","[1,2,3,4,5]","[1, 2]","[8, 5, 2]","[42]","[1,2,3,4,5]","[1, 2]","[8, 5, 2]","[42]","[1,2,3,4,5]","[1, 2]","[8, 5, 2]","[42]","[1,2,3,4,5]","[0.3, -0.9]","[2, 0.5, 8]","[0.3, -0.9]","[2, 0.5, 8]","[0.6, 3, 0.2]"],"arrayType":["int[]","int[]","int[]","int[]","byte[]","byte[]","byte[]","byte[]","long[]","long[]","long[]","long[]","short[]","short[]","short[]","short[]","float[]","float[]","double[]","double[]","double[]"],"type":["Integer","Integer","Integer","Integer","Byte","Byte","Byte","Byte","Long","Long","Long","Long","Short","Short","Short","Short","Float","Float","Double","Double","Double"],"itemSize":["4","4","4","4","1","1","1","1","8","8","8","8","2","2","2","2","4","4","8","8","8"],"size":["1","2","1","3","1","2","1","3","1","2","1","3","1","2","1","3","2","2","2","2","1"],"offset":["1","0","0","2","1","0","0","2","1","0","0","2","1","0","0","2","0","0","0","0","1"],"targetType":["'I32'","'I32'","'I32'","'I32'","'I8'","'I8'","'I8'","'I8'","'I64'","'I64'","'I64'","'I64'","'I16'","'I16'","'I16'","'I16'","'F32'","'F32'","'F64'","'F64'","'F64'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The \"Data\" class can represent various OpenCL data types. 
[17]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["array = array.asType(arrayType)","var data1 = JVMData.of(array, size, offset)","var data2 = JVMData.of(type, size)","var data3 = JVMData.of(array)"]}, + + {"kind":"expect","text":"","code":["data1.array == array[offset..(offset+size-1)].asType(arrayType)","data2.array == new int[size].asType(arrayType)","data3.array == array"]}, + + {"kind":"and","text":"","code":["data1.length == size","data2.length == size","data3.length == array.length"]}, + + {"kind":"and","text":"","code":["data1.itemSize == itemSize","data2.itemSize == itemSize","data3.itemSize == itemSize"]}, + + {"kind":"and","text":"","code":["data1.pointer != null","data2.pointer != null","data3.pointer != null"]}, + + {"kind":"and","text":"","code":["data1.type.name() == targetType","data2.type.name() == targetType","data3.type.name() == targetType"]}, + + {"kind":"and","text":"","code":["data1.array == (0..data1.getElementAt((int)it)})","data2.array == (0..data2.getElementAt((int)it)})","data3.array == (0..data3.getElementAt((int)it)})"]}, + + {"kind":"where","text":"","code":{"array":["[1, 2]","[8, 5, 2]","[42]","[1,2,3,4,5]","[1, 2]","[8, 5, 2]","[42]","[1,2,3,4,5]","[1, 2]","[8, 5, 2]","[42]","[1,2,3,4,5]","[1, 2]","[8, 5, 2]","[42]","[1,2,3,4,5]","[0.3, -0.9]","[2, 0.5, 8]","[0.3, -0.9]","[2, 0.5, 8]","[0.6, 3, 0.2]"],"arrayType":["int[]","int[]","int[]","int[]","byte[]","byte[]","byte[]","byte[]","long[]","long[]","long[]","long[]","short[]","short[]","short[]","short[]","float[]","float[]","double[]","double[]","double[]"],"type":["Integer","Integer","Integer","Integer","Byte","Byte","Byte","Byte","Long","Long","Long","Long","Short","Short","Short","Short","Float","Float","Double","Double","Double"],"itemSize":["4","4","4","4","1","1","1","1","8","8","8","8","2","2","2","2","4","4","8","8","8"],"size":["1","2","1","3","1","2","1","3","1","2","1","3","1","2","1","3","2","2","2","2","1"],"offset":["1","0","0","2","1","0","0","2","1","0","0","2","1","0","0","2","0","0","0","0","1"],"targetType":["'I32'","'I32'","'I32'","'I32'","'I8'","'I8'","'I8'","'I8'","'I64'","'I64'","'I64'","'I64'","'I16'","'I16'","'I16'","'I16'","'F32'","'F32'","'F64'","'F64'","'F64'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The \"Data\" class can represent various OpenCL data types. 
[18]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["array = array.asType(arrayType)","var data1 = JVMData.of(array, size, offset)","var data2 = JVMData.of(type, size)","var data3 = JVMData.of(array)"]}, + + {"kind":"expect","text":"","code":["data1.array == array[offset..(offset+size-1)].asType(arrayType)","data2.array == new int[size].asType(arrayType)","data3.array == array"]}, + + {"kind":"and","text":"","code":["data1.length == size","data2.length == size","data3.length == array.length"]}, + + {"kind":"and","text":"","code":["data1.itemSize == itemSize","data2.itemSize == itemSize","data3.itemSize == itemSize"]}, + + {"kind":"and","text":"","code":["data1.pointer != null","data2.pointer != null","data3.pointer != null"]}, + + {"kind":"and","text":"","code":["data1.type.name() == targetType","data2.type.name() == targetType","data3.type.name() == targetType"]}, + + {"kind":"and","text":"","code":["data1.array == (0..data1.getElementAt((int)it)})","data2.array == (0..data2.getElementAt((int)it)})","data3.array == (0..data3.getElementAt((int)it)})"]}, + + {"kind":"where","text":"","code":{"array":["[1, 2]","[8, 5, 2]","[42]","[1,2,3,4,5]","[1, 2]","[8, 5, 2]","[42]","[1,2,3,4,5]","[1, 2]","[8, 5, 2]","[42]","[1,2,3,4,5]","[1, 2]","[8, 5, 2]","[42]","[1,2,3,4,5]","[0.3, -0.9]","[2, 0.5, 8]","[0.3, -0.9]","[2, 0.5, 8]","[0.6, 3, 0.2]"],"arrayType":["int[]","int[]","int[]","int[]","byte[]","byte[]","byte[]","byte[]","long[]","long[]","long[]","long[]","short[]","short[]","short[]","short[]","float[]","float[]","double[]","double[]","double[]"],"type":["Integer","Integer","Integer","Integer","Byte","Byte","Byte","Byte","Long","Long","Long","Long","Short","Short","Short","Short","Float","Float","Double","Double","Double"],"itemSize":["4","4","4","4","1","1","1","1","8","8","8","8","2","2","2","2","4","4","8","8","8"],"size":["1","2","1","3","1","2","1","3","1","2","1","3","1","2","1","3","2","2","2","2","1"],"offset":["1","0","0","2","1","0","0","2","1","0","0","2","1","0","0","2","0","0","0","0","1"],"targetType":["'I32'","'I32'","'I32'","'I32'","'I8'","'I8'","'I8'","'I8'","'I64'","'I64'","'I64'","'I64'","'I16'","'I16'","'I16'","'I16'","'F32'","'F32'","'F64'","'F64'","'F64'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The \"Data\" class can represent various OpenCL data types. 
[19]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["array = array.asType(arrayType)","var data1 = JVMData.of(array, size, offset)","var data2 = JVMData.of(type, size)","var data3 = JVMData.of(array)"]}, + + {"kind":"expect","text":"","code":["data1.array == array[offset..(offset+size-1)].asType(arrayType)","data2.array == new int[size].asType(arrayType)","data3.array == array"]}, + + {"kind":"and","text":"","code":["data1.length == size","data2.length == size","data3.length == array.length"]}, + + {"kind":"and","text":"","code":["data1.itemSize == itemSize","data2.itemSize == itemSize","data3.itemSize == itemSize"]}, + + {"kind":"and","text":"","code":["data1.pointer != null","data2.pointer != null","data3.pointer != null"]}, + + {"kind":"and","text":"","code":["data1.type.name() == targetType","data2.type.name() == targetType","data3.type.name() == targetType"]}, + + {"kind":"and","text":"","code":["data1.array == (0..data1.getElementAt((int)it)})","data2.array == (0..data2.getElementAt((int)it)})","data3.array == (0..data3.getElementAt((int)it)})"]}, + + {"kind":"where","text":"","code":{"array":["[1, 2]","[8, 5, 2]","[42]","[1,2,3,4,5]","[1, 2]","[8, 5, 2]","[42]","[1,2,3,4,5]","[1, 2]","[8, 5, 2]","[42]","[1,2,3,4,5]","[1, 2]","[8, 5, 2]","[42]","[1,2,3,4,5]","[0.3, -0.9]","[2, 0.5, 8]","[0.3, -0.9]","[2, 0.5, 8]","[0.6, 3, 0.2]"],"arrayType":["int[]","int[]","int[]","int[]","byte[]","byte[]","byte[]","byte[]","long[]","long[]","long[]","long[]","short[]","short[]","short[]","short[]","float[]","float[]","double[]","double[]","double[]"],"type":["Integer","Integer","Integer","Integer","Byte","Byte","Byte","Byte","Long","Long","Long","Long","Short","Short","Short","Short","Float","Float","Double","Double","Double"],"itemSize":["4","4","4","4","1","1","1","1","8","8","8","8","2","2","2","2","4","4","8","8","8"],"size":["1","2","1","3","1","2","1","3","1","2","1","3","1","2","1","3","2","2","2","2","1"],"offset":["1","0","0","2","1","0","0","2","1","0","0","2","1","0","0","2","0","0","0","0","1"],"targetType":["'I32'","'I32'","'I32'","'I32'","'I8'","'I8'","'I8'","'I8'","'I64'","'I64'","'I64'","'I64'","'I16'","'I16'","'I16'","'I16'","'F32'","'F32'","'F64'","'F64'","'F64'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The \"Data\" class can represent various OpenCL data types. 
[20]", "result":"PASS", - "duration":"0.002 seconds", + "duration":"0", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, diff --git a/docs/spock/reports/ut.device.internal.OpenCL_Kernel_Unit_Spec.json b/docs/spock/reports/ut.device.internal.OpenCL_Kernel_Unit_Spec.json index 5adc1c2f5..983fd7ad4 100644 --- a/docs/spock/reports/ut.device.internal.OpenCL_Kernel_Unit_Spec.json +++ b/docs/spock/reports/ut.device.internal.OpenCL_Kernel_Unit_Spec.json @@ -4,19 +4,19 @@ "narrative":"", "subjects":["neureka.backend.main.operations.linear.internal.opencl.CLGEMM","neureka.backend.main.operations.linear.internal.opencl.CLReduce"], "statistics":{ - "runs":"5", + "runs":"6", "successRate":"100.0%", "failures":"0", "errors":"0", "skipped":"0", - "duration":"0.012 seconds" + "duration":"0.055 seconds" }, "headers":[],"tags":{},"see":[], "features":[ { "id":"The GEMM implementation for the OpenCLDevice has realistic behaviour", "result":"PASS", - "duration":"0.002 seconds", + "duration":"0.006 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -31,7 +31,28 @@ }, { - "id":"The Reduce implementation for the OpenCLDevice has realistic behaviour", + "id":"The Reduce implementation for the OpenCLDevice has realistic behaviour [0]", + "result":"PASS", + "duration":"0.011 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["var a = Tensor.ofFloats().withShape(19, 7).andWhere({ i, _ -> (1+(7**i)%30)})","var call = Mock(ExecutionCall)","var device = Mock(OpenCLDevice)","var kernel = Mock(KernelCaller)"]}, + + {"kind":"when","text":"","code":["new CLReduce(type).run( call )"]}, + + {"kind":"then","text":"","code":["_ * call.input(0) >> a","(1.._) * call.input(Float, 0) >> a","(1.._) * call.getDevice() >> device","(1.._) * device.maxWorkGroupSize() >> 64","(0.._) * device.hasAdHocKernel(\"fast_${type.name().toLowerCase()}_reduce_RTS64\") >>> [false, true]","(0.._) * device.findAdHocKernel(\"fast_${type.name().toLowerCase()}_reduce_RTS64\") >> Optional.of(kernel)","(0.._) * device.compileAdHocKernel(\"fast_${type.name().toLowerCase()}_reduce_RTS64\", _) >> device","(0.._) * device.findOrCompileAdHocKernel(\"fast_${type.name().toLowerCase()}_reduce_RTS64\", _) >> kernel","(0.._) * device.compileAndGetAdHocKernel(\"fast_${type.name().toLowerCase()}_reduce_RTS64\", _) >> kernel","(0.._) * device.getAdHocKernel(\"fast_${type.name().toLowerCase()}_reduce_RTS64\") >> kernel","(3.._) * kernel.pass(_) >> kernel"]}, + + {"kind":"and","text":"","code":["(0.._) * device.hasAdHocKernel(CLReduce.INDICES_MAPPER_ID) >>> [false, true]","(0.._) * device.compileAdHocKernel(CLReduce.INDICES_MAPPER_ID, _) >> device","(0.._) * device.compileAndGetAdHocKernel(CLReduce.INDICES_MAPPER_ID, _) >> kernel","(0.._) * device.findOrCompileAdHocKernel(CLReduce.INDICES_MAPPER_ID, _) >> kernel","(0.._) * device.getAdHocKernel(CLReduce.INDICES_MAPPER_ID) >> kernel"]}, + + {"kind":"where","text":"","code":{"type":["CLReduce.Type.MIN","CLReduce.Type.MAX"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The Reduce implementation for the OpenCLDevice has realistic behaviour [1]", "result":"PASS", "duration":"0.002 seconds", "iterations":{ @@ -54,7 +75,7 @@ { "id":"The Sum implementation for the OpenCLDevice has realistic behaviour", "result":"PASS", - "duration":"0.001 seconds", + "duration":"0.010 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -71,7 +92,7 @@ { "id":"The Sum implementation for the OpenCLDevice has realistic behaviour 
for when the number of elements is a prime.", "result":"PASS", - "duration":"0.001 seconds", + "duration":"0.007 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -88,7 +109,7 @@ { "id":"The CLDot implementation for the OpenCLDevice has realistic behaviour", "result":"PASS", - "duration":"0.001 seconds", + "duration":"0.011 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, diff --git a/docs/spock/reports/ut.dtype.DataType_Spec.json b/docs/spock/reports/ut.dtype.DataType_Spec.json index 53c1acf54..ebea7b40e 100644 --- a/docs/spock/reports/ut.dtype.DataType_Spec.json +++ b/docs/spock/reports/ut.dtype.DataType_Spec.json @@ -4,17 +4,206 @@ "narrative":"", "subjects":["neureka.dtype.DataType"], "statistics":{ - "runs":"1", + "runs":"10", "successRate":"100.0%", "failures":"0", "errors":"0", "skipped":"0", - "duration":"0.005 seconds" + "duration":"0.010 seconds" }, "headers":["\n This specification tests the \"DataType\" class, which hosts a multiton\n design pattern in order to guarantee uniqueness of instances of the type\n which represent the same class type.
        \n Instances of this class wrap a Class variable which is the type of the data of the tensor.
        \n (The following types are usually used : UI8, I8, UI16, I16, UI32, I64, I32, F32, F64 )\n "],"tags":{},"see":[], "features":[ { - "id":"DataType multi-ton instances behave as expected.", + "id":"DataType multi-ton instances behave as expected. [0]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A \"DataType\" instance representing / wrapping the relevant datatype Class passed to \"instance(...).\"","code":["DataType dt = DataType.of( typeClass )"]}, + + {"kind":"expect","text":"The found instance is not null!","code":["dt != null"]}, + + {"kind":"and","text":"It contains the Class that it represents.","code":["dt.getRepresentativeType() == targetClass"]}, + + {"kind":"and","text":"This class either does or does not implement the \"NumericType\" interface.","code":["dt.typeClassImplements(NumericType.class) == isNumericType"]}, + + {"kind":"where","text":"The following data is being used :","code":{"typeClass":["I16.class","UI8.class","Float.class","Double.class","Short.class","Byte.class","String.class","Date.class","Object.class","Specification.class"],"targetClass":["I16.class","UI8.class","F32.class","F64.class","I16.class","I8.class","String.class","Date.class","Object.class","Specification.class"],"isNumericType":["true","true","true","true","true","true","false","false","false","false"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"DataType multi-ton instances behave as expected. [1]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A \"DataType\" instance representing / wrapping the relevant datatype Class passed to \"instance(...).\"","code":["DataType dt = DataType.of( typeClass )"]}, + + {"kind":"expect","text":"The found instance is not null!","code":["dt != null"]}, + + {"kind":"and","text":"It contains the Class that it represents.","code":["dt.getRepresentativeType() == targetClass"]}, + + {"kind":"and","text":"This class either does or does not implement the \"NumericType\" interface.","code":["dt.typeClassImplements(NumericType.class) == isNumericType"]}, + + {"kind":"where","text":"The following data is being used :","code":{"typeClass":["I16.class","UI8.class","Float.class","Double.class","Short.class","Byte.class","String.class","Date.class","Object.class","Specification.class"],"targetClass":["I16.class","UI8.class","F32.class","F64.class","I16.class","I8.class","String.class","Date.class","Object.class","Specification.class"],"isNumericType":["true","true","true","true","true","true","false","false","false","false"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"DataType multi-ton instances behave as expected. 
[2]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A \"DataType\" instance representing / wrapping the relevant datatype Class passed to \"instance(...).\"","code":["DataType dt = DataType.of( typeClass )"]}, + + {"kind":"expect","text":"The found instance is not null!","code":["dt != null"]}, + + {"kind":"and","text":"It contains the Class that it represents.","code":["dt.getRepresentativeType() == targetClass"]}, + + {"kind":"and","text":"This class either does or does not implement the \"NumericType\" interface.","code":["dt.typeClassImplements(NumericType.class) == isNumericType"]}, + + {"kind":"where","text":"The following data is being used :","code":{"typeClass":["I16.class","UI8.class","Float.class","Double.class","Short.class","Byte.class","String.class","Date.class","Object.class","Specification.class"],"targetClass":["I16.class","UI8.class","F32.class","F64.class","I16.class","I8.class","String.class","Date.class","Object.class","Specification.class"],"isNumericType":["true","true","true","true","true","true","false","false","false","false"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"DataType multi-ton instances behave as expected. [3]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A \"DataType\" instance representing / wrapping the relevant datatype Class passed to \"instance(...).\"","code":["DataType dt = DataType.of( typeClass )"]}, + + {"kind":"expect","text":"The found instance is not null!","code":["dt != null"]}, + + {"kind":"and","text":"It contains the Class that it represents.","code":["dt.getRepresentativeType() == targetClass"]}, + + {"kind":"and","text":"This class either does or does not implement the \"NumericType\" interface.","code":["dt.typeClassImplements(NumericType.class) == isNumericType"]}, + + {"kind":"where","text":"The following data is being used :","code":{"typeClass":["I16.class","UI8.class","Float.class","Double.class","Short.class","Byte.class","String.class","Date.class","Object.class","Specification.class"],"targetClass":["I16.class","UI8.class","F32.class","F64.class","I16.class","I8.class","String.class","Date.class","Object.class","Specification.class"],"isNumericType":["true","true","true","true","true","true","false","false","false","false"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"DataType multi-ton instances behave as expected. 
[4]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A \"DataType\" instance representing / wrapping the relevant datatype Class passed to \"instance(...).\"","code":["DataType dt = DataType.of( typeClass )"]}, + + {"kind":"expect","text":"The found instance is not null!","code":["dt != null"]}, + + {"kind":"and","text":"It contains the Class that it represents.","code":["dt.getRepresentativeType() == targetClass"]}, + + {"kind":"and","text":"This class either does or does not implement the \"NumericType\" interface.","code":["dt.typeClassImplements(NumericType.class) == isNumericType"]}, + + {"kind":"where","text":"The following data is being used :","code":{"typeClass":["I16.class","UI8.class","Float.class","Double.class","Short.class","Byte.class","String.class","Date.class","Object.class","Specification.class"],"targetClass":["I16.class","UI8.class","F32.class","F64.class","I16.class","I8.class","String.class","Date.class","Object.class","Specification.class"],"isNumericType":["true","true","true","true","true","true","false","false","false","false"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"DataType multi-ton instances behave as expected. [5]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A \"DataType\" instance representing / wrapping the relevant datatype Class passed to \"instance(...).\"","code":["DataType dt = DataType.of( typeClass )"]}, + + {"kind":"expect","text":"The found instance is not null!","code":["dt != null"]}, + + {"kind":"and","text":"It contains the Class that it represents.","code":["dt.getRepresentativeType() == targetClass"]}, + + {"kind":"and","text":"This class either does or does not implement the \"NumericType\" interface.","code":["dt.typeClassImplements(NumericType.class) == isNumericType"]}, + + {"kind":"where","text":"The following data is being used :","code":{"typeClass":["I16.class","UI8.class","Float.class","Double.class","Short.class","Byte.class","String.class","Date.class","Object.class","Specification.class"],"targetClass":["I16.class","UI8.class","F32.class","F64.class","I16.class","I8.class","String.class","Date.class","Object.class","Specification.class"],"isNumericType":["true","true","true","true","true","true","false","false","false","false"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"DataType multi-ton instances behave as expected. 
[6]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A \"DataType\" instance representing / wrapping the relevant datatype Class passed to \"instance(...).\"","code":["DataType dt = DataType.of( typeClass )"]}, + + {"kind":"expect","text":"The found instance is not null!","code":["dt != null"]}, + + {"kind":"and","text":"It contains the Class that it represents.","code":["dt.getRepresentativeType() == targetClass"]}, + + {"kind":"and","text":"This class either does or does not implement the \"NumericType\" interface.","code":["dt.typeClassImplements(NumericType.class) == isNumericType"]}, + + {"kind":"where","text":"The following data is being used :","code":{"typeClass":["I16.class","UI8.class","Float.class","Double.class","Short.class","Byte.class","String.class","Date.class","Object.class","Specification.class"],"targetClass":["I16.class","UI8.class","F32.class","F64.class","I16.class","I8.class","String.class","Date.class","Object.class","Specification.class"],"isNumericType":["true","true","true","true","true","true","false","false","false","false"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"DataType multi-ton instances behave as expected. [7]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A \"DataType\" instance representing / wrapping the relevant datatype Class passed to \"instance(...).\"","code":["DataType dt = DataType.of( typeClass )"]}, + + {"kind":"expect","text":"The found instance is not null!","code":["dt != null"]}, + + {"kind":"and","text":"It contains the Class that it represents.","code":["dt.getRepresentativeType() == targetClass"]}, + + {"kind":"and","text":"This class either does or does not implement the \"NumericType\" interface.","code":["dt.typeClassImplements(NumericType.class) == isNumericType"]}, + + {"kind":"where","text":"The following data is being used :","code":{"typeClass":["I16.class","UI8.class","Float.class","Double.class","Short.class","Byte.class","String.class","Date.class","Object.class","Specification.class"],"targetClass":["I16.class","UI8.class","F32.class","F64.class","I16.class","I8.class","String.class","Date.class","Object.class","Specification.class"],"isNumericType":["true","true","true","true","true","true","false","false","false","false"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"DataType multi-ton instances behave as expected. 
[8]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A \"DataType\" instance representing / wrapping the relevant datatype Class passed to \"instance(...).\"","code":["DataType dt = DataType.of( typeClass )"]}, + + {"kind":"expect","text":"The found instance is not null!","code":["dt != null"]}, + + {"kind":"and","text":"It contains the Class that it represents.","code":["dt.getRepresentativeType() == targetClass"]}, + + {"kind":"and","text":"This class either does or does not implement the \"NumericType\" interface.","code":["dt.typeClassImplements(NumericType.class) == isNumericType"]}, + + {"kind":"where","text":"The following data is being used :","code":{"typeClass":["I16.class","UI8.class","Float.class","Double.class","Short.class","Byte.class","String.class","Date.class","Object.class","Specification.class"],"targetClass":["I16.class","UI8.class","F32.class","F64.class","I16.class","I8.class","String.class","Date.class","Object.class","Specification.class"],"isNumericType":["true","true","true","true","true","true","false","false","false","false"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"DataType multi-ton instances behave as expected. [9]", "result":"PASS", "duration":"0", "iterations":{ diff --git a/docs/spock/reports/ut.dtype.NumericType_Spec.json b/docs/spock/reports/ut.dtype.NumericType_Spec.json index 593e07483..ab2ce5f2c 100644 --- a/docs/spock/reports/ut.dtype.NumericType_Spec.json +++ b/docs/spock/reports/ut.dtype.NumericType_Spec.json @@ -1,20 +1,20 @@ { "className":"ut.dtype.NumericType_Spec", "title":"The NumericType and its implementations model their respective numeric data types.", - "narrative":"This specification covers the behavior of the NumericType interface\n which is responsible for modelling numeric data types which may or may not be native to the JVM.\n These implementations however do not model them in the traditional OO style\n but merely expose useful utility method for converting and representing\n these numeric data types using JVM types.", + "narrative":"This specification covers the behavior of the NumericType interface\n which is responsible for modelling numeric data types which may or may not be native to the JVM. \n These implementations however do not model them in the traditional OO style\n but merely expose useful utility method for converting and representing \n these numeric data types using JVM types.", "subjects":["neureka.dtype.NumericType"], "statistics":{ - "runs":"4", + "runs":"58", "successRate":"100.0%", "failures":"0", "errors":"0", "skipped":"0", - "duration":"0.032 seconds" + "duration":"0.061 seconds" }, "headers":["\n This specification covers implementations\n of the \"interface neureka.dtype.NumericType\" interface.\n Such classes are responsible for\n representing all numeric types including the ones\n which are foreign to the JVM, namely : \n unsigned integer types.\n "],"tags":{},"see":[], "features":[ { - "id":"NumericType implementations return their expected properties.", + "id":"NumericType implementations return their expected properties. [0]", "result":"PASS", "duration":"0", "iterations":{ @@ -35,12 +35,201 @@ }, { - "id":"NumericType implementations behave as expected.", + "id":"NumericType implementations return their expected properties. 
[1]", "result":"PASS", "duration":"0", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, + "blocks":[ + {"kind":"expect","text":"The type instance describes the expected number of bytes.","code":["type.numberOfBytes() == bytes"]}, + + {"kind":"and","text":"It describes the expected JVM target type.","code":["type.targetType() == target"]}, + + {"kind":"and","text":"It also describes the expected array type of said JVM target type.","code":["type.targetArrayType() == array"]}, + + {"kind":"and","text":"The instance knows if it is signed or not.","code":["type.signed() == signed"]}, + + {"kind":"where","text":"The following data is being used: ","code":{"type":["new I8()","new UI8()","new I16()","new UI16()","new I32()","new UI32()","new I64()","new UI64()","new F32()","new F64()"],"bytes":["1","1","2","2","4","4","8","8","4","8"],"target":["Byte.class","Short.class","Short.class","Integer.class","Integer.class","Long.class","Long.class","BigInteger.class","Float.class","Double.class"],"array":["byte[].class","short[].class","short[].class","int[].class","int[].class","long[].class","long[].class","BigInteger[].class","float[].class","double[].class"],"signed":["true","false","true","false","true","false","true","false","true","true"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"NumericType implementations return their expected properties. [2]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"The type instance describes the expected number of bytes.","code":["type.numberOfBytes() == bytes"]}, + + {"kind":"and","text":"It describes the expected JVM target type.","code":["type.targetType() == target"]}, + + {"kind":"and","text":"It also describes the expected array type of said JVM target type.","code":["type.targetArrayType() == array"]}, + + {"kind":"and","text":"The instance knows if it is signed or not.","code":["type.signed() == signed"]}, + + {"kind":"where","text":"The following data is being used: ","code":{"type":["new I8()","new UI8()","new I16()","new UI16()","new I32()","new UI32()","new I64()","new UI64()","new F32()","new F64()"],"bytes":["1","1","2","2","4","4","8","8","4","8"],"target":["Byte.class","Short.class","Short.class","Integer.class","Integer.class","Long.class","Long.class","BigInteger.class","Float.class","Double.class"],"array":["byte[].class","short[].class","short[].class","int[].class","int[].class","long[].class","long[].class","BigInteger[].class","float[].class","double[].class"],"signed":["true","false","true","false","true","false","true","false","true","true"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"NumericType implementations return their expected properties. 
[3]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"The type instance describes the expected number of bytes.","code":["type.numberOfBytes() == bytes"]}, + + {"kind":"and","text":"It describes the expected JVM target type.","code":["type.targetType() == target"]}, + + {"kind":"and","text":"It also describes the expected array type of said JVM target type.","code":["type.targetArrayType() == array"]}, + + {"kind":"and","text":"The instance knows if it is signed or not.","code":["type.signed() == signed"]}, + + {"kind":"where","text":"The following data is being used: ","code":{"type":["new I8()","new UI8()","new I16()","new UI16()","new I32()","new UI32()","new I64()","new UI64()","new F32()","new F64()"],"bytes":["1","1","2","2","4","4","8","8","4","8"],"target":["Byte.class","Short.class","Short.class","Integer.class","Integer.class","Long.class","Long.class","BigInteger.class","Float.class","Double.class"],"array":["byte[].class","short[].class","short[].class","int[].class","int[].class","long[].class","long[].class","BigInteger[].class","float[].class","double[].class"],"signed":["true","false","true","false","true","false","true","false","true","true"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"NumericType implementations return their expected properties. [4]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"The type instance describes the expected number of bytes.","code":["type.numberOfBytes() == bytes"]}, + + {"kind":"and","text":"It describes the expected JVM target type.","code":["type.targetType() == target"]}, + + {"kind":"and","text":"It also describes the expected array type of said JVM target type.","code":["type.targetArrayType() == array"]}, + + {"kind":"and","text":"The instance knows if it is signed or not.","code":["type.signed() == signed"]}, + + {"kind":"where","text":"The following data is being used: ","code":{"type":["new I8()","new UI8()","new I16()","new UI16()","new I32()","new UI32()","new I64()","new UI64()","new F32()","new F64()"],"bytes":["1","1","2","2","4","4","8","8","4","8"],"target":["Byte.class","Short.class","Short.class","Integer.class","Integer.class","Long.class","Long.class","BigInteger.class","Float.class","Double.class"],"array":["byte[].class","short[].class","short[].class","int[].class","int[].class","long[].class","long[].class","BigInteger[].class","float[].class","double[].class"],"signed":["true","false","true","false","true","false","true","false","true","true"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"NumericType implementations return their expected properties. 
[5]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"The type instance describes the expected number of bytes.","code":["type.numberOfBytes() == bytes"]}, + + {"kind":"and","text":"It describes the expected JVM target type.","code":["type.targetType() == target"]}, + + {"kind":"and","text":"It also describes the expected array type of said JVM target type.","code":["type.targetArrayType() == array"]}, + + {"kind":"and","text":"The instance knows if it is signed or not.","code":["type.signed() == signed"]}, + + {"kind":"where","text":"The following data is being used: ","code":{"type":["new I8()","new UI8()","new I16()","new UI16()","new I32()","new UI32()","new I64()","new UI64()","new F32()","new F64()"],"bytes":["1","1","2","2","4","4","8","8","4","8"],"target":["Byte.class","Short.class","Short.class","Integer.class","Integer.class","Long.class","Long.class","BigInteger.class","Float.class","Double.class"],"array":["byte[].class","short[].class","short[].class","int[].class","int[].class","long[].class","long[].class","BigInteger[].class","float[].class","double[].class"],"signed":["true","false","true","false","true","false","true","false","true","true"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"NumericType implementations return their expected properties. [6]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"The type instance describes the expected number of bytes.","code":["type.numberOfBytes() == bytes"]}, + + {"kind":"and","text":"It describes the expected JVM target type.","code":["type.targetType() == target"]}, + + {"kind":"and","text":"It also describes the expected array type of said JVM target type.","code":["type.targetArrayType() == array"]}, + + {"kind":"and","text":"The instance knows if it is signed or not.","code":["type.signed() == signed"]}, + + {"kind":"where","text":"The following data is being used: ","code":{"type":["new I8()","new UI8()","new I16()","new UI16()","new I32()","new UI32()","new I64()","new UI64()","new F32()","new F64()"],"bytes":["1","1","2","2","4","4","8","8","4","8"],"target":["Byte.class","Short.class","Short.class","Integer.class","Integer.class","Long.class","Long.class","BigInteger.class","Float.class","Double.class"],"array":["byte[].class","short[].class","short[].class","int[].class","int[].class","long[].class","long[].class","BigInteger[].class","float[].class","double[].class"],"signed":["true","false","true","false","true","false","true","false","true","true"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"NumericType implementations return their expected properties. 
[7]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"The type instance describes the expected number of bytes.","code":["type.numberOfBytes() == bytes"]}, + + {"kind":"and","text":"It describes the expected JVM target type.","code":["type.targetType() == target"]}, + + {"kind":"and","text":"It also describes the expected array type of said JVM target type.","code":["type.targetArrayType() == array"]}, + + {"kind":"and","text":"The instance knows if it is signed or not.","code":["type.signed() == signed"]}, + + {"kind":"where","text":"The following data is being used: ","code":{"type":["new I8()","new UI8()","new I16()","new UI16()","new I32()","new UI32()","new I64()","new UI64()","new F32()","new F64()"],"bytes":["1","1","2","2","4","4","8","8","4","8"],"target":["Byte.class","Short.class","Short.class","Integer.class","Integer.class","Long.class","Long.class","BigInteger.class","Float.class","Double.class"],"array":["byte[].class","short[].class","short[].class","int[].class","int[].class","long[].class","long[].class","BigInteger[].class","float[].class","double[].class"],"signed":["true","false","true","false","true","false","true","false","true","true"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"NumericType implementations return their expected properties. [8]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"The type instance describes the expected number of bytes.","code":["type.numberOfBytes() == bytes"]}, + + {"kind":"and","text":"It describes the expected JVM target type.","code":["type.targetType() == target"]}, + + {"kind":"and","text":"It also describes the expected array type of said JVM target type.","code":["type.targetArrayType() == array"]}, + + {"kind":"and","text":"The instance knows if it is signed or not.","code":["type.signed() == signed"]}, + + {"kind":"where","text":"The following data is being used: ","code":{"type":["new I8()","new UI8()","new I16()","new UI16()","new I32()","new UI32()","new I64()","new UI64()","new F32()","new F64()"],"bytes":["1","1","2","2","4","4","8","8","4","8"],"target":["Byte.class","Short.class","Short.class","Integer.class","Integer.class","Long.class","Long.class","BigInteger.class","Float.class","Double.class"],"array":["byte[].class","short[].class","short[].class","int[].class","int[].class","long[].class","long[].class","BigInteger[].class","float[].class","double[].class"],"signed":["true","false","true","false","true","false","true","false","true","true"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"NumericType implementations return their expected properties. 
[9]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"The type instance describes the expected number of bytes.","code":["type.numberOfBytes() == bytes"]}, + + {"kind":"and","text":"It describes the expected JVM target type.","code":["type.targetType() == target"]}, + + {"kind":"and","text":"It also describes the expected array type of said JVM target type.","code":["type.targetArrayType() == array"]}, + + {"kind":"and","text":"The instance knows if it is signed or not.","code":["type.signed() == signed"]}, + + {"kind":"where","text":"The following data is being used: ","code":{"type":["new I8()","new UI8()","new I16()","new UI16()","new I32()","new UI32()","new I64()","new UI64()","new F32()","new F64()"],"bytes":["1","1","2","2","4","4","8","8","4","8"],"target":["Byte.class","Short.class","Short.class","Integer.class","Integer.class","Long.class","Long.class","BigInteger.class","Float.class","Double.class"],"array":["byte[].class","short[].class","short[].class","int[].class","int[].class","long[].class","long[].class","BigInteger[].class","float[].class","double[].class"],"signed":["true","false","true","false","true","false","true","false","true","true"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"NumericType implementations behave as expected. [0]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, "blocks":[ {"kind":"given","text":"","code":["def result = type.foreignHolderBytesToTarget( data as byte[] )"]}, @@ -54,30 +243,1031 @@ }, { - "id":"Conversion goes both ways and produces expected numeric values.", + "id":"NumericType implementations behave as expected. [1]", "result":"PASS", - "duration":"0.001 seconds", + "duration":"0", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, "blocks":[ - {"kind":"when","text":"We apply a filter in order to guarantee that the right data type is being used.","code":["original = ["," 'UI8' : { o -> o as Byte },"," 'UI16': { o -> o as Short },"," 'UI32': { o -> o as Integer },"," 'UI64': { o -> o as Long },"," 'I8' : { o -> o as Byte },"," 'I16' : { o -> o as Short },"," 'I32' : { o -> o as Integer },"," 'I64' : { o -> o as Long },"," 'F32' : { o -> o as Float },"," 'F64' : { o -> o as Double }","][ num.class.simpleName ](original)"]}, + {"kind":"given","text":"","code":["def result = type.foreignHolderBytesToTarget( data as byte[] )"]}, - {"kind":"and","text":"The convert the raw type (might represent unsigned value) to a JVM compatible target type...","code":["def resultTarget = num.foreignHolderBytesToTarget( rawOriginal )"]}, + {"kind":"expect","text":"The array of bytes is being converted to a fitting JVM type.","code":["result == converted"]}, - {"kind":"and","text":"Then convert this result to the true byte array of the value...","code":["def backToRaw = num.targetToForeignHolderBytes( resultTarget )"]}, + {"kind":"and","text":"The original byte array can be recreated by converting with the inverse...","code":["type.targetToForeignHolderBytes(result) == ( data as byte[] )"]}, - {"kind":"then","text":"This produces the expected values which express the following relationships:","code":["resultTarget == target","backToRaw == rawOriginal","num.toTarget( original ) == target","num.convertToHolder(target) == original"]}, + {"kind":"where","text":"The following NumericType instances and bytes are being used :","code":{"type":["new I8()","new UI8()","new I16()","new I16()","new 
I16()","new I16()","new UI16()","new UI16()","new UI16()","new UI16()","new I32()","new I32()","new UI32()","new UI32()","new I64()","new I64()","new UI64()","new UI64()"],"data":["[-23]","[-23]","[2, 3]","[-16, -53]","[16, -53]","[-1, -1]","[2, 3]","[-16, -53]","[16, -53]","[-1, -1]","[22,-2, 3,-4]","[-22,-2, -3,-4]","[22,-2, 3,-4]","[-22,-2, -3,-4]","[99, 2, 1, 35, 2, 5, 37, 22]","[-99, 2, 1, -35, 2,5,-37,22]","[99, 2, 1, 35, 2, 5, 37, 22]","[-99, 2, 1, -35, 2,5,-37,22]"],"converted":["-23","233","new BigInteger(new byte[]{2, 3}).shortValueExact()","((short)-3893)","4299","-1","new BigInteger(new byte[]{2, 3}).shortValueExact()","(int)(0x10000 + ((short)-3893))","4_299","65_535","385_745_916","-352_387_588","385_745_916","3_942_579_708","7_134_266_009_577_661_718","- 7_133_136_811_068_105_962","7_134_266_009_577_661_718","11_313_607_262_641_445_654"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"NumericType implementations behave as expected. [2]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["def result = type.foreignHolderBytesToTarget( data as byte[] )"]}, - {"kind":"and","text":"The numeric type instance can perform array conversion.","code":["num.convertToTargetArray( rawOriginal as double[] ) == rawOriginal // Groovy automatically tests values","num.convertToTargetArray( rawOriginal as float[] ) == rawOriginal // ...despite difference types...","num.convertToTargetArray( rawOriginal as int[] ) == rawOriginal","num.convertToTargetArray( rawOriginal as short[] ) == rawOriginal","num.convertToTargetArray( rawOriginal as long[] ) == rawOriginal"]}, + {"kind":"expect","text":"The array of bytes is being converted to a fitting JVM type.","code":["result == converted"]}, - {"kind":"where","text":"The following \"NumericType\" implementation instances and numeric data is being used: ","code":{"num":["new UI8()","new UI16()","new UI32()","new UI64()","new I8()","new I16()","new I32()","new I64()","new F32()","new F64()","new F32()","new F64()"],"original":["-3","-3","-3","-3","-3","-3","-3","-3","-0.3","-0.3","-5432.39928","-5432.39928"],"rawOriginal":["[-3]","[255, 253]","[255, 255, 255, 253]","[255, 255, 255, 255, 255, 255, 255, 253]","[-3]","[255, 253]","[255, 255, 255, 253]","[255, 255, 255, 255, 255, 255, 255, 253]","[-66, -103, -103, -102]","[-65, -45, 51, 51, 51, 51, 51, 51]","[-59, -87, -61, 50]","[-64, -75, 56, 102, 55, 54, -51, -14]"],"target":["255 - 2","65_535 - 2","4_294_967_295 - 2","18_446_744_073_709_551_615 - 2","- 3","- 3","- 3","- 3","- 0.3 as Float","- 0.3 as Double","-5432.39928 as Float","-5432.39928 as Double"]}} + {"kind":"and","text":"The original byte array can be recreated by converting with the inverse...","code":["type.targetToForeignHolderBytes(result) == ( data as byte[] )"]}, + + {"kind":"where","text":"The following NumericType instances and bytes are being used :","code":{"type":["new I8()","new UI8()","new I16()","new I16()","new I16()","new I16()","new UI16()","new UI16()","new UI16()","new UI16()","new I32()","new I32()","new UI32()","new UI32()","new I64()","new I64()","new UI64()","new UI64()"],"data":["[-23]","[-23]","[2, 3]","[-16, -53]","[16, -53]","[-1, -1]","[2, 3]","[-16, -53]","[16, -53]","[-1, -1]","[22,-2, 3,-4]","[-22,-2, -3,-4]","[22,-2, 3,-4]","[-22,-2, -3,-4]","[99, 2, 1, 35, 2, 5, 37, 22]","[-99, 2, 1, -35, 2,5,-37,22]","[99, 2, 1, 35, 2, 5, 37, 22]","[-99, 2, 1, -35, 2,5,-37,22]"],"converted":["-23","233","new 
BigInteger(new byte[]{2, 3}).shortValueExact()","((short)-3893)","4299","-1","new BigInteger(new byte[]{2, 3}).shortValueExact()","(int)(0x10000 + ((short)-3893))","4_299","65_535","385_745_916","-352_387_588","385_745_916","3_942_579_708","7_134_266_009_577_661_718","- 7_133_136_811_068_105_962","7_134_266_009_577_661_718","11_313_607_262_641_445_654"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"NumericType implementations behave as expected. [3]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["def result = type.foreignHolderBytesToTarget( data as byte[] )"]}, + + {"kind":"expect","text":"The array of bytes is being converted to a fitting JVM type.","code":["result == converted"]}, + + {"kind":"and","text":"The original byte array can be recreated by converting with the inverse...","code":["type.targetToForeignHolderBytes(result) == ( data as byte[] )"]}, + + {"kind":"where","text":"The following NumericType instances and bytes are being used :","code":{"type":["new I8()","new UI8()","new I16()","new I16()","new I16()","new I16()","new UI16()","new UI16()","new UI16()","new UI16()","new I32()","new I32()","new UI32()","new UI32()","new I64()","new I64()","new UI64()","new UI64()"],"data":["[-23]","[-23]","[2, 3]","[-16, -53]","[16, -53]","[-1, -1]","[2, 3]","[-16, -53]","[16, -53]","[-1, -1]","[22,-2, 3,-4]","[-22,-2, -3,-4]","[22,-2, 3,-4]","[-22,-2, -3,-4]","[99, 2, 1, 35, 2, 5, 37, 22]","[-99, 2, 1, -35, 2,5,-37,22]","[99, 2, 1, 35, 2, 5, 37, 22]","[-99, 2, 1, -35, 2,5,-37,22]"],"converted":["-23","233","new BigInteger(new byte[]{2, 3}).shortValueExact()","((short)-3893)","4299","-1","new BigInteger(new byte[]{2, 3}).shortValueExact()","(int)(0x10000 + ((short)-3893))","4_299","65_535","385_745_916","-352_387_588","385_745_916","3_942_579_708","7_134_266_009_577_661_718","- 7_133_136_811_068_105_962","7_134_266_009_577_661_718","11_313_607_262_641_445_654"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"NumericType implementations behave as expected. 
[4]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["def result = type.foreignHolderBytesToTarget( data as byte[] )"]}, + + {"kind":"expect","text":"The array of bytes is being converted to a fitting JVM type.","code":["result == converted"]}, + + {"kind":"and","text":"The original byte array can be recreated by converting with the inverse...","code":["type.targetToForeignHolderBytes(result) == ( data as byte[] )"]}, + + {"kind":"where","text":"The following NumericType instances and bytes are being used :","code":{"type":["new I8()","new UI8()","new I16()","new I16()","new I16()","new I16()","new UI16()","new UI16()","new UI16()","new UI16()","new I32()","new I32()","new UI32()","new UI32()","new I64()","new I64()","new UI64()","new UI64()"],"data":["[-23]","[-23]","[2, 3]","[-16, -53]","[16, -53]","[-1, -1]","[2, 3]","[-16, -53]","[16, -53]","[-1, -1]","[22,-2, 3,-4]","[-22,-2, -3,-4]","[22,-2, 3,-4]","[-22,-2, -3,-4]","[99, 2, 1, 35, 2, 5, 37, 22]","[-99, 2, 1, -35, 2,5,-37,22]","[99, 2, 1, 35, 2, 5, 37, 22]","[-99, 2, 1, -35, 2,5,-37,22]"],"converted":["-23","233","new BigInteger(new byte[]{2, 3}).shortValueExact()","((short)-3893)","4299","-1","new BigInteger(new byte[]{2, 3}).shortValueExact()","(int)(0x10000 + ((short)-3893))","4_299","65_535","385_745_916","-352_387_588","385_745_916","3_942_579_708","7_134_266_009_577_661_718","- 7_133_136_811_068_105_962","7_134_266_009_577_661_718","11_313_607_262_641_445_654"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"NumericType implementations behave as expected. [5]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["def result = type.foreignHolderBytesToTarget( data as byte[] )"]}, + + {"kind":"expect","text":"The array of bytes is being converted to a fitting JVM type.","code":["result == converted"]}, + + {"kind":"and","text":"The original byte array can be recreated by converting with the inverse...","code":["type.targetToForeignHolderBytes(result) == ( data as byte[] )"]}, + + {"kind":"where","text":"The following NumericType instances and bytes are being used :","code":{"type":["new I8()","new UI8()","new I16()","new I16()","new I16()","new I16()","new UI16()","new UI16()","new UI16()","new UI16()","new I32()","new I32()","new UI32()","new UI32()","new I64()","new I64()","new UI64()","new UI64()"],"data":["[-23]","[-23]","[2, 3]","[-16, -53]","[16, -53]","[-1, -1]","[2, 3]","[-16, -53]","[16, -53]","[-1, -1]","[22,-2, 3,-4]","[-22,-2, -3,-4]","[22,-2, 3,-4]","[-22,-2, -3,-4]","[99, 2, 1, 35, 2, 5, 37, 22]","[-99, 2, 1, -35, 2,5,-37,22]","[99, 2, 1, 35, 2, 5, 37, 22]","[-99, 2, 1, -35, 2,5,-37,22]"],"converted":["-23","233","new BigInteger(new byte[]{2, 3}).shortValueExact()","((short)-3893)","4299","-1","new BigInteger(new byte[]{2, 3}).shortValueExact()","(int)(0x10000 + ((short)-3893))","4_299","65_535","385_745_916","-352_387_588","385_745_916","3_942_579_708","7_134_266_009_577_661_718","- 7_133_136_811_068_105_962","7_134_266_009_577_661_718","11_313_607_262_641_445_654"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"NumericType implementations behave as expected. 
[6]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["def result = type.foreignHolderBytesToTarget( data as byte[] )"]}, + + {"kind":"expect","text":"The array of bytes is being converted to a fitting JVM type.","code":["result == converted"]}, + + {"kind":"and","text":"The original byte array can be recreated by converting with the inverse...","code":["type.targetToForeignHolderBytes(result) == ( data as byte[] )"]}, + + {"kind":"where","text":"The following NumericType instances and bytes are being used :","code":{"type":["new I8()","new UI8()","new I16()","new I16()","new I16()","new I16()","new UI16()","new UI16()","new UI16()","new UI16()","new I32()","new I32()","new UI32()","new UI32()","new I64()","new I64()","new UI64()","new UI64()"],"data":["[-23]","[-23]","[2, 3]","[-16, -53]","[16, -53]","[-1, -1]","[2, 3]","[-16, -53]","[16, -53]","[-1, -1]","[22,-2, 3,-4]","[-22,-2, -3,-4]","[22,-2, 3,-4]","[-22,-2, -3,-4]","[99, 2, 1, 35, 2, 5, 37, 22]","[-99, 2, 1, -35, 2,5,-37,22]","[99, 2, 1, 35, 2, 5, 37, 22]","[-99, 2, 1, -35, 2,5,-37,22]"],"converted":["-23","233","new BigInteger(new byte[]{2, 3}).shortValueExact()","((short)-3893)","4299","-1","new BigInteger(new byte[]{2, 3}).shortValueExact()","(int)(0x10000 + ((short)-3893))","4_299","65_535","385_745_916","-352_387_588","385_745_916","3_942_579_708","7_134_266_009_577_661_718","- 7_133_136_811_068_105_962","7_134_266_009_577_661_718","11_313_607_262_641_445_654"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"NumericType implementations behave as expected. [7]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["def result = type.foreignHolderBytesToTarget( data as byte[] )"]}, + + {"kind":"expect","text":"The array of bytes is being converted to a fitting JVM type.","code":["result == converted"]}, + + {"kind":"and","text":"The original byte array can be recreated by converting with the inverse...","code":["type.targetToForeignHolderBytes(result) == ( data as byte[] )"]}, + + {"kind":"where","text":"The following NumericType instances and bytes are being used :","code":{"type":["new I8()","new UI8()","new I16()","new I16()","new I16()","new I16()","new UI16()","new UI16()","new UI16()","new UI16()","new I32()","new I32()","new UI32()","new UI32()","new I64()","new I64()","new UI64()","new UI64()"],"data":["[-23]","[-23]","[2, 3]","[-16, -53]","[16, -53]","[-1, -1]","[2, 3]","[-16, -53]","[16, -53]","[-1, -1]","[22,-2, 3,-4]","[-22,-2, -3,-4]","[22,-2, 3,-4]","[-22,-2, -3,-4]","[99, 2, 1, 35, 2, 5, 37, 22]","[-99, 2, 1, -35, 2,5,-37,22]","[99, 2, 1, 35, 2, 5, 37, 22]","[-99, 2, 1, -35, 2,5,-37,22]"],"converted":["-23","233","new BigInteger(new byte[]{2, 3}).shortValueExact()","((short)-3893)","4299","-1","new BigInteger(new byte[]{2, 3}).shortValueExact()","(int)(0x10000 + ((short)-3893))","4_299","65_535","385_745_916","-352_387_588","385_745_916","3_942_579_708","7_134_266_009_577_661_718","- 7_133_136_811_068_105_962","7_134_266_009_577_661_718","11_313_607_262_641_445_654"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"NumericType implementations behave as expected. 
[8]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["def result = type.foreignHolderBytesToTarget( data as byte[] )"]}, + + {"kind":"expect","text":"The array of bytes is being converted to a fitting JVM type.","code":["result == converted"]}, + + {"kind":"and","text":"The original byte array can be recreated by converting with the inverse...","code":["type.targetToForeignHolderBytes(result) == ( data as byte[] )"]}, + + {"kind":"where","text":"The following NumericType instances and bytes are being used :","code":{"type":["new I8()","new UI8()","new I16()","new I16()","new I16()","new I16()","new UI16()","new UI16()","new UI16()","new UI16()","new I32()","new I32()","new UI32()","new UI32()","new I64()","new I64()","new UI64()","new UI64()"],"data":["[-23]","[-23]","[2, 3]","[-16, -53]","[16, -53]","[-1, -1]","[2, 3]","[-16, -53]","[16, -53]","[-1, -1]","[22,-2, 3,-4]","[-22,-2, -3,-4]","[22,-2, 3,-4]","[-22,-2, -3,-4]","[99, 2, 1, 35, 2, 5, 37, 22]","[-99, 2, 1, -35, 2,5,-37,22]","[99, 2, 1, 35, 2, 5, 37, 22]","[-99, 2, 1, -35, 2,5,-37,22]"],"converted":["-23","233","new BigInteger(new byte[]{2, 3}).shortValueExact()","((short)-3893)","4299","-1","new BigInteger(new byte[]{2, 3}).shortValueExact()","(int)(0x10000 + ((short)-3893))","4_299","65_535","385_745_916","-352_387_588","385_745_916","3_942_579_708","7_134_266_009_577_661_718","- 7_133_136_811_068_105_962","7_134_266_009_577_661_718","11_313_607_262_641_445_654"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"NumericType implementations behave as expected. [9]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["def result = type.foreignHolderBytesToTarget( data as byte[] )"]}, + + {"kind":"expect","text":"The array of bytes is being converted to a fitting JVM type.","code":["result == converted"]}, + + {"kind":"and","text":"The original byte array can be recreated by converting with the inverse...","code":["type.targetToForeignHolderBytes(result) == ( data as byte[] )"]}, + + {"kind":"where","text":"The following NumericType instances and bytes are being used :","code":{"type":["new I8()","new UI8()","new I16()","new I16()","new I16()","new I16()","new UI16()","new UI16()","new UI16()","new UI16()","new I32()","new I32()","new UI32()","new UI32()","new I64()","new I64()","new UI64()","new UI64()"],"data":["[-23]","[-23]","[2, 3]","[-16, -53]","[16, -53]","[-1, -1]","[2, 3]","[-16, -53]","[16, -53]","[-1, -1]","[22,-2, 3,-4]","[-22,-2, -3,-4]","[22,-2, 3,-4]","[-22,-2, -3,-4]","[99, 2, 1, 35, 2, 5, 37, 22]","[-99, 2, 1, -35, 2,5,-37,22]","[99, 2, 1, 35, 2, 5, 37, 22]","[-99, 2, 1, -35, 2,5,-37,22]"],"converted":["-23","233","new BigInteger(new byte[]{2, 3}).shortValueExact()","((short)-3893)","4299","-1","new BigInteger(new byte[]{2, 3}).shortValueExact()","(int)(0x10000 + ((short)-3893))","4_299","65_535","385_745_916","-352_387_588","385_745_916","3_942_579_708","7_134_266_009_577_661_718","- 7_133_136_811_068_105_962","7_134_266_009_577_661_718","11_313_607_262_641_445_654"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"NumericType implementations behave as expected. 
[10]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["def result = type.foreignHolderBytesToTarget( data as byte[] )"]}, + + {"kind":"expect","text":"The array of bytes is being converted to a fitting JVM type.","code":["result == converted"]}, + + {"kind":"and","text":"The original byte array can be recreated by converting with the inverse...","code":["type.targetToForeignHolderBytes(result) == ( data as byte[] )"]}, + + {"kind":"where","text":"The following NumericType instances and bytes are being used :","code":{"type":["new I8()","new UI8()","new I16()","new I16()","new I16()","new I16()","new UI16()","new UI16()","new UI16()","new UI16()","new I32()","new I32()","new UI32()","new UI32()","new I64()","new I64()","new UI64()","new UI64()"],"data":["[-23]","[-23]","[2, 3]","[-16, -53]","[16, -53]","[-1, -1]","[2, 3]","[-16, -53]","[16, -53]","[-1, -1]","[22,-2, 3,-4]","[-22,-2, -3,-4]","[22,-2, 3,-4]","[-22,-2, -3,-4]","[99, 2, 1, 35, 2, 5, 37, 22]","[-99, 2, 1, -35, 2,5,-37,22]","[99, 2, 1, 35, 2, 5, 37, 22]","[-99, 2, 1, -35, 2,5,-37,22]"],"converted":["-23","233","new BigInteger(new byte[]{2, 3}).shortValueExact()","((short)-3893)","4299","-1","new BigInteger(new byte[]{2, 3}).shortValueExact()","(int)(0x10000 + ((short)-3893))","4_299","65_535","385_745_916","-352_387_588","385_745_916","3_942_579_708","7_134_266_009_577_661_718","- 7_133_136_811_068_105_962","7_134_266_009_577_661_718","11_313_607_262_641_445_654"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"NumericType implementations behave as expected. [11]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["def result = type.foreignHolderBytesToTarget( data as byte[] )"]}, + + {"kind":"expect","text":"The array of bytes is being converted to a fitting JVM type.","code":["result == converted"]}, + + {"kind":"and","text":"The original byte array can be recreated by converting with the inverse...","code":["type.targetToForeignHolderBytes(result) == ( data as byte[] )"]}, + + {"kind":"where","text":"The following NumericType instances and bytes are being used :","code":{"type":["new I8()","new UI8()","new I16()","new I16()","new I16()","new I16()","new UI16()","new UI16()","new UI16()","new UI16()","new I32()","new I32()","new UI32()","new UI32()","new I64()","new I64()","new UI64()","new UI64()"],"data":["[-23]","[-23]","[2, 3]","[-16, -53]","[16, -53]","[-1, -1]","[2, 3]","[-16, -53]","[16, -53]","[-1, -1]","[22,-2, 3,-4]","[-22,-2, -3,-4]","[22,-2, 3,-4]","[-22,-2, -3,-4]","[99, 2, 1, 35, 2, 5, 37, 22]","[-99, 2, 1, -35, 2,5,-37,22]","[99, 2, 1, 35, 2, 5, 37, 22]","[-99, 2, 1, -35, 2,5,-37,22]"],"converted":["-23","233","new BigInteger(new byte[]{2, 3}).shortValueExact()","((short)-3893)","4299","-1","new BigInteger(new byte[]{2, 3}).shortValueExact()","(int)(0x10000 + ((short)-3893))","4_299","65_535","385_745_916","-352_387_588","385_745_916","3_942_579_708","7_134_266_009_577_661_718","- 7_133_136_811_068_105_962","7_134_266_009_577_661_718","11_313_607_262_641_445_654"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"NumericType implementations behave as expected. 
[12]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["def result = type.foreignHolderBytesToTarget( data as byte[] )"]}, + + {"kind":"expect","text":"The array of bytes is being converted to a fitting JVM type.","code":["result == converted"]}, + + {"kind":"and","text":"The original byte array can be recreated by converting with the inverse...","code":["type.targetToForeignHolderBytes(result) == ( data as byte[] )"]}, + + {"kind":"where","text":"The following NumericType instances and bytes are being used :","code":{"type":["new I8()","new UI8()","new I16()","new I16()","new I16()","new I16()","new UI16()","new UI16()","new UI16()","new UI16()","new I32()","new I32()","new UI32()","new UI32()","new I64()","new I64()","new UI64()","new UI64()"],"data":["[-23]","[-23]","[2, 3]","[-16, -53]","[16, -53]","[-1, -1]","[2, 3]","[-16, -53]","[16, -53]","[-1, -1]","[22,-2, 3,-4]","[-22,-2, -3,-4]","[22,-2, 3,-4]","[-22,-2, -3,-4]","[99, 2, 1, 35, 2, 5, 37, 22]","[-99, 2, 1, -35, 2,5,-37,22]","[99, 2, 1, 35, 2, 5, 37, 22]","[-99, 2, 1, -35, 2,5,-37,22]"],"converted":["-23","233","new BigInteger(new byte[]{2, 3}).shortValueExact()","((short)-3893)","4299","-1","new BigInteger(new byte[]{2, 3}).shortValueExact()","(int)(0x10000 + ((short)-3893))","4_299","65_535","385_745_916","-352_387_588","385_745_916","3_942_579_708","7_134_266_009_577_661_718","- 7_133_136_811_068_105_962","7_134_266_009_577_661_718","11_313_607_262_641_445_654"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"NumericType implementations behave as expected. [13]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["def result = type.foreignHolderBytesToTarget( data as byte[] )"]}, + + {"kind":"expect","text":"The array of bytes is being converted to a fitting JVM type.","code":["result == converted"]}, + + {"kind":"and","text":"The original byte array can be recreated by converting with the inverse...","code":["type.targetToForeignHolderBytes(result) == ( data as byte[] )"]}, + + {"kind":"where","text":"The following NumericType instances and bytes are being used :","code":{"type":["new I8()","new UI8()","new I16()","new I16()","new I16()","new I16()","new UI16()","new UI16()","new UI16()","new UI16()","new I32()","new I32()","new UI32()","new UI32()","new I64()","new I64()","new UI64()","new UI64()"],"data":["[-23]","[-23]","[2, 3]","[-16, -53]","[16, -53]","[-1, -1]","[2, 3]","[-16, -53]","[16, -53]","[-1, -1]","[22,-2, 3,-4]","[-22,-2, -3,-4]","[22,-2, 3,-4]","[-22,-2, -3,-4]","[99, 2, 1, 35, 2, 5, 37, 22]","[-99, 2, 1, -35, 2,5,-37,22]","[99, 2, 1, 35, 2, 5, 37, 22]","[-99, 2, 1, -35, 2,5,-37,22]"],"converted":["-23","233","new BigInteger(new byte[]{2, 3}).shortValueExact()","((short)-3893)","4299","-1","new BigInteger(new byte[]{2, 3}).shortValueExact()","(int)(0x10000 + ((short)-3893))","4_299","65_535","385_745_916","-352_387_588","385_745_916","3_942_579_708","7_134_266_009_577_661_718","- 7_133_136_811_068_105_962","7_134_266_009_577_661_718","11_313_607_262_641_445_654"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"NumericType implementations behave as expected. 
[14]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["def result = type.foreignHolderBytesToTarget( data as byte[] )"]}, + + {"kind":"expect","text":"The array of bytes is being converted to a fitting JVM type.","code":["result == converted"]}, + + {"kind":"and","text":"The original byte array can be recreated by converting with the inverse...","code":["type.targetToForeignHolderBytes(result) == ( data as byte[] )"]}, + + {"kind":"where","text":"The following NumericType instances and bytes are being used :","code":{"type":["new I8()","new UI8()","new I16()","new I16()","new I16()","new I16()","new UI16()","new UI16()","new UI16()","new UI16()","new I32()","new I32()","new UI32()","new UI32()","new I64()","new I64()","new UI64()","new UI64()"],"data":["[-23]","[-23]","[2, 3]","[-16, -53]","[16, -53]","[-1, -1]","[2, 3]","[-16, -53]","[16, -53]","[-1, -1]","[22,-2, 3,-4]","[-22,-2, -3,-4]","[22,-2, 3,-4]","[-22,-2, -3,-4]","[99, 2, 1, 35, 2, 5, 37, 22]","[-99, 2, 1, -35, 2,5,-37,22]","[99, 2, 1, 35, 2, 5, 37, 22]","[-99, 2, 1, -35, 2,5,-37,22]"],"converted":["-23","233","new BigInteger(new byte[]{2, 3}).shortValueExact()","((short)-3893)","4299","-1","new BigInteger(new byte[]{2, 3}).shortValueExact()","(int)(0x10000 + ((short)-3893))","4_299","65_535","385_745_916","-352_387_588","385_745_916","3_942_579_708","7_134_266_009_577_661_718","- 7_133_136_811_068_105_962","7_134_266_009_577_661_718","11_313_607_262_641_445_654"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"NumericType implementations behave as expected. [15]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["def result = type.foreignHolderBytesToTarget( data as byte[] )"]}, + + {"kind":"expect","text":"The array of bytes is being converted to a fitting JVM type.","code":["result == converted"]}, + + {"kind":"and","text":"The original byte array can be recreated by converting with the inverse...","code":["type.targetToForeignHolderBytes(result) == ( data as byte[] )"]}, + + {"kind":"where","text":"The following NumericType instances and bytes are being used :","code":{"type":["new I8()","new UI8()","new I16()","new I16()","new I16()","new I16()","new UI16()","new UI16()","new UI16()","new UI16()","new I32()","new I32()","new UI32()","new UI32()","new I64()","new I64()","new UI64()","new UI64()"],"data":["[-23]","[-23]","[2, 3]","[-16, -53]","[16, -53]","[-1, -1]","[2, 3]","[-16, -53]","[16, -53]","[-1, -1]","[22,-2, 3,-4]","[-22,-2, -3,-4]","[22,-2, 3,-4]","[-22,-2, -3,-4]","[99, 2, 1, 35, 2, 5, 37, 22]","[-99, 2, 1, -35, 2,5,-37,22]","[99, 2, 1, 35, 2, 5, 37, 22]","[-99, 2, 1, -35, 2,5,-37,22]"],"converted":["-23","233","new BigInteger(new byte[]{2, 3}).shortValueExact()","((short)-3893)","4299","-1","new BigInteger(new byte[]{2, 3}).shortValueExact()","(int)(0x10000 + ((short)-3893))","4_299","65_535","385_745_916","-352_387_588","385_745_916","3_942_579_708","7_134_266_009_577_661_718","- 7_133_136_811_068_105_962","7_134_266_009_577_661_718","11_313_607_262_641_445_654"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"NumericType implementations behave as expected. 
[16]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["def result = type.foreignHolderBytesToTarget( data as byte[] )"]}, + + {"kind":"expect","text":"The array of bytes is being converted to a fitting JVM type.","code":["result == converted"]}, + + {"kind":"and","text":"The original byte array can be recreated by converting with the inverse...","code":["type.targetToForeignHolderBytes(result) == ( data as byte[] )"]}, + + {"kind":"where","text":"The following NumericType instances and bytes are being used :","code":{"type":["new I8()","new UI8()","new I16()","new I16()","new I16()","new I16()","new UI16()","new UI16()","new UI16()","new UI16()","new I32()","new I32()","new UI32()","new UI32()","new I64()","new I64()","new UI64()","new UI64()"],"data":["[-23]","[-23]","[2, 3]","[-16, -53]","[16, -53]","[-1, -1]","[2, 3]","[-16, -53]","[16, -53]","[-1, -1]","[22,-2, 3,-4]","[-22,-2, -3,-4]","[22,-2, 3,-4]","[-22,-2, -3,-4]","[99, 2, 1, 35, 2, 5, 37, 22]","[-99, 2, 1, -35, 2,5,-37,22]","[99, 2, 1, 35, 2, 5, 37, 22]","[-99, 2, 1, -35, 2,5,-37,22]"],"converted":["-23","233","new BigInteger(new byte[]{2, 3}).shortValueExact()","((short)-3893)","4299","-1","new BigInteger(new byte[]{2, 3}).shortValueExact()","(int)(0x10000 + ((short)-3893))","4_299","65_535","385_745_916","-352_387_588","385_745_916","3_942_579_708","7_134_266_009_577_661_718","- 7_133_136_811_068_105_962","7_134_266_009_577_661_718","11_313_607_262_641_445_654"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"NumericType implementations behave as expected. [17]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["def result = type.foreignHolderBytesToTarget( data as byte[] )"]}, + + {"kind":"expect","text":"The array of bytes is being converted to a fitting JVM type.","code":["result == converted"]}, + + {"kind":"and","text":"The original byte array can be recreated by converting with the inverse...","code":["type.targetToForeignHolderBytes(result) == ( data as byte[] )"]}, + + {"kind":"where","text":"The following NumericType instances and bytes are being used :","code":{"type":["new I8()","new UI8()","new I16()","new I16()","new I16()","new I16()","new UI16()","new UI16()","new UI16()","new UI16()","new I32()","new I32()","new UI32()","new UI32()","new I64()","new I64()","new UI64()","new UI64()"],"data":["[-23]","[-23]","[2, 3]","[-16, -53]","[16, -53]","[-1, -1]","[2, 3]","[-16, -53]","[16, -53]","[-1, -1]","[22,-2, 3,-4]","[-22,-2, -3,-4]","[22,-2, 3,-4]","[-22,-2, -3,-4]","[99, 2, 1, 35, 2, 5, 37, 22]","[-99, 2, 1, -35, 2,5,-37,22]","[99, 2, 1, 35, 2, 5, 37, 22]","[-99, 2, 1, -35, 2,5,-37,22]"],"converted":["-23","233","new BigInteger(new byte[]{2, 3}).shortValueExact()","((short)-3893)","4299","-1","new BigInteger(new byte[]{2, 3}).shortValueExact()","(int)(0x10000 + ((short)-3893))","4_299","65_535","385_745_916","-352_387_588","385_745_916","3_942_579_708","7_134_266_009_577_661_718","- 7_133_136_811_068_105_962","7_134_266_009_577_661_718","11_313_607_262_641_445_654"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Conversion goes both ways and produces expected numeric values. 
[0]", + "result":"PASS", + "duration":"0.004 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"when","text":"We apply a filter in order to guarantee that the right data type is being used.","code":["original = ["," 'UI8' : { o -> o as Byte },"," 'UI16': { o -> o as Short },"," 'UI32': { o -> o as Integer },"," 'UI64': { o -> o as Long },"," 'I8' : { o -> o as Byte },"," 'I16' : { o -> o as Short },"," 'I32' : { o -> o as Integer },"," 'I64' : { o -> o as Long },"," 'F32' : { o -> o as Float },"," 'F64' : { o -> o as Double }","][ num.class.simpleName ](original)"]}, + + {"kind":"and","text":"The convert the raw type (might represent unsigned value) to a JVM compatible target type...","code":["def resultTarget = num.foreignHolderBytesToTarget( rawOriginal )"]}, + + {"kind":"and","text":"Then convert this result to the true byte array of the value...","code":["def backToRaw = num.targetToForeignHolderBytes( resultTarget )"]}, + + {"kind":"then","text":"This produces the expected values which express the following relationships:","code":["resultTarget == target","backToRaw == rawOriginal","num.toTarget( original ) == target","num.convertToHolder(target) == original"]}, + + {"kind":"and","text":"The numeric type instance can perform array conversion.","code":["num.convertToTargetArray( rawOriginal as double[] ) == rawOriginal // Groovy automatically tests values","num.convertToTargetArray( rawOriginal as float[] ) == rawOriginal // ...despite difference types...","num.convertToTargetArray( rawOriginal as int[] ) == rawOriginal","num.convertToTargetArray( rawOriginal as short[] ) == rawOriginal","num.convertToTargetArray( rawOriginal as long[] ) == rawOriginal"]}, + + {"kind":"where","text":"The following \"NumericType\" implementation instances and numeric data is being used: ","code":{"num":["new UI8()","new UI16()","new UI32()","new UI64()","new I8()","new I16()","new I32()","new I64()","new F32()","new F64()","new F32()","new F64()"],"original":["-3","-3","-3","-3","-3","-3","-3","-3","-0.3","-0.3","-5432.39928","-5432.39928"],"rawOriginal":["[-3]","[255, 253]","[255, 255, 255, 253]","[255, 255, 255, 255, 255, 255, 255, 253]","[-3]","[255, 253]","[255, 255, 255, 253]","[255, 255, 255, 255, 255, 255, 255, 253]","[-66, -103, -103, -102]","[-65, -45, 51, 51, 51, 51, 51, 51]","[-59, -87, -61, 50]","[-64, -75, 56, 102, 55, 54, -51, -14]"],"target":["255 - 2","65_535 - 2","4_294_967_295 - 2","18_446_744_073_709_551_615 - 2","- 3","- 3","- 3","- 3","- 0.3 as Float","- 0.3 as Double","-5432.39928 as Float","-5432.39928 as Double"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Conversion goes both ways and produces expected numeric values. 
[1]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"when","text":"We apply a filter in order to guarantee that the right data type is being used.","code":["original = ["," 'UI8' : { o -> o as Byte },"," 'UI16': { o -> o as Short },"," 'UI32': { o -> o as Integer },"," 'UI64': { o -> o as Long },"," 'I8' : { o -> o as Byte },"," 'I16' : { o -> o as Short },"," 'I32' : { o -> o as Integer },"," 'I64' : { o -> o as Long },"," 'F32' : { o -> o as Float },"," 'F64' : { o -> o as Double }","][ num.class.simpleName ](original)"]}, + + {"kind":"and","text":"The convert the raw type (might represent unsigned value) to a JVM compatible target type...","code":["def resultTarget = num.foreignHolderBytesToTarget( rawOriginal )"]}, + + {"kind":"and","text":"Then convert this result to the true byte array of the value...","code":["def backToRaw = num.targetToForeignHolderBytes( resultTarget )"]}, + + {"kind":"then","text":"This produces the expected values which express the following relationships:","code":["resultTarget == target","backToRaw == rawOriginal","num.toTarget( original ) == target","num.convertToHolder(target) == original"]}, + + {"kind":"and","text":"The numeric type instance can perform array conversion.","code":["num.convertToTargetArray( rawOriginal as double[] ) == rawOriginal // Groovy automatically tests values","num.convertToTargetArray( rawOriginal as float[] ) == rawOriginal // ...despite difference types...","num.convertToTargetArray( rawOriginal as int[] ) == rawOriginal","num.convertToTargetArray( rawOriginal as short[] ) == rawOriginal","num.convertToTargetArray( rawOriginal as long[] ) == rawOriginal"]}, + + {"kind":"where","text":"The following \"NumericType\" implementation instances and numeric data is being used: ","code":{"num":["new UI8()","new UI16()","new UI32()","new UI64()","new I8()","new I16()","new I32()","new I64()","new F32()","new F64()","new F32()","new F64()"],"original":["-3","-3","-3","-3","-3","-3","-3","-3","-0.3","-0.3","-5432.39928","-5432.39928"],"rawOriginal":["[-3]","[255, 253]","[255, 255, 255, 253]","[255, 255, 255, 255, 255, 255, 255, 253]","[-3]","[255, 253]","[255, 255, 255, 253]","[255, 255, 255, 255, 255, 255, 255, 253]","[-66, -103, -103, -102]","[-65, -45, 51, 51, 51, 51, 51, 51]","[-59, -87, -61, 50]","[-64, -75, 56, 102, 55, 54, -51, -14]"],"target":["255 - 2","65_535 - 2","4_294_967_295 - 2","18_446_744_073_709_551_615 - 2","- 3","- 3","- 3","- 3","- 0.3 as Float","- 0.3 as Double","-5432.39928 as Float","-5432.39928 as Double"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Conversion goes both ways and produces expected numeric values. 
[2]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"when","text":"We apply a filter in order to guarantee that the right data type is being used.","code":["original = ["," 'UI8' : { o -> o as Byte },"," 'UI16': { o -> o as Short },"," 'UI32': { o -> o as Integer },"," 'UI64': { o -> o as Long },"," 'I8' : { o -> o as Byte },"," 'I16' : { o -> o as Short },"," 'I32' : { o -> o as Integer },"," 'I64' : { o -> o as Long },"," 'F32' : { o -> o as Float },"," 'F64' : { o -> o as Double }","][ num.class.simpleName ](original)"]}, + + {"kind":"and","text":"The convert the raw type (might represent unsigned value) to a JVM compatible target type...","code":["def resultTarget = num.foreignHolderBytesToTarget( rawOriginal )"]}, + + {"kind":"and","text":"Then convert this result to the true byte array of the value...","code":["def backToRaw = num.targetToForeignHolderBytes( resultTarget )"]}, + + {"kind":"then","text":"This produces the expected values which express the following relationships:","code":["resultTarget == target","backToRaw == rawOriginal","num.toTarget( original ) == target","num.convertToHolder(target) == original"]}, + + {"kind":"and","text":"The numeric type instance can perform array conversion.","code":["num.convertToTargetArray( rawOriginal as double[] ) == rawOriginal // Groovy automatically tests values","num.convertToTargetArray( rawOriginal as float[] ) == rawOriginal // ...despite difference types...","num.convertToTargetArray( rawOriginal as int[] ) == rawOriginal","num.convertToTargetArray( rawOriginal as short[] ) == rawOriginal","num.convertToTargetArray( rawOriginal as long[] ) == rawOriginal"]}, + + {"kind":"where","text":"The following \"NumericType\" implementation instances and numeric data is being used: ","code":{"num":["new UI8()","new UI16()","new UI32()","new UI64()","new I8()","new I16()","new I32()","new I64()","new F32()","new F64()","new F32()","new F64()"],"original":["-3","-3","-3","-3","-3","-3","-3","-3","-0.3","-0.3","-5432.39928","-5432.39928"],"rawOriginal":["[-3]","[255, 253]","[255, 255, 255, 253]","[255, 255, 255, 255, 255, 255, 255, 253]","[-3]","[255, 253]","[255, 255, 255, 253]","[255, 255, 255, 255, 255, 255, 255, 253]","[-66, -103, -103, -102]","[-65, -45, 51, 51, 51, 51, 51, 51]","[-59, -87, -61, 50]","[-64, -75, 56, 102, 55, 54, -51, -14]"],"target":["255 - 2","65_535 - 2","4_294_967_295 - 2","18_446_744_073_709_551_615 - 2","- 3","- 3","- 3","- 3","- 0.3 as Float","- 0.3 as Double","-5432.39928 as Float","-5432.39928 as Double"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Conversion goes both ways and produces expected numeric values. 
[3]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"when","text":"We apply a filter in order to guarantee that the right data type is being used.","code":["original = ["," 'UI8' : { o -> o as Byte },"," 'UI16': { o -> o as Short },"," 'UI32': { o -> o as Integer },"," 'UI64': { o -> o as Long },"," 'I8' : { o -> o as Byte },"," 'I16' : { o -> o as Short },"," 'I32' : { o -> o as Integer },"," 'I64' : { o -> o as Long },"," 'F32' : { o -> o as Float },"," 'F64' : { o -> o as Double }","][ num.class.simpleName ](original)"]}, + + {"kind":"and","text":"The convert the raw type (might represent unsigned value) to a JVM compatible target type...","code":["def resultTarget = num.foreignHolderBytesToTarget( rawOriginal )"]}, + + {"kind":"and","text":"Then convert this result to the true byte array of the value...","code":["def backToRaw = num.targetToForeignHolderBytes( resultTarget )"]}, + + {"kind":"then","text":"This produces the expected values which express the following relationships:","code":["resultTarget == target","backToRaw == rawOriginal","num.toTarget( original ) == target","num.convertToHolder(target) == original"]}, + + {"kind":"and","text":"The numeric type instance can perform array conversion.","code":["num.convertToTargetArray( rawOriginal as double[] ) == rawOriginal // Groovy automatically tests values","num.convertToTargetArray( rawOriginal as float[] ) == rawOriginal // ...despite difference types...","num.convertToTargetArray( rawOriginal as int[] ) == rawOriginal","num.convertToTargetArray( rawOriginal as short[] ) == rawOriginal","num.convertToTargetArray( rawOriginal as long[] ) == rawOriginal"]}, + + {"kind":"where","text":"The following \"NumericType\" implementation instances and numeric data is being used: ","code":{"num":["new UI8()","new UI16()","new UI32()","new UI64()","new I8()","new I16()","new I32()","new I64()","new F32()","new F64()","new F32()","new F64()"],"original":["-3","-3","-3","-3","-3","-3","-3","-3","-0.3","-0.3","-5432.39928","-5432.39928"],"rawOriginal":["[-3]","[255, 253]","[255, 255, 255, 253]","[255, 255, 255, 255, 255, 255, 255, 253]","[-3]","[255, 253]","[255, 255, 255, 253]","[255, 255, 255, 255, 255, 255, 255, 253]","[-66, -103, -103, -102]","[-65, -45, 51, 51, 51, 51, 51, 51]","[-59, -87, -61, 50]","[-64, -75, 56, 102, 55, 54, -51, -14]"],"target":["255 - 2","65_535 - 2","4_294_967_295 - 2","18_446_744_073_709_551_615 - 2","- 3","- 3","- 3","- 3","- 0.3 as Float","- 0.3 as Double","-5432.39928 as Float","-5432.39928 as Double"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Conversion goes both ways and produces expected numeric values. 
[4]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"when","text":"We apply a filter in order to guarantee that the right data type is being used.","code":["original = ["," 'UI8' : { o -> o as Byte },"," 'UI16': { o -> o as Short },"," 'UI32': { o -> o as Integer },"," 'UI64': { o -> o as Long },"," 'I8' : { o -> o as Byte },"," 'I16' : { o -> o as Short },"," 'I32' : { o -> o as Integer },"," 'I64' : { o -> o as Long },"," 'F32' : { o -> o as Float },"," 'F64' : { o -> o as Double }","][ num.class.simpleName ](original)"]}, + + {"kind":"and","text":"The convert the raw type (might represent unsigned value) to a JVM compatible target type...","code":["def resultTarget = num.foreignHolderBytesToTarget( rawOriginal )"]}, + + {"kind":"and","text":"Then convert this result to the true byte array of the value...","code":["def backToRaw = num.targetToForeignHolderBytes( resultTarget )"]}, + + {"kind":"then","text":"This produces the expected values which express the following relationships:","code":["resultTarget == target","backToRaw == rawOriginal","num.toTarget( original ) == target","num.convertToHolder(target) == original"]}, + + {"kind":"and","text":"The numeric type instance can perform array conversion.","code":["num.convertToTargetArray( rawOriginal as double[] ) == rawOriginal // Groovy automatically tests values","num.convertToTargetArray( rawOriginal as float[] ) == rawOriginal // ...despite difference types...","num.convertToTargetArray( rawOriginal as int[] ) == rawOriginal","num.convertToTargetArray( rawOriginal as short[] ) == rawOriginal","num.convertToTargetArray( rawOriginal as long[] ) == rawOriginal"]}, + + {"kind":"where","text":"The following \"NumericType\" implementation instances and numeric data is being used: ","code":{"num":["new UI8()","new UI16()","new UI32()","new UI64()","new I8()","new I16()","new I32()","new I64()","new F32()","new F64()","new F32()","new F64()"],"original":["-3","-3","-3","-3","-3","-3","-3","-3","-0.3","-0.3","-5432.39928","-5432.39928"],"rawOriginal":["[-3]","[255, 253]","[255, 255, 255, 253]","[255, 255, 255, 255, 255, 255, 255, 253]","[-3]","[255, 253]","[255, 255, 255, 253]","[255, 255, 255, 255, 255, 255, 255, 253]","[-66, -103, -103, -102]","[-65, -45, 51, 51, 51, 51, 51, 51]","[-59, -87, -61, 50]","[-64, -75, 56, 102, 55, 54, -51, -14]"],"target":["255 - 2","65_535 - 2","4_294_967_295 - 2","18_446_744_073_709_551_615 - 2","- 3","- 3","- 3","- 3","- 0.3 as Float","- 0.3 as Double","-5432.39928 as Float","-5432.39928 as Double"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Conversion goes both ways and produces expected numeric values. 
[5]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"when","text":"We apply a filter in order to guarantee that the right data type is being used.","code":["original = ["," 'UI8' : { o -> o as Byte },"," 'UI16': { o -> o as Short },"," 'UI32': { o -> o as Integer },"," 'UI64': { o -> o as Long },"," 'I8' : { o -> o as Byte },"," 'I16' : { o -> o as Short },"," 'I32' : { o -> o as Integer },"," 'I64' : { o -> o as Long },"," 'F32' : { o -> o as Float },"," 'F64' : { o -> o as Double }","][ num.class.simpleName ](original)"]}, + + {"kind":"and","text":"The convert the raw type (might represent unsigned value) to a JVM compatible target type...","code":["def resultTarget = num.foreignHolderBytesToTarget( rawOriginal )"]}, + + {"kind":"and","text":"Then convert this result to the true byte array of the value...","code":["def backToRaw = num.targetToForeignHolderBytes( resultTarget )"]}, + + {"kind":"then","text":"This produces the expected values which express the following relationships:","code":["resultTarget == target","backToRaw == rawOriginal","num.toTarget( original ) == target","num.convertToHolder(target) == original"]}, + + {"kind":"and","text":"The numeric type instance can perform array conversion.","code":["num.convertToTargetArray( rawOriginal as double[] ) == rawOriginal // Groovy automatically tests values","num.convertToTargetArray( rawOriginal as float[] ) == rawOriginal // ...despite difference types...","num.convertToTargetArray( rawOriginal as int[] ) == rawOriginal","num.convertToTargetArray( rawOriginal as short[] ) == rawOriginal","num.convertToTargetArray( rawOriginal as long[] ) == rawOriginal"]}, + + {"kind":"where","text":"The following \"NumericType\" implementation instances and numeric data is being used: ","code":{"num":["new UI8()","new UI16()","new UI32()","new UI64()","new I8()","new I16()","new I32()","new I64()","new F32()","new F64()","new F32()","new F64()"],"original":["-3","-3","-3","-3","-3","-3","-3","-3","-0.3","-0.3","-5432.39928","-5432.39928"],"rawOriginal":["[-3]","[255, 253]","[255, 255, 255, 253]","[255, 255, 255, 255, 255, 255, 255, 253]","[-3]","[255, 253]","[255, 255, 255, 253]","[255, 255, 255, 255, 255, 255, 255, 253]","[-66, -103, -103, -102]","[-65, -45, 51, 51, 51, 51, 51, 51]","[-59, -87, -61, 50]","[-64, -75, 56, 102, 55, 54, -51, -14]"],"target":["255 - 2","65_535 - 2","4_294_967_295 - 2","18_446_744_073_709_551_615 - 2","- 3","- 3","- 3","- 3","- 0.3 as Float","- 0.3 as Double","-5432.39928 as Float","-5432.39928 as Double"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Conversion goes both ways and produces expected numeric values. 
[6]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"when","text":"We apply a filter in order to guarantee that the right data type is being used.","code":["original = ["," 'UI8' : { o -> o as Byte },"," 'UI16': { o -> o as Short },"," 'UI32': { o -> o as Integer },"," 'UI64': { o -> o as Long },"," 'I8' : { o -> o as Byte },"," 'I16' : { o -> o as Short },"," 'I32' : { o -> o as Integer },"," 'I64' : { o -> o as Long },"," 'F32' : { o -> o as Float },"," 'F64' : { o -> o as Double }","][ num.class.simpleName ](original)"]}, + + {"kind":"and","text":"The convert the raw type (might represent unsigned value) to a JVM compatible target type...","code":["def resultTarget = num.foreignHolderBytesToTarget( rawOriginal )"]}, + + {"kind":"and","text":"Then convert this result to the true byte array of the value...","code":["def backToRaw = num.targetToForeignHolderBytes( resultTarget )"]}, + + {"kind":"then","text":"This produces the expected values which express the following relationships:","code":["resultTarget == target","backToRaw == rawOriginal","num.toTarget( original ) == target","num.convertToHolder(target) == original"]}, + + {"kind":"and","text":"The numeric type instance can perform array conversion.","code":["num.convertToTargetArray( rawOriginal as double[] ) == rawOriginal // Groovy automatically tests values","num.convertToTargetArray( rawOriginal as float[] ) == rawOriginal // ...despite difference types...","num.convertToTargetArray( rawOriginal as int[] ) == rawOriginal","num.convertToTargetArray( rawOriginal as short[] ) == rawOriginal","num.convertToTargetArray( rawOriginal as long[] ) == rawOriginal"]}, + + {"kind":"where","text":"The following \"NumericType\" implementation instances and numeric data is being used: ","code":{"num":["new UI8()","new UI16()","new UI32()","new UI64()","new I8()","new I16()","new I32()","new I64()","new F32()","new F64()","new F32()","new F64()"],"original":["-3","-3","-3","-3","-3","-3","-3","-3","-0.3","-0.3","-5432.39928","-5432.39928"],"rawOriginal":["[-3]","[255, 253]","[255, 255, 255, 253]","[255, 255, 255, 255, 255, 255, 255, 253]","[-3]","[255, 253]","[255, 255, 255, 253]","[255, 255, 255, 255, 255, 255, 255, 253]","[-66, -103, -103, -102]","[-65, -45, 51, 51, 51, 51, 51, 51]","[-59, -87, -61, 50]","[-64, -75, 56, 102, 55, 54, -51, -14]"],"target":["255 - 2","65_535 - 2","4_294_967_295 - 2","18_446_744_073_709_551_615 - 2","- 3","- 3","- 3","- 3","- 0.3 as Float","- 0.3 as Double","-5432.39928 as Float","-5432.39928 as Double"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Conversion goes both ways and produces expected numeric values. 
[7]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"when","text":"We apply a filter in order to guarantee that the right data type is being used.","code":["original = ["," 'UI8' : { o -> o as Byte },"," 'UI16': { o -> o as Short },"," 'UI32': { o -> o as Integer },"," 'UI64': { o -> o as Long },"," 'I8' : { o -> o as Byte },"," 'I16' : { o -> o as Short },"," 'I32' : { o -> o as Integer },"," 'I64' : { o -> o as Long },"," 'F32' : { o -> o as Float },"," 'F64' : { o -> o as Double }","][ num.class.simpleName ](original)"]}, + + {"kind":"and","text":"The convert the raw type (might represent unsigned value) to a JVM compatible target type...","code":["def resultTarget = num.foreignHolderBytesToTarget( rawOriginal )"]}, + + {"kind":"and","text":"Then convert this result to the true byte array of the value...","code":["def backToRaw = num.targetToForeignHolderBytes( resultTarget )"]}, + + {"kind":"then","text":"This produces the expected values which express the following relationships:","code":["resultTarget == target","backToRaw == rawOriginal","num.toTarget( original ) == target","num.convertToHolder(target) == original"]}, + + {"kind":"and","text":"The numeric type instance can perform array conversion.","code":["num.convertToTargetArray( rawOriginal as double[] ) == rawOriginal // Groovy automatically tests values","num.convertToTargetArray( rawOriginal as float[] ) == rawOriginal // ...despite difference types...","num.convertToTargetArray( rawOriginal as int[] ) == rawOriginal","num.convertToTargetArray( rawOriginal as short[] ) == rawOriginal","num.convertToTargetArray( rawOriginal as long[] ) == rawOriginal"]}, + + {"kind":"where","text":"The following \"NumericType\" implementation instances and numeric data is being used: ","code":{"num":["new UI8()","new UI16()","new UI32()","new UI64()","new I8()","new I16()","new I32()","new I64()","new F32()","new F64()","new F32()","new F64()"],"original":["-3","-3","-3","-3","-3","-3","-3","-3","-0.3","-0.3","-5432.39928","-5432.39928"],"rawOriginal":["[-3]","[255, 253]","[255, 255, 255, 253]","[255, 255, 255, 255, 255, 255, 255, 253]","[-3]","[255, 253]","[255, 255, 255, 253]","[255, 255, 255, 255, 255, 255, 255, 253]","[-66, -103, -103, -102]","[-65, -45, 51, 51, 51, 51, 51, 51]","[-59, -87, -61, 50]","[-64, -75, 56, 102, 55, 54, -51, -14]"],"target":["255 - 2","65_535 - 2","4_294_967_295 - 2","18_446_744_073_709_551_615 - 2","- 3","- 3","- 3","- 3","- 0.3 as Float","- 0.3 as Double","-5432.39928 as Float","-5432.39928 as Double"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Conversion goes both ways and produces expected numeric values. 
[8]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"when","text":"We apply a filter in order to guarantee that the right data type is being used.","code":["original = ["," 'UI8' : { o -> o as Byte },"," 'UI16': { o -> o as Short },"," 'UI32': { o -> o as Integer },"," 'UI64': { o -> o as Long },"," 'I8' : { o -> o as Byte },"," 'I16' : { o -> o as Short },"," 'I32' : { o -> o as Integer },"," 'I64' : { o -> o as Long },"," 'F32' : { o -> o as Float },"," 'F64' : { o -> o as Double }","][ num.class.simpleName ](original)"]}, + + {"kind":"and","text":"The convert the raw type (might represent unsigned value) to a JVM compatible target type...","code":["def resultTarget = num.foreignHolderBytesToTarget( rawOriginal )"]}, + + {"kind":"and","text":"Then convert this result to the true byte array of the value...","code":["def backToRaw = num.targetToForeignHolderBytes( resultTarget )"]}, + + {"kind":"then","text":"This produces the expected values which express the following relationships:","code":["resultTarget == target","backToRaw == rawOriginal","num.toTarget( original ) == target","num.convertToHolder(target) == original"]}, + + {"kind":"and","text":"The numeric type instance can perform array conversion.","code":["num.convertToTargetArray( rawOriginal as double[] ) == rawOriginal // Groovy automatically tests values","num.convertToTargetArray( rawOriginal as float[] ) == rawOriginal // ...despite difference types...","num.convertToTargetArray( rawOriginal as int[] ) == rawOriginal","num.convertToTargetArray( rawOriginal as short[] ) == rawOriginal","num.convertToTargetArray( rawOriginal as long[] ) == rawOriginal"]}, + + {"kind":"where","text":"The following \"NumericType\" implementation instances and numeric data is being used: ","code":{"num":["new UI8()","new UI16()","new UI32()","new UI64()","new I8()","new I16()","new I32()","new I64()","new F32()","new F64()","new F32()","new F64()"],"original":["-3","-3","-3","-3","-3","-3","-3","-3","-0.3","-0.3","-5432.39928","-5432.39928"],"rawOriginal":["[-3]","[255, 253]","[255, 255, 255, 253]","[255, 255, 255, 255, 255, 255, 255, 253]","[-3]","[255, 253]","[255, 255, 255, 253]","[255, 255, 255, 255, 255, 255, 255, 253]","[-66, -103, -103, -102]","[-65, -45, 51, 51, 51, 51, 51, 51]","[-59, -87, -61, 50]","[-64, -75, 56, 102, 55, 54, -51, -14]"],"target":["255 - 2","65_535 - 2","4_294_967_295 - 2","18_446_744_073_709_551_615 - 2","- 3","- 3","- 3","- 3","- 0.3 as Float","- 0.3 as Double","-5432.39928 as Float","-5432.39928 as Double"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Conversion goes both ways and produces expected numeric values. 
[9]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"when","text":"We apply a filter in order to guarantee that the right data type is being used.","code":["original = ["," 'UI8' : { o -> o as Byte },"," 'UI16': { o -> o as Short },"," 'UI32': { o -> o as Integer },"," 'UI64': { o -> o as Long },"," 'I8' : { o -> o as Byte },"," 'I16' : { o -> o as Short },"," 'I32' : { o -> o as Integer },"," 'I64' : { o -> o as Long },"," 'F32' : { o -> o as Float },"," 'F64' : { o -> o as Double }","][ num.class.simpleName ](original)"]}, + + {"kind":"and","text":"The convert the raw type (might represent unsigned value) to a JVM compatible target type...","code":["def resultTarget = num.foreignHolderBytesToTarget( rawOriginal )"]}, + + {"kind":"and","text":"Then convert this result to the true byte array of the value...","code":["def backToRaw = num.targetToForeignHolderBytes( resultTarget )"]}, + + {"kind":"then","text":"This produces the expected values which express the following relationships:","code":["resultTarget == target","backToRaw == rawOriginal","num.toTarget( original ) == target","num.convertToHolder(target) == original"]}, + + {"kind":"and","text":"The numeric type instance can perform array conversion.","code":["num.convertToTargetArray( rawOriginal as double[] ) == rawOriginal // Groovy automatically tests values","num.convertToTargetArray( rawOriginal as float[] ) == rawOriginal // ...despite difference types...","num.convertToTargetArray( rawOriginal as int[] ) == rawOriginal","num.convertToTargetArray( rawOriginal as short[] ) == rawOriginal","num.convertToTargetArray( rawOriginal as long[] ) == rawOriginal"]}, + + {"kind":"where","text":"The following \"NumericType\" implementation instances and numeric data is being used: ","code":{"num":["new UI8()","new UI16()","new UI32()","new UI64()","new I8()","new I16()","new I32()","new I64()","new F32()","new F64()","new F32()","new F64()"],"original":["-3","-3","-3","-3","-3","-3","-3","-3","-0.3","-0.3","-5432.39928","-5432.39928"],"rawOriginal":["[-3]","[255, 253]","[255, 255, 255, 253]","[255, 255, 255, 255, 255, 255, 255, 253]","[-3]","[255, 253]","[255, 255, 255, 253]","[255, 255, 255, 255, 255, 255, 255, 253]","[-66, -103, -103, -102]","[-65, -45, 51, 51, 51, 51, 51, 51]","[-59, -87, -61, 50]","[-64, -75, 56, 102, 55, 54, -51, -14]"],"target":["255 - 2","65_535 - 2","4_294_967_295 - 2","18_446_744_073_709_551_615 - 2","- 3","- 3","- 3","- 3","- 0.3 as Float","- 0.3 as Double","-5432.39928 as Float","-5432.39928 as Double"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Conversion goes both ways and produces expected numeric values. 
[10]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"when","text":"We apply a filter in order to guarantee that the right data type is being used.","code":["original = ["," 'UI8' : { o -> o as Byte },"," 'UI16': { o -> o as Short },"," 'UI32': { o -> o as Integer },"," 'UI64': { o -> o as Long },"," 'I8' : { o -> o as Byte },"," 'I16' : { o -> o as Short },"," 'I32' : { o -> o as Integer },"," 'I64' : { o -> o as Long },"," 'F32' : { o -> o as Float },"," 'F64' : { o -> o as Double }","][ num.class.simpleName ](original)"]}, + + {"kind":"and","text":"The convert the raw type (might represent unsigned value) to a JVM compatible target type...","code":["def resultTarget = num.foreignHolderBytesToTarget( rawOriginal )"]}, + + {"kind":"and","text":"Then convert this result to the true byte array of the value...","code":["def backToRaw = num.targetToForeignHolderBytes( resultTarget )"]}, + + {"kind":"then","text":"This produces the expected values which express the following relationships:","code":["resultTarget == target","backToRaw == rawOriginal","num.toTarget( original ) == target","num.convertToHolder(target) == original"]}, + + {"kind":"and","text":"The numeric type instance can perform array conversion.","code":["num.convertToTargetArray( rawOriginal as double[] ) == rawOriginal // Groovy automatically tests values","num.convertToTargetArray( rawOriginal as float[] ) == rawOriginal // ...despite difference types...","num.convertToTargetArray( rawOriginal as int[] ) == rawOriginal","num.convertToTargetArray( rawOriginal as short[] ) == rawOriginal","num.convertToTargetArray( rawOriginal as long[] ) == rawOriginal"]}, + + {"kind":"where","text":"The following \"NumericType\" implementation instances and numeric data is being used: ","code":{"num":["new UI8()","new UI16()","new UI32()","new UI64()","new I8()","new I16()","new I32()","new I64()","new F32()","new F64()","new F32()","new F64()"],"original":["-3","-3","-3","-3","-3","-3","-3","-3","-0.3","-0.3","-5432.39928","-5432.39928"],"rawOriginal":["[-3]","[255, 253]","[255, 255, 255, 253]","[255, 255, 255, 255, 255, 255, 255, 253]","[-3]","[255, 253]","[255, 255, 255, 253]","[255, 255, 255, 255, 255, 255, 255, 253]","[-66, -103, -103, -102]","[-65, -45, 51, 51, 51, 51, 51, 51]","[-59, -87, -61, 50]","[-64, -75, 56, 102, 55, 54, -51, -14]"],"target":["255 - 2","65_535 - 2","4_294_967_295 - 2","18_446_744_073_709_551_615 - 2","- 3","- 3","- 3","- 3","- 0.3 as Float","- 0.3 as Double","-5432.39928 as Float","-5432.39928 as Double"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Conversion goes both ways and produces expected numeric values. 
[11]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"when","text":"We apply a filter in order to guarantee that the right data type is being used.","code":["original = ["," 'UI8' : { o -> o as Byte },"," 'UI16': { o -> o as Short },"," 'UI32': { o -> o as Integer },"," 'UI64': { o -> o as Long },"," 'I8' : { o -> o as Byte },"," 'I16' : { o -> o as Short },"," 'I32' : { o -> o as Integer },"," 'I64' : { o -> o as Long },"," 'F32' : { o -> o as Float },"," 'F64' : { o -> o as Double }","][ num.class.simpleName ](original)"]}, + + {"kind":"and","text":"The convert the raw type (might represent unsigned value) to a JVM compatible target type...","code":["def resultTarget = num.foreignHolderBytesToTarget( rawOriginal )"]}, + + {"kind":"and","text":"Then convert this result to the true byte array of the value...","code":["def backToRaw = num.targetToForeignHolderBytes( resultTarget )"]}, + + {"kind":"then","text":"This produces the expected values which express the following relationships:","code":["resultTarget == target","backToRaw == rawOriginal","num.toTarget( original ) == target","num.convertToHolder(target) == original"]}, + + {"kind":"and","text":"The numeric type instance can perform array conversion.","code":["num.convertToTargetArray( rawOriginal as double[] ) == rawOriginal // Groovy automatically tests values","num.convertToTargetArray( rawOriginal as float[] ) == rawOriginal // ...despite difference types...","num.convertToTargetArray( rawOriginal as int[] ) == rawOriginal","num.convertToTargetArray( rawOriginal as short[] ) == rawOriginal","num.convertToTargetArray( rawOriginal as long[] ) == rawOriginal"]}, + + {"kind":"where","text":"The following \"NumericType\" implementation instances and numeric data is being used: ","code":{"num":["new UI8()","new UI16()","new UI32()","new UI64()","new I8()","new I16()","new I32()","new I64()","new F32()","new F64()","new F32()","new F64()"],"original":["-3","-3","-3","-3","-3","-3","-3","-3","-0.3","-0.3","-5432.39928","-5432.39928"],"rawOriginal":["[-3]","[255, 253]","[255, 255, 255, 253]","[255, 255, 255, 255, 255, 255, 255, 253]","[-3]","[255, 253]","[255, 255, 255, 253]","[255, 255, 255, 255, 255, 255, 255, 253]","[-66, -103, -103, -102]","[-65, -45, 51, 51, 51, 51, 51, 51]","[-59, -87, -61, 50]","[-64, -75, 56, 102, 55, 54, -51, -14]"],"target":["255 - 2","65_535 - 2","4_294_967_295 - 2","18_446_744_073_709_551_615 - 2","- 3","- 3","- 3","- 3","- 0.3 as Float","- 0.3 as Double","-5432.39928 as Float","-5432.39928 as Double"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"NumericType conversion to holder types yields expected results. 
[0]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"when","text":"","code":["def result = num.convertToHolder( from )"]}, + + {"kind":"then","text":"","code":["result == expected"]}, + + {"kind":"and","text":"","code":["result.class == expected.class"]}, + + {"kind":"and","text":"","code":["result.class == holderType"]}, + + {"kind":"and","text":"","code":["num.holderType() == holderType"]}, + + {"kind":"and","text":"","code":["num.holderArrayType() == holderArrayType"]}, + + {"kind":"where","text":"","code":{"num":["new I32()","new I32()","new I32()","new I32()","new I32()","new I32()","new I16()","new I16()","new I16()","new I16()","new I16()","new I16()","new I8()","new I8()","new I8()","new I8()","new I8()","new I8()"],"from":["3 as Byte","8 as Integer","863.834 as Double","2 as Short","9 as Long","23.422 as Float","3 as Byte","8 as Integer","863.834 as Double","2 as Short","9 as Long","23.422 as Float","3 as Byte","8 as Integer","863.834 as Double","2 as Short","9 as Long","23.422 as Float"],"expected":["3 as Integer","8 as Integer","863 as Integer","2 as Integer","9 as Integer","23 as Integer","3 as Short","8 as Short","863 as Short","2 as Short","9 as Short","23 as Short","3 as Byte","8 as Byte","863 as Byte","2 as Byte","9 as Byte","23 as Byte"],"holderType":["Integer.class","Integer.class","Integer.class","Integer.class","Integer.class","Integer.class","Short.class","Short.class","Short.class","Short.class","Short.class","Short.class","Byte.class","Byte.class","Byte.class","Byte.class","Byte.class","Byte.class"],"holderArrayType":["int[].class","int[].class","int[].class","int[].class","int[].class","int[].class","short[].class","short[].class","short[].class","short[].class","short[].class","short[].class","byte[].class","byte[].class","byte[].class","byte[].class","byte[].class","byte[].class"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"NumericType conversion to holder types yields expected results. 
[1]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"when","text":"","code":["def result = num.convertToHolder( from )"]}, + + {"kind":"then","text":"","code":["result == expected"]}, + + {"kind":"and","text":"","code":["result.class == expected.class"]}, + + {"kind":"and","text":"","code":["result.class == holderType"]}, + + {"kind":"and","text":"","code":["num.holderType() == holderType"]}, + + {"kind":"and","text":"","code":["num.holderArrayType() == holderArrayType"]}, + + {"kind":"where","text":"","code":{"num":["new I32()","new I32()","new I32()","new I32()","new I32()","new I32()","new I16()","new I16()","new I16()","new I16()","new I16()","new I16()","new I8()","new I8()","new I8()","new I8()","new I8()","new I8()"],"from":["3 as Byte","8 as Integer","863.834 as Double","2 as Short","9 as Long","23.422 as Float","3 as Byte","8 as Integer","863.834 as Double","2 as Short","9 as Long","23.422 as Float","3 as Byte","8 as Integer","863.834 as Double","2 as Short","9 as Long","23.422 as Float"],"expected":["3 as Integer","8 as Integer","863 as Integer","2 as Integer","9 as Integer","23 as Integer","3 as Short","8 as Short","863 as Short","2 as Short","9 as Short","23 as Short","3 as Byte","8 as Byte","863 as Byte","2 as Byte","9 as Byte","23 as Byte"],"holderType":["Integer.class","Integer.class","Integer.class","Integer.class","Integer.class","Integer.class","Short.class","Short.class","Short.class","Short.class","Short.class","Short.class","Byte.class","Byte.class","Byte.class","Byte.class","Byte.class","Byte.class"],"holderArrayType":["int[].class","int[].class","int[].class","int[].class","int[].class","int[].class","short[].class","short[].class","short[].class","short[].class","short[].class","short[].class","byte[].class","byte[].class","byte[].class","byte[].class","byte[].class","byte[].class"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"NumericType conversion to holder types yields expected results. 
[2]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"when","text":"","code":["def result = num.convertToHolder( from )"]}, + + {"kind":"then","text":"","code":["result == expected"]}, + + {"kind":"and","text":"","code":["result.class == expected.class"]}, + + {"kind":"and","text":"","code":["result.class == holderType"]}, + + {"kind":"and","text":"","code":["num.holderType() == holderType"]}, + + {"kind":"and","text":"","code":["num.holderArrayType() == holderArrayType"]}, + + {"kind":"where","text":"","code":{"num":["new I32()","new I32()","new I32()","new I32()","new I32()","new I32()","new I16()","new I16()","new I16()","new I16()","new I16()","new I16()","new I8()","new I8()","new I8()","new I8()","new I8()","new I8()"],"from":["3 as Byte","8 as Integer","863.834 as Double","2 as Short","9 as Long","23.422 as Float","3 as Byte","8 as Integer","863.834 as Double","2 as Short","9 as Long","23.422 as Float","3 as Byte","8 as Integer","863.834 as Double","2 as Short","9 as Long","23.422 as Float"],"expected":["3 as Integer","8 as Integer","863 as Integer","2 as Integer","9 as Integer","23 as Integer","3 as Short","8 as Short","863 as Short","2 as Short","9 as Short","23 as Short","3 as Byte","8 as Byte","863 as Byte","2 as Byte","9 as Byte","23 as Byte"],"holderType":["Integer.class","Integer.class","Integer.class","Integer.class","Integer.class","Integer.class","Short.class","Short.class","Short.class","Short.class","Short.class","Short.class","Byte.class","Byte.class","Byte.class","Byte.class","Byte.class","Byte.class"],"holderArrayType":["int[].class","int[].class","int[].class","int[].class","int[].class","int[].class","short[].class","short[].class","short[].class","short[].class","short[].class","short[].class","byte[].class","byte[].class","byte[].class","byte[].class","byte[].class","byte[].class"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"NumericType conversion to holder types yields expected results. 
[3]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"when","text":"","code":["def result = num.convertToHolder( from )"]}, + + {"kind":"then","text":"","code":["result == expected"]}, + + {"kind":"and","text":"","code":["result.class == expected.class"]}, + + {"kind":"and","text":"","code":["result.class == holderType"]}, + + {"kind":"and","text":"","code":["num.holderType() == holderType"]}, + + {"kind":"and","text":"","code":["num.holderArrayType() == holderArrayType"]}, + + {"kind":"where","text":"","code":{"num":["new I32()","new I32()","new I32()","new I32()","new I32()","new I32()","new I16()","new I16()","new I16()","new I16()","new I16()","new I16()","new I8()","new I8()","new I8()","new I8()","new I8()","new I8()"],"from":["3 as Byte","8 as Integer","863.834 as Double","2 as Short","9 as Long","23.422 as Float","3 as Byte","8 as Integer","863.834 as Double","2 as Short","9 as Long","23.422 as Float","3 as Byte","8 as Integer","863.834 as Double","2 as Short","9 as Long","23.422 as Float"],"expected":["3 as Integer","8 as Integer","863 as Integer","2 as Integer","9 as Integer","23 as Integer","3 as Short","8 as Short","863 as Short","2 as Short","9 as Short","23 as Short","3 as Byte","8 as Byte","863 as Byte","2 as Byte","9 as Byte","23 as Byte"],"holderType":["Integer.class","Integer.class","Integer.class","Integer.class","Integer.class","Integer.class","Short.class","Short.class","Short.class","Short.class","Short.class","Short.class","Byte.class","Byte.class","Byte.class","Byte.class","Byte.class","Byte.class"],"holderArrayType":["int[].class","int[].class","int[].class","int[].class","int[].class","int[].class","short[].class","short[].class","short[].class","short[].class","short[].class","short[].class","byte[].class","byte[].class","byte[].class","byte[].class","byte[].class","byte[].class"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"NumericType conversion to holder types yields expected results. 
[4]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"when","text":"","code":["def result = num.convertToHolder( from )"]}, + + {"kind":"then","text":"","code":["result == expected"]}, + + {"kind":"and","text":"","code":["result.class == expected.class"]}, + + {"kind":"and","text":"","code":["result.class == holderType"]}, + + {"kind":"and","text":"","code":["num.holderType() == holderType"]}, + + {"kind":"and","text":"","code":["num.holderArrayType() == holderArrayType"]}, + + {"kind":"where","text":"","code":{"num":["new I32()","new I32()","new I32()","new I32()","new I32()","new I32()","new I16()","new I16()","new I16()","new I16()","new I16()","new I16()","new I8()","new I8()","new I8()","new I8()","new I8()","new I8()"],"from":["3 as Byte","8 as Integer","863.834 as Double","2 as Short","9 as Long","23.422 as Float","3 as Byte","8 as Integer","863.834 as Double","2 as Short","9 as Long","23.422 as Float","3 as Byte","8 as Integer","863.834 as Double","2 as Short","9 as Long","23.422 as Float"],"expected":["3 as Integer","8 as Integer","863 as Integer","2 as Integer","9 as Integer","23 as Integer","3 as Short","8 as Short","863 as Short","2 as Short","9 as Short","23 as Short","3 as Byte","8 as Byte","863 as Byte","2 as Byte","9 as Byte","23 as Byte"],"holderType":["Integer.class","Integer.class","Integer.class","Integer.class","Integer.class","Integer.class","Short.class","Short.class","Short.class","Short.class","Short.class","Short.class","Byte.class","Byte.class","Byte.class","Byte.class","Byte.class","Byte.class"],"holderArrayType":["int[].class","int[].class","int[].class","int[].class","int[].class","int[].class","short[].class","short[].class","short[].class","short[].class","short[].class","short[].class","byte[].class","byte[].class","byte[].class","byte[].class","byte[].class","byte[].class"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"NumericType conversion to holder types yields expected results. 
[5]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"when","text":"","code":["def result = num.convertToHolder( from )"]}, + + {"kind":"then","text":"","code":["result == expected"]}, + + {"kind":"and","text":"","code":["result.class == expected.class"]}, + + {"kind":"and","text":"","code":["result.class == holderType"]}, + + {"kind":"and","text":"","code":["num.holderType() == holderType"]}, + + {"kind":"and","text":"","code":["num.holderArrayType() == holderArrayType"]}, + + {"kind":"where","text":"","code":{"num":["new I32()","new I32()","new I32()","new I32()","new I32()","new I32()","new I16()","new I16()","new I16()","new I16()","new I16()","new I16()","new I8()","new I8()","new I8()","new I8()","new I8()","new I8()"],"from":["3 as Byte","8 as Integer","863.834 as Double","2 as Short","9 as Long","23.422 as Float","3 as Byte","8 as Integer","863.834 as Double","2 as Short","9 as Long","23.422 as Float","3 as Byte","8 as Integer","863.834 as Double","2 as Short","9 as Long","23.422 as Float"],"expected":["3 as Integer","8 as Integer","863 as Integer","2 as Integer","9 as Integer","23 as Integer","3 as Short","8 as Short","863 as Short","2 as Short","9 as Short","23 as Short","3 as Byte","8 as Byte","863 as Byte","2 as Byte","9 as Byte","23 as Byte"],"holderType":["Integer.class","Integer.class","Integer.class","Integer.class","Integer.class","Integer.class","Short.class","Short.class","Short.class","Short.class","Short.class","Short.class","Byte.class","Byte.class","Byte.class","Byte.class","Byte.class","Byte.class"],"holderArrayType":["int[].class","int[].class","int[].class","int[].class","int[].class","int[].class","short[].class","short[].class","short[].class","short[].class","short[].class","short[].class","byte[].class","byte[].class","byte[].class","byte[].class","byte[].class","byte[].class"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"NumericType conversion to holder types yields expected results. 
[6]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"when","text":"","code":["def result = num.convertToHolder( from )"]}, + + {"kind":"then","text":"","code":["result == expected"]}, + + {"kind":"and","text":"","code":["result.class == expected.class"]}, + + {"kind":"and","text":"","code":["result.class == holderType"]}, + + {"kind":"and","text":"","code":["num.holderType() == holderType"]}, + + {"kind":"and","text":"","code":["num.holderArrayType() == holderArrayType"]}, + + {"kind":"where","text":"","code":{"num":["new I32()","new I32()","new I32()","new I32()","new I32()","new I32()","new I16()","new I16()","new I16()","new I16()","new I16()","new I16()","new I8()","new I8()","new I8()","new I8()","new I8()","new I8()"],"from":["3 as Byte","8 as Integer","863.834 as Double","2 as Short","9 as Long","23.422 as Float","3 as Byte","8 as Integer","863.834 as Double","2 as Short","9 as Long","23.422 as Float","3 as Byte","8 as Integer","863.834 as Double","2 as Short","9 as Long","23.422 as Float"],"expected":["3 as Integer","8 as Integer","863 as Integer","2 as Integer","9 as Integer","23 as Integer","3 as Short","8 as Short","863 as Short","2 as Short","9 as Short","23 as Short","3 as Byte","8 as Byte","863 as Byte","2 as Byte","9 as Byte","23 as Byte"],"holderType":["Integer.class","Integer.class","Integer.class","Integer.class","Integer.class","Integer.class","Short.class","Short.class","Short.class","Short.class","Short.class","Short.class","Byte.class","Byte.class","Byte.class","Byte.class","Byte.class","Byte.class"],"holderArrayType":["int[].class","int[].class","int[].class","int[].class","int[].class","int[].class","short[].class","short[].class","short[].class","short[].class","short[].class","short[].class","byte[].class","byte[].class","byte[].class","byte[].class","byte[].class","byte[].class"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"NumericType conversion to holder types yields expected results. 
[7]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"when","text":"","code":["def result = num.convertToHolder( from )"]}, + + {"kind":"then","text":"","code":["result == expected"]}, + + {"kind":"and","text":"","code":["result.class == expected.class"]}, + + {"kind":"and","text":"","code":["result.class == holderType"]}, + + {"kind":"and","text":"","code":["num.holderType() == holderType"]}, + + {"kind":"and","text":"","code":["num.holderArrayType() == holderArrayType"]}, + + {"kind":"where","text":"","code":{"num":["new I32()","new I32()","new I32()","new I32()","new I32()","new I32()","new I16()","new I16()","new I16()","new I16()","new I16()","new I16()","new I8()","new I8()","new I8()","new I8()","new I8()","new I8()"],"from":["3 as Byte","8 as Integer","863.834 as Double","2 as Short","9 as Long","23.422 as Float","3 as Byte","8 as Integer","863.834 as Double","2 as Short","9 as Long","23.422 as Float","3 as Byte","8 as Integer","863.834 as Double","2 as Short","9 as Long","23.422 as Float"],"expected":["3 as Integer","8 as Integer","863 as Integer","2 as Integer","9 as Integer","23 as Integer","3 as Short","8 as Short","863 as Short","2 as Short","9 as Short","23 as Short","3 as Byte","8 as Byte","863 as Byte","2 as Byte","9 as Byte","23 as Byte"],"holderType":["Integer.class","Integer.class","Integer.class","Integer.class","Integer.class","Integer.class","Short.class","Short.class","Short.class","Short.class","Short.class","Short.class","Byte.class","Byte.class","Byte.class","Byte.class","Byte.class","Byte.class"],"holderArrayType":["int[].class","int[].class","int[].class","int[].class","int[].class","int[].class","short[].class","short[].class","short[].class","short[].class","short[].class","short[].class","byte[].class","byte[].class","byte[].class","byte[].class","byte[].class","byte[].class"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"NumericType conversion to holder types yields expected results. 
[8]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"when","text":"","code":["def result = num.convertToHolder( from )"]}, + + {"kind":"then","text":"","code":["result == expected"]}, + + {"kind":"and","text":"","code":["result.class == expected.class"]}, + + {"kind":"and","text":"","code":["result.class == holderType"]}, + + {"kind":"and","text":"","code":["num.holderType() == holderType"]}, + + {"kind":"and","text":"","code":["num.holderArrayType() == holderArrayType"]}, + + {"kind":"where","text":"","code":{"num":["new I32()","new I32()","new I32()","new I32()","new I32()","new I32()","new I16()","new I16()","new I16()","new I16()","new I16()","new I16()","new I8()","new I8()","new I8()","new I8()","new I8()","new I8()"],"from":["3 as Byte","8 as Integer","863.834 as Double","2 as Short","9 as Long","23.422 as Float","3 as Byte","8 as Integer","863.834 as Double","2 as Short","9 as Long","23.422 as Float","3 as Byte","8 as Integer","863.834 as Double","2 as Short","9 as Long","23.422 as Float"],"expected":["3 as Integer","8 as Integer","863 as Integer","2 as Integer","9 as Integer","23 as Integer","3 as Short","8 as Short","863 as Short","2 as Short","9 as Short","23 as Short","3 as Byte","8 as Byte","863 as Byte","2 as Byte","9 as Byte","23 as Byte"],"holderType":["Integer.class","Integer.class","Integer.class","Integer.class","Integer.class","Integer.class","Short.class","Short.class","Short.class","Short.class","Short.class","Short.class","Byte.class","Byte.class","Byte.class","Byte.class","Byte.class","Byte.class"],"holderArrayType":["int[].class","int[].class","int[].class","int[].class","int[].class","int[].class","short[].class","short[].class","short[].class","short[].class","short[].class","short[].class","byte[].class","byte[].class","byte[].class","byte[].class","byte[].class","byte[].class"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"NumericType conversion to holder types yields expected results. 
[9]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"when","text":"","code":["def result = num.convertToHolder( from )"]}, + + {"kind":"then","text":"","code":["result == expected"]}, + + {"kind":"and","text":"","code":["result.class == expected.class"]}, + + {"kind":"and","text":"","code":["result.class == holderType"]}, + + {"kind":"and","text":"","code":["num.holderType() == holderType"]}, + + {"kind":"and","text":"","code":["num.holderArrayType() == holderArrayType"]}, + + {"kind":"where","text":"","code":{"num":["new I32()","new I32()","new I32()","new I32()","new I32()","new I32()","new I16()","new I16()","new I16()","new I16()","new I16()","new I16()","new I8()","new I8()","new I8()","new I8()","new I8()","new I8()"],"from":["3 as Byte","8 as Integer","863.834 as Double","2 as Short","9 as Long","23.422 as Float","3 as Byte","8 as Integer","863.834 as Double","2 as Short","9 as Long","23.422 as Float","3 as Byte","8 as Integer","863.834 as Double","2 as Short","9 as Long","23.422 as Float"],"expected":["3 as Integer","8 as Integer","863 as Integer","2 as Integer","9 as Integer","23 as Integer","3 as Short","8 as Short","863 as Short","2 as Short","9 as Short","23 as Short","3 as Byte","8 as Byte","863 as Byte","2 as Byte","9 as Byte","23 as Byte"],"holderType":["Integer.class","Integer.class","Integer.class","Integer.class","Integer.class","Integer.class","Short.class","Short.class","Short.class","Short.class","Short.class","Short.class","Byte.class","Byte.class","Byte.class","Byte.class","Byte.class","Byte.class"],"holderArrayType":["int[].class","int[].class","int[].class","int[].class","int[].class","int[].class","short[].class","short[].class","short[].class","short[].class","short[].class","short[].class","byte[].class","byte[].class","byte[].class","byte[].class","byte[].class","byte[].class"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"NumericType conversion to holder types yields expected results. 
[10]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"when","text":"","code":["def result = num.convertToHolder( from )"]}, + + {"kind":"then","text":"","code":["result == expected"]}, + + {"kind":"and","text":"","code":["result.class == expected.class"]}, + + {"kind":"and","text":"","code":["result.class == holderType"]}, + + {"kind":"and","text":"","code":["num.holderType() == holderType"]}, + + {"kind":"and","text":"","code":["num.holderArrayType() == holderArrayType"]}, + + {"kind":"where","text":"","code":{"num":["new I32()","new I32()","new I32()","new I32()","new I32()","new I32()","new I16()","new I16()","new I16()","new I16()","new I16()","new I16()","new I8()","new I8()","new I8()","new I8()","new I8()","new I8()"],"from":["3 as Byte","8 as Integer","863.834 as Double","2 as Short","9 as Long","23.422 as Float","3 as Byte","8 as Integer","863.834 as Double","2 as Short","9 as Long","23.422 as Float","3 as Byte","8 as Integer","863.834 as Double","2 as Short","9 as Long","23.422 as Float"],"expected":["3 as Integer","8 as Integer","863 as Integer","2 as Integer","9 as Integer","23 as Integer","3 as Short","8 as Short","863 as Short","2 as Short","9 as Short","23 as Short","3 as Byte","8 as Byte","863 as Byte","2 as Byte","9 as Byte","23 as Byte"],"holderType":["Integer.class","Integer.class","Integer.class","Integer.class","Integer.class","Integer.class","Short.class","Short.class","Short.class","Short.class","Short.class","Short.class","Byte.class","Byte.class","Byte.class","Byte.class","Byte.class","Byte.class"],"holderArrayType":["int[].class","int[].class","int[].class","int[].class","int[].class","int[].class","short[].class","short[].class","short[].class","short[].class","short[].class","short[].class","byte[].class","byte[].class","byte[].class","byte[].class","byte[].class","byte[].class"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"NumericType conversion to holder types yields expected results. 
[11]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"when","text":"","code":["def result = num.convertToHolder( from )"]}, + + {"kind":"then","text":"","code":["result == expected"]}, + + {"kind":"and","text":"","code":["result.class == expected.class"]}, + + {"kind":"and","text":"","code":["result.class == holderType"]}, + + {"kind":"and","text":"","code":["num.holderType() == holderType"]}, + + {"kind":"and","text":"","code":["num.holderArrayType() == holderArrayType"]}, + + {"kind":"where","text":"","code":{"num":["new I32()","new I32()","new I32()","new I32()","new I32()","new I32()","new I16()","new I16()","new I16()","new I16()","new I16()","new I16()","new I8()","new I8()","new I8()","new I8()","new I8()","new I8()"],"from":["3 as Byte","8 as Integer","863.834 as Double","2 as Short","9 as Long","23.422 as Float","3 as Byte","8 as Integer","863.834 as Double","2 as Short","9 as Long","23.422 as Float","3 as Byte","8 as Integer","863.834 as Double","2 as Short","9 as Long","23.422 as Float"],"expected":["3 as Integer","8 as Integer","863 as Integer","2 as Integer","9 as Integer","23 as Integer","3 as Short","8 as Short","863 as Short","2 as Short","9 as Short","23 as Short","3 as Byte","8 as Byte","863 as Byte","2 as Byte","9 as Byte","23 as Byte"],"holderType":["Integer.class","Integer.class","Integer.class","Integer.class","Integer.class","Integer.class","Short.class","Short.class","Short.class","Short.class","Short.class","Short.class","Byte.class","Byte.class","Byte.class","Byte.class","Byte.class","Byte.class"],"holderArrayType":["int[].class","int[].class","int[].class","int[].class","int[].class","int[].class","short[].class","short[].class","short[].class","short[].class","short[].class","short[].class","byte[].class","byte[].class","byte[].class","byte[].class","byte[].class","byte[].class"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"NumericType conversion to holder types yields expected results. 
[12]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"when","text":"","code":["def result = num.convertToHolder( from )"]}, + + {"kind":"then","text":"","code":["result == expected"]}, + + {"kind":"and","text":"","code":["result.class == expected.class"]}, + + {"kind":"and","text":"","code":["result.class == holderType"]}, + + {"kind":"and","text":"","code":["num.holderType() == holderType"]}, + + {"kind":"and","text":"","code":["num.holderArrayType() == holderArrayType"]}, + + {"kind":"where","text":"","code":{"num":["new I32()","new I32()","new I32()","new I32()","new I32()","new I32()","new I16()","new I16()","new I16()","new I16()","new I16()","new I16()","new I8()","new I8()","new I8()","new I8()","new I8()","new I8()"],"from":["3 as Byte","8 as Integer","863.834 as Double","2 as Short","9 as Long","23.422 as Float","3 as Byte","8 as Integer","863.834 as Double","2 as Short","9 as Long","23.422 as Float","3 as Byte","8 as Integer","863.834 as Double","2 as Short","9 as Long","23.422 as Float"],"expected":["3 as Integer","8 as Integer","863 as Integer","2 as Integer","9 as Integer","23 as Integer","3 as Short","8 as Short","863 as Short","2 as Short","9 as Short","23 as Short","3 as Byte","8 as Byte","863 as Byte","2 as Byte","9 as Byte","23 as Byte"],"holderType":["Integer.class","Integer.class","Integer.class","Integer.class","Integer.class","Integer.class","Short.class","Short.class","Short.class","Short.class","Short.class","Short.class","Byte.class","Byte.class","Byte.class","Byte.class","Byte.class","Byte.class"],"holderArrayType":["int[].class","int[].class","int[].class","int[].class","int[].class","int[].class","short[].class","short[].class","short[].class","short[].class","short[].class","short[].class","byte[].class","byte[].class","byte[].class","byte[].class","byte[].class","byte[].class"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"NumericType conversion to holder types yields expected results. 
[13]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"when","text":"","code":["def result = num.convertToHolder( from )"]}, + + {"kind":"then","text":"","code":["result == expected"]}, + + {"kind":"and","text":"","code":["result.class == expected.class"]}, + + {"kind":"and","text":"","code":["result.class == holderType"]}, + + {"kind":"and","text":"","code":["num.holderType() == holderType"]}, + + {"kind":"and","text":"","code":["num.holderArrayType() == holderArrayType"]}, + + {"kind":"where","text":"","code":{"num":["new I32()","new I32()","new I32()","new I32()","new I32()","new I32()","new I16()","new I16()","new I16()","new I16()","new I16()","new I16()","new I8()","new I8()","new I8()","new I8()","new I8()","new I8()"],"from":["3 as Byte","8 as Integer","863.834 as Double","2 as Short","9 as Long","23.422 as Float","3 as Byte","8 as Integer","863.834 as Double","2 as Short","9 as Long","23.422 as Float","3 as Byte","8 as Integer","863.834 as Double","2 as Short","9 as Long","23.422 as Float"],"expected":["3 as Integer","8 as Integer","863 as Integer","2 as Integer","9 as Integer","23 as Integer","3 as Short","8 as Short","863 as Short","2 as Short","9 as Short","23 as Short","3 as Byte","8 as Byte","863 as Byte","2 as Byte","9 as Byte","23 as Byte"],"holderType":["Integer.class","Integer.class","Integer.class","Integer.class","Integer.class","Integer.class","Short.class","Short.class","Short.class","Short.class","Short.class","Short.class","Byte.class","Byte.class","Byte.class","Byte.class","Byte.class","Byte.class"],"holderArrayType":["int[].class","int[].class","int[].class","int[].class","int[].class","int[].class","short[].class","short[].class","short[].class","short[].class","short[].class","short[].class","byte[].class","byte[].class","byte[].class","byte[].class","byte[].class","byte[].class"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"NumericType conversion to holder types yields expected results. 
[14]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"when","text":"","code":["def result = num.convertToHolder( from )"]}, + + {"kind":"then","text":"","code":["result == expected"]}, + + {"kind":"and","text":"","code":["result.class == expected.class"]}, + + {"kind":"and","text":"","code":["result.class == holderType"]}, + + {"kind":"and","text":"","code":["num.holderType() == holderType"]}, + + {"kind":"and","text":"","code":["num.holderArrayType() == holderArrayType"]}, + + {"kind":"where","text":"","code":{"num":["new I32()","new I32()","new I32()","new I32()","new I32()","new I32()","new I16()","new I16()","new I16()","new I16()","new I16()","new I16()","new I8()","new I8()","new I8()","new I8()","new I8()","new I8()"],"from":["3 as Byte","8 as Integer","863.834 as Double","2 as Short","9 as Long","23.422 as Float","3 as Byte","8 as Integer","863.834 as Double","2 as Short","9 as Long","23.422 as Float","3 as Byte","8 as Integer","863.834 as Double","2 as Short","9 as Long","23.422 as Float"],"expected":["3 as Integer","8 as Integer","863 as Integer","2 as Integer","9 as Integer","23 as Integer","3 as Short","8 as Short","863 as Short","2 as Short","9 as Short","23 as Short","3 as Byte","8 as Byte","863 as Byte","2 as Byte","9 as Byte","23 as Byte"],"holderType":["Integer.class","Integer.class","Integer.class","Integer.class","Integer.class","Integer.class","Short.class","Short.class","Short.class","Short.class","Short.class","Short.class","Byte.class","Byte.class","Byte.class","Byte.class","Byte.class","Byte.class"],"holderArrayType":["int[].class","int[].class","int[].class","int[].class","int[].class","int[].class","short[].class","short[].class","short[].class","short[].class","short[].class","short[].class","byte[].class","byte[].class","byte[].class","byte[].class","byte[].class","byte[].class"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"NumericType conversion to holder types yields expected results. 
[15]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"when","text":"","code":["def result = num.convertToHolder( from )"]}, + + {"kind":"then","text":"","code":["result == expected"]}, + + {"kind":"and","text":"","code":["result.class == expected.class"]}, + + {"kind":"and","text":"","code":["result.class == holderType"]}, + + {"kind":"and","text":"","code":["num.holderType() == holderType"]}, + + {"kind":"and","text":"","code":["num.holderArrayType() == holderArrayType"]}, + + {"kind":"where","text":"","code":{"num":["new I32()","new I32()","new I32()","new I32()","new I32()","new I32()","new I16()","new I16()","new I16()","new I16()","new I16()","new I16()","new I8()","new I8()","new I8()","new I8()","new I8()","new I8()"],"from":["3 as Byte","8 as Integer","863.834 as Double","2 as Short","9 as Long","23.422 as Float","3 as Byte","8 as Integer","863.834 as Double","2 as Short","9 as Long","23.422 as Float","3 as Byte","8 as Integer","863.834 as Double","2 as Short","9 as Long","23.422 as Float"],"expected":["3 as Integer","8 as Integer","863 as Integer","2 as Integer","9 as Integer","23 as Integer","3 as Short","8 as Short","863 as Short","2 as Short","9 as Short","23 as Short","3 as Byte","8 as Byte","863 as Byte","2 as Byte","9 as Byte","23 as Byte"],"holderType":["Integer.class","Integer.class","Integer.class","Integer.class","Integer.class","Integer.class","Short.class","Short.class","Short.class","Short.class","Short.class","Short.class","Byte.class","Byte.class","Byte.class","Byte.class","Byte.class","Byte.class"],"holderArrayType":["int[].class","int[].class","int[].class","int[].class","int[].class","int[].class","short[].class","short[].class","short[].class","short[].class","short[].class","short[].class","byte[].class","byte[].class","byte[].class","byte[].class","byte[].class","byte[].class"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"NumericType conversion to holder types yields expected results. 
[16]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"when","text":"","code":["def result = num.convertToHolder( from )"]}, + + {"kind":"then","text":"","code":["result == expected"]}, + + {"kind":"and","text":"","code":["result.class == expected.class"]}, + + {"kind":"and","text":"","code":["result.class == holderType"]}, + + {"kind":"and","text":"","code":["num.holderType() == holderType"]}, + + {"kind":"and","text":"","code":["num.holderArrayType() == holderArrayType"]}, + + {"kind":"where","text":"","code":{"num":["new I32()","new I32()","new I32()","new I32()","new I32()","new I32()","new I16()","new I16()","new I16()","new I16()","new I16()","new I16()","new I8()","new I8()","new I8()","new I8()","new I8()","new I8()"],"from":["3 as Byte","8 as Integer","863.834 as Double","2 as Short","9 as Long","23.422 as Float","3 as Byte","8 as Integer","863.834 as Double","2 as Short","9 as Long","23.422 as Float","3 as Byte","8 as Integer","863.834 as Double","2 as Short","9 as Long","23.422 as Float"],"expected":["3 as Integer","8 as Integer","863 as Integer","2 as Integer","9 as Integer","23 as Integer","3 as Short","8 as Short","863 as Short","2 as Short","9 as Short","23 as Short","3 as Byte","8 as Byte","863 as Byte","2 as Byte","9 as Byte","23 as Byte"],"holderType":["Integer.class","Integer.class","Integer.class","Integer.class","Integer.class","Integer.class","Short.class","Short.class","Short.class","Short.class","Short.class","Short.class","Byte.class","Byte.class","Byte.class","Byte.class","Byte.class","Byte.class"],"holderArrayType":["int[].class","int[].class","int[].class","int[].class","int[].class","int[].class","short[].class","short[].class","short[].class","short[].class","short[].class","short[].class","byte[].class","byte[].class","byte[].class","byte[].class","byte[].class","byte[].class"]}} ], "problems":{"dataValues":[], "errors":[]} }, { - "id":"NumericType conversion to holder types yields expected results.", + "id":"NumericType conversion to holder types yields expected results. [17]", "result":"PASS", "duration":"0", "iterations":{ diff --git a/docs/spock/reports/ut.framing.Tensor_Framing_Spec.json b/docs/spock/reports/ut.framing.Tensor_Framing_Spec.json index 1f6e94478..5d4505cfb 100644 --- a/docs/spock/reports/ut.framing.Tensor_Framing_Spec.json +++ b/docs/spock/reports/ut.framing.Tensor_Framing_Spec.json @@ -1,7 +1,7 @@ { "className":"ut.framing.Tensor_Framing_Spec", "title":"Naming Tensors and their Dimensions.", - "narrative":"A powerful concept in the data science as well as machine learning\n world is something usually referred to as \"Data Frames\".\n These are highly flexible 2D data structures\n used to load and store CSV, CRV, etc... files for\n data exploration and further processing.\n Data frames are so powerful because\n their indices are labeled and therefore human readable.\n Neureka's tensors are general purpose data containers\n which may also stored data in 2 dimensions whose\n indices may also be something other than integers.", + "narrative":"A powerful concept in the data science as well as machine learning\n world is something usually referred to as \"Data Frames\".\n These are highly flexible 2D data structures\n used to load and store CSV, CRV, etc... 
files for \n data exploration and further processing.\n Data frames are so powerful because\n their indices are labeled and therefore human readable.\n Neureka's tensors are general purpose data containers\n which may also stored data in 2 dimensions whose\n indices may also be something other than integers.", "subjects":["neureka.Tensor","neureka.framing.NDFrame"], "statistics":{ "runs":"4", @@ -9,14 +9,14 @@ "failures":"0", "errors":"0", "skipped":"0", - "duration":"0.014 seconds" + "duration":"0.035 seconds" }, "headers":["\n This specification covers the behavior\n of tensors with respect to specifying aliases for\n indices and then using them for slicing. \n "],"tags":{},"see":[], "features":[ { "id":"We can add labels to tensors through lists or maps passed to the \"label()\" method.", "result":"PASS", - "duration":"0.004 seconds", + "duration":"0.012 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -43,7 +43,7 @@ { "id":"A matrix (rank 2 tensor) can be labeled and their labels can be used to extract slices / subsets.", "result":"PASS", - "duration":"0.002 seconds", + "duration":"0.007 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -84,7 +84,7 @@ { "id":"Rank 3 tensors can be labeled and their labels can be used to extract slices / subsets of tensors.", "result":"PASS", - "duration":"0.003 seconds", + "duration":"0.009 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -129,7 +129,7 @@ { "id":"A tensor can be labeled partially.", "result":"PASS", - "duration":"0", + "duration":"0.002 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, diff --git a/docs/spock/reports/ut.introductions.Tensor_NDArray_Spec.json b/docs/spock/reports/ut.introductions.Tensor_NDArray_Spec.json index 4f5af0af0..8ca9667ae 100644 --- a/docs/spock/reports/ut.introductions.Tensor_NDArray_Spec.json +++ b/docs/spock/reports/ut.introductions.Tensor_NDArray_Spec.json @@ -1,7 +1,7 @@ { "className":"ut.introductions.Tensor_NDArray_Spec", "title":"Tensors or Nd-arrays", - "narrative":"*What is the difference?*\n\nIn the world of machine learning we use something called a **'tensor'** to represent data.\nThey might be called **'nd-arrays'** in some other frameworks,\nbut although they are very similar,\nthere are also some important distinctions to be made between these two concepts.\nBoth are at their core merely multidimensional arrays, however,\nthey are different in their typical usage and API.\nnd-arrays are merely used to represent any type of data as a\ncollection of elements in a multidimensional grid,\ntensors on the other hand have additional requirements.\nThey are a type of nd-array which stores numeric data\nas well as expose various mathematical operations for said data.\nIn that sense it is actually merely a more complex kind of number.\nThis concept actually comes from the field of physics,\nwhere it is used to represent a physical quantity.\n\nNeureka models both concepts through the `Tensor` and the `Nda` interfaces.\n`Nda` is an abbreviation of `NdArray`, and `Tensor` is an abbreviation of `Tensor`.\nThe `Tensor` type is a subtype of the `Nda` type, exposing additional methods\nlike for example `plus`, `minus`, `times` and `divide`.\nBoth can be instantiated through static factory methods (and a fluent builder API).", + "narrative":"*What is the difference?*\n\nIn the world of machine learning we use something called a **'tensor'** to represent data.\nThey might be called **'nd-arrays'** in some other frameworks,\nbut although they are very 
similar, \nthere are also some important distinctions to be made between these two concepts.\nBoth are at their core merely multidimensional arrays, however,\nthey are different in their typical usage and API.\nnd-arrays are merely used to represent any type of data as a \ncollection of elements in a multidimensional grid, \ntensors on the other hand have additional requirements.\nThey are a type of nd-array which stores numeric data \nas well as expose various mathematical operations for said data.\nIn that sense it is actually merely a more complex kind of number.\nThis concept actually comes from the field of physics, \nwhere it is used to represent a physical quantity.\n\nNeureka models both concepts through the `Tensor` and the `Nda` interfaces.\n`Nda` is an abbreviation of `NdArray`, and `Tensor` is an abbreviation of `Tensor`.\nThe `Tensor` type is a subtype of the `Nda` type, exposing additional methods\nlike for example `plus`, `minus`, `times` and `divide`.\nBoth can be instantiated through static factory methods (and a fluent builder API).", "subjects":["neureka.Nda","neureka.Tensor"], "statistics":{ "runs":"2", @@ -9,14 +9,14 @@ "failures":"0", "errors":"0", "skipped":"0", - "duration":"0.003 seconds" + "duration":"0.009 seconds" }, "headers":[],"tags":{},"see":[], "features":[ { "id":"Tensor is a subtype of NdArray.", "result":"PASS", - "duration":"0.001 seconds", + "duration":"0.004 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -33,7 +33,7 @@ { "id":"We can use tensors for numeric calculations (but not nd-arrays).", "result":"PASS", - "duration":"0", + "duration":"0.003 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, diff --git a/docs/spock/reports/ut.math.BackendContext_Spec.json b/docs/spock/reports/ut.math.BackendContext_Spec.json index 6de53af54..7e1126ed3 100644 --- a/docs/spock/reports/ut.math.BackendContext_Spec.json +++ b/docs/spock/reports/ut.math.BackendContext_Spec.json @@ -4,19 +4,19 @@ "narrative":"This specification defines the expected behaviour of the backend context\n which should expose a convenient API to work with.\n This API should allow for tasks to be running on a given context\n which is important for testing and modularity not only\n during library startup but also throughout the runtime.", "subjects":["neureka.backend.api.Operation","neureka.backend.api.BackendContext"], "statistics":{ - "runs":"3", + "runs":"5", "successRate":"100.0%", "failures":"0", "errors":"0", "skipped":"0", - "duration":"0.006 seconds" + "duration":"0.013 seconds" }, "headers":[],"tags":{},"see":[], "features":[ { "id":"BackendContext instances can be created by cloning from Singleton instance.", "result":"PASS", - "duration":"0.001 seconds", + "duration":"0.004 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -43,7 +43,7 @@ { "id":"BackendContext instances return Runner instances for easy visiting.", "result":"PASS", - "duration":"0.001 seconds", + "duration":"0.003 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -66,7 +66,65 @@ }, { - "id":"BackendContext instances return Runner instances for easy visiting with return values.", + "id":"BackendContext instances return Runner instances for easy visiting with return values. 
[0]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"The current thread local BackendContext instance.","code":["def current = Neureka.get().backend()"]}, + + {"kind":"and","text":"A clone is being created by calling \"clone()\" on the given context...","code":["def clone = current.clone()"]}, + + {"kind":"and","text":"We wrap a Runner instance around a wrapper which will test its methods!","code":["def run = runWrapper( clone.runner() )"]}, + + {"kind":"when","text":"Querying the thread local context inside the Runner...","code":["def innerContext = run { Neureka.get().backend() }"]}, + + {"kind":"and","text":"...also outside the Runner lambda...","code":["def outerContext = Neureka.get().backend()"]}, + + {"kind":"then","text":"These two context instances will be different objects!","code":["innerContext != outerContext"]}, + + {"kind":"and","text":"The inner context will in fact be the clone which provided the Runner!","code":["innerContext == clone"]}, + + {"kind":"and","text":"The outer context is as expected simply the current context.","code":["outerContext == current"]}, + + {"kind":"where","text":"The following conceptually identical Runner methods can be used:","code":{"runWrapper":["(BackendContext.Runner runner) -> { (arg) -> runner.call(arg) }","(BackendContext.Runner runner) -> { (arg) -> runner.invoke(arg) }","(BackendContext.Runner runner) -> { (arg) -> runner.runAndGet(arg) }"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"BackendContext instances return Runner instances for easy visiting with return values. [1]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"The current thread local BackendContext instance.","code":["def current = Neureka.get().backend()"]}, + + {"kind":"and","text":"A clone is being created by calling \"clone()\" on the given context...","code":["def clone = current.clone()"]}, + + {"kind":"and","text":"We wrap a Runner instance around a wrapper which will test its methods!","code":["def run = runWrapper( clone.runner() )"]}, + + {"kind":"when","text":"Querying the thread local context inside the Runner...","code":["def innerContext = run { Neureka.get().backend() }"]}, + + {"kind":"and","text":"...also outside the Runner lambda...","code":["def outerContext = Neureka.get().backend()"]}, + + {"kind":"then","text":"These two context instances will be different objects!","code":["innerContext != outerContext"]}, + + {"kind":"and","text":"The inner context will in fact be the clone which provided the Runner!","code":["innerContext == clone"]}, + + {"kind":"and","text":"The outer context is as expected simply the current context.","code":["outerContext == current"]}, + + {"kind":"where","text":"The following conceptually identical Runner methods can be used:","code":{"runWrapper":["(BackendContext.Runner runner) -> { (arg) -> runner.call(arg) }","(BackendContext.Runner runner) -> { (arg) -> runner.invoke(arg) }","(BackendContext.Runner runner) -> { (arg) -> runner.runAndGet(arg) }"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"BackendContext instances return Runner instances for easy visiting with return values. 
[2]", "result":"PASS", "duration":"0", "iterations":{ diff --git a/docs/spock/reports/ut.math.ConCat_Spec.json b/docs/spock/reports/ut.math.ConCat_Spec.json index 10468990f..7b67d942e 100644 --- a/docs/spock/reports/ut.math.ConCat_Spec.json +++ b/docs/spock/reports/ut.math.ConCat_Spec.json @@ -1,22 +1,22 @@ { "className":"ut.math.ConCat_Spec", "title":"Merging Tensors", - "narrative":"Tensors can not only be sliced, but also merged.\n This is most easily achieved through the concatenation operation,\n which stacks 2 tensors alongside a specified axis.\n This specification not only covers how you can concatenate tensors,\n but also how this works alongside autograd and non-numeric tensors.", + "narrative":"Tensors can not only be sliced, but also merged.\n This is most easily achieved through the concatenation operation, \n which stacks 2 tensors alongside a specified axis.\n This specification not only covers how you can concatenate tensors,\n but also how this works alongside autograd and non-numeric tensors.", "subjects":[], "statistics":{ - "runs":"5", + "runs":"7", "successRate":"100.0%", "failures":"0", "errors":"0", "skipped":"0", - "duration":"0.017 seconds" + "duration":"0.060 seconds" }, "headers":[],"tags":{},"see":[], "features":[ { "id":"We can concatenate 2 tensors alongside a specified axis!", "result":"PASS", - "duration":"0.005 seconds", + "duration":"0.011 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -41,7 +41,7 @@ { "id":"We can concatenate 2 float tensors alongside a specified axis!", "result":"PASS", - "duration":"0.001 seconds", + "duration":"0.008 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -66,7 +66,7 @@ { "id":"We can concatenate 2 string tensors alongside a specified axis!", "result":"PASS", - "duration":"0.001 seconds", + "duration":"0.005 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -85,9 +85,36 @@ }, { - "id":"We can concatenate and then back-propagate 2 simple float tensors alongside a specified axis!", + "id":"We can concatenate and then back-propagate 2 simple float tensors alongside a specified axis! [0]", + "result":"PASS", + "duration":"0.008 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create 2 rank 2 tensors, which we want to concatenate, where both require gradients.","code":["var a = Tensor.of(Float, [3, 1], [8, -4, 7]).setRqsGradient(true)","var b = Tensor.of(Float).withShape(3, 1).andFill(5, -1, 2).setRqsGradient(true)"]}, + + {"kind":"and","text":"A function which should perform the concatenation.","code":["var cat = Function.of('concat(I[0], I[1])')"]}, + + {"kind":"when","text":"We call the previously created function alongside the axis alongside we want to concatenate.","code":["var c = cat.with(Arg.Axis.of(1)).call(a, b)"]}, + + {"kind":"then","text":"The resulting tensor should have the expected shape.","code":["c.shape() == [3, 2]"]}, + + {"kind":"when","text":"","code":["var y = c / 2"]}, + + {"kind":"and","text":"","code":["y.backward(Tensor.ofFloats().withShape(3,2).andFill(-1, 2, 0.5, 3, -0.1, 4))"]}, + + {"kind":"then","text":"","code":["a.gradient.get().items == [-0.5, 0.25, -0.05] as float[]","b.gradient.get().items == [1.0, 1.5, 2.0] as float[]"]}, + + {"kind":"where","text":"","code":{"device":["CPU.get()","Device.get(OpenCLDevice, 'gpu')"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can concatenate and then back-propagate 2 simple float tensors alongside a specified axis! 
[1]", "result":"PASS", - "duration":"0.001 seconds", + "duration":"0", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -112,9 +139,36 @@ }, { - "id":"We can concatenate and then back-propagate 3 simple float tensors alongside a specified axis!", + "id":"We can concatenate and then back-propagate 3 simple float tensors alongside a specified axis! [0]", + "result":"PASS", + "duration":"0.007 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create 2 rank 2 tensors, which we want to concatenate, where both require gradients.","code":["var a = Tensor.of(Float, [1, 3], [8, -4, 7]).setRqsGradient(true)","var b = Tensor.of(Float).withShape(1, 3).andFill(5, -1, 2).setRqsGradient(true)","var c = Tensor.ofRandom(Float, 1, 3).setRqsGradient(true)"]}, + + {"kind":"and","text":"A function which should perform the concatenation.","code":["var cat = Function.of('concat(I[0], I[1], I[2])')"]}, + + {"kind":"when","text":"We call the previously created function alongside the axis alongside we want to concatenate.","code":["var d = cat.with(Arg.Axis.of(0)).call(a, b, c)"]}, + + {"kind":"then","text":"The resulting tensor should have the expected shape.","code":["d.shape() == [3, 3]"]}, + + {"kind":"when","text":"","code":["var y = d ** 2"]}, + + {"kind":"and","text":"","code":["y.backward(Tensor.ofFloats().withShape(3,3).andFill(-1, 2, 0.5, 3, -0.1, 4))"]}, + + {"kind":"then","text":"","code":["a.gradient.get().items == [-16, -16, 7] as float[]","b.gradient.get().items == [30, 0.2, 16] as float[]","c.gradient.get().items == [0.30829078, -3.1254156, -0.52700233] as float[]"]}, + + {"kind":"where","text":"","code":{"device":["CPU.get()","Device.get(OpenCLDevice, 'gpu')"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can concatenate and then back-propagate 3 simple float tensors alongside a specified axis! 
[1]", "result":"PASS", - "duration":"0.002 seconds", + "duration":"0", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, diff --git a/docs/spock/reports/ut.math.Function_Exception_Spec.json b/docs/spock/reports/ut.math.Function_Exception_Spec.json index c8c8d3306..7166aaef1 100644 --- a/docs/spock/reports/ut.math.Function_Exception_Spec.json +++ b/docs/spock/reports/ut.math.Function_Exception_Spec.json @@ -9,14 +9,14 @@ "failures":"0", "errors":"0", "skipped":"0", - "duration":"0.001 seconds" + "duration":"0.006 seconds" }, "headers":[],"tags":{},"see":[], "features":[ { "id":"Function throws exception when not enough inputs provided.", "result":"PASS", - "duration":"0", + "duration":"0.002 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -37,7 +37,7 @@ { "id":"Function throws exception when arity does not match input number.", "result":"PASS", - "duration":"0", + "duration":"0.001 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, diff --git a/docs/spock/reports/ut.math.Function_Parsing_Spec.json b/docs/spock/reports/ut.math.Function_Parsing_Spec.json index d6d152d14..8cd12533f 100644 --- a/docs/spock/reports/ut.math.Function_Parsing_Spec.json +++ b/docs/spock/reports/ut.math.Function_Parsing_Spec.json @@ -4,17 +4,17 @@ "narrative":"Neureka uses the 'Function' interface as a representation of a\n nested structure of operations.\n This means that a 'Function' is simply an abstract syntax trees made up of other 'Function' implementations\n which are assembled together by a parser receiving a string expression.\n In this specification we ensure that function expressions will be properly parsed into\n 'Function' implementations.", "subjects":["neureka.math.Function","neureka.math.parsing.FunctionParser"], "statistics":{ - "runs":"3", + "runs":"37", "successRate":"100.0%", "failures":"0", "errors":"0", "skipped":"0", - "duration":"0.020 seconds" + "duration":"0.039 seconds" }, "headers":["\n This specification ensures that functions can be created from String expressions\n using factory methods on the interface neureka.math.Function interface.\n The implementation details as to how exactly this leads to an abstract syntax tree\n will not be covered here.\n This is because the parsing procedure is rather complex and the only thing we care about \n is the result. \n
        \n Within a given expression String passed to the parser, function inputs are\n recognized by 'I[j]', 'Ij' or 'ij', where j is the input index.\n Functions accept arrays as their inputs,\n which is why variables must be targeted in such a way.\n There are also many mathematical functions like 'sig(..)', 'tanh(..)', 'sin(..)', 'cos(..)' \n and many more which are recognised by the parser. \n Other than that the syntax is rather mundane with respect to traditional\n operations like for example plus '+', minus '-', times '*', ... etc.
        \n "],"tags":{},"see":[], "features":[ { - "id":"Test parsed equations when building Function instances.", + "id":"Test parsed equations when building Function instances. [0]", "result":"PASS", "duration":"0", "iterations":{ @@ -29,7 +29,418 @@ }, { - "id":"Parsed equations throw expected error messages.", + "id":"Test parsed equations when building Function instances. [1]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"A Function created from a given expression will be parsed as expected.","code":["Function.of(equation).toString() == expected"]}, + + {"kind":"where","text":"The following expressions and expected exception messages are being used :","code":{"equation":["\"fast_tanh(i0*i1)\"","\"ig0*(igj)xI[g1]\"","\"sumJs(ij)\"","\"sumJs(1*(4-2/ij))\"","\"quadratic(sftpls(Ij))\"","\"softplus(I[3]**(3/i1)/sumJs(Ij**2)-23+I0/i1)\"","\"1+3+5-23+I0*45/(345-651**I3-6)\"","\"sin(23*i1)-cos(i0**0.3)+tanh(23)\"","\"4 *-2\"","\"fast_gaus(i0*i1)\"","\"2*3/1-2\"","\"3x5xI[4]xI[3]\"","\"[1,0, 5,3, 4]:(tanh(i0xi1))\"","\"[0,2, 1,3, -1](sig(I0))\"","\"I[0]<-I[1]\"","\"quadratic(I[0]) <- (I[1] <- I[2])\"","\"((tanh(i0)\"","'($$(gaus(i0*()'","\"rrlu(i0)\"","\"th(i0)*gaaus(i0+I1)\"","\"dimtrim(I[0])\"","\"add(I[0], 3, 3/I[1])\"","\"multiply(1, 4, -2, I[1])\"","\"divide(I[0], 3*I[1], I[3]-6)\"","\"i0@i1\""],"expected":["\"fast_tanh(I[0] * I[1])\"","\"((Ig[0] * Ig[j]) x Ig[1])\"","\"sumJs(I[j])\"","\"sumJs(1.0 * (4.0 - (2.0 / I[j])))\"","\"quad(softplus(I[j]))\"","\"softplus((((I[3] ** (3.0 / I[1])) / sumJs(I[j] ** 2.0)) - 23.0) + (I[0] / I[1]))\"","\"(1.0 + 3.0 + (5.0 - 23.0) + (I[0] * (45.0 / (345.0 - (651.0 ** I[3]) - 6.0))))\"","\"((sin(23.0 * I[1]) - cos(I[0] ** 0.3)) + tanh(23.0))\"","\"(4.0 * -2.0)\"","\"fast_gaus(I[0] * I[1])\"","\"((2.0 * (3.0 / 1.0)) - 2.0)\"","\"(((3.0 x 5.0) x I[4]) x I[3])\"","\"([1,0,5,3,4]:(tanh(I[0] x I[1])))\"","\"([0,2,1,3,-1]:(sig(I[0])))\"","\"(I[0] <- I[1])\"","\"(quad(I[0]) <- (I[1] <- I[2]))\"","\"tanh(I[0])\"","\"gaus(I[0] * 0.0)\"","\"relu(I[0])\"","\"(tanh(I[0]) * gaus(I[0] + I[1]))\"","\"dimtrim(I[0])\"","\"(I[0] + 3.0 + (3.0 / I[1]))\"","\"(1.0 * 4.0 * -2.0 * I[1])\"","\"(I[0] / (3.0 * I[1]) / (I[3] - 6.0))\"","\"(I[0] @ I[1])\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Test parsed equations when building Function instances. 
[2]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"A Function created from a given expression will be parsed as expected.","code":["Function.of(equation).toString() == expected"]}, + + {"kind":"where","text":"The following expressions and expected exception messages are being used :","code":{"equation":["\"fast_tanh(i0*i1)\"","\"ig0*(igj)xI[g1]\"","\"sumJs(ij)\"","\"sumJs(1*(4-2/ij))\"","\"quadratic(sftpls(Ij))\"","\"softplus(I[3]**(3/i1)/sumJs(Ij**2)-23+I0/i1)\"","\"1+3+5-23+I0*45/(345-651**I3-6)\"","\"sin(23*i1)-cos(i0**0.3)+tanh(23)\"","\"4 *-2\"","\"fast_gaus(i0*i1)\"","\"2*3/1-2\"","\"3x5xI[4]xI[3]\"","\"[1,0, 5,3, 4]:(tanh(i0xi1))\"","\"[0,2, 1,3, -1](sig(I0))\"","\"I[0]<-I[1]\"","\"quadratic(I[0]) <- (I[1] <- I[2])\"","\"((tanh(i0)\"","'($$(gaus(i0*()'","\"rrlu(i0)\"","\"th(i0)*gaaus(i0+I1)\"","\"dimtrim(I[0])\"","\"add(I[0], 3, 3/I[1])\"","\"multiply(1, 4, -2, I[1])\"","\"divide(I[0], 3*I[1], I[3]-6)\"","\"i0@i1\""],"expected":["\"fast_tanh(I[0] * I[1])\"","\"((Ig[0] * Ig[j]) x Ig[1])\"","\"sumJs(I[j])\"","\"sumJs(1.0 * (4.0 - (2.0 / I[j])))\"","\"quad(softplus(I[j]))\"","\"softplus((((I[3] ** (3.0 / I[1])) / sumJs(I[j] ** 2.0)) - 23.0) + (I[0] / I[1]))\"","\"(1.0 + 3.0 + (5.0 - 23.0) + (I[0] * (45.0 / (345.0 - (651.0 ** I[3]) - 6.0))))\"","\"((sin(23.0 * I[1]) - cos(I[0] ** 0.3)) + tanh(23.0))\"","\"(4.0 * -2.0)\"","\"fast_gaus(I[0] * I[1])\"","\"((2.0 * (3.0 / 1.0)) - 2.0)\"","\"(((3.0 x 5.0) x I[4]) x I[3])\"","\"([1,0,5,3,4]:(tanh(I[0] x I[1])))\"","\"([0,2,1,3,-1]:(sig(I[0])))\"","\"(I[0] <- I[1])\"","\"(quad(I[0]) <- (I[1] <- I[2]))\"","\"tanh(I[0])\"","\"gaus(I[0] * 0.0)\"","\"relu(I[0])\"","\"(tanh(I[0]) * gaus(I[0] + I[1]))\"","\"dimtrim(I[0])\"","\"(I[0] + 3.0 + (3.0 / I[1]))\"","\"(1.0 * 4.0 * -2.0 * I[1])\"","\"(I[0] / (3.0 * I[1]) / (I[3] - 6.0))\"","\"(I[0] @ I[1])\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Test parsed equations when building Function instances. 
[3]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"A Function created from a given expression will be parsed as expected.","code":["Function.of(equation).toString() == expected"]}, + + {"kind":"where","text":"The following expressions and expected exception messages are being used :","code":{"equation":["\"fast_tanh(i0*i1)\"","\"ig0*(igj)xI[g1]\"","\"sumJs(ij)\"","\"sumJs(1*(4-2/ij))\"","\"quadratic(sftpls(Ij))\"","\"softplus(I[3]**(3/i1)/sumJs(Ij**2)-23+I0/i1)\"","\"1+3+5-23+I0*45/(345-651**I3-6)\"","\"sin(23*i1)-cos(i0**0.3)+tanh(23)\"","\"4 *-2\"","\"fast_gaus(i0*i1)\"","\"2*3/1-2\"","\"3x5xI[4]xI[3]\"","\"[1,0, 5,3, 4]:(tanh(i0xi1))\"","\"[0,2, 1,3, -1](sig(I0))\"","\"I[0]<-I[1]\"","\"quadratic(I[0]) <- (I[1] <- I[2])\"","\"((tanh(i0)\"","'($$(gaus(i0*()'","\"rrlu(i0)\"","\"th(i0)*gaaus(i0+I1)\"","\"dimtrim(I[0])\"","\"add(I[0], 3, 3/I[1])\"","\"multiply(1, 4, -2, I[1])\"","\"divide(I[0], 3*I[1], I[3]-6)\"","\"i0@i1\""],"expected":["\"fast_tanh(I[0] * I[1])\"","\"((Ig[0] * Ig[j]) x Ig[1])\"","\"sumJs(I[j])\"","\"sumJs(1.0 * (4.0 - (2.0 / I[j])))\"","\"quad(softplus(I[j]))\"","\"softplus((((I[3] ** (3.0 / I[1])) / sumJs(I[j] ** 2.0)) - 23.0) + (I[0] / I[1]))\"","\"(1.0 + 3.0 + (5.0 - 23.0) + (I[0] * (45.0 / (345.0 - (651.0 ** I[3]) - 6.0))))\"","\"((sin(23.0 * I[1]) - cos(I[0] ** 0.3)) + tanh(23.0))\"","\"(4.0 * -2.0)\"","\"fast_gaus(I[0] * I[1])\"","\"((2.0 * (3.0 / 1.0)) - 2.0)\"","\"(((3.0 x 5.0) x I[4]) x I[3])\"","\"([1,0,5,3,4]:(tanh(I[0] x I[1])))\"","\"([0,2,1,3,-1]:(sig(I[0])))\"","\"(I[0] <- I[1])\"","\"(quad(I[0]) <- (I[1] <- I[2]))\"","\"tanh(I[0])\"","\"gaus(I[0] * 0.0)\"","\"relu(I[0])\"","\"(tanh(I[0]) * gaus(I[0] + I[1]))\"","\"dimtrim(I[0])\"","\"(I[0] + 3.0 + (3.0 / I[1]))\"","\"(1.0 * 4.0 * -2.0 * I[1])\"","\"(I[0] / (3.0 * I[1]) / (I[3] - 6.0))\"","\"(I[0] @ I[1])\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Test parsed equations when building Function instances. 
[4]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"A Function created from a given expression will be parsed as expected.","code":["Function.of(equation).toString() == expected"]}, + + {"kind":"where","text":"The following expressions and expected exception messages are being used :","code":{"equation":["\"fast_tanh(i0*i1)\"","\"ig0*(igj)xI[g1]\"","\"sumJs(ij)\"","\"sumJs(1*(4-2/ij))\"","\"quadratic(sftpls(Ij))\"","\"softplus(I[3]**(3/i1)/sumJs(Ij**2)-23+I0/i1)\"","\"1+3+5-23+I0*45/(345-651**I3-6)\"","\"sin(23*i1)-cos(i0**0.3)+tanh(23)\"","\"4 *-2\"","\"fast_gaus(i0*i1)\"","\"2*3/1-2\"","\"3x5xI[4]xI[3]\"","\"[1,0, 5,3, 4]:(tanh(i0xi1))\"","\"[0,2, 1,3, -1](sig(I0))\"","\"I[0]<-I[1]\"","\"quadratic(I[0]) <- (I[1] <- I[2])\"","\"((tanh(i0)\"","'($$(gaus(i0*()'","\"rrlu(i0)\"","\"th(i0)*gaaus(i0+I1)\"","\"dimtrim(I[0])\"","\"add(I[0], 3, 3/I[1])\"","\"multiply(1, 4, -2, I[1])\"","\"divide(I[0], 3*I[1], I[3]-6)\"","\"i0@i1\""],"expected":["\"fast_tanh(I[0] * I[1])\"","\"((Ig[0] * Ig[j]) x Ig[1])\"","\"sumJs(I[j])\"","\"sumJs(1.0 * (4.0 - (2.0 / I[j])))\"","\"quad(softplus(I[j]))\"","\"softplus((((I[3] ** (3.0 / I[1])) / sumJs(I[j] ** 2.0)) - 23.0) + (I[0] / I[1]))\"","\"(1.0 + 3.0 + (5.0 - 23.0) + (I[0] * (45.0 / (345.0 - (651.0 ** I[3]) - 6.0))))\"","\"((sin(23.0 * I[1]) - cos(I[0] ** 0.3)) + tanh(23.0))\"","\"(4.0 * -2.0)\"","\"fast_gaus(I[0] * I[1])\"","\"((2.0 * (3.0 / 1.0)) - 2.0)\"","\"(((3.0 x 5.0) x I[4]) x I[3])\"","\"([1,0,5,3,4]:(tanh(I[0] x I[1])))\"","\"([0,2,1,3,-1]:(sig(I[0])))\"","\"(I[0] <- I[1])\"","\"(quad(I[0]) <- (I[1] <- I[2]))\"","\"tanh(I[0])\"","\"gaus(I[0] * 0.0)\"","\"relu(I[0])\"","\"(tanh(I[0]) * gaus(I[0] + I[1]))\"","\"dimtrim(I[0])\"","\"(I[0] + 3.0 + (3.0 / I[1]))\"","\"(1.0 * 4.0 * -2.0 * I[1])\"","\"(I[0] / (3.0 * I[1]) / (I[3] - 6.0))\"","\"(I[0] @ I[1])\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Test parsed equations when building Function instances. 
[5]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"A Function created from a given expression will be parsed as expected.","code":["Function.of(equation).toString() == expected"]}, + + {"kind":"where","text":"The following expressions and expected exception messages are being used :","code":{"equation":["\"fast_tanh(i0*i1)\"","\"ig0*(igj)xI[g1]\"","\"sumJs(ij)\"","\"sumJs(1*(4-2/ij))\"","\"quadratic(sftpls(Ij))\"","\"softplus(I[3]**(3/i1)/sumJs(Ij**2)-23+I0/i1)\"","\"1+3+5-23+I0*45/(345-651**I3-6)\"","\"sin(23*i1)-cos(i0**0.3)+tanh(23)\"","\"4 *-2\"","\"fast_gaus(i0*i1)\"","\"2*3/1-2\"","\"3x5xI[4]xI[3]\"","\"[1,0, 5,3, 4]:(tanh(i0xi1))\"","\"[0,2, 1,3, -1](sig(I0))\"","\"I[0]<-I[1]\"","\"quadratic(I[0]) <- (I[1] <- I[2])\"","\"((tanh(i0)\"","'($$(gaus(i0*()'","\"rrlu(i0)\"","\"th(i0)*gaaus(i0+I1)\"","\"dimtrim(I[0])\"","\"add(I[0], 3, 3/I[1])\"","\"multiply(1, 4, -2, I[1])\"","\"divide(I[0], 3*I[1], I[3]-6)\"","\"i0@i1\""],"expected":["\"fast_tanh(I[0] * I[1])\"","\"((Ig[0] * Ig[j]) x Ig[1])\"","\"sumJs(I[j])\"","\"sumJs(1.0 * (4.0 - (2.0 / I[j])))\"","\"quad(softplus(I[j]))\"","\"softplus((((I[3] ** (3.0 / I[1])) / sumJs(I[j] ** 2.0)) - 23.0) + (I[0] / I[1]))\"","\"(1.0 + 3.0 + (5.0 - 23.0) + (I[0] * (45.0 / (345.0 - (651.0 ** I[3]) - 6.0))))\"","\"((sin(23.0 * I[1]) - cos(I[0] ** 0.3)) + tanh(23.0))\"","\"(4.0 * -2.0)\"","\"fast_gaus(I[0] * I[1])\"","\"((2.0 * (3.0 / 1.0)) - 2.0)\"","\"(((3.0 x 5.0) x I[4]) x I[3])\"","\"([1,0,5,3,4]:(tanh(I[0] x I[1])))\"","\"([0,2,1,3,-1]:(sig(I[0])))\"","\"(I[0] <- I[1])\"","\"(quad(I[0]) <- (I[1] <- I[2]))\"","\"tanh(I[0])\"","\"gaus(I[0] * 0.0)\"","\"relu(I[0])\"","\"(tanh(I[0]) * gaus(I[0] + I[1]))\"","\"dimtrim(I[0])\"","\"(I[0] + 3.0 + (3.0 / I[1]))\"","\"(1.0 * 4.0 * -2.0 * I[1])\"","\"(I[0] / (3.0 * I[1]) / (I[3] - 6.0))\"","\"(I[0] @ I[1])\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Test parsed equations when building Function instances. 
[6]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"A Function created from a given expression will be parsed as expected.","code":["Function.of(equation).toString() == expected"]}, + + {"kind":"where","text":"The following expressions and expected exception messages are being used :","code":{"equation":["\"fast_tanh(i0*i1)\"","\"ig0*(igj)xI[g1]\"","\"sumJs(ij)\"","\"sumJs(1*(4-2/ij))\"","\"quadratic(sftpls(Ij))\"","\"softplus(I[3]**(3/i1)/sumJs(Ij**2)-23+I0/i1)\"","\"1+3+5-23+I0*45/(345-651**I3-6)\"","\"sin(23*i1)-cos(i0**0.3)+tanh(23)\"","\"4 *-2\"","\"fast_gaus(i0*i1)\"","\"2*3/1-2\"","\"3x5xI[4]xI[3]\"","\"[1,0, 5,3, 4]:(tanh(i0xi1))\"","\"[0,2, 1,3, -1](sig(I0))\"","\"I[0]<-I[1]\"","\"quadratic(I[0]) <- (I[1] <- I[2])\"","\"((tanh(i0)\"","'($$(gaus(i0*()'","\"rrlu(i0)\"","\"th(i0)*gaaus(i0+I1)\"","\"dimtrim(I[0])\"","\"add(I[0], 3, 3/I[1])\"","\"multiply(1, 4, -2, I[1])\"","\"divide(I[0], 3*I[1], I[3]-6)\"","\"i0@i1\""],"expected":["\"fast_tanh(I[0] * I[1])\"","\"((Ig[0] * Ig[j]) x Ig[1])\"","\"sumJs(I[j])\"","\"sumJs(1.0 * (4.0 - (2.0 / I[j])))\"","\"quad(softplus(I[j]))\"","\"softplus((((I[3] ** (3.0 / I[1])) / sumJs(I[j] ** 2.0)) - 23.0) + (I[0] / I[1]))\"","\"(1.0 + 3.0 + (5.0 - 23.0) + (I[0] * (45.0 / (345.0 - (651.0 ** I[3]) - 6.0))))\"","\"((sin(23.0 * I[1]) - cos(I[0] ** 0.3)) + tanh(23.0))\"","\"(4.0 * -2.0)\"","\"fast_gaus(I[0] * I[1])\"","\"((2.0 * (3.0 / 1.0)) - 2.0)\"","\"(((3.0 x 5.0) x I[4]) x I[3])\"","\"([1,0,5,3,4]:(tanh(I[0] x I[1])))\"","\"([0,2,1,3,-1]:(sig(I[0])))\"","\"(I[0] <- I[1])\"","\"(quad(I[0]) <- (I[1] <- I[2]))\"","\"tanh(I[0])\"","\"gaus(I[0] * 0.0)\"","\"relu(I[0])\"","\"(tanh(I[0]) * gaus(I[0] + I[1]))\"","\"dimtrim(I[0])\"","\"(I[0] + 3.0 + (3.0 / I[1]))\"","\"(1.0 * 4.0 * -2.0 * I[1])\"","\"(I[0] / (3.0 * I[1]) / (I[3] - 6.0))\"","\"(I[0] @ I[1])\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Test parsed equations when building Function instances. 
[7]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"A Function created from a given expression will be parsed as expected.","code":["Function.of(equation).toString() == expected"]}, + + {"kind":"where","text":"The following expressions and expected exception messages are being used :","code":{"equation":["\"fast_tanh(i0*i1)\"","\"ig0*(igj)xI[g1]\"","\"sumJs(ij)\"","\"sumJs(1*(4-2/ij))\"","\"quadratic(sftpls(Ij))\"","\"softplus(I[3]**(3/i1)/sumJs(Ij**2)-23+I0/i1)\"","\"1+3+5-23+I0*45/(345-651**I3-6)\"","\"sin(23*i1)-cos(i0**0.3)+tanh(23)\"","\"4 *-2\"","\"fast_gaus(i0*i1)\"","\"2*3/1-2\"","\"3x5xI[4]xI[3]\"","\"[1,0, 5,3, 4]:(tanh(i0xi1))\"","\"[0,2, 1,3, -1](sig(I0))\"","\"I[0]<-I[1]\"","\"quadratic(I[0]) <- (I[1] <- I[2])\"","\"((tanh(i0)\"","'($$(gaus(i0*()'","\"rrlu(i0)\"","\"th(i0)*gaaus(i0+I1)\"","\"dimtrim(I[0])\"","\"add(I[0], 3, 3/I[1])\"","\"multiply(1, 4, -2, I[1])\"","\"divide(I[0], 3*I[1], I[3]-6)\"","\"i0@i1\""],"expected":["\"fast_tanh(I[0] * I[1])\"","\"((Ig[0] * Ig[j]) x Ig[1])\"","\"sumJs(I[j])\"","\"sumJs(1.0 * (4.0 - (2.0 / I[j])))\"","\"quad(softplus(I[j]))\"","\"softplus((((I[3] ** (3.0 / I[1])) / sumJs(I[j] ** 2.0)) - 23.0) + (I[0] / I[1]))\"","\"(1.0 + 3.0 + (5.0 - 23.0) + (I[0] * (45.0 / (345.0 - (651.0 ** I[3]) - 6.0))))\"","\"((sin(23.0 * I[1]) - cos(I[0] ** 0.3)) + tanh(23.0))\"","\"(4.0 * -2.0)\"","\"fast_gaus(I[0] * I[1])\"","\"((2.0 * (3.0 / 1.0)) - 2.0)\"","\"(((3.0 x 5.0) x I[4]) x I[3])\"","\"([1,0,5,3,4]:(tanh(I[0] x I[1])))\"","\"([0,2,1,3,-1]:(sig(I[0])))\"","\"(I[0] <- I[1])\"","\"(quad(I[0]) <- (I[1] <- I[2]))\"","\"tanh(I[0])\"","\"gaus(I[0] * 0.0)\"","\"relu(I[0])\"","\"(tanh(I[0]) * gaus(I[0] + I[1]))\"","\"dimtrim(I[0])\"","\"(I[0] + 3.0 + (3.0 / I[1]))\"","\"(1.0 * 4.0 * -2.0 * I[1])\"","\"(I[0] / (3.0 * I[1]) / (I[3] - 6.0))\"","\"(I[0] @ I[1])\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Test parsed equations when building Function instances. 
[8]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"A Function created from a given expression will be parsed as expected.","code":["Function.of(equation).toString() == expected"]}, + + {"kind":"where","text":"The following expressions and expected exception messages are being used :","code":{"equation":["\"fast_tanh(i0*i1)\"","\"ig0*(igj)xI[g1]\"","\"sumJs(ij)\"","\"sumJs(1*(4-2/ij))\"","\"quadratic(sftpls(Ij))\"","\"softplus(I[3]**(3/i1)/sumJs(Ij**2)-23+I0/i1)\"","\"1+3+5-23+I0*45/(345-651**I3-6)\"","\"sin(23*i1)-cos(i0**0.3)+tanh(23)\"","\"4 *-2\"","\"fast_gaus(i0*i1)\"","\"2*3/1-2\"","\"3x5xI[4]xI[3]\"","\"[1,0, 5,3, 4]:(tanh(i0xi1))\"","\"[0,2, 1,3, -1](sig(I0))\"","\"I[0]<-I[1]\"","\"quadratic(I[0]) <- (I[1] <- I[2])\"","\"((tanh(i0)\"","'($$(gaus(i0*()'","\"rrlu(i0)\"","\"th(i0)*gaaus(i0+I1)\"","\"dimtrim(I[0])\"","\"add(I[0], 3, 3/I[1])\"","\"multiply(1, 4, -2, I[1])\"","\"divide(I[0], 3*I[1], I[3]-6)\"","\"i0@i1\""],"expected":["\"fast_tanh(I[0] * I[1])\"","\"((Ig[0] * Ig[j]) x Ig[1])\"","\"sumJs(I[j])\"","\"sumJs(1.0 * (4.0 - (2.0 / I[j])))\"","\"quad(softplus(I[j]))\"","\"softplus((((I[3] ** (3.0 / I[1])) / sumJs(I[j] ** 2.0)) - 23.0) + (I[0] / I[1]))\"","\"(1.0 + 3.0 + (5.0 - 23.0) + (I[0] * (45.0 / (345.0 - (651.0 ** I[3]) - 6.0))))\"","\"((sin(23.0 * I[1]) - cos(I[0] ** 0.3)) + tanh(23.0))\"","\"(4.0 * -2.0)\"","\"fast_gaus(I[0] * I[1])\"","\"((2.0 * (3.0 / 1.0)) - 2.0)\"","\"(((3.0 x 5.0) x I[4]) x I[3])\"","\"([1,0,5,3,4]:(tanh(I[0] x I[1])))\"","\"([0,2,1,3,-1]:(sig(I[0])))\"","\"(I[0] <- I[1])\"","\"(quad(I[0]) <- (I[1] <- I[2]))\"","\"tanh(I[0])\"","\"gaus(I[0] * 0.0)\"","\"relu(I[0])\"","\"(tanh(I[0]) * gaus(I[0] + I[1]))\"","\"dimtrim(I[0])\"","\"(I[0] + 3.0 + (3.0 / I[1]))\"","\"(1.0 * 4.0 * -2.0 * I[1])\"","\"(I[0] / (3.0 * I[1]) / (I[3] - 6.0))\"","\"(I[0] @ I[1])\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Test parsed equations when building Function instances. 
[9]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"A Function created from a given expression will be parsed as expected.","code":["Function.of(equation).toString() == expected"]}, + + {"kind":"where","text":"The following expressions and expected exception messages are being used :","code":{"equation":["\"fast_tanh(i0*i1)\"","\"ig0*(igj)xI[g1]\"","\"sumJs(ij)\"","\"sumJs(1*(4-2/ij))\"","\"quadratic(sftpls(Ij))\"","\"softplus(I[3]**(3/i1)/sumJs(Ij**2)-23+I0/i1)\"","\"1+3+5-23+I0*45/(345-651**I3-6)\"","\"sin(23*i1)-cos(i0**0.3)+tanh(23)\"","\"4 *-2\"","\"fast_gaus(i0*i1)\"","\"2*3/1-2\"","\"3x5xI[4]xI[3]\"","\"[1,0, 5,3, 4]:(tanh(i0xi1))\"","\"[0,2, 1,3, -1](sig(I0))\"","\"I[0]<-I[1]\"","\"quadratic(I[0]) <- (I[1] <- I[2])\"","\"((tanh(i0)\"","'($$(gaus(i0*()'","\"rrlu(i0)\"","\"th(i0)*gaaus(i0+I1)\"","\"dimtrim(I[0])\"","\"add(I[0], 3, 3/I[1])\"","\"multiply(1, 4, -2, I[1])\"","\"divide(I[0], 3*I[1], I[3]-6)\"","\"i0@i1\""],"expected":["\"fast_tanh(I[0] * I[1])\"","\"((Ig[0] * Ig[j]) x Ig[1])\"","\"sumJs(I[j])\"","\"sumJs(1.0 * (4.0 - (2.0 / I[j])))\"","\"quad(softplus(I[j]))\"","\"softplus((((I[3] ** (3.0 / I[1])) / sumJs(I[j] ** 2.0)) - 23.0) + (I[0] / I[1]))\"","\"(1.0 + 3.0 + (5.0 - 23.0) + (I[0] * (45.0 / (345.0 - (651.0 ** I[3]) - 6.0))))\"","\"((sin(23.0 * I[1]) - cos(I[0] ** 0.3)) + tanh(23.0))\"","\"(4.0 * -2.0)\"","\"fast_gaus(I[0] * I[1])\"","\"((2.0 * (3.0 / 1.0)) - 2.0)\"","\"(((3.0 x 5.0) x I[4]) x I[3])\"","\"([1,0,5,3,4]:(tanh(I[0] x I[1])))\"","\"([0,2,1,3,-1]:(sig(I[0])))\"","\"(I[0] <- I[1])\"","\"(quad(I[0]) <- (I[1] <- I[2]))\"","\"tanh(I[0])\"","\"gaus(I[0] * 0.0)\"","\"relu(I[0])\"","\"(tanh(I[0]) * gaus(I[0] + I[1]))\"","\"dimtrim(I[0])\"","\"(I[0] + 3.0 + (3.0 / I[1]))\"","\"(1.0 * 4.0 * -2.0 * I[1])\"","\"(I[0] / (3.0 * I[1]) / (I[3] - 6.0))\"","\"(I[0] @ I[1])\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Test parsed equations when building Function instances. 
[10]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"A Function created from a given expression will be parsed as expected.","code":["Function.of(equation).toString() == expected"]}, + + {"kind":"where","text":"The following expressions and expected exception messages are being used :","code":{"equation":["\"fast_tanh(i0*i1)\"","\"ig0*(igj)xI[g1]\"","\"sumJs(ij)\"","\"sumJs(1*(4-2/ij))\"","\"quadratic(sftpls(Ij))\"","\"softplus(I[3]**(3/i1)/sumJs(Ij**2)-23+I0/i1)\"","\"1+3+5-23+I0*45/(345-651**I3-6)\"","\"sin(23*i1)-cos(i0**0.3)+tanh(23)\"","\"4 *-2\"","\"fast_gaus(i0*i1)\"","\"2*3/1-2\"","\"3x5xI[4]xI[3]\"","\"[1,0, 5,3, 4]:(tanh(i0xi1))\"","\"[0,2, 1,3, -1](sig(I0))\"","\"I[0]<-I[1]\"","\"quadratic(I[0]) <- (I[1] <- I[2])\"","\"((tanh(i0)\"","'($$(gaus(i0*()'","\"rrlu(i0)\"","\"th(i0)*gaaus(i0+I1)\"","\"dimtrim(I[0])\"","\"add(I[0], 3, 3/I[1])\"","\"multiply(1, 4, -2, I[1])\"","\"divide(I[0], 3*I[1], I[3]-6)\"","\"i0@i1\""],"expected":["\"fast_tanh(I[0] * I[1])\"","\"((Ig[0] * Ig[j]) x Ig[1])\"","\"sumJs(I[j])\"","\"sumJs(1.0 * (4.0 - (2.0 / I[j])))\"","\"quad(softplus(I[j]))\"","\"softplus((((I[3] ** (3.0 / I[1])) / sumJs(I[j] ** 2.0)) - 23.0) + (I[0] / I[1]))\"","\"(1.0 + 3.0 + (5.0 - 23.0) + (I[0] * (45.0 / (345.0 - (651.0 ** I[3]) - 6.0))))\"","\"((sin(23.0 * I[1]) - cos(I[0] ** 0.3)) + tanh(23.0))\"","\"(4.0 * -2.0)\"","\"fast_gaus(I[0] * I[1])\"","\"((2.0 * (3.0 / 1.0)) - 2.0)\"","\"(((3.0 x 5.0) x I[4]) x I[3])\"","\"([1,0,5,3,4]:(tanh(I[0] x I[1])))\"","\"([0,2,1,3,-1]:(sig(I[0])))\"","\"(I[0] <- I[1])\"","\"(quad(I[0]) <- (I[1] <- I[2]))\"","\"tanh(I[0])\"","\"gaus(I[0] * 0.0)\"","\"relu(I[0])\"","\"(tanh(I[0]) * gaus(I[0] + I[1]))\"","\"dimtrim(I[0])\"","\"(I[0] + 3.0 + (3.0 / I[1]))\"","\"(1.0 * 4.0 * -2.0 * I[1])\"","\"(I[0] / (3.0 * I[1]) / (I[3] - 6.0))\"","\"(I[0] @ I[1])\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Test parsed equations when building Function instances. 
[11]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"A Function created from a given expression will be parsed as expected.","code":["Function.of(equation).toString() == expected"]}, + + {"kind":"where","text":"The following expressions and expected exception messages are being used :","code":{"equation":["\"fast_tanh(i0*i1)\"","\"ig0*(igj)xI[g1]\"","\"sumJs(ij)\"","\"sumJs(1*(4-2/ij))\"","\"quadratic(sftpls(Ij))\"","\"softplus(I[3]**(3/i1)/sumJs(Ij**2)-23+I0/i1)\"","\"1+3+5-23+I0*45/(345-651**I3-6)\"","\"sin(23*i1)-cos(i0**0.3)+tanh(23)\"","\"4 *-2\"","\"fast_gaus(i0*i1)\"","\"2*3/1-2\"","\"3x5xI[4]xI[3]\"","\"[1,0, 5,3, 4]:(tanh(i0xi1))\"","\"[0,2, 1,3, -1](sig(I0))\"","\"I[0]<-I[1]\"","\"quadratic(I[0]) <- (I[1] <- I[2])\"","\"((tanh(i0)\"","'($$(gaus(i0*()'","\"rrlu(i0)\"","\"th(i0)*gaaus(i0+I1)\"","\"dimtrim(I[0])\"","\"add(I[0], 3, 3/I[1])\"","\"multiply(1, 4, -2, I[1])\"","\"divide(I[0], 3*I[1], I[3]-6)\"","\"i0@i1\""],"expected":["\"fast_tanh(I[0] * I[1])\"","\"((Ig[0] * Ig[j]) x Ig[1])\"","\"sumJs(I[j])\"","\"sumJs(1.0 * (4.0 - (2.0 / I[j])))\"","\"quad(softplus(I[j]))\"","\"softplus((((I[3] ** (3.0 / I[1])) / sumJs(I[j] ** 2.0)) - 23.0) + (I[0] / I[1]))\"","\"(1.0 + 3.0 + (5.0 - 23.0) + (I[0] * (45.0 / (345.0 - (651.0 ** I[3]) - 6.0))))\"","\"((sin(23.0 * I[1]) - cos(I[0] ** 0.3)) + tanh(23.0))\"","\"(4.0 * -2.0)\"","\"fast_gaus(I[0] * I[1])\"","\"((2.0 * (3.0 / 1.0)) - 2.0)\"","\"(((3.0 x 5.0) x I[4]) x I[3])\"","\"([1,0,5,3,4]:(tanh(I[0] x I[1])))\"","\"([0,2,1,3,-1]:(sig(I[0])))\"","\"(I[0] <- I[1])\"","\"(quad(I[0]) <- (I[1] <- I[2]))\"","\"tanh(I[0])\"","\"gaus(I[0] * 0.0)\"","\"relu(I[0])\"","\"(tanh(I[0]) * gaus(I[0] + I[1]))\"","\"dimtrim(I[0])\"","\"(I[0] + 3.0 + (3.0 / I[1]))\"","\"(1.0 * 4.0 * -2.0 * I[1])\"","\"(I[0] / (3.0 * I[1]) / (I[3] - 6.0))\"","\"(I[0] @ I[1])\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Test parsed equations when building Function instances. 
[12]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"A Function created from a given expression will be parsed as expected.","code":["Function.of(equation).toString() == expected"]}, + + {"kind":"where","text":"The following expressions and expected exception messages are being used :","code":{"equation":["\"fast_tanh(i0*i1)\"","\"ig0*(igj)xI[g1]\"","\"sumJs(ij)\"","\"sumJs(1*(4-2/ij))\"","\"quadratic(sftpls(Ij))\"","\"softplus(I[3]**(3/i1)/sumJs(Ij**2)-23+I0/i1)\"","\"1+3+5-23+I0*45/(345-651**I3-6)\"","\"sin(23*i1)-cos(i0**0.3)+tanh(23)\"","\"4 *-2\"","\"fast_gaus(i0*i1)\"","\"2*3/1-2\"","\"3x5xI[4]xI[3]\"","\"[1,0, 5,3, 4]:(tanh(i0xi1))\"","\"[0,2, 1,3, -1](sig(I0))\"","\"I[0]<-I[1]\"","\"quadratic(I[0]) <- (I[1] <- I[2])\"","\"((tanh(i0)\"","'($$(gaus(i0*()'","\"rrlu(i0)\"","\"th(i0)*gaaus(i0+I1)\"","\"dimtrim(I[0])\"","\"add(I[0], 3, 3/I[1])\"","\"multiply(1, 4, -2, I[1])\"","\"divide(I[0], 3*I[1], I[3]-6)\"","\"i0@i1\""],"expected":["\"fast_tanh(I[0] * I[1])\"","\"((Ig[0] * Ig[j]) x Ig[1])\"","\"sumJs(I[j])\"","\"sumJs(1.0 * (4.0 - (2.0 / I[j])))\"","\"quad(softplus(I[j]))\"","\"softplus((((I[3] ** (3.0 / I[1])) / sumJs(I[j] ** 2.0)) - 23.0) + (I[0] / I[1]))\"","\"(1.0 + 3.0 + (5.0 - 23.0) + (I[0] * (45.0 / (345.0 - (651.0 ** I[3]) - 6.0))))\"","\"((sin(23.0 * I[1]) - cos(I[0] ** 0.3)) + tanh(23.0))\"","\"(4.0 * -2.0)\"","\"fast_gaus(I[0] * I[1])\"","\"((2.0 * (3.0 / 1.0)) - 2.0)\"","\"(((3.0 x 5.0) x I[4]) x I[3])\"","\"([1,0,5,3,4]:(tanh(I[0] x I[1])))\"","\"([0,2,1,3,-1]:(sig(I[0])))\"","\"(I[0] <- I[1])\"","\"(quad(I[0]) <- (I[1] <- I[2]))\"","\"tanh(I[0])\"","\"gaus(I[0] * 0.0)\"","\"relu(I[0])\"","\"(tanh(I[0]) * gaus(I[0] + I[1]))\"","\"dimtrim(I[0])\"","\"(I[0] + 3.0 + (3.0 / I[1]))\"","\"(1.0 * 4.0 * -2.0 * I[1])\"","\"(I[0] / (3.0 * I[1]) / (I[3] - 6.0))\"","\"(I[0] @ I[1])\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Test parsed equations when building Function instances. 
[13]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"A Function created from a given expression will be parsed as expected.","code":["Function.of(equation).toString() == expected"]}, + + {"kind":"where","text":"The following expressions and expected exception messages are being used :","code":{"equation":["\"fast_tanh(i0*i1)\"","\"ig0*(igj)xI[g1]\"","\"sumJs(ij)\"","\"sumJs(1*(4-2/ij))\"","\"quadratic(sftpls(Ij))\"","\"softplus(I[3]**(3/i1)/sumJs(Ij**2)-23+I0/i1)\"","\"1+3+5-23+I0*45/(345-651**I3-6)\"","\"sin(23*i1)-cos(i0**0.3)+tanh(23)\"","\"4 *-2\"","\"fast_gaus(i0*i1)\"","\"2*3/1-2\"","\"3x5xI[4]xI[3]\"","\"[1,0, 5,3, 4]:(tanh(i0xi1))\"","\"[0,2, 1,3, -1](sig(I0))\"","\"I[0]<-I[1]\"","\"quadratic(I[0]) <- (I[1] <- I[2])\"","\"((tanh(i0)\"","'($$(gaus(i0*()'","\"rrlu(i0)\"","\"th(i0)*gaaus(i0+I1)\"","\"dimtrim(I[0])\"","\"add(I[0], 3, 3/I[1])\"","\"multiply(1, 4, -2, I[1])\"","\"divide(I[0], 3*I[1], I[3]-6)\"","\"i0@i1\""],"expected":["\"fast_tanh(I[0] * I[1])\"","\"((Ig[0] * Ig[j]) x Ig[1])\"","\"sumJs(I[j])\"","\"sumJs(1.0 * (4.0 - (2.0 / I[j])))\"","\"quad(softplus(I[j]))\"","\"softplus((((I[3] ** (3.0 / I[1])) / sumJs(I[j] ** 2.0)) - 23.0) + (I[0] / I[1]))\"","\"(1.0 + 3.0 + (5.0 - 23.0) + (I[0] * (45.0 / (345.0 - (651.0 ** I[3]) - 6.0))))\"","\"((sin(23.0 * I[1]) - cos(I[0] ** 0.3)) + tanh(23.0))\"","\"(4.0 * -2.0)\"","\"fast_gaus(I[0] * I[1])\"","\"((2.0 * (3.0 / 1.0)) - 2.0)\"","\"(((3.0 x 5.0) x I[4]) x I[3])\"","\"([1,0,5,3,4]:(tanh(I[0] x I[1])))\"","\"([0,2,1,3,-1]:(sig(I[0])))\"","\"(I[0] <- I[1])\"","\"(quad(I[0]) <- (I[1] <- I[2]))\"","\"tanh(I[0])\"","\"gaus(I[0] * 0.0)\"","\"relu(I[0])\"","\"(tanh(I[0]) * gaus(I[0] + I[1]))\"","\"dimtrim(I[0])\"","\"(I[0] + 3.0 + (3.0 / I[1]))\"","\"(1.0 * 4.0 * -2.0 * I[1])\"","\"(I[0] / (3.0 * I[1]) / (I[3] - 6.0))\"","\"(I[0] @ I[1])\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Test parsed equations when building Function instances. 
[14]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"A Function created from a given expression will be parsed as expected.","code":["Function.of(equation).toString() == expected"]}, + + {"kind":"where","text":"The following expressions and expected exception messages are being used :","code":{"equation":["\"fast_tanh(i0*i1)\"","\"ig0*(igj)xI[g1]\"","\"sumJs(ij)\"","\"sumJs(1*(4-2/ij))\"","\"quadratic(sftpls(Ij))\"","\"softplus(I[3]**(3/i1)/sumJs(Ij**2)-23+I0/i1)\"","\"1+3+5-23+I0*45/(345-651**I3-6)\"","\"sin(23*i1)-cos(i0**0.3)+tanh(23)\"","\"4 *-2\"","\"fast_gaus(i0*i1)\"","\"2*3/1-2\"","\"3x5xI[4]xI[3]\"","\"[1,0, 5,3, 4]:(tanh(i0xi1))\"","\"[0,2, 1,3, -1](sig(I0))\"","\"I[0]<-I[1]\"","\"quadratic(I[0]) <- (I[1] <- I[2])\"","\"((tanh(i0)\"","'($$(gaus(i0*()'","\"rrlu(i0)\"","\"th(i0)*gaaus(i0+I1)\"","\"dimtrim(I[0])\"","\"add(I[0], 3, 3/I[1])\"","\"multiply(1, 4, -2, I[1])\"","\"divide(I[0], 3*I[1], I[3]-6)\"","\"i0@i1\""],"expected":["\"fast_tanh(I[0] * I[1])\"","\"((Ig[0] * Ig[j]) x Ig[1])\"","\"sumJs(I[j])\"","\"sumJs(1.0 * (4.0 - (2.0 / I[j])))\"","\"quad(softplus(I[j]))\"","\"softplus((((I[3] ** (3.0 / I[1])) / sumJs(I[j] ** 2.0)) - 23.0) + (I[0] / I[1]))\"","\"(1.0 + 3.0 + (5.0 - 23.0) + (I[0] * (45.0 / (345.0 - (651.0 ** I[3]) - 6.0))))\"","\"((sin(23.0 * I[1]) - cos(I[0] ** 0.3)) + tanh(23.0))\"","\"(4.0 * -2.0)\"","\"fast_gaus(I[0] * I[1])\"","\"((2.0 * (3.0 / 1.0)) - 2.0)\"","\"(((3.0 x 5.0) x I[4]) x I[3])\"","\"([1,0,5,3,4]:(tanh(I[0] x I[1])))\"","\"([0,2,1,3,-1]:(sig(I[0])))\"","\"(I[0] <- I[1])\"","\"(quad(I[0]) <- (I[1] <- I[2]))\"","\"tanh(I[0])\"","\"gaus(I[0] * 0.0)\"","\"relu(I[0])\"","\"(tanh(I[0]) * gaus(I[0] + I[1]))\"","\"dimtrim(I[0])\"","\"(I[0] + 3.0 + (3.0 / I[1]))\"","\"(1.0 * 4.0 * -2.0 * I[1])\"","\"(I[0] / (3.0 * I[1]) / (I[3] - 6.0))\"","\"(I[0] @ I[1])\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Test parsed equations when building Function instances. 
[15]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"A Function created from a given expression will be parsed as expected.","code":["Function.of(equation).toString() == expected"]}, + + {"kind":"where","text":"The following expressions and expected exception messages are being used :","code":{"equation":["\"fast_tanh(i0*i1)\"","\"ig0*(igj)xI[g1]\"","\"sumJs(ij)\"","\"sumJs(1*(4-2/ij))\"","\"quadratic(sftpls(Ij))\"","\"softplus(I[3]**(3/i1)/sumJs(Ij**2)-23+I0/i1)\"","\"1+3+5-23+I0*45/(345-651**I3-6)\"","\"sin(23*i1)-cos(i0**0.3)+tanh(23)\"","\"4 *-2\"","\"fast_gaus(i0*i1)\"","\"2*3/1-2\"","\"3x5xI[4]xI[3]\"","\"[1,0, 5,3, 4]:(tanh(i0xi1))\"","\"[0,2, 1,3, -1](sig(I0))\"","\"I[0]<-I[1]\"","\"quadratic(I[0]) <- (I[1] <- I[2])\"","\"((tanh(i0)\"","'($$(gaus(i0*()'","\"rrlu(i0)\"","\"th(i0)*gaaus(i0+I1)\"","\"dimtrim(I[0])\"","\"add(I[0], 3, 3/I[1])\"","\"multiply(1, 4, -2, I[1])\"","\"divide(I[0], 3*I[1], I[3]-6)\"","\"i0@i1\""],"expected":["\"fast_tanh(I[0] * I[1])\"","\"((Ig[0] * Ig[j]) x Ig[1])\"","\"sumJs(I[j])\"","\"sumJs(1.0 * (4.0 - (2.0 / I[j])))\"","\"quad(softplus(I[j]))\"","\"softplus((((I[3] ** (3.0 / I[1])) / sumJs(I[j] ** 2.0)) - 23.0) + (I[0] / I[1]))\"","\"(1.0 + 3.0 + (5.0 - 23.0) + (I[0] * (45.0 / (345.0 - (651.0 ** I[3]) - 6.0))))\"","\"((sin(23.0 * I[1]) - cos(I[0] ** 0.3)) + tanh(23.0))\"","\"(4.0 * -2.0)\"","\"fast_gaus(I[0] * I[1])\"","\"((2.0 * (3.0 / 1.0)) - 2.0)\"","\"(((3.0 x 5.0) x I[4]) x I[3])\"","\"([1,0,5,3,4]:(tanh(I[0] x I[1])))\"","\"([0,2,1,3,-1]:(sig(I[0])))\"","\"(I[0] <- I[1])\"","\"(quad(I[0]) <- (I[1] <- I[2]))\"","\"tanh(I[0])\"","\"gaus(I[0] * 0.0)\"","\"relu(I[0])\"","\"(tanh(I[0]) * gaus(I[0] + I[1]))\"","\"dimtrim(I[0])\"","\"(I[0] + 3.0 + (3.0 / I[1]))\"","\"(1.0 * 4.0 * -2.0 * I[1])\"","\"(I[0] / (3.0 * I[1]) / (I[3] - 6.0))\"","\"(I[0] @ I[1])\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Test parsed equations when building Function instances. 
[16]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"A Function created from a given expression will be parsed as expected.","code":["Function.of(equation).toString() == expected"]}, + + {"kind":"where","text":"The following expressions and expected exception messages are being used :","code":{"equation":["\"fast_tanh(i0*i1)\"","\"ig0*(igj)xI[g1]\"","\"sumJs(ij)\"","\"sumJs(1*(4-2/ij))\"","\"quadratic(sftpls(Ij))\"","\"softplus(I[3]**(3/i1)/sumJs(Ij**2)-23+I0/i1)\"","\"1+3+5-23+I0*45/(345-651**I3-6)\"","\"sin(23*i1)-cos(i0**0.3)+tanh(23)\"","\"4 *-2\"","\"fast_gaus(i0*i1)\"","\"2*3/1-2\"","\"3x5xI[4]xI[3]\"","\"[1,0, 5,3, 4]:(tanh(i0xi1))\"","\"[0,2, 1,3, -1](sig(I0))\"","\"I[0]<-I[1]\"","\"quadratic(I[0]) <- (I[1] <- I[2])\"","\"((tanh(i0)\"","'($$(gaus(i0*()'","\"rrlu(i0)\"","\"th(i0)*gaaus(i0+I1)\"","\"dimtrim(I[0])\"","\"add(I[0], 3, 3/I[1])\"","\"multiply(1, 4, -2, I[1])\"","\"divide(I[0], 3*I[1], I[3]-6)\"","\"i0@i1\""],"expected":["\"fast_tanh(I[0] * I[1])\"","\"((Ig[0] * Ig[j]) x Ig[1])\"","\"sumJs(I[j])\"","\"sumJs(1.0 * (4.0 - (2.0 / I[j])))\"","\"quad(softplus(I[j]))\"","\"softplus((((I[3] ** (3.0 / I[1])) / sumJs(I[j] ** 2.0)) - 23.0) + (I[0] / I[1]))\"","\"(1.0 + 3.0 + (5.0 - 23.0) + (I[0] * (45.0 / (345.0 - (651.0 ** I[3]) - 6.0))))\"","\"((sin(23.0 * I[1]) - cos(I[0] ** 0.3)) + tanh(23.0))\"","\"(4.0 * -2.0)\"","\"fast_gaus(I[0] * I[1])\"","\"((2.0 * (3.0 / 1.0)) - 2.0)\"","\"(((3.0 x 5.0) x I[4]) x I[3])\"","\"([1,0,5,3,4]:(tanh(I[0] x I[1])))\"","\"([0,2,1,3,-1]:(sig(I[0])))\"","\"(I[0] <- I[1])\"","\"(quad(I[0]) <- (I[1] <- I[2]))\"","\"tanh(I[0])\"","\"gaus(I[0] * 0.0)\"","\"relu(I[0])\"","\"(tanh(I[0]) * gaus(I[0] + I[1]))\"","\"dimtrim(I[0])\"","\"(I[0] + 3.0 + (3.0 / I[1]))\"","\"(1.0 * 4.0 * -2.0 * I[1])\"","\"(I[0] / (3.0 * I[1]) / (I[3] - 6.0))\"","\"(I[0] @ I[1])\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Test parsed equations when building Function instances. 
[17]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"A Function created from a given expression will be parsed as expected.","code":["Function.of(equation).toString() == expected"]}, + + {"kind":"where","text":"The following expressions and expected exception messages are being used :","code":{"equation":["\"fast_tanh(i0*i1)\"","\"ig0*(igj)xI[g1]\"","\"sumJs(ij)\"","\"sumJs(1*(4-2/ij))\"","\"quadratic(sftpls(Ij))\"","\"softplus(I[3]**(3/i1)/sumJs(Ij**2)-23+I0/i1)\"","\"1+3+5-23+I0*45/(345-651**I3-6)\"","\"sin(23*i1)-cos(i0**0.3)+tanh(23)\"","\"4 *-2\"","\"fast_gaus(i0*i1)\"","\"2*3/1-2\"","\"3x5xI[4]xI[3]\"","\"[1,0, 5,3, 4]:(tanh(i0xi1))\"","\"[0,2, 1,3, -1](sig(I0))\"","\"I[0]<-I[1]\"","\"quadratic(I[0]) <- (I[1] <- I[2])\"","\"((tanh(i0)\"","'($$(gaus(i0*()'","\"rrlu(i0)\"","\"th(i0)*gaaus(i0+I1)\"","\"dimtrim(I[0])\"","\"add(I[0], 3, 3/I[1])\"","\"multiply(1, 4, -2, I[1])\"","\"divide(I[0], 3*I[1], I[3]-6)\"","\"i0@i1\""],"expected":["\"fast_tanh(I[0] * I[1])\"","\"((Ig[0] * Ig[j]) x Ig[1])\"","\"sumJs(I[j])\"","\"sumJs(1.0 * (4.0 - (2.0 / I[j])))\"","\"quad(softplus(I[j]))\"","\"softplus((((I[3] ** (3.0 / I[1])) / sumJs(I[j] ** 2.0)) - 23.0) + (I[0] / I[1]))\"","\"(1.0 + 3.0 + (5.0 - 23.0) + (I[0] * (45.0 / (345.0 - (651.0 ** I[3]) - 6.0))))\"","\"((sin(23.0 * I[1]) - cos(I[0] ** 0.3)) + tanh(23.0))\"","\"(4.0 * -2.0)\"","\"fast_gaus(I[0] * I[1])\"","\"((2.0 * (3.0 / 1.0)) - 2.0)\"","\"(((3.0 x 5.0) x I[4]) x I[3])\"","\"([1,0,5,3,4]:(tanh(I[0] x I[1])))\"","\"([0,2,1,3,-1]:(sig(I[0])))\"","\"(I[0] <- I[1])\"","\"(quad(I[0]) <- (I[1] <- I[2]))\"","\"tanh(I[0])\"","\"gaus(I[0] * 0.0)\"","\"relu(I[0])\"","\"(tanh(I[0]) * gaus(I[0] + I[1]))\"","\"dimtrim(I[0])\"","\"(I[0] + 3.0 + (3.0 / I[1]))\"","\"(1.0 * 4.0 * -2.0 * I[1])\"","\"(I[0] / (3.0 * I[1]) / (I[3] - 6.0))\"","\"(I[0] @ I[1])\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Test parsed equations when building Function instances. 
[18]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"A Function created from a given expression will be parsed as expected.","code":["Function.of(equation).toString() == expected"]}, + + {"kind":"where","text":"The following expressions and expected exception messages are being used :","code":{"equation":["\"fast_tanh(i0*i1)\"","\"ig0*(igj)xI[g1]\"","\"sumJs(ij)\"","\"sumJs(1*(4-2/ij))\"","\"quadratic(sftpls(Ij))\"","\"softplus(I[3]**(3/i1)/sumJs(Ij**2)-23+I0/i1)\"","\"1+3+5-23+I0*45/(345-651**I3-6)\"","\"sin(23*i1)-cos(i0**0.3)+tanh(23)\"","\"4 *-2\"","\"fast_gaus(i0*i1)\"","\"2*3/1-2\"","\"3x5xI[4]xI[3]\"","\"[1,0, 5,3, 4]:(tanh(i0xi1))\"","\"[0,2, 1,3, -1](sig(I0))\"","\"I[0]<-I[1]\"","\"quadratic(I[0]) <- (I[1] <- I[2])\"","\"((tanh(i0)\"","'($$(gaus(i0*()'","\"rrlu(i0)\"","\"th(i0)*gaaus(i0+I1)\"","\"dimtrim(I[0])\"","\"add(I[0], 3, 3/I[1])\"","\"multiply(1, 4, -2, I[1])\"","\"divide(I[0], 3*I[1], I[3]-6)\"","\"i0@i1\""],"expected":["\"fast_tanh(I[0] * I[1])\"","\"((Ig[0] * Ig[j]) x Ig[1])\"","\"sumJs(I[j])\"","\"sumJs(1.0 * (4.0 - (2.0 / I[j])))\"","\"quad(softplus(I[j]))\"","\"softplus((((I[3] ** (3.0 / I[1])) / sumJs(I[j] ** 2.0)) - 23.0) + (I[0] / I[1]))\"","\"(1.0 + 3.0 + (5.0 - 23.0) + (I[0] * (45.0 / (345.0 - (651.0 ** I[3]) - 6.0))))\"","\"((sin(23.0 * I[1]) - cos(I[0] ** 0.3)) + tanh(23.0))\"","\"(4.0 * -2.0)\"","\"fast_gaus(I[0] * I[1])\"","\"((2.0 * (3.0 / 1.0)) - 2.0)\"","\"(((3.0 x 5.0) x I[4]) x I[3])\"","\"([1,0,5,3,4]:(tanh(I[0] x I[1])))\"","\"([0,2,1,3,-1]:(sig(I[0])))\"","\"(I[0] <- I[1])\"","\"(quad(I[0]) <- (I[1] <- I[2]))\"","\"tanh(I[0])\"","\"gaus(I[0] * 0.0)\"","\"relu(I[0])\"","\"(tanh(I[0]) * gaus(I[0] + I[1]))\"","\"dimtrim(I[0])\"","\"(I[0] + 3.0 + (3.0 / I[1]))\"","\"(1.0 * 4.0 * -2.0 * I[1])\"","\"(I[0] / (3.0 * I[1]) / (I[3] - 6.0))\"","\"(I[0] @ I[1])\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Test parsed equations when building Function instances. 
[19]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"A Function created from a given expression will be parsed as expected.","code":["Function.of(equation).toString() == expected"]}, + + {"kind":"where","text":"The following expressions and expected exception messages are being used :","code":{"equation":["\"fast_tanh(i0*i1)\"","\"ig0*(igj)xI[g1]\"","\"sumJs(ij)\"","\"sumJs(1*(4-2/ij))\"","\"quadratic(sftpls(Ij))\"","\"softplus(I[3]**(3/i1)/sumJs(Ij**2)-23+I0/i1)\"","\"1+3+5-23+I0*45/(345-651**I3-6)\"","\"sin(23*i1)-cos(i0**0.3)+tanh(23)\"","\"4 *-2\"","\"fast_gaus(i0*i1)\"","\"2*3/1-2\"","\"3x5xI[4]xI[3]\"","\"[1,0, 5,3, 4]:(tanh(i0xi1))\"","\"[0,2, 1,3, -1](sig(I0))\"","\"I[0]<-I[1]\"","\"quadratic(I[0]) <- (I[1] <- I[2])\"","\"((tanh(i0)\"","'($$(gaus(i0*()'","\"rrlu(i0)\"","\"th(i0)*gaaus(i0+I1)\"","\"dimtrim(I[0])\"","\"add(I[0], 3, 3/I[1])\"","\"multiply(1, 4, -2, I[1])\"","\"divide(I[0], 3*I[1], I[3]-6)\"","\"i0@i1\""],"expected":["\"fast_tanh(I[0] * I[1])\"","\"((Ig[0] * Ig[j]) x Ig[1])\"","\"sumJs(I[j])\"","\"sumJs(1.0 * (4.0 - (2.0 / I[j])))\"","\"quad(softplus(I[j]))\"","\"softplus((((I[3] ** (3.0 / I[1])) / sumJs(I[j] ** 2.0)) - 23.0) + (I[0] / I[1]))\"","\"(1.0 + 3.0 + (5.0 - 23.0) + (I[0] * (45.0 / (345.0 - (651.0 ** I[3]) - 6.0))))\"","\"((sin(23.0 * I[1]) - cos(I[0] ** 0.3)) + tanh(23.0))\"","\"(4.0 * -2.0)\"","\"fast_gaus(I[0] * I[1])\"","\"((2.0 * (3.0 / 1.0)) - 2.0)\"","\"(((3.0 x 5.0) x I[4]) x I[3])\"","\"([1,0,5,3,4]:(tanh(I[0] x I[1])))\"","\"([0,2,1,3,-1]:(sig(I[0])))\"","\"(I[0] <- I[1])\"","\"(quad(I[0]) <- (I[1] <- I[2]))\"","\"tanh(I[0])\"","\"gaus(I[0] * 0.0)\"","\"relu(I[0])\"","\"(tanh(I[0]) * gaus(I[0] + I[1]))\"","\"dimtrim(I[0])\"","\"(I[0] + 3.0 + (3.0 / I[1]))\"","\"(1.0 * 4.0 * -2.0 * I[1])\"","\"(I[0] / (3.0 * I[1]) / (I[3] - 6.0))\"","\"(I[0] @ I[1])\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Test parsed equations when building Function instances. 
[20]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"A Function created from a given expression will be parsed as expected.","code":["Function.of(equation).toString() == expected"]}, + + {"kind":"where","text":"The following expressions and expected exception messages are being used :","code":{"equation":["\"fast_tanh(i0*i1)\"","\"ig0*(igj)xI[g1]\"","\"sumJs(ij)\"","\"sumJs(1*(4-2/ij))\"","\"quadratic(sftpls(Ij))\"","\"softplus(I[3]**(3/i1)/sumJs(Ij**2)-23+I0/i1)\"","\"1+3+5-23+I0*45/(345-651**I3-6)\"","\"sin(23*i1)-cos(i0**0.3)+tanh(23)\"","\"4 *-2\"","\"fast_gaus(i0*i1)\"","\"2*3/1-2\"","\"3x5xI[4]xI[3]\"","\"[1,0, 5,3, 4]:(tanh(i0xi1))\"","\"[0,2, 1,3, -1](sig(I0))\"","\"I[0]<-I[1]\"","\"quadratic(I[0]) <- (I[1] <- I[2])\"","\"((tanh(i0)\"","'($$(gaus(i0*()'","\"rrlu(i0)\"","\"th(i0)*gaaus(i0+I1)\"","\"dimtrim(I[0])\"","\"add(I[0], 3, 3/I[1])\"","\"multiply(1, 4, -2, I[1])\"","\"divide(I[0], 3*I[1], I[3]-6)\"","\"i0@i1\""],"expected":["\"fast_tanh(I[0] * I[1])\"","\"((Ig[0] * Ig[j]) x Ig[1])\"","\"sumJs(I[j])\"","\"sumJs(1.0 * (4.0 - (2.0 / I[j])))\"","\"quad(softplus(I[j]))\"","\"softplus((((I[3] ** (3.0 / I[1])) / sumJs(I[j] ** 2.0)) - 23.0) + (I[0] / I[1]))\"","\"(1.0 + 3.0 + (5.0 - 23.0) + (I[0] * (45.0 / (345.0 - (651.0 ** I[3]) - 6.0))))\"","\"((sin(23.0 * I[1]) - cos(I[0] ** 0.3)) + tanh(23.0))\"","\"(4.0 * -2.0)\"","\"fast_gaus(I[0] * I[1])\"","\"((2.0 * (3.0 / 1.0)) - 2.0)\"","\"(((3.0 x 5.0) x I[4]) x I[3])\"","\"([1,0,5,3,4]:(tanh(I[0] x I[1])))\"","\"([0,2,1,3,-1]:(sig(I[0])))\"","\"(I[0] <- I[1])\"","\"(quad(I[0]) <- (I[1] <- I[2]))\"","\"tanh(I[0])\"","\"gaus(I[0] * 0.0)\"","\"relu(I[0])\"","\"(tanh(I[0]) * gaus(I[0] + I[1]))\"","\"dimtrim(I[0])\"","\"(I[0] + 3.0 + (3.0 / I[1]))\"","\"(1.0 * 4.0 * -2.0 * I[1])\"","\"(I[0] / (3.0 * I[1]) / (I[3] - 6.0))\"","\"(I[0] @ I[1])\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Test parsed equations when building Function instances. 
[21]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"A Function created from a given expression will be parsed as expected.","code":["Function.of(equation).toString() == expected"]}, + + {"kind":"where","text":"The following expressions and expected exception messages are being used :","code":{"equation":["\"fast_tanh(i0*i1)\"","\"ig0*(igj)xI[g1]\"","\"sumJs(ij)\"","\"sumJs(1*(4-2/ij))\"","\"quadratic(sftpls(Ij))\"","\"softplus(I[3]**(3/i1)/sumJs(Ij**2)-23+I0/i1)\"","\"1+3+5-23+I0*45/(345-651**I3-6)\"","\"sin(23*i1)-cos(i0**0.3)+tanh(23)\"","\"4 *-2\"","\"fast_gaus(i0*i1)\"","\"2*3/1-2\"","\"3x5xI[4]xI[3]\"","\"[1,0, 5,3, 4]:(tanh(i0xi1))\"","\"[0,2, 1,3, -1](sig(I0))\"","\"I[0]<-I[1]\"","\"quadratic(I[0]) <- (I[1] <- I[2])\"","\"((tanh(i0)\"","'($$(gaus(i0*()'","\"rrlu(i0)\"","\"th(i0)*gaaus(i0+I1)\"","\"dimtrim(I[0])\"","\"add(I[0], 3, 3/I[1])\"","\"multiply(1, 4, -2, I[1])\"","\"divide(I[0], 3*I[1], I[3]-6)\"","\"i0@i1\""],"expected":["\"fast_tanh(I[0] * I[1])\"","\"((Ig[0] * Ig[j]) x Ig[1])\"","\"sumJs(I[j])\"","\"sumJs(1.0 * (4.0 - (2.0 / I[j])))\"","\"quad(softplus(I[j]))\"","\"softplus((((I[3] ** (3.0 / I[1])) / sumJs(I[j] ** 2.0)) - 23.0) + (I[0] / I[1]))\"","\"(1.0 + 3.0 + (5.0 - 23.0) + (I[0] * (45.0 / (345.0 - (651.0 ** I[3]) - 6.0))))\"","\"((sin(23.0 * I[1]) - cos(I[0] ** 0.3)) + tanh(23.0))\"","\"(4.0 * -2.0)\"","\"fast_gaus(I[0] * I[1])\"","\"((2.0 * (3.0 / 1.0)) - 2.0)\"","\"(((3.0 x 5.0) x I[4]) x I[3])\"","\"([1,0,5,3,4]:(tanh(I[0] x I[1])))\"","\"([0,2,1,3,-1]:(sig(I[0])))\"","\"(I[0] <- I[1])\"","\"(quad(I[0]) <- (I[1] <- I[2]))\"","\"tanh(I[0])\"","\"gaus(I[0] * 0.0)\"","\"relu(I[0])\"","\"(tanh(I[0]) * gaus(I[0] + I[1]))\"","\"dimtrim(I[0])\"","\"(I[0] + 3.0 + (3.0 / I[1]))\"","\"(1.0 * 4.0 * -2.0 * I[1])\"","\"(I[0] / (3.0 * I[1]) / (I[3] - 6.0))\"","\"(I[0] @ I[1])\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Test parsed equations when building Function instances. 
[22]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"A Function created from a given expression will be parsed as expected.","code":["Function.of(equation).toString() == expected"]}, + + {"kind":"where","text":"The following expressions and expected exception messages are being used :","code":{"equation":["\"fast_tanh(i0*i1)\"","\"ig0*(igj)xI[g1]\"","\"sumJs(ij)\"","\"sumJs(1*(4-2/ij))\"","\"quadratic(sftpls(Ij))\"","\"softplus(I[3]**(3/i1)/sumJs(Ij**2)-23+I0/i1)\"","\"1+3+5-23+I0*45/(345-651**I3-6)\"","\"sin(23*i1)-cos(i0**0.3)+tanh(23)\"","\"4 *-2\"","\"fast_gaus(i0*i1)\"","\"2*3/1-2\"","\"3x5xI[4]xI[3]\"","\"[1,0, 5,3, 4]:(tanh(i0xi1))\"","\"[0,2, 1,3, -1](sig(I0))\"","\"I[0]<-I[1]\"","\"quadratic(I[0]) <- (I[1] <- I[2])\"","\"((tanh(i0)\"","'($$(gaus(i0*()'","\"rrlu(i0)\"","\"th(i0)*gaaus(i0+I1)\"","\"dimtrim(I[0])\"","\"add(I[0], 3, 3/I[1])\"","\"multiply(1, 4, -2, I[1])\"","\"divide(I[0], 3*I[1], I[3]-6)\"","\"i0@i1\""],"expected":["\"fast_tanh(I[0] * I[1])\"","\"((Ig[0] * Ig[j]) x Ig[1])\"","\"sumJs(I[j])\"","\"sumJs(1.0 * (4.0 - (2.0 / I[j])))\"","\"quad(softplus(I[j]))\"","\"softplus((((I[3] ** (3.0 / I[1])) / sumJs(I[j] ** 2.0)) - 23.0) + (I[0] / I[1]))\"","\"(1.0 + 3.0 + (5.0 - 23.0) + (I[0] * (45.0 / (345.0 - (651.0 ** I[3]) - 6.0))))\"","\"((sin(23.0 * I[1]) - cos(I[0] ** 0.3)) + tanh(23.0))\"","\"(4.0 * -2.0)\"","\"fast_gaus(I[0] * I[1])\"","\"((2.0 * (3.0 / 1.0)) - 2.0)\"","\"(((3.0 x 5.0) x I[4]) x I[3])\"","\"([1,0,5,3,4]:(tanh(I[0] x I[1])))\"","\"([0,2,1,3,-1]:(sig(I[0])))\"","\"(I[0] <- I[1])\"","\"(quad(I[0]) <- (I[1] <- I[2]))\"","\"tanh(I[0])\"","\"gaus(I[0] * 0.0)\"","\"relu(I[0])\"","\"(tanh(I[0]) * gaus(I[0] + I[1]))\"","\"dimtrim(I[0])\"","\"(I[0] + 3.0 + (3.0 / I[1]))\"","\"(1.0 * 4.0 * -2.0 * I[1])\"","\"(I[0] / (3.0 * I[1]) / (I[3] - 6.0))\"","\"(I[0] @ I[1])\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Test parsed equations when building Function instances. 
[23]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"A Function created from a given expression will be parsed as expected.","code":["Function.of(equation).toString() == expected"]}, + + {"kind":"where","text":"The following expressions and expected exception messages are being used :","code":{"equation":["\"fast_tanh(i0*i1)\"","\"ig0*(igj)xI[g1]\"","\"sumJs(ij)\"","\"sumJs(1*(4-2/ij))\"","\"quadratic(sftpls(Ij))\"","\"softplus(I[3]**(3/i1)/sumJs(Ij**2)-23+I0/i1)\"","\"1+3+5-23+I0*45/(345-651**I3-6)\"","\"sin(23*i1)-cos(i0**0.3)+tanh(23)\"","\"4 *-2\"","\"fast_gaus(i0*i1)\"","\"2*3/1-2\"","\"3x5xI[4]xI[3]\"","\"[1,0, 5,3, 4]:(tanh(i0xi1))\"","\"[0,2, 1,3, -1](sig(I0))\"","\"I[0]<-I[1]\"","\"quadratic(I[0]) <- (I[1] <- I[2])\"","\"((tanh(i0)\"","'($$(gaus(i0*()'","\"rrlu(i0)\"","\"th(i0)*gaaus(i0+I1)\"","\"dimtrim(I[0])\"","\"add(I[0], 3, 3/I[1])\"","\"multiply(1, 4, -2, I[1])\"","\"divide(I[0], 3*I[1], I[3]-6)\"","\"i0@i1\""],"expected":["\"fast_tanh(I[0] * I[1])\"","\"((Ig[0] * Ig[j]) x Ig[1])\"","\"sumJs(I[j])\"","\"sumJs(1.0 * (4.0 - (2.0 / I[j])))\"","\"quad(softplus(I[j]))\"","\"softplus((((I[3] ** (3.0 / I[1])) / sumJs(I[j] ** 2.0)) - 23.0) + (I[0] / I[1]))\"","\"(1.0 + 3.0 + (5.0 - 23.0) + (I[0] * (45.0 / (345.0 - (651.0 ** I[3]) - 6.0))))\"","\"((sin(23.0 * I[1]) - cos(I[0] ** 0.3)) + tanh(23.0))\"","\"(4.0 * -2.0)\"","\"fast_gaus(I[0] * I[1])\"","\"((2.0 * (3.0 / 1.0)) - 2.0)\"","\"(((3.0 x 5.0) x I[4]) x I[3])\"","\"([1,0,5,3,4]:(tanh(I[0] x I[1])))\"","\"([0,2,1,3,-1]:(sig(I[0])))\"","\"(I[0] <- I[1])\"","\"(quad(I[0]) <- (I[1] <- I[2]))\"","\"tanh(I[0])\"","\"gaus(I[0] * 0.0)\"","\"relu(I[0])\"","\"(tanh(I[0]) * gaus(I[0] + I[1]))\"","\"dimtrim(I[0])\"","\"(I[0] + 3.0 + (3.0 / I[1]))\"","\"(1.0 * 4.0 * -2.0 * I[1])\"","\"(I[0] / (3.0 * I[1]) / (I[3] - 6.0))\"","\"(I[0] @ I[1])\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Test parsed equations when building Function instances. 
[24]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"A Function created from a given expression will be parsed as expected.","code":["Function.of(equation).toString() == expected"]}, + + {"kind":"where","text":"The following expressions and expected exception messages are being used :","code":{"equation":["\"fast_tanh(i0*i1)\"","\"ig0*(igj)xI[g1]\"","\"sumJs(ij)\"","\"sumJs(1*(4-2/ij))\"","\"quadratic(sftpls(Ij))\"","\"softplus(I[3]**(3/i1)/sumJs(Ij**2)-23+I0/i1)\"","\"1+3+5-23+I0*45/(345-651**I3-6)\"","\"sin(23*i1)-cos(i0**0.3)+tanh(23)\"","\"4 *-2\"","\"fast_gaus(i0*i1)\"","\"2*3/1-2\"","\"3x5xI[4]xI[3]\"","\"[1,0, 5,3, 4]:(tanh(i0xi1))\"","\"[0,2, 1,3, -1](sig(I0))\"","\"I[0]<-I[1]\"","\"quadratic(I[0]) <- (I[1] <- I[2])\"","\"((tanh(i0)\"","'($$(gaus(i0*()'","\"rrlu(i0)\"","\"th(i0)*gaaus(i0+I1)\"","\"dimtrim(I[0])\"","\"add(I[0], 3, 3/I[1])\"","\"multiply(1, 4, -2, I[1])\"","\"divide(I[0], 3*I[1], I[3]-6)\"","\"i0@i1\""],"expected":["\"fast_tanh(I[0] * I[1])\"","\"((Ig[0] * Ig[j]) x Ig[1])\"","\"sumJs(I[j])\"","\"sumJs(1.0 * (4.0 - (2.0 / I[j])))\"","\"quad(softplus(I[j]))\"","\"softplus((((I[3] ** (3.0 / I[1])) / sumJs(I[j] ** 2.0)) - 23.0) + (I[0] / I[1]))\"","\"(1.0 + 3.0 + (5.0 - 23.0) + (I[0] * (45.0 / (345.0 - (651.0 ** I[3]) - 6.0))))\"","\"((sin(23.0 * I[1]) - cos(I[0] ** 0.3)) + tanh(23.0))\"","\"(4.0 * -2.0)\"","\"fast_gaus(I[0] * I[1])\"","\"((2.0 * (3.0 / 1.0)) - 2.0)\"","\"(((3.0 x 5.0) x I[4]) x I[3])\"","\"([1,0,5,3,4]:(tanh(I[0] x I[1])))\"","\"([0,2,1,3,-1]:(sig(I[0])))\"","\"(I[0] <- I[1])\"","\"(quad(I[0]) <- (I[1] <- I[2]))\"","\"tanh(I[0])\"","\"gaus(I[0] * 0.0)\"","\"relu(I[0])\"","\"(tanh(I[0]) * gaus(I[0] + I[1]))\"","\"dimtrim(I[0])\"","\"(I[0] + 3.0 + (3.0 / I[1]))\"","\"(1.0 * 4.0 * -2.0 * I[1])\"","\"(I[0] / (3.0 * I[1]) / (I[3] - 6.0))\"","\"(I[0] @ I[1])\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Parsed equations throw expected error messages. [0]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"when","text":"We try to instantiate a Function by passing an expression String...","code":["Function.of(equation)"]}, + + {"kind":"then","text":"An exception is being thrown that contains the expected message!","code":["def error = thrown(IllegalArgumentException)","assert error.message==expected"]}, + + {"kind":"where","text":"The following expressions and expected exception messages are being used :","code":{"equation":["\"softplus(I[0],I[1],I[2])\"","\"sig(I[0],I[1],I[2])\"","\"sumjs(I[0],I[1],I[2])\"","\"prodjs(I[0],I[1],I[2])\""],"expected":["\"The function/operation 'softplus' expects 1 parameters, however 3 where given!\"","\"The function/operation 'sig' expects 1 parameters, however 3 where given!\"","\"The function/operation 'sumJs' expects 1 parameters, however 3 where given!\\nNote: This function is an 'indexer'. Therefore it expects to sum variable 'I[j]' inputs, where 'j' is the index of an iteration.\"","\"The function/operation 'prodJs' expects 1 parameters, however 3 where given!\\nNote: This function is an 'indexer'. Therefore it expects to sum variable 'I[j]' inputs, where 'j' is the index of an iteration.\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Parsed equations throw expected error messages. 
[1]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"when","text":"We try to instantiate a Function by passing an expression String...","code":["Function.of(equation)"]}, + + {"kind":"then","text":"An exception is being thrown that contains the expected message!","code":["def error = thrown(IllegalArgumentException)","assert error.message==expected"]}, + + {"kind":"where","text":"The following expressions and expected exception messages are being used :","code":{"equation":["\"softplus(I[0],I[1],I[2])\"","\"sig(I[0],I[1],I[2])\"","\"sumjs(I[0],I[1],I[2])\"","\"prodjs(I[0],I[1],I[2])\""],"expected":["\"The function/operation 'softplus' expects 1 parameters, however 3 where given!\"","\"The function/operation 'sig' expects 1 parameters, however 3 where given!\"","\"The function/operation 'sumJs' expects 1 parameters, however 3 where given!\\nNote: This function is an 'indexer'. Therefore it expects to sum variable 'I[j]' inputs, where 'j' is the index of an iteration.\"","\"The function/operation 'prodJs' expects 1 parameters, however 3 where given!\\nNote: This function is an 'indexer'. Therefore it expects to sum variable 'I[j]' inputs, where 'j' is the index of an iteration.\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Parsed equations throw expected error messages. [2]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"when","text":"We try to instantiate a Function by passing an expression String...","code":["Function.of(equation)"]}, + + {"kind":"then","text":"An exception is being thrown that contains the expected message!","code":["def error = thrown(IllegalArgumentException)","assert error.message==expected"]}, + + {"kind":"where","text":"The following expressions and expected exception messages are being used :","code":{"equation":["\"softplus(I[0],I[1],I[2])\"","\"sig(I[0],I[1],I[2])\"","\"sumjs(I[0],I[1],I[2])\"","\"prodjs(I[0],I[1],I[2])\""],"expected":["\"The function/operation 'softplus' expects 1 parameters, however 3 where given!\"","\"The function/operation 'sig' expects 1 parameters, however 3 where given!\"","\"The function/operation 'sumJs' expects 1 parameters, however 3 where given!\\nNote: This function is an 'indexer'. Therefore it expects to sum variable 'I[j]' inputs, where 'j' is the index of an iteration.\"","\"The function/operation 'prodJs' expects 1 parameters, however 3 where given!\\nNote: This function is an 'indexer'. Therefore it expects to sum variable 'I[j]' inputs, where 'j' is the index of an iteration.\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Parsed equations throw expected error messages. [3]", "result":"PASS", "duration":"0", "iterations":{ @@ -46,7 +457,82 @@ }, { - "id":"Functions can derive themselves according to the provided index of the input which ought to be derived.", + "id":"Functions can derive themselves according to the provided index of the input which ought to be derived. 
[0]", + "result":"PASS", + "duration":"0.006 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"A Function created from a given expression will produce the expected derivative String.","code":["Function.of(equation).getDerivative( index ).toString() == expected"]}, + + {"kind":"where","text":"The following expressions and derivation indices are being used :","code":{"equation":["\"1 - I[0] * 3\"","\"i0 / 6\"","\"ln( 4 * i0 )\"","\"4**I[0]\"","\"i0 ** 3\"","\"(I[0] * I[1] * I[0]) + 3\"","\"3 ** (i0 / 2)\"","\"(2 * I[0]) / (1 - I[0] * 3)\""],"index":["0","0","0","0","0","0","0","0"],"expected":["\"-3.0\"","\"(1.0 / 6.0)\"","\"(4.0 / (4.0 * I[0]))\"","\"(ln(4.0) * (4.0 ** I[0]))\"","\"(3.0 * (I[0] ** (3.0 - 1.0)))\"","\"((I[1] * I[0]) + (I[0] * I[1]))\"","\"((1.0 / 2.0) * (ln(3.0) * (3.0 ** (I[0] / 2.0))))\"","\"((2.0 / (1.0 - (I[0] * 3.0))) - (((2.0 * I[0]) * -3.0) / ((1.0 - (I[0] * 3.0)) ** 2.0)))\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Functions can derive themselves according to the provided index of the input which ought to be derived. [1]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"A Function created from a given expression will produce the expected derivative String.","code":["Function.of(equation).getDerivative( index ).toString() == expected"]}, + + {"kind":"where","text":"The following expressions and derivation indices are being used :","code":{"equation":["\"1 - I[0] * 3\"","\"i0 / 6\"","\"ln( 4 * i0 )\"","\"4**I[0]\"","\"i0 ** 3\"","\"(I[0] * I[1] * I[0]) + 3\"","\"3 ** (i0 / 2)\"","\"(2 * I[0]) / (1 - I[0] * 3)\""],"index":["0","0","0","0","0","0","0","0"],"expected":["\"-3.0\"","\"(1.0 / 6.0)\"","\"(4.0 / (4.0 * I[0]))\"","\"(ln(4.0) * (4.0 ** I[0]))\"","\"(3.0 * (I[0] ** (3.0 - 1.0)))\"","\"((I[1] * I[0]) + (I[0] * I[1]))\"","\"((1.0 / 2.0) * (ln(3.0) * (3.0 ** (I[0] / 2.0))))\"","\"((2.0 / (1.0 - (I[0] * 3.0))) - (((2.0 * I[0]) * -3.0) / ((1.0 - (I[0] * 3.0)) ** 2.0)))\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Functions can derive themselves according to the provided index of the input which ought to be derived. [2]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"A Function created from a given expression will produce the expected derivative String.","code":["Function.of(equation).getDerivative( index ).toString() == expected"]}, + + {"kind":"where","text":"The following expressions and derivation indices are being used :","code":{"equation":["\"1 - I[0] * 3\"","\"i0 / 6\"","\"ln( 4 * i0 )\"","\"4**I[0]\"","\"i0 ** 3\"","\"(I[0] * I[1] * I[0]) + 3\"","\"3 ** (i0 / 2)\"","\"(2 * I[0]) / (1 - I[0] * 3)\""],"index":["0","0","0","0","0","0","0","0"],"expected":["\"-3.0\"","\"(1.0 / 6.0)\"","\"(4.0 / (4.0 * I[0]))\"","\"(ln(4.0) * (4.0 ** I[0]))\"","\"(3.0 * (I[0] ** (3.0 - 1.0)))\"","\"((I[1] * I[0]) + (I[0] * I[1]))\"","\"((1.0 / 2.0) * (ln(3.0) * (3.0 ** (I[0] / 2.0))))\"","\"((2.0 / (1.0 - (I[0] * 3.0))) - (((2.0 * I[0]) * -3.0) / ((1.0 - (I[0] * 3.0)) ** 2.0)))\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Functions can derive themselves according to the provided index of the input which ought to be derived. 
[3]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"A Function created from a given expression will produce the expected derivative String.","code":["Function.of(equation).getDerivative( index ).toString() == expected"]}, + + {"kind":"where","text":"The following expressions and derivation indices are being used :","code":{"equation":["\"1 - I[0] * 3\"","\"i0 / 6\"","\"ln( 4 * i0 )\"","\"4**I[0]\"","\"i0 ** 3\"","\"(I[0] * I[1] * I[0]) + 3\"","\"3 ** (i0 / 2)\"","\"(2 * I[0]) / (1 - I[0] * 3)\""],"index":["0","0","0","0","0","0","0","0"],"expected":["\"-3.0\"","\"(1.0 / 6.0)\"","\"(4.0 / (4.0 * I[0]))\"","\"(ln(4.0) * (4.0 ** I[0]))\"","\"(3.0 * (I[0] ** (3.0 - 1.0)))\"","\"((I[1] * I[0]) + (I[0] * I[1]))\"","\"((1.0 / 2.0) * (ln(3.0) * (3.0 ** (I[0] / 2.0))))\"","\"((2.0 / (1.0 - (I[0] * 3.0))) - (((2.0 * I[0]) * -3.0) / ((1.0 - (I[0] * 3.0)) ** 2.0)))\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Functions can derive themselves according to the provided index of the input which ought to be derived. [4]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"A Function created from a given expression will produce the expected derivative String.","code":["Function.of(equation).getDerivative( index ).toString() == expected"]}, + + {"kind":"where","text":"The following expressions and derivation indices are being used :","code":{"equation":["\"1 - I[0] * 3\"","\"i0 / 6\"","\"ln( 4 * i0 )\"","\"4**I[0]\"","\"i0 ** 3\"","\"(I[0] * I[1] * I[0]) + 3\"","\"3 ** (i0 / 2)\"","\"(2 * I[0]) / (1 - I[0] * 3)\""],"index":["0","0","0","0","0","0","0","0"],"expected":["\"-3.0\"","\"(1.0 / 6.0)\"","\"(4.0 / (4.0 * I[0]))\"","\"(ln(4.0) * (4.0 ** I[0]))\"","\"(3.0 * (I[0] ** (3.0 - 1.0)))\"","\"((I[1] * I[0]) + (I[0] * I[1]))\"","\"((1.0 / 2.0) * (ln(3.0) * (3.0 ** (I[0] / 2.0))))\"","\"((2.0 / (1.0 - (I[0] * 3.0))) - (((2.0 * I[0]) * -3.0) / ((1.0 - (I[0] * 3.0)) ** 2.0)))\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Functions can derive themselves according to the provided index of the input which ought to be derived. [5]", "result":"PASS", "duration":"0.002 seconds", "iterations":{ @@ -58,6 +544,36 @@ {"kind":"where","text":"The following expressions and derivation indices are being used :","code":{"equation":["\"1 - I[0] * 3\"","\"i0 / 6\"","\"ln( 4 * i0 )\"","\"4**I[0]\"","\"i0 ** 3\"","\"(I[0] * I[1] * I[0]) + 3\"","\"3 ** (i0 / 2)\"","\"(2 * I[0]) / (1 - I[0] * 3)\""],"index":["0","0","0","0","0","0","0","0"],"expected":["\"-3.0\"","\"(1.0 / 6.0)\"","\"(4.0 / (4.0 * I[0]))\"","\"(ln(4.0) * (4.0 ** I[0]))\"","\"(3.0 * (I[0] ** (3.0 - 1.0)))\"","\"((I[1] * I[0]) + (I[0] * I[1]))\"","\"((1.0 / 2.0) * (ln(3.0) * (3.0 ** (I[0] / 2.0))))\"","\"((2.0 / (1.0 - (I[0] * 3.0))) - (((2.0 * I[0]) * -3.0) / ((1.0 - (I[0] * 3.0)) ** 2.0)))\""]}} ], "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Functions can derive themselves according to the provided index of the input which ought to be derived. 
[6]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"A Function created from a given expression will produce the expected derivative String.","code":["Function.of(equation).getDerivative( index ).toString() == expected"]}, + + {"kind":"where","text":"The following expressions and derivation indices are being used :","code":{"equation":["\"1 - I[0] * 3\"","\"i0 / 6\"","\"ln( 4 * i0 )\"","\"4**I[0]\"","\"i0 ** 3\"","\"(I[0] * I[1] * I[0]) + 3\"","\"3 ** (i0 / 2)\"","\"(2 * I[0]) / (1 - I[0] * 3)\""],"index":["0","0","0","0","0","0","0","0"],"expected":["\"-3.0\"","\"(1.0 / 6.0)\"","\"(4.0 / (4.0 * I[0]))\"","\"(ln(4.0) * (4.0 ** I[0]))\"","\"(3.0 * (I[0] ** (3.0 - 1.0)))\"","\"((I[1] * I[0]) + (I[0] * I[1]))\"","\"((1.0 / 2.0) * (ln(3.0) * (3.0 ** (I[0] / 2.0))))\"","\"((2.0 / (1.0 - (I[0] * 3.0))) - (((2.0 * I[0]) * -3.0) / ((1.0 - (I[0] * 3.0)) ** 2.0)))\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Functions can derive themselves according to the provided index of the input which ought to be derived. [7]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"A Function created from a given expression will produce the expected derivative String.","code":["Function.of(equation).getDerivative( index ).toString() == expected"]}, + + {"kind":"where","text":"The following expressions and derivation indices are being used :","code":{"equation":["\"1 - I[0] * 3\"","\"i0 / 6\"","\"ln( 4 * i0 )\"","\"4**I[0]\"","\"i0 ** 3\"","\"(I[0] * I[1] * I[0]) + 3\"","\"3 ** (i0 / 2)\"","\"(2 * I[0]) / (1 - I[0] * 3)\""],"index":["0","0","0","0","0","0","0","0"],"expected":["\"-3.0\"","\"(1.0 / 6.0)\"","\"(4.0 / (4.0 * I[0]))\"","\"(ln(4.0) * (4.0 ** I[0]))\"","\"(3.0 * (I[0] ** (3.0 - 1.0)))\"","\"((I[1] * I[0]) + (I[0] * I[1]))\"","\"((1.0 / 2.0) * (ln(3.0) * (3.0 ** (I[0] / 2.0))))\"","\"((2.0 / (1.0 - (I[0] * 3.0))) - (((2.0 * I[0]) * -3.0) / ((1.0 - (I[0] * 3.0)) ** 2.0)))\""]}} + ], + "problems":{"dataValues":[], "errors":[]} } ], diff --git a/docs/spock/reports/ut.math.Function_Scalar_Spec.json b/docs/spock/reports/ut.math.Function_Scalar_Spec.json index aea751080..8a74eda38 100644 --- a/docs/spock/reports/ut.math.Function_Scalar_Spec.json +++ b/docs/spock/reports/ut.math.Function_Scalar_Spec.json @@ -1,20 +1,20 @@ { "className":"ut.math.Function_Scalar_Spec", "title":"Functions for Scalars", - "narrative":"The Function API and it's implementations\n receive and process arrays of scalars as arguments.\n Functions don't have to be used alongside tensors / nd-arrays,\n they can also compute derivatives based on scalar values.", + "narrative":"The Function API and it's implementations \n receive and process arrays of scalars as arguments.\n Functions don't have to be used alongside tensors / nd-arrays,\n they can also compute derivatives based on scalar values.", "subjects":[], "statistics":{ - "runs":"6", + "runs":"66", "successRate":"100.0%", "failures":"0", "errors":"0", "skipped":"0", - "duration":"0.027 seconds" + "duration":"0.042 seconds" }, "headers":[],"tags":{},"see":[], "features":[ { - "id":"Function \"1/I[0]\" instance returns expected scalar results.", + "id":"Function \"1/I[0]\" instance returns expected scalar results. 
[0]", "result":"PASS", "duration":"0", "iterations":{ @@ -31,7 +31,75 @@ }, { - "id":"Function \"I[0]+1/I[0]\" instance returns expected scalar results.", + "id":"Function \"1/I[0]\" instance returns expected scalar results. [1]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a Function instance from expression \"1/I[0]\".","code":["Function f = Function.of(\"1/I[0]\", false)"]}, + + {"kind":"expect","text":"The function yields expected scalar results when called.","code":["if (index!=null) assert f.derive( inputs, index ) == expected","else assert f.call( inputs ) == expected"]}, + + {"kind":"where","text":"The following input array, target derivative index and result scalar is used :","code":{"inputs":["[2]","[2]"],"index":["0","null"],"expected":["-0.25","0.5"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Function \"I[0]+1/I[0]\" instance returns expected scalar results. [0]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a Function instance from expression \"I[0]+1/I[0]\".","code":["Function f = Function.of(\"I[0]+1/I[0]\", false)"]}, + + {"kind":"expect","text":"The function yields expected scalar results when called.","code":["if (index!=null) assert f.derive( inputs, index ) == expected","else assert f.call( inputs ) == expected"]}, + + {"kind":"where","text":"The following input array, target derivative index and result scalar is used :","code":{"inputs":["[2]","[-1]","[-3]","[0.2]"],"index":["null","0","0","0"],"expected":["2.5","0.0","0.8888888888888888","-23.999999999999996"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Function \"I[0]+1/I[0]\" instance returns expected scalar results. [1]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a Function instance from expression \"I[0]+1/I[0]\".","code":["Function f = Function.of(\"I[0]+1/I[0]\", false)"]}, + + {"kind":"expect","text":"The function yields expected scalar results when called.","code":["if (index!=null) assert f.derive( inputs, index ) == expected","else assert f.call( inputs ) == expected"]}, + + {"kind":"where","text":"The following input array, target derivative index and result scalar is used :","code":{"inputs":["[2]","[-1]","[-3]","[0.2]"],"index":["null","0","0","0"],"expected":["2.5","0.0","0.8888888888888888","-23.999999999999996"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Function \"I[0]+1/I[0]\" instance returns expected scalar results. 
[2]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a Function instance from expression \"I[0]+1/I[0]\".","code":["Function f = Function.of(\"I[0]+1/I[0]\", false)"]}, + + {"kind":"expect","text":"The function yields expected scalar results when called.","code":["if (index!=null) assert f.derive( inputs, index ) == expected","else assert f.call( inputs ) == expected"]}, + + {"kind":"where","text":"The following input array, target derivative index and result scalar is used :","code":{"inputs":["[2]","[-1]","[-3]","[0.2]"],"index":["null","0","0","0"],"expected":["2.5","0.0","0.8888888888888888","-23.999999999999996"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Function \"I[0]+1/I[0]\" instance returns expected scalar results. [3]", "result":"PASS", "duration":"0", "iterations":{ @@ -48,7 +116,24 @@ }, { - "id":"Function \"(I[0]+1/I[0])**-I[0]\" instance returns expected scalar result.", + "id":"Function \"(I[0]+1/I[0])**-I[0]\" instance returns expected scalar result. [0]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a Function instance from expression \"(I[0]+1/I[0])**-I[0]\".","code":["Function f = Function.of(\"(I[0]+1/I[0])**-I[0]\", false)"]}, + + {"kind":"expect","text":"The function yields expected scalar results when called.","code":["if (index!=null) assert f.derive( inputs, index ) == expected","else assert f.call( inputs ) == expected"]}, + + {"kind":"where","text":"The following input array, target derivative index and result scalar is used :","code":{"inputs":["[ 1 ]","[ 0.2 ]"],"index":["null","0"],"expected":["0.5","-0.5217778675999797"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Function \"(I[0]+1/I[0])**-I[0]\" instance returns expected scalar result. [1]", "result":"PASS", "duration":"0", "iterations":{ @@ -65,7 +150,109 @@ }, { - "id":"Function \"(cos(I[0]*5)/5+I[0])*(1+sin(I[0])/2)\" instance returns expected scalars.", + "id":"Function \"(cos(I[0]*5)/5+I[0])*(1+sin(I[0])/2)\" instance returns expected scalars. [0]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Function f = Function.of(\"(cos(I[0]*5)/5+I[0])*(1+sin(I[0])/2)\", false)"]}, + + {"kind":"expect","text":"","code":["if ( index != null ) assert f.derive( inputs, index )==expected","else assert f.call( inputs )==expected"]}, + + {"kind":"where","text":"The following input array, target derivative index and result scalar is used :","code":{"inputs":["[ 3 ]","[ 2.5]","[ 0 ]","[ 0 ]","[ 0.5]","[ 1.6]","[ -4 ]"],"index":["null","null","null","0","0","0","0"],"expected":["3.049021713079475","3.507365283517986","0.2","1.1","0.646867884000033","-0.00697440343353687","3.9174193383745917"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Function \"(cos(I[0]*5)/5+I[0])*(1+sin(I[0])/2)\" instance returns expected scalars. 
[1]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Function f = Function.of(\"(cos(I[0]*5)/5+I[0])*(1+sin(I[0])/2)\", false)"]}, + + {"kind":"expect","text":"","code":["if ( index != null ) assert f.derive( inputs, index )==expected","else assert f.call( inputs )==expected"]}, + + {"kind":"where","text":"The following input array, target derivative index and result scalar is used :","code":{"inputs":["[ 3 ]","[ 2.5]","[ 0 ]","[ 0 ]","[ 0.5]","[ 1.6]","[ -4 ]"],"index":["null","null","null","0","0","0","0"],"expected":["3.049021713079475","3.507365283517986","0.2","1.1","0.646867884000033","-0.00697440343353687","3.9174193383745917"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Function \"(cos(I[0]*5)/5+I[0])*(1+sin(I[0])/2)\" instance returns expected scalars. [2]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Function f = Function.of(\"(cos(I[0]*5)/5+I[0])*(1+sin(I[0])/2)\", false)"]}, + + {"kind":"expect","text":"","code":["if ( index != null ) assert f.derive( inputs, index )==expected","else assert f.call( inputs )==expected"]}, + + {"kind":"where","text":"The following input array, target derivative index and result scalar is used :","code":{"inputs":["[ 3 ]","[ 2.5]","[ 0 ]","[ 0 ]","[ 0.5]","[ 1.6]","[ -4 ]"],"index":["null","null","null","0","0","0","0"],"expected":["3.049021713079475","3.507365283517986","0.2","1.1","0.646867884000033","-0.00697440343353687","3.9174193383745917"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Function \"(cos(I[0]*5)/5+I[0])*(1+sin(I[0])/2)\" instance returns expected scalars. [3]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Function f = Function.of(\"(cos(I[0]*5)/5+I[0])*(1+sin(I[0])/2)\", false)"]}, + + {"kind":"expect","text":"","code":["if ( index != null ) assert f.derive( inputs, index )==expected","else assert f.call( inputs )==expected"]}, + + {"kind":"where","text":"The following input array, target derivative index and result scalar is used :","code":{"inputs":["[ 3 ]","[ 2.5]","[ 0 ]","[ 0 ]","[ 0.5]","[ 1.6]","[ -4 ]"],"index":["null","null","null","0","0","0","0"],"expected":["3.049021713079475","3.507365283517986","0.2","1.1","0.646867884000033","-0.00697440343353687","3.9174193383745917"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Function \"(cos(I[0]*5)/5+I[0])*(1+sin(I[0])/2)\" instance returns expected scalars. 
[4]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Function f = Function.of(\"(cos(I[0]*5)/5+I[0])*(1+sin(I[0])/2)\", false)"]}, + + {"kind":"expect","text":"","code":["if ( index != null ) assert f.derive( inputs, index )==expected","else assert f.call( inputs )==expected"]}, + + {"kind":"where","text":"The following input array, target derivative index and result scalar is used :","code":{"inputs":["[ 3 ]","[ 2.5]","[ 0 ]","[ 0 ]","[ 0.5]","[ 1.6]","[ -4 ]"],"index":["null","null","null","0","0","0","0"],"expected":["3.049021713079475","3.507365283517986","0.2","1.1","0.646867884000033","-0.00697440343353687","3.9174193383745917"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Function \"(cos(I[0]*5)/5+I[0])*(1+sin(I[0])/2)\" instance returns expected scalars. [5]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Function f = Function.of(\"(cos(I[0]*5)/5+I[0])*(1+sin(I[0])/2)\", false)"]}, + + {"kind":"expect","text":"","code":["if ( index != null ) assert f.derive( inputs, index )==expected","else assert f.call( inputs )==expected"]}, + + {"kind":"where","text":"The following input array, target derivative index and result scalar is used :","code":{"inputs":["[ 3 ]","[ 2.5]","[ 0 ]","[ 0 ]","[ 0.5]","[ 1.6]","[ -4 ]"],"index":["null","null","null","0","0","0","0"],"expected":["3.049021713079475","3.507365283517986","0.2","1.1","0.646867884000033","-0.00697440343353687","3.9174193383745917"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Function \"(cos(I[0]*5)/5+I[0])*(1+sin(I[0])/2)\" instance returns expected scalars. [6]", "result":"PASS", "duration":"0", "iterations":{ @@ -82,7 +269,58 @@ }, { - "id":"Test scalar results of Function \"sumjs((cos(I[j]*5)/5+I[j])*(1+sin(I[j])/2))\" instance.", + "id":"Test scalar results of Function \"sumjs((cos(I[j]*5)/5+I[j])*(1+sin(I[j])/2))\" instance. [0]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Function f = Function.of(\"sumjs((cos(I[j]*5)/5+I[j])*(1+sin(I[j])/2))\", false)"]}, + + {"kind":"expect","text":"","code":["if (index!=null) assert f.derive( inputs, index )==expected","else assert f.call( inputs )==expected"]}, + + {"kind":"where","text":"The following input array, target derivative index and result scalar is used :","code":{"inputs":["[0.0, 0.5, 1.6, -4.0]","[0.0, 0.5, 1.6, -4.0]","[0.0, 0.5, 1.6, -4.0]","[0.0, 0.5, 1.6, -4.0]"],"index":["0","1","2","3"],"expected":["1.1","0.646867884000033","-0.00697440343353687","3.9174193383745917"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Test scalar results of Function \"sumjs((cos(I[j]*5)/5+I[j])*(1+sin(I[j])/2))\" instance. 
[1]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Function f = Function.of(\"sumjs((cos(I[j]*5)/5+I[j])*(1+sin(I[j])/2))\", false)"]}, + + {"kind":"expect","text":"","code":["if (index!=null) assert f.derive( inputs, index )==expected","else assert f.call( inputs )==expected"]}, + + {"kind":"where","text":"The following input array, target derivative index and result scalar is used :","code":{"inputs":["[0.0, 0.5, 1.6, -4.0]","[0.0, 0.5, 1.6, -4.0]","[0.0, 0.5, 1.6, -4.0]","[0.0, 0.5, 1.6, -4.0]"],"index":["0","1","2","3"],"expected":["1.1","0.646867884000033","-0.00697440343353687","3.9174193383745917"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Test scalar results of Function \"sumjs((cos(I[j]*5)/5+I[j])*(1+sin(I[j])/2))\" instance. [2]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Function f = Function.of(\"sumjs((cos(I[j]*5)/5+I[j])*(1+sin(I[j])/2))\", false)"]}, + + {"kind":"expect","text":"","code":["if (index!=null) assert f.derive( inputs, index )==expected","else assert f.call( inputs )==expected"]}, + + {"kind":"where","text":"The following input array, target derivative index and result scalar is used :","code":{"inputs":["[0.0, 0.5, 1.6, -4.0]","[0.0, 0.5, 1.6, -4.0]","[0.0, 0.5, 1.6, -4.0]","[0.0, 0.5, 1.6, -4.0]"],"index":["0","1","2","3"],"expected":["1.1","0.646867884000033","-0.00697440343353687","3.9174193383745917"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Test scalar results of Function \"sumjs((cos(I[j]*5)/5+I[j])*(1+sin(I[j])/2))\" instance. [3]", "result":"PASS", "duration":"0", "iterations":{ @@ -99,7 +337,789 @@ }, { - "id":"Test scalar results of various Function instances.", + "id":"Test scalar results of various Function instances. [0]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A new Function instance which is detached! 
(no autograd support)","code":["Function f = Function.of(equation, false)"]}, + + {"kind":"expect","text":"Calling the function will yield the expected result.","code":["if ( index!=null ) assert f.derive( inputs, index ) == expected","else assert f.call( inputs )==expected"]}, + + {"kind":"where","text":"The following parameters are used :","code":{"equation":["\"6/2*(1+2)\"","\"sumJs(Ij)\"","\"prod(Ij)\"","\"prod(prod(Ij))\"","\"I3/i[1]-I0+2+i2\"","\"i3*i1/(i4-i0-2)-sig(0)+tanh(0)\"","\"(i0*i1)*i2\"","\"softplus(i0*i1)*i2\"","\"prod(ij)\"","\"relu(prod(ij))\"","\"relu(prod(ij))\"","\"quad(prod(ij)+6)\"","\"quad(prod(ij)+6)\"","\"quad(abs(prod(ij))-6)\"","\"quad(abs(prod(ij))-6)\"","\"sumJs(ij)\"","\"sumJs(ij)\"","\"sumJs(ij**1)\"","\"sumJs(ij**1)\"","\"I[1]**2\"","\"I[1]**2\"","\"sumJs(ij**2)\"","\"sumJs(ij**2)\"","\"2**I[1]\"","\"2**I[0]\"","\"2**I[2]\"","\"2**I[1]\"","\"sumJs(2**I[j])\"","\"sumJs(2**I[j])\"","\"I[1]%2\"","\"I[1]%2\"","\"I[2]%2\"","\"I[2]%2\"","\"I[2]%2\"","\"7%I[1]\"","\"7%I[1]\"","\"sum(7%ij)\""],"inputs":["[]","[2, 3.2, 6]","[0.5, 0.5, 100]","[0.5, 0.5, 10]","[5, 4, 3, 12]","[-4, -2, 6, -3, -8]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -5]","[2, 3, -5]","[2, 3, -5]","[2, 4, -5]","[2, 4, -5]","[2, 3, 4]"],"index":["null","null","null","null","null","null","0","0","1","null","1","null","1","null","1","null","1","null","1","null","1","null","1","null","null","null","1","null","1","null","1","null","2","1","null","1","2"],"expected":["9","11.2","25","(2.5 * 2.5 * 2.5)","3","-1.5","-6","-5.985164261060192","-4","-0.12","-0.04","36","-12*-4","36","-12*-4","3","1","3","1","9","6","17","6","8","4","0.25","5.545177444479562","12.25","5.545177444479562","1","1","-1","1","0","3","0 \n\"sum(7%i0)\" [2, 3, 4] null 3\n\"sum(7%ij)\" [2, 3, 4] null 5\n\"sum(7%i0)\" [2, 3, 4] 2 0","0 \n\"sum(i0%3)\" [2, 3, 4] 0 1\n\"sum(ij%3)\" [2, 3, 4] 2 1\n\"sum(ij-3)\" [1, 2, 4] null -2\n\"sum(ij-3)\" [1, 2, 4] 1 1\n\"sum(3-ij)\" [1, 2, 4] null 2\n\"sum(3-ij)\" [1, 2, 4] 1 -1\n\"sum(3-ij-ij)\" [1, 2, 4] 1 -2"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Test scalar results of various Function instances. [1]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A new Function instance which is detached! 
(no autograd support)","code":["Function f = Function.of(equation, false)"]}, + + {"kind":"expect","text":"Calling the function will yield the expected result.","code":["if ( index!=null ) assert f.derive( inputs, index ) == expected","else assert f.call( inputs )==expected"]}, + + {"kind":"where","text":"The following parameters are used :","code":{"equation":["\"6/2*(1+2)\"","\"sumJs(Ij)\"","\"prod(Ij)\"","\"prod(prod(Ij))\"","\"I3/i[1]-I0+2+i2\"","\"i3*i1/(i4-i0-2)-sig(0)+tanh(0)\"","\"(i0*i1)*i2\"","\"softplus(i0*i1)*i2\"","\"prod(ij)\"","\"relu(prod(ij))\"","\"relu(prod(ij))\"","\"quad(prod(ij)+6)\"","\"quad(prod(ij)+6)\"","\"quad(abs(prod(ij))-6)\"","\"quad(abs(prod(ij))-6)\"","\"sumJs(ij)\"","\"sumJs(ij)\"","\"sumJs(ij**1)\"","\"sumJs(ij**1)\"","\"I[1]**2\"","\"I[1]**2\"","\"sumJs(ij**2)\"","\"sumJs(ij**2)\"","\"2**I[1]\"","\"2**I[0]\"","\"2**I[2]\"","\"2**I[1]\"","\"sumJs(2**I[j])\"","\"sumJs(2**I[j])\"","\"I[1]%2\"","\"I[1]%2\"","\"I[2]%2\"","\"I[2]%2\"","\"I[2]%2\"","\"7%I[1]\"","\"7%I[1]\"","\"sum(7%ij)\""],"inputs":["[]","[2, 3.2, 6]","[0.5, 0.5, 100]","[0.5, 0.5, 10]","[5, 4, 3, 12]","[-4, -2, 6, -3, -8]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -5]","[2, 3, -5]","[2, 3, -5]","[2, 4, -5]","[2, 4, -5]","[2, 3, 4]"],"index":["null","null","null","null","null","null","0","0","1","null","1","null","1","null","1","null","1","null","1","null","1","null","1","null","null","null","1","null","1","null","1","null","2","1","null","1","2"],"expected":["9","11.2","25","(2.5 * 2.5 * 2.5)","3","-1.5","-6","-5.985164261060192","-4","-0.12","-0.04","36","-12*-4","36","-12*-4","3","1","3","1","9","6","17","6","8","4","0.25","5.545177444479562","12.25","5.545177444479562","1","1","-1","1","0","3","0 \n\"sum(7%i0)\" [2, 3, 4] null 3\n\"sum(7%ij)\" [2, 3, 4] null 5\n\"sum(7%i0)\" [2, 3, 4] 2 0","0 \n\"sum(i0%3)\" [2, 3, 4] 0 1\n\"sum(ij%3)\" [2, 3, 4] 2 1\n\"sum(ij-3)\" [1, 2, 4] null -2\n\"sum(ij-3)\" [1, 2, 4] 1 1\n\"sum(3-ij)\" [1, 2, 4] null 2\n\"sum(3-ij)\" [1, 2, 4] 1 -1\n\"sum(3-ij-ij)\" [1, 2, 4] 1 -2"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Test scalar results of various Function instances. [2]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A new Function instance which is detached! 
(no autograd support)","code":["Function f = Function.of(equation, false)"]}, + + {"kind":"expect","text":"Calling the function will yield the expected result.","code":["if ( index!=null ) assert f.derive( inputs, index ) == expected","else assert f.call( inputs )==expected"]}, + + {"kind":"where","text":"The following parameters are used :","code":{"equation":["\"6/2*(1+2)\"","\"sumJs(Ij)\"","\"prod(Ij)\"","\"prod(prod(Ij))\"","\"I3/i[1]-I0+2+i2\"","\"i3*i1/(i4-i0-2)-sig(0)+tanh(0)\"","\"(i0*i1)*i2\"","\"softplus(i0*i1)*i2\"","\"prod(ij)\"","\"relu(prod(ij))\"","\"relu(prod(ij))\"","\"quad(prod(ij)+6)\"","\"quad(prod(ij)+6)\"","\"quad(abs(prod(ij))-6)\"","\"quad(abs(prod(ij))-6)\"","\"sumJs(ij)\"","\"sumJs(ij)\"","\"sumJs(ij**1)\"","\"sumJs(ij**1)\"","\"I[1]**2\"","\"I[1]**2\"","\"sumJs(ij**2)\"","\"sumJs(ij**2)\"","\"2**I[1]\"","\"2**I[0]\"","\"2**I[2]\"","\"2**I[1]\"","\"sumJs(2**I[j])\"","\"sumJs(2**I[j])\"","\"I[1]%2\"","\"I[1]%2\"","\"I[2]%2\"","\"I[2]%2\"","\"I[2]%2\"","\"7%I[1]\"","\"7%I[1]\"","\"sum(7%ij)\""],"inputs":["[]","[2, 3.2, 6]","[0.5, 0.5, 100]","[0.5, 0.5, 10]","[5, 4, 3, 12]","[-4, -2, 6, -3, -8]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -5]","[2, 3, -5]","[2, 3, -5]","[2, 4, -5]","[2, 4, -5]","[2, 3, 4]"],"index":["null","null","null","null","null","null","0","0","1","null","1","null","1","null","1","null","1","null","1","null","1","null","1","null","null","null","1","null","1","null","1","null","2","1","null","1","2"],"expected":["9","11.2","25","(2.5 * 2.5 * 2.5)","3","-1.5","-6","-5.985164261060192","-4","-0.12","-0.04","36","-12*-4","36","-12*-4","3","1","3","1","9","6","17","6","8","4","0.25","5.545177444479562","12.25","5.545177444479562","1","1","-1","1","0","3","0 \n\"sum(7%i0)\" [2, 3, 4] null 3\n\"sum(7%ij)\" [2, 3, 4] null 5\n\"sum(7%i0)\" [2, 3, 4] 2 0","0 \n\"sum(i0%3)\" [2, 3, 4] 0 1\n\"sum(ij%3)\" [2, 3, 4] 2 1\n\"sum(ij-3)\" [1, 2, 4] null -2\n\"sum(ij-3)\" [1, 2, 4] 1 1\n\"sum(3-ij)\" [1, 2, 4] null 2\n\"sum(3-ij)\" [1, 2, 4] 1 -1\n\"sum(3-ij-ij)\" [1, 2, 4] 1 -2"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Test scalar results of various Function instances. [3]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A new Function instance which is detached! 
(no autograd support)","code":["Function f = Function.of(equation, false)"]}, + + {"kind":"expect","text":"Calling the function will yield the expected result.","code":["if ( index!=null ) assert f.derive( inputs, index ) == expected","else assert f.call( inputs )==expected"]}, + + {"kind":"where","text":"The following parameters are used :","code":{"equation":["\"6/2*(1+2)\"","\"sumJs(Ij)\"","\"prod(Ij)\"","\"prod(prod(Ij))\"","\"I3/i[1]-I0+2+i2\"","\"i3*i1/(i4-i0-2)-sig(0)+tanh(0)\"","\"(i0*i1)*i2\"","\"softplus(i0*i1)*i2\"","\"prod(ij)\"","\"relu(prod(ij))\"","\"relu(prod(ij))\"","\"quad(prod(ij)+6)\"","\"quad(prod(ij)+6)\"","\"quad(abs(prod(ij))-6)\"","\"quad(abs(prod(ij))-6)\"","\"sumJs(ij)\"","\"sumJs(ij)\"","\"sumJs(ij**1)\"","\"sumJs(ij**1)\"","\"I[1]**2\"","\"I[1]**2\"","\"sumJs(ij**2)\"","\"sumJs(ij**2)\"","\"2**I[1]\"","\"2**I[0]\"","\"2**I[2]\"","\"2**I[1]\"","\"sumJs(2**I[j])\"","\"sumJs(2**I[j])\"","\"I[1]%2\"","\"I[1]%2\"","\"I[2]%2\"","\"I[2]%2\"","\"I[2]%2\"","\"7%I[1]\"","\"7%I[1]\"","\"sum(7%ij)\""],"inputs":["[]","[2, 3.2, 6]","[0.5, 0.5, 100]","[0.5, 0.5, 10]","[5, 4, 3, 12]","[-4, -2, 6, -3, -8]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -5]","[2, 3, -5]","[2, 3, -5]","[2, 4, -5]","[2, 4, -5]","[2, 3, 4]"],"index":["null","null","null","null","null","null","0","0","1","null","1","null","1","null","1","null","1","null","1","null","1","null","1","null","null","null","1","null","1","null","1","null","2","1","null","1","2"],"expected":["9","11.2","25","(2.5 * 2.5 * 2.5)","3","-1.5","-6","-5.985164261060192","-4","-0.12","-0.04","36","-12*-4","36","-12*-4","3","1","3","1","9","6","17","6","8","4","0.25","5.545177444479562","12.25","5.545177444479562","1","1","-1","1","0","3","0 \n\"sum(7%i0)\" [2, 3, 4] null 3\n\"sum(7%ij)\" [2, 3, 4] null 5\n\"sum(7%i0)\" [2, 3, 4] 2 0","0 \n\"sum(i0%3)\" [2, 3, 4] 0 1\n\"sum(ij%3)\" [2, 3, 4] 2 1\n\"sum(ij-3)\" [1, 2, 4] null -2\n\"sum(ij-3)\" [1, 2, 4] 1 1\n\"sum(3-ij)\" [1, 2, 4] null 2\n\"sum(3-ij)\" [1, 2, 4] 1 -1\n\"sum(3-ij-ij)\" [1, 2, 4] 1 -2"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Test scalar results of various Function instances. [4]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A new Function instance which is detached! 
(no autograd support)","code":["Function f = Function.of(equation, false)"]}, + + {"kind":"expect","text":"Calling the function will yield the expected result.","code":["if ( index!=null ) assert f.derive( inputs, index ) == expected","else assert f.call( inputs )==expected"]}, + + {"kind":"where","text":"The following parameters are used :","code":{"equation":["\"6/2*(1+2)\"","\"sumJs(Ij)\"","\"prod(Ij)\"","\"prod(prod(Ij))\"","\"I3/i[1]-I0+2+i2\"","\"i3*i1/(i4-i0-2)-sig(0)+tanh(0)\"","\"(i0*i1)*i2\"","\"softplus(i0*i1)*i2\"","\"prod(ij)\"","\"relu(prod(ij))\"","\"relu(prod(ij))\"","\"quad(prod(ij)+6)\"","\"quad(prod(ij)+6)\"","\"quad(abs(prod(ij))-6)\"","\"quad(abs(prod(ij))-6)\"","\"sumJs(ij)\"","\"sumJs(ij)\"","\"sumJs(ij**1)\"","\"sumJs(ij**1)\"","\"I[1]**2\"","\"I[1]**2\"","\"sumJs(ij**2)\"","\"sumJs(ij**2)\"","\"2**I[1]\"","\"2**I[0]\"","\"2**I[2]\"","\"2**I[1]\"","\"sumJs(2**I[j])\"","\"sumJs(2**I[j])\"","\"I[1]%2\"","\"I[1]%2\"","\"I[2]%2\"","\"I[2]%2\"","\"I[2]%2\"","\"7%I[1]\"","\"7%I[1]\"","\"sum(7%ij)\""],"inputs":["[]","[2, 3.2, 6]","[0.5, 0.5, 100]","[0.5, 0.5, 10]","[5, 4, 3, 12]","[-4, -2, 6, -3, -8]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -5]","[2, 3, -5]","[2, 3, -5]","[2, 4, -5]","[2, 4, -5]","[2, 3, 4]"],"index":["null","null","null","null","null","null","0","0","1","null","1","null","1","null","1","null","1","null","1","null","1","null","1","null","null","null","1","null","1","null","1","null","2","1","null","1","2"],"expected":["9","11.2","25","(2.5 * 2.5 * 2.5)","3","-1.5","-6","-5.985164261060192","-4","-0.12","-0.04","36","-12*-4","36","-12*-4","3","1","3","1","9","6","17","6","8","4","0.25","5.545177444479562","12.25","5.545177444479562","1","1","-1","1","0","3","0 \n\"sum(7%i0)\" [2, 3, 4] null 3\n\"sum(7%ij)\" [2, 3, 4] null 5\n\"sum(7%i0)\" [2, 3, 4] 2 0","0 \n\"sum(i0%3)\" [2, 3, 4] 0 1\n\"sum(ij%3)\" [2, 3, 4] 2 1\n\"sum(ij-3)\" [1, 2, 4] null -2\n\"sum(ij-3)\" [1, 2, 4] 1 1\n\"sum(3-ij)\" [1, 2, 4] null 2\n\"sum(3-ij)\" [1, 2, 4] 1 -1\n\"sum(3-ij-ij)\" [1, 2, 4] 1 -2"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Test scalar results of various Function instances. [5]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A new Function instance which is detached! 
(no autograd support)","code":["Function f = Function.of(equation, false)"]}, + + {"kind":"expect","text":"Calling the function will yield the expected result.","code":["if ( index!=null ) assert f.derive( inputs, index ) == expected","else assert f.call( inputs )==expected"]}, + + {"kind":"where","text":"The following parameters are used :","code":{"equation":["\"6/2*(1+2)\"","\"sumJs(Ij)\"","\"prod(Ij)\"","\"prod(prod(Ij))\"","\"I3/i[1]-I0+2+i2\"","\"i3*i1/(i4-i0-2)-sig(0)+tanh(0)\"","\"(i0*i1)*i2\"","\"softplus(i0*i1)*i2\"","\"prod(ij)\"","\"relu(prod(ij))\"","\"relu(prod(ij))\"","\"quad(prod(ij)+6)\"","\"quad(prod(ij)+6)\"","\"quad(abs(prod(ij))-6)\"","\"quad(abs(prod(ij))-6)\"","\"sumJs(ij)\"","\"sumJs(ij)\"","\"sumJs(ij**1)\"","\"sumJs(ij**1)\"","\"I[1]**2\"","\"I[1]**2\"","\"sumJs(ij**2)\"","\"sumJs(ij**2)\"","\"2**I[1]\"","\"2**I[0]\"","\"2**I[2]\"","\"2**I[1]\"","\"sumJs(2**I[j])\"","\"sumJs(2**I[j])\"","\"I[1]%2\"","\"I[1]%2\"","\"I[2]%2\"","\"I[2]%2\"","\"I[2]%2\"","\"7%I[1]\"","\"7%I[1]\"","\"sum(7%ij)\""],"inputs":["[]","[2, 3.2, 6]","[0.5, 0.5, 100]","[0.5, 0.5, 10]","[5, 4, 3, 12]","[-4, -2, 6, -3, -8]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -5]","[2, 3, -5]","[2, 3, -5]","[2, 4, -5]","[2, 4, -5]","[2, 3, 4]"],"index":["null","null","null","null","null","null","0","0","1","null","1","null","1","null","1","null","1","null","1","null","1","null","1","null","null","null","1","null","1","null","1","null","2","1","null","1","2"],"expected":["9","11.2","25","(2.5 * 2.5 * 2.5)","3","-1.5","-6","-5.985164261060192","-4","-0.12","-0.04","36","-12*-4","36","-12*-4","3","1","3","1","9","6","17","6","8","4","0.25","5.545177444479562","12.25","5.545177444479562","1","1","-1","1","0","3","0 \n\"sum(7%i0)\" [2, 3, 4] null 3\n\"sum(7%ij)\" [2, 3, 4] null 5\n\"sum(7%i0)\" [2, 3, 4] 2 0","0 \n\"sum(i0%3)\" [2, 3, 4] 0 1\n\"sum(ij%3)\" [2, 3, 4] 2 1\n\"sum(ij-3)\" [1, 2, 4] null -2\n\"sum(ij-3)\" [1, 2, 4] 1 1\n\"sum(3-ij)\" [1, 2, 4] null 2\n\"sum(3-ij)\" [1, 2, 4] 1 -1\n\"sum(3-ij-ij)\" [1, 2, 4] 1 -2"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Test scalar results of various Function instances. [6]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A new Function instance which is detached! 
(no autograd support)","code":["Function f = Function.of(equation, false)"]}, + + {"kind":"expect","text":"Calling the function will yield the expected result.","code":["if ( index!=null ) assert f.derive( inputs, index ) == expected","else assert f.call( inputs )==expected"]}, + + {"kind":"where","text":"The following parameters are used :","code":{"equation":["\"6/2*(1+2)\"","\"sumJs(Ij)\"","\"prod(Ij)\"","\"prod(prod(Ij))\"","\"I3/i[1]-I0+2+i2\"","\"i3*i1/(i4-i0-2)-sig(0)+tanh(0)\"","\"(i0*i1)*i2\"","\"softplus(i0*i1)*i2\"","\"prod(ij)\"","\"relu(prod(ij))\"","\"relu(prod(ij))\"","\"quad(prod(ij)+6)\"","\"quad(prod(ij)+6)\"","\"quad(abs(prod(ij))-6)\"","\"quad(abs(prod(ij))-6)\"","\"sumJs(ij)\"","\"sumJs(ij)\"","\"sumJs(ij**1)\"","\"sumJs(ij**1)\"","\"I[1]**2\"","\"I[1]**2\"","\"sumJs(ij**2)\"","\"sumJs(ij**2)\"","\"2**I[1]\"","\"2**I[0]\"","\"2**I[2]\"","\"2**I[1]\"","\"sumJs(2**I[j])\"","\"sumJs(2**I[j])\"","\"I[1]%2\"","\"I[1]%2\"","\"I[2]%2\"","\"I[2]%2\"","\"I[2]%2\"","\"7%I[1]\"","\"7%I[1]\"","\"sum(7%ij)\""],"inputs":["[]","[2, 3.2, 6]","[0.5, 0.5, 100]","[0.5, 0.5, 10]","[5, 4, 3, 12]","[-4, -2, 6, -3, -8]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -5]","[2, 3, -5]","[2, 3, -5]","[2, 4, -5]","[2, 4, -5]","[2, 3, 4]"],"index":["null","null","null","null","null","null","0","0","1","null","1","null","1","null","1","null","1","null","1","null","1","null","1","null","null","null","1","null","1","null","1","null","2","1","null","1","2"],"expected":["9","11.2","25","(2.5 * 2.5 * 2.5)","3","-1.5","-6","-5.985164261060192","-4","-0.12","-0.04","36","-12*-4","36","-12*-4","3","1","3","1","9","6","17","6","8","4","0.25","5.545177444479562","12.25","5.545177444479562","1","1","-1","1","0","3","0 \n\"sum(7%i0)\" [2, 3, 4] null 3\n\"sum(7%ij)\" [2, 3, 4] null 5\n\"sum(7%i0)\" [2, 3, 4] 2 0","0 \n\"sum(i0%3)\" [2, 3, 4] 0 1\n\"sum(ij%3)\" [2, 3, 4] 2 1\n\"sum(ij-3)\" [1, 2, 4] null -2\n\"sum(ij-3)\" [1, 2, 4] 1 1\n\"sum(3-ij)\" [1, 2, 4] null 2\n\"sum(3-ij)\" [1, 2, 4] 1 -1\n\"sum(3-ij-ij)\" [1, 2, 4] 1 -2"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Test scalar results of various Function instances. [7]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A new Function instance which is detached! 
(no autograd support)","code":["Function f = Function.of(equation, false)"]}, + + {"kind":"expect","text":"Calling the function will yield the expected result.","code":["if ( index!=null ) assert f.derive( inputs, index ) == expected","else assert f.call( inputs )==expected"]}, + + {"kind":"where","text":"The following parameters are used :","code":{"equation":["\"6/2*(1+2)\"","\"sumJs(Ij)\"","\"prod(Ij)\"","\"prod(prod(Ij))\"","\"I3/i[1]-I0+2+i2\"","\"i3*i1/(i4-i0-2)-sig(0)+tanh(0)\"","\"(i0*i1)*i2\"","\"softplus(i0*i1)*i2\"","\"prod(ij)\"","\"relu(prod(ij))\"","\"relu(prod(ij))\"","\"quad(prod(ij)+6)\"","\"quad(prod(ij)+6)\"","\"quad(abs(prod(ij))-6)\"","\"quad(abs(prod(ij))-6)\"","\"sumJs(ij)\"","\"sumJs(ij)\"","\"sumJs(ij**1)\"","\"sumJs(ij**1)\"","\"I[1]**2\"","\"I[1]**2\"","\"sumJs(ij**2)\"","\"sumJs(ij**2)\"","\"2**I[1]\"","\"2**I[0]\"","\"2**I[2]\"","\"2**I[1]\"","\"sumJs(2**I[j])\"","\"sumJs(2**I[j])\"","\"I[1]%2\"","\"I[1]%2\"","\"I[2]%2\"","\"I[2]%2\"","\"I[2]%2\"","\"7%I[1]\"","\"7%I[1]\"","\"sum(7%ij)\""],"inputs":["[]","[2, 3.2, 6]","[0.5, 0.5, 100]","[0.5, 0.5, 10]","[5, 4, 3, 12]","[-4, -2, 6, -3, -8]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -5]","[2, 3, -5]","[2, 3, -5]","[2, 4, -5]","[2, 4, -5]","[2, 3, 4]"],"index":["null","null","null","null","null","null","0","0","1","null","1","null","1","null","1","null","1","null","1","null","1","null","1","null","null","null","1","null","1","null","1","null","2","1","null","1","2"],"expected":["9","11.2","25","(2.5 * 2.5 * 2.5)","3","-1.5","-6","-5.985164261060192","-4","-0.12","-0.04","36","-12*-4","36","-12*-4","3","1","3","1","9","6","17","6","8","4","0.25","5.545177444479562","12.25","5.545177444479562","1","1","-1","1","0","3","0 \n\"sum(7%i0)\" [2, 3, 4] null 3\n\"sum(7%ij)\" [2, 3, 4] null 5\n\"sum(7%i0)\" [2, 3, 4] 2 0","0 \n\"sum(i0%3)\" [2, 3, 4] 0 1\n\"sum(ij%3)\" [2, 3, 4] 2 1\n\"sum(ij-3)\" [1, 2, 4] null -2\n\"sum(ij-3)\" [1, 2, 4] 1 1\n\"sum(3-ij)\" [1, 2, 4] null 2\n\"sum(3-ij)\" [1, 2, 4] 1 -1\n\"sum(3-ij-ij)\" [1, 2, 4] 1 -2"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Test scalar results of various Function instances. [8]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A new Function instance which is detached! 
(no autograd support)","code":["Function f = Function.of(equation, false)"]}, + + {"kind":"expect","text":"Calling the function will yield the expected result.","code":["if ( index!=null ) assert f.derive( inputs, index ) == expected","else assert f.call( inputs )==expected"]}, + + {"kind":"where","text":"The following parameters are used :","code":{"equation":["\"6/2*(1+2)\"","\"sumJs(Ij)\"","\"prod(Ij)\"","\"prod(prod(Ij))\"","\"I3/i[1]-I0+2+i2\"","\"i3*i1/(i4-i0-2)-sig(0)+tanh(0)\"","\"(i0*i1)*i2\"","\"softplus(i0*i1)*i2\"","\"prod(ij)\"","\"relu(prod(ij))\"","\"relu(prod(ij))\"","\"quad(prod(ij)+6)\"","\"quad(prod(ij)+6)\"","\"quad(abs(prod(ij))-6)\"","\"quad(abs(prod(ij))-6)\"","\"sumJs(ij)\"","\"sumJs(ij)\"","\"sumJs(ij**1)\"","\"sumJs(ij**1)\"","\"I[1]**2\"","\"I[1]**2\"","\"sumJs(ij**2)\"","\"sumJs(ij**2)\"","\"2**I[1]\"","\"2**I[0]\"","\"2**I[2]\"","\"2**I[1]\"","\"sumJs(2**I[j])\"","\"sumJs(2**I[j])\"","\"I[1]%2\"","\"I[1]%2\"","\"I[2]%2\"","\"I[2]%2\"","\"I[2]%2\"","\"7%I[1]\"","\"7%I[1]\"","\"sum(7%ij)\""],"inputs":["[]","[2, 3.2, 6]","[0.5, 0.5, 100]","[0.5, 0.5, 10]","[5, 4, 3, 12]","[-4, -2, 6, -3, -8]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -5]","[2, 3, -5]","[2, 3, -5]","[2, 4, -5]","[2, 4, -5]","[2, 3, 4]"],"index":["null","null","null","null","null","null","0","0","1","null","1","null","1","null","1","null","1","null","1","null","1","null","1","null","null","null","1","null","1","null","1","null","2","1","null","1","2"],"expected":["9","11.2","25","(2.5 * 2.5 * 2.5)","3","-1.5","-6","-5.985164261060192","-4","-0.12","-0.04","36","-12*-4","36","-12*-4","3","1","3","1","9","6","17","6","8","4","0.25","5.545177444479562","12.25","5.545177444479562","1","1","-1","1","0","3","0 \n\"sum(7%i0)\" [2, 3, 4] null 3\n\"sum(7%ij)\" [2, 3, 4] null 5\n\"sum(7%i0)\" [2, 3, 4] 2 0","0 \n\"sum(i0%3)\" [2, 3, 4] 0 1\n\"sum(ij%3)\" [2, 3, 4] 2 1\n\"sum(ij-3)\" [1, 2, 4] null -2\n\"sum(ij-3)\" [1, 2, 4] 1 1\n\"sum(3-ij)\" [1, 2, 4] null 2\n\"sum(3-ij)\" [1, 2, 4] 1 -1\n\"sum(3-ij-ij)\" [1, 2, 4] 1 -2"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Test scalar results of various Function instances. [9]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A new Function instance which is detached! 
(no autograd support)","code":["Function f = Function.of(equation, false)"]}, + + {"kind":"expect","text":"Calling the function will yield the expected result.","code":["if ( index!=null ) assert f.derive( inputs, index ) == expected","else assert f.call( inputs )==expected"]}, + + {"kind":"where","text":"The following parameters are used :","code":{"equation":["\"6/2*(1+2)\"","\"sumJs(Ij)\"","\"prod(Ij)\"","\"prod(prod(Ij))\"","\"I3/i[1]-I0+2+i2\"","\"i3*i1/(i4-i0-2)-sig(0)+tanh(0)\"","\"(i0*i1)*i2\"","\"softplus(i0*i1)*i2\"","\"prod(ij)\"","\"relu(prod(ij))\"","\"relu(prod(ij))\"","\"quad(prod(ij)+6)\"","\"quad(prod(ij)+6)\"","\"quad(abs(prod(ij))-6)\"","\"quad(abs(prod(ij))-6)\"","\"sumJs(ij)\"","\"sumJs(ij)\"","\"sumJs(ij**1)\"","\"sumJs(ij**1)\"","\"I[1]**2\"","\"I[1]**2\"","\"sumJs(ij**2)\"","\"sumJs(ij**2)\"","\"2**I[1]\"","\"2**I[0]\"","\"2**I[2]\"","\"2**I[1]\"","\"sumJs(2**I[j])\"","\"sumJs(2**I[j])\"","\"I[1]%2\"","\"I[1]%2\"","\"I[2]%2\"","\"I[2]%2\"","\"I[2]%2\"","\"7%I[1]\"","\"7%I[1]\"","\"sum(7%ij)\""],"inputs":["[]","[2, 3.2, 6]","[0.5, 0.5, 100]","[0.5, 0.5, 10]","[5, 4, 3, 12]","[-4, -2, 6, -3, -8]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -5]","[2, 3, -5]","[2, 3, -5]","[2, 4, -5]","[2, 4, -5]","[2, 3, 4]"],"index":["null","null","null","null","null","null","0","0","1","null","1","null","1","null","1","null","1","null","1","null","1","null","1","null","null","null","1","null","1","null","1","null","2","1","null","1","2"],"expected":["9","11.2","25","(2.5 * 2.5 * 2.5)","3","-1.5","-6","-5.985164261060192","-4","-0.12","-0.04","36","-12*-4","36","-12*-4","3","1","3","1","9","6","17","6","8","4","0.25","5.545177444479562","12.25","5.545177444479562","1","1","-1","1","0","3","0 \n\"sum(7%i0)\" [2, 3, 4] null 3\n\"sum(7%ij)\" [2, 3, 4] null 5\n\"sum(7%i0)\" [2, 3, 4] 2 0","0 \n\"sum(i0%3)\" [2, 3, 4] 0 1\n\"sum(ij%3)\" [2, 3, 4] 2 1\n\"sum(ij-3)\" [1, 2, 4] null -2\n\"sum(ij-3)\" [1, 2, 4] 1 1\n\"sum(3-ij)\" [1, 2, 4] null 2\n\"sum(3-ij)\" [1, 2, 4] 1 -1\n\"sum(3-ij-ij)\" [1, 2, 4] 1 -2"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Test scalar results of various Function instances. [10]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A new Function instance which is detached! 
(no autograd support)","code":["Function f = Function.of(equation, false)"]}, + + {"kind":"expect","text":"Calling the function will yield the expected result.","code":["if ( index!=null ) assert f.derive( inputs, index ) == expected","else assert f.call( inputs )==expected"]}, + + {"kind":"where","text":"The following parameters are used :","code":{"equation":["\"6/2*(1+2)\"","\"sumJs(Ij)\"","\"prod(Ij)\"","\"prod(prod(Ij))\"","\"I3/i[1]-I0+2+i2\"","\"i3*i1/(i4-i0-2)-sig(0)+tanh(0)\"","\"(i0*i1)*i2\"","\"softplus(i0*i1)*i2\"","\"prod(ij)\"","\"relu(prod(ij))\"","\"relu(prod(ij))\"","\"quad(prod(ij)+6)\"","\"quad(prod(ij)+6)\"","\"quad(abs(prod(ij))-6)\"","\"quad(abs(prod(ij))-6)\"","\"sumJs(ij)\"","\"sumJs(ij)\"","\"sumJs(ij**1)\"","\"sumJs(ij**1)\"","\"I[1]**2\"","\"I[1]**2\"","\"sumJs(ij**2)\"","\"sumJs(ij**2)\"","\"2**I[1]\"","\"2**I[0]\"","\"2**I[2]\"","\"2**I[1]\"","\"sumJs(2**I[j])\"","\"sumJs(2**I[j])\"","\"I[1]%2\"","\"I[1]%2\"","\"I[2]%2\"","\"I[2]%2\"","\"I[2]%2\"","\"7%I[1]\"","\"7%I[1]\"","\"sum(7%ij)\""],"inputs":["[]","[2, 3.2, 6]","[0.5, 0.5, 100]","[0.5, 0.5, 10]","[5, 4, 3, 12]","[-4, -2, 6, -3, -8]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -5]","[2, 3, -5]","[2, 3, -5]","[2, 4, -5]","[2, 4, -5]","[2, 3, 4]"],"index":["null","null","null","null","null","null","0","0","1","null","1","null","1","null","1","null","1","null","1","null","1","null","1","null","null","null","1","null","1","null","1","null","2","1","null","1","2"],"expected":["9","11.2","25","(2.5 * 2.5 * 2.5)","3","-1.5","-6","-5.985164261060192","-4","-0.12","-0.04","36","-12*-4","36","-12*-4","3","1","3","1","9","6","17","6","8","4","0.25","5.545177444479562","12.25","5.545177444479562","1","1","-1","1","0","3","0 \n\"sum(7%i0)\" [2, 3, 4] null 3\n\"sum(7%ij)\" [2, 3, 4] null 5\n\"sum(7%i0)\" [2, 3, 4] 2 0","0 \n\"sum(i0%3)\" [2, 3, 4] 0 1\n\"sum(ij%3)\" [2, 3, 4] 2 1\n\"sum(ij-3)\" [1, 2, 4] null -2\n\"sum(ij-3)\" [1, 2, 4] 1 1\n\"sum(3-ij)\" [1, 2, 4] null 2\n\"sum(3-ij)\" [1, 2, 4] 1 -1\n\"sum(3-ij-ij)\" [1, 2, 4] 1 -2"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Test scalar results of various Function instances. [11]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A new Function instance which is detached! 
(no autograd support)","code":["Function f = Function.of(equation, false)"]}, + + {"kind":"expect","text":"Calling the function will yield the expected result.","code":["if ( index!=null ) assert f.derive( inputs, index ) == expected","else assert f.call( inputs )==expected"]}, + + {"kind":"where","text":"The following parameters are used :","code":{"equation":["\"6/2*(1+2)\"","\"sumJs(Ij)\"","\"prod(Ij)\"","\"prod(prod(Ij))\"","\"I3/i[1]-I0+2+i2\"","\"i3*i1/(i4-i0-2)-sig(0)+tanh(0)\"","\"(i0*i1)*i2\"","\"softplus(i0*i1)*i2\"","\"prod(ij)\"","\"relu(prod(ij))\"","\"relu(prod(ij))\"","\"quad(prod(ij)+6)\"","\"quad(prod(ij)+6)\"","\"quad(abs(prod(ij))-6)\"","\"quad(abs(prod(ij))-6)\"","\"sumJs(ij)\"","\"sumJs(ij)\"","\"sumJs(ij**1)\"","\"sumJs(ij**1)\"","\"I[1]**2\"","\"I[1]**2\"","\"sumJs(ij**2)\"","\"sumJs(ij**2)\"","\"2**I[1]\"","\"2**I[0]\"","\"2**I[2]\"","\"2**I[1]\"","\"sumJs(2**I[j])\"","\"sumJs(2**I[j])\"","\"I[1]%2\"","\"I[1]%2\"","\"I[2]%2\"","\"I[2]%2\"","\"I[2]%2\"","\"7%I[1]\"","\"7%I[1]\"","\"sum(7%ij)\""],"inputs":["[]","[2, 3.2, 6]","[0.5, 0.5, 100]","[0.5, 0.5, 10]","[5, 4, 3, 12]","[-4, -2, 6, -3, -8]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -5]","[2, 3, -5]","[2, 3, -5]","[2, 4, -5]","[2, 4, -5]","[2, 3, 4]"],"index":["null","null","null","null","null","null","0","0","1","null","1","null","1","null","1","null","1","null","1","null","1","null","1","null","null","null","1","null","1","null","1","null","2","1","null","1","2"],"expected":["9","11.2","25","(2.5 * 2.5 * 2.5)","3","-1.5","-6","-5.985164261060192","-4","-0.12","-0.04","36","-12*-4","36","-12*-4","3","1","3","1","9","6","17","6","8","4","0.25","5.545177444479562","12.25","5.545177444479562","1","1","-1","1","0","3","0 \n\"sum(7%i0)\" [2, 3, 4] null 3\n\"sum(7%ij)\" [2, 3, 4] null 5\n\"sum(7%i0)\" [2, 3, 4] 2 0","0 \n\"sum(i0%3)\" [2, 3, 4] 0 1\n\"sum(ij%3)\" [2, 3, 4] 2 1\n\"sum(ij-3)\" [1, 2, 4] null -2\n\"sum(ij-3)\" [1, 2, 4] 1 1\n\"sum(3-ij)\" [1, 2, 4] null 2\n\"sum(3-ij)\" [1, 2, 4] 1 -1\n\"sum(3-ij-ij)\" [1, 2, 4] 1 -2"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Test scalar results of various Function instances. [12]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A new Function instance which is detached! 
(no autograd support)","code":["Function f = Function.of(equation, false)"]}, + + {"kind":"expect","text":"Calling the function will yield the expected result.","code":["if ( index!=null ) assert f.derive( inputs, index ) == expected","else assert f.call( inputs )==expected"]}, + + {"kind":"where","text":"The following parameters are used :","code":{"equation":["\"6/2*(1+2)\"","\"sumJs(Ij)\"","\"prod(Ij)\"","\"prod(prod(Ij))\"","\"I3/i[1]-I0+2+i2\"","\"i3*i1/(i4-i0-2)-sig(0)+tanh(0)\"","\"(i0*i1)*i2\"","\"softplus(i0*i1)*i2\"","\"prod(ij)\"","\"relu(prod(ij))\"","\"relu(prod(ij))\"","\"quad(prod(ij)+6)\"","\"quad(prod(ij)+6)\"","\"quad(abs(prod(ij))-6)\"","\"quad(abs(prod(ij))-6)\"","\"sumJs(ij)\"","\"sumJs(ij)\"","\"sumJs(ij**1)\"","\"sumJs(ij**1)\"","\"I[1]**2\"","\"I[1]**2\"","\"sumJs(ij**2)\"","\"sumJs(ij**2)\"","\"2**I[1]\"","\"2**I[0]\"","\"2**I[2]\"","\"2**I[1]\"","\"sumJs(2**I[j])\"","\"sumJs(2**I[j])\"","\"I[1]%2\"","\"I[1]%2\"","\"I[2]%2\"","\"I[2]%2\"","\"I[2]%2\"","\"7%I[1]\"","\"7%I[1]\"","\"sum(7%ij)\""],"inputs":["[]","[2, 3.2, 6]","[0.5, 0.5, 100]","[0.5, 0.5, 10]","[5, 4, 3, 12]","[-4, -2, 6, -3, -8]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -5]","[2, 3, -5]","[2, 3, -5]","[2, 4, -5]","[2, 4, -5]","[2, 3, 4]"],"index":["null","null","null","null","null","null","0","0","1","null","1","null","1","null","1","null","1","null","1","null","1","null","1","null","null","null","1","null","1","null","1","null","2","1","null","1","2"],"expected":["9","11.2","25","(2.5 * 2.5 * 2.5)","3","-1.5","-6","-5.985164261060192","-4","-0.12","-0.04","36","-12*-4","36","-12*-4","3","1","3","1","9","6","17","6","8","4","0.25","5.545177444479562","12.25","5.545177444479562","1","1","-1","1","0","3","0 \n\"sum(7%i0)\" [2, 3, 4] null 3\n\"sum(7%ij)\" [2, 3, 4] null 5\n\"sum(7%i0)\" [2, 3, 4] 2 0","0 \n\"sum(i0%3)\" [2, 3, 4] 0 1\n\"sum(ij%3)\" [2, 3, 4] 2 1\n\"sum(ij-3)\" [1, 2, 4] null -2\n\"sum(ij-3)\" [1, 2, 4] 1 1\n\"sum(3-ij)\" [1, 2, 4] null 2\n\"sum(3-ij)\" [1, 2, 4] 1 -1\n\"sum(3-ij-ij)\" [1, 2, 4] 1 -2"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Test scalar results of various Function instances. [13]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A new Function instance which is detached! 
(no autograd support)","code":["Function f = Function.of(equation, false)"]}, + + {"kind":"expect","text":"Calling the function will yield the expected result.","code":["if ( index!=null ) assert f.derive( inputs, index ) == expected","else assert f.call( inputs )==expected"]}, + + {"kind":"where","text":"The following parameters are used :","code":{"equation":["\"6/2*(1+2)\"","\"sumJs(Ij)\"","\"prod(Ij)\"","\"prod(prod(Ij))\"","\"I3/i[1]-I0+2+i2\"","\"i3*i1/(i4-i0-2)-sig(0)+tanh(0)\"","\"(i0*i1)*i2\"","\"softplus(i0*i1)*i2\"","\"prod(ij)\"","\"relu(prod(ij))\"","\"relu(prod(ij))\"","\"quad(prod(ij)+6)\"","\"quad(prod(ij)+6)\"","\"quad(abs(prod(ij))-6)\"","\"quad(abs(prod(ij))-6)\"","\"sumJs(ij)\"","\"sumJs(ij)\"","\"sumJs(ij**1)\"","\"sumJs(ij**1)\"","\"I[1]**2\"","\"I[1]**2\"","\"sumJs(ij**2)\"","\"sumJs(ij**2)\"","\"2**I[1]\"","\"2**I[0]\"","\"2**I[2]\"","\"2**I[1]\"","\"sumJs(2**I[j])\"","\"sumJs(2**I[j])\"","\"I[1]%2\"","\"I[1]%2\"","\"I[2]%2\"","\"I[2]%2\"","\"I[2]%2\"","\"7%I[1]\"","\"7%I[1]\"","\"sum(7%ij)\""],"inputs":["[]","[2, 3.2, 6]","[0.5, 0.5, 100]","[0.5, 0.5, 10]","[5, 4, 3, 12]","[-4, -2, 6, -3, -8]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -5]","[2, 3, -5]","[2, 3, -5]","[2, 4, -5]","[2, 4, -5]","[2, 3, 4]"],"index":["null","null","null","null","null","null","0","0","1","null","1","null","1","null","1","null","1","null","1","null","1","null","1","null","null","null","1","null","1","null","1","null","2","1","null","1","2"],"expected":["9","11.2","25","(2.5 * 2.5 * 2.5)","3","-1.5","-6","-5.985164261060192","-4","-0.12","-0.04","36","-12*-4","36","-12*-4","3","1","3","1","9","6","17","6","8","4","0.25","5.545177444479562","12.25","5.545177444479562","1","1","-1","1","0","3","0 \n\"sum(7%i0)\" [2, 3, 4] null 3\n\"sum(7%ij)\" [2, 3, 4] null 5\n\"sum(7%i0)\" [2, 3, 4] 2 0","0 \n\"sum(i0%3)\" [2, 3, 4] 0 1\n\"sum(ij%3)\" [2, 3, 4] 2 1\n\"sum(ij-3)\" [1, 2, 4] null -2\n\"sum(ij-3)\" [1, 2, 4] 1 1\n\"sum(3-ij)\" [1, 2, 4] null 2\n\"sum(3-ij)\" [1, 2, 4] 1 -1\n\"sum(3-ij-ij)\" [1, 2, 4] 1 -2"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Test scalar results of various Function instances. [14]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A new Function instance which is detached! 
(no autograd support)","code":["Function f = Function.of(equation, false)"]}, + + {"kind":"expect","text":"Calling the function will yield the expected result.","code":["if ( index!=null ) assert f.derive( inputs, index ) == expected","else assert f.call( inputs )==expected"]}, + + {"kind":"where","text":"The following parameters are used :","code":{"equation":["\"6/2*(1+2)\"","\"sumJs(Ij)\"","\"prod(Ij)\"","\"prod(prod(Ij))\"","\"I3/i[1]-I0+2+i2\"","\"i3*i1/(i4-i0-2)-sig(0)+tanh(0)\"","\"(i0*i1)*i2\"","\"softplus(i0*i1)*i2\"","\"prod(ij)\"","\"relu(prod(ij))\"","\"relu(prod(ij))\"","\"quad(prod(ij)+6)\"","\"quad(prod(ij)+6)\"","\"quad(abs(prod(ij))-6)\"","\"quad(abs(prod(ij))-6)\"","\"sumJs(ij)\"","\"sumJs(ij)\"","\"sumJs(ij**1)\"","\"sumJs(ij**1)\"","\"I[1]**2\"","\"I[1]**2\"","\"sumJs(ij**2)\"","\"sumJs(ij**2)\"","\"2**I[1]\"","\"2**I[0]\"","\"2**I[2]\"","\"2**I[1]\"","\"sumJs(2**I[j])\"","\"sumJs(2**I[j])\"","\"I[1]%2\"","\"I[1]%2\"","\"I[2]%2\"","\"I[2]%2\"","\"I[2]%2\"","\"7%I[1]\"","\"7%I[1]\"","\"sum(7%ij)\""],"inputs":["[]","[2, 3.2, 6]","[0.5, 0.5, 100]","[0.5, 0.5, 10]","[5, 4, 3, 12]","[-4, -2, 6, -3, -8]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -5]","[2, 3, -5]","[2, 3, -5]","[2, 4, -5]","[2, 4, -5]","[2, 3, 4]"],"index":["null","null","null","null","null","null","0","0","1","null","1","null","1","null","1","null","1","null","1","null","1","null","1","null","null","null","1","null","1","null","1","null","2","1","null","1","2"],"expected":["9","11.2","25","(2.5 * 2.5 * 2.5)","3","-1.5","-6","-5.985164261060192","-4","-0.12","-0.04","36","-12*-4","36","-12*-4","3","1","3","1","9","6","17","6","8","4","0.25","5.545177444479562","12.25","5.545177444479562","1","1","-1","1","0","3","0 \n\"sum(7%i0)\" [2, 3, 4] null 3\n\"sum(7%ij)\" [2, 3, 4] null 5\n\"sum(7%i0)\" [2, 3, 4] 2 0","0 \n\"sum(i0%3)\" [2, 3, 4] 0 1\n\"sum(ij%3)\" [2, 3, 4] 2 1\n\"sum(ij-3)\" [1, 2, 4] null -2\n\"sum(ij-3)\" [1, 2, 4] 1 1\n\"sum(3-ij)\" [1, 2, 4] null 2\n\"sum(3-ij)\" [1, 2, 4] 1 -1\n\"sum(3-ij-ij)\" [1, 2, 4] 1 -2"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Test scalar results of various Function instances. [15]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A new Function instance which is detached! 
(no autograd support)","code":["Function f = Function.of(equation, false)"]}, + + {"kind":"expect","text":"Calling the function will yield the expected result.","code":["if ( index!=null ) assert f.derive( inputs, index ) == expected","else assert f.call( inputs )==expected"]}, + + {"kind":"where","text":"The following parameters are used :","code":{"equation":["\"6/2*(1+2)\"","\"sumJs(Ij)\"","\"prod(Ij)\"","\"prod(prod(Ij))\"","\"I3/i[1]-I0+2+i2\"","\"i3*i1/(i4-i0-2)-sig(0)+tanh(0)\"","\"(i0*i1)*i2\"","\"softplus(i0*i1)*i2\"","\"prod(ij)\"","\"relu(prod(ij))\"","\"relu(prod(ij))\"","\"quad(prod(ij)+6)\"","\"quad(prod(ij)+6)\"","\"quad(abs(prod(ij))-6)\"","\"quad(abs(prod(ij))-6)\"","\"sumJs(ij)\"","\"sumJs(ij)\"","\"sumJs(ij**1)\"","\"sumJs(ij**1)\"","\"I[1]**2\"","\"I[1]**2\"","\"sumJs(ij**2)\"","\"sumJs(ij**2)\"","\"2**I[1]\"","\"2**I[0]\"","\"2**I[2]\"","\"2**I[1]\"","\"sumJs(2**I[j])\"","\"sumJs(2**I[j])\"","\"I[1]%2\"","\"I[1]%2\"","\"I[2]%2\"","\"I[2]%2\"","\"I[2]%2\"","\"7%I[1]\"","\"7%I[1]\"","\"sum(7%ij)\""],"inputs":["[]","[2, 3.2, 6]","[0.5, 0.5, 100]","[0.5, 0.5, 10]","[5, 4, 3, 12]","[-4, -2, 6, -3, -8]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -5]","[2, 3, -5]","[2, 3, -5]","[2, 4, -5]","[2, 4, -5]","[2, 3, 4]"],"index":["null","null","null","null","null","null","0","0","1","null","1","null","1","null","1","null","1","null","1","null","1","null","1","null","null","null","1","null","1","null","1","null","2","1","null","1","2"],"expected":["9","11.2","25","(2.5 * 2.5 * 2.5)","3","-1.5","-6","-5.985164261060192","-4","-0.12","-0.04","36","-12*-4","36","-12*-4","3","1","3","1","9","6","17","6","8","4","0.25","5.545177444479562","12.25","5.545177444479562","1","1","-1","1","0","3","0 \n\"sum(7%i0)\" [2, 3, 4] null 3\n\"sum(7%ij)\" [2, 3, 4] null 5\n\"sum(7%i0)\" [2, 3, 4] 2 0","0 \n\"sum(i0%3)\" [2, 3, 4] 0 1\n\"sum(ij%3)\" [2, 3, 4] 2 1\n\"sum(ij-3)\" [1, 2, 4] null -2\n\"sum(ij-3)\" [1, 2, 4] 1 1\n\"sum(3-ij)\" [1, 2, 4] null 2\n\"sum(3-ij)\" [1, 2, 4] 1 -1\n\"sum(3-ij-ij)\" [1, 2, 4] 1 -2"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Test scalar results of various Function instances. [16]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A new Function instance which is detached! 
(no autograd support)","code":["Function f = Function.of(equation, false)"]}, + + {"kind":"expect","text":"Calling the function will yield the expected result.","code":["if ( index!=null ) assert f.derive( inputs, index ) == expected","else assert f.call( inputs )==expected"]}, + + {"kind":"where","text":"The following parameters are used :","code":{"equation":["\"6/2*(1+2)\"","\"sumJs(Ij)\"","\"prod(Ij)\"","\"prod(prod(Ij))\"","\"I3/i[1]-I0+2+i2\"","\"i3*i1/(i4-i0-2)-sig(0)+tanh(0)\"","\"(i0*i1)*i2\"","\"softplus(i0*i1)*i2\"","\"prod(ij)\"","\"relu(prod(ij))\"","\"relu(prod(ij))\"","\"quad(prod(ij)+6)\"","\"quad(prod(ij)+6)\"","\"quad(abs(prod(ij))-6)\"","\"quad(abs(prod(ij))-6)\"","\"sumJs(ij)\"","\"sumJs(ij)\"","\"sumJs(ij**1)\"","\"sumJs(ij**1)\"","\"I[1]**2\"","\"I[1]**2\"","\"sumJs(ij**2)\"","\"sumJs(ij**2)\"","\"2**I[1]\"","\"2**I[0]\"","\"2**I[2]\"","\"2**I[1]\"","\"sumJs(2**I[j])\"","\"sumJs(2**I[j])\"","\"I[1]%2\"","\"I[1]%2\"","\"I[2]%2\"","\"I[2]%2\"","\"I[2]%2\"","\"7%I[1]\"","\"7%I[1]\"","\"sum(7%ij)\""],"inputs":["[]","[2, 3.2, 6]","[0.5, 0.5, 100]","[0.5, 0.5, 10]","[5, 4, 3, 12]","[-4, -2, 6, -3, -8]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -5]","[2, 3, -5]","[2, 3, -5]","[2, 4, -5]","[2, 4, -5]","[2, 3, 4]"],"index":["null","null","null","null","null","null","0","0","1","null","1","null","1","null","1","null","1","null","1","null","1","null","1","null","null","null","1","null","1","null","1","null","2","1","null","1","2"],"expected":["9","11.2","25","(2.5 * 2.5 * 2.5)","3","-1.5","-6","-5.985164261060192","-4","-0.12","-0.04","36","-12*-4","36","-12*-4","3","1","3","1","9","6","17","6","8","4","0.25","5.545177444479562","12.25","5.545177444479562","1","1","-1","1","0","3","0 \n\"sum(7%i0)\" [2, 3, 4] null 3\n\"sum(7%ij)\" [2, 3, 4] null 5\n\"sum(7%i0)\" [2, 3, 4] 2 0","0 \n\"sum(i0%3)\" [2, 3, 4] 0 1\n\"sum(ij%3)\" [2, 3, 4] 2 1\n\"sum(ij-3)\" [1, 2, 4] null -2\n\"sum(ij-3)\" [1, 2, 4] 1 1\n\"sum(3-ij)\" [1, 2, 4] null 2\n\"sum(3-ij)\" [1, 2, 4] 1 -1\n\"sum(3-ij-ij)\" [1, 2, 4] 1 -2"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Test scalar results of various Function instances. [17]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A new Function instance which is detached! 
(no autograd support)","code":["Function f = Function.of(equation, false)"]}, + + {"kind":"expect","text":"Calling the function will yield the expected result.","code":["if ( index!=null ) assert f.derive( inputs, index ) == expected","else assert f.call( inputs )==expected"]}, + + {"kind":"where","text":"The following parameters are used :","code":{"equation":["\"6/2*(1+2)\"","\"sumJs(Ij)\"","\"prod(Ij)\"","\"prod(prod(Ij))\"","\"I3/i[1]-I0+2+i2\"","\"i3*i1/(i4-i0-2)-sig(0)+tanh(0)\"","\"(i0*i1)*i2\"","\"softplus(i0*i1)*i2\"","\"prod(ij)\"","\"relu(prod(ij))\"","\"relu(prod(ij))\"","\"quad(prod(ij)+6)\"","\"quad(prod(ij)+6)\"","\"quad(abs(prod(ij))-6)\"","\"quad(abs(prod(ij))-6)\"","\"sumJs(ij)\"","\"sumJs(ij)\"","\"sumJs(ij**1)\"","\"sumJs(ij**1)\"","\"I[1]**2\"","\"I[1]**2\"","\"sumJs(ij**2)\"","\"sumJs(ij**2)\"","\"2**I[1]\"","\"2**I[0]\"","\"2**I[2]\"","\"2**I[1]\"","\"sumJs(2**I[j])\"","\"sumJs(2**I[j])\"","\"I[1]%2\"","\"I[1]%2\"","\"I[2]%2\"","\"I[2]%2\"","\"I[2]%2\"","\"7%I[1]\"","\"7%I[1]\"","\"sum(7%ij)\""],"inputs":["[]","[2, 3.2, 6]","[0.5, 0.5, 100]","[0.5, 0.5, 10]","[5, 4, 3, 12]","[-4, -2, 6, -3, -8]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -5]","[2, 3, -5]","[2, 3, -5]","[2, 4, -5]","[2, 4, -5]","[2, 3, 4]"],"index":["null","null","null","null","null","null","0","0","1","null","1","null","1","null","1","null","1","null","1","null","1","null","1","null","null","null","1","null","1","null","1","null","2","1","null","1","2"],"expected":["9","11.2","25","(2.5 * 2.5 * 2.5)","3","-1.5","-6","-5.985164261060192","-4","-0.12","-0.04","36","-12*-4","36","-12*-4","3","1","3","1","9","6","17","6","8","4","0.25","5.545177444479562","12.25","5.545177444479562","1","1","-1","1","0","3","0 \n\"sum(7%i0)\" [2, 3, 4] null 3\n\"sum(7%ij)\" [2, 3, 4] null 5\n\"sum(7%i0)\" [2, 3, 4] 2 0","0 \n\"sum(i0%3)\" [2, 3, 4] 0 1\n\"sum(ij%3)\" [2, 3, 4] 2 1\n\"sum(ij-3)\" [1, 2, 4] null -2\n\"sum(ij-3)\" [1, 2, 4] 1 1\n\"sum(3-ij)\" [1, 2, 4] null 2\n\"sum(3-ij)\" [1, 2, 4] 1 -1\n\"sum(3-ij-ij)\" [1, 2, 4] 1 -2"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Test scalar results of various Function instances. [18]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A new Function instance which is detached! 
(no autograd support)","code":["Function f = Function.of(equation, false)"]}, + + {"kind":"expect","text":"Calling the function will yield the expected result.","code":["if ( index!=null ) assert f.derive( inputs, index ) == expected","else assert f.call( inputs )==expected"]}, + + {"kind":"where","text":"The following parameters are used :","code":{"equation":["\"6/2*(1+2)\"","\"sumJs(Ij)\"","\"prod(Ij)\"","\"prod(prod(Ij))\"","\"I3/i[1]-I0+2+i2\"","\"i3*i1/(i4-i0-2)-sig(0)+tanh(0)\"","\"(i0*i1)*i2\"","\"softplus(i0*i1)*i2\"","\"prod(ij)\"","\"relu(prod(ij))\"","\"relu(prod(ij))\"","\"quad(prod(ij)+6)\"","\"quad(prod(ij)+6)\"","\"quad(abs(prod(ij))-6)\"","\"quad(abs(prod(ij))-6)\"","\"sumJs(ij)\"","\"sumJs(ij)\"","\"sumJs(ij**1)\"","\"sumJs(ij**1)\"","\"I[1]**2\"","\"I[1]**2\"","\"sumJs(ij**2)\"","\"sumJs(ij**2)\"","\"2**I[1]\"","\"2**I[0]\"","\"2**I[2]\"","\"2**I[1]\"","\"sumJs(2**I[j])\"","\"sumJs(2**I[j])\"","\"I[1]%2\"","\"I[1]%2\"","\"I[2]%2\"","\"I[2]%2\"","\"I[2]%2\"","\"7%I[1]\"","\"7%I[1]\"","\"sum(7%ij)\""],"inputs":["[]","[2, 3.2, 6]","[0.5, 0.5, 100]","[0.5, 0.5, 10]","[5, 4, 3, 12]","[-4, -2, 6, -3, -8]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -5]","[2, 3, -5]","[2, 3, -5]","[2, 4, -5]","[2, 4, -5]","[2, 3, 4]"],"index":["null","null","null","null","null","null","0","0","1","null","1","null","1","null","1","null","1","null","1","null","1","null","1","null","null","null","1","null","1","null","1","null","2","1","null","1","2"],"expected":["9","11.2","25","(2.5 * 2.5 * 2.5)","3","-1.5","-6","-5.985164261060192","-4","-0.12","-0.04","36","-12*-4","36","-12*-4","3","1","3","1","9","6","17","6","8","4","0.25","5.545177444479562","12.25","5.545177444479562","1","1","-1","1","0","3","0 \n\"sum(7%i0)\" [2, 3, 4] null 3\n\"sum(7%ij)\" [2, 3, 4] null 5\n\"sum(7%i0)\" [2, 3, 4] 2 0","0 \n\"sum(i0%3)\" [2, 3, 4] 0 1\n\"sum(ij%3)\" [2, 3, 4] 2 1\n\"sum(ij-3)\" [1, 2, 4] null -2\n\"sum(ij-3)\" [1, 2, 4] 1 1\n\"sum(3-ij)\" [1, 2, 4] null 2\n\"sum(3-ij)\" [1, 2, 4] 1 -1\n\"sum(3-ij-ij)\" [1, 2, 4] 1 -2"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Test scalar results of various Function instances. [19]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A new Function instance which is detached! 
(no autograd support)","code":["Function f = Function.of(equation, false)"]}, + + {"kind":"expect","text":"Calling the function will yield the expected result.","code":["if ( index!=null ) assert f.derive( inputs, index ) == expected","else assert f.call( inputs )==expected"]}, + + {"kind":"where","text":"The following parameters are used :","code":{"equation":["\"6/2*(1+2)\"","\"sumJs(Ij)\"","\"prod(Ij)\"","\"prod(prod(Ij))\"","\"I3/i[1]-I0+2+i2\"","\"i3*i1/(i4-i0-2)-sig(0)+tanh(0)\"","\"(i0*i1)*i2\"","\"softplus(i0*i1)*i2\"","\"prod(ij)\"","\"relu(prod(ij))\"","\"relu(prod(ij))\"","\"quad(prod(ij)+6)\"","\"quad(prod(ij)+6)\"","\"quad(abs(prod(ij))-6)\"","\"quad(abs(prod(ij))-6)\"","\"sumJs(ij)\"","\"sumJs(ij)\"","\"sumJs(ij**1)\"","\"sumJs(ij**1)\"","\"I[1]**2\"","\"I[1]**2\"","\"sumJs(ij**2)\"","\"sumJs(ij**2)\"","\"2**I[1]\"","\"2**I[0]\"","\"2**I[2]\"","\"2**I[1]\"","\"sumJs(2**I[j])\"","\"sumJs(2**I[j])\"","\"I[1]%2\"","\"I[1]%2\"","\"I[2]%2\"","\"I[2]%2\"","\"I[2]%2\"","\"7%I[1]\"","\"7%I[1]\"","\"sum(7%ij)\""],"inputs":["[]","[2, 3.2, 6]","[0.5, 0.5, 100]","[0.5, 0.5, 10]","[5, 4, 3, 12]","[-4, -2, 6, -3, -8]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -5]","[2, 3, -5]","[2, 3, -5]","[2, 4, -5]","[2, 4, -5]","[2, 3, 4]"],"index":["null","null","null","null","null","null","0","0","1","null","1","null","1","null","1","null","1","null","1","null","1","null","1","null","null","null","1","null","1","null","1","null","2","1","null","1","2"],"expected":["9","11.2","25","(2.5 * 2.5 * 2.5)","3","-1.5","-6","-5.985164261060192","-4","-0.12","-0.04","36","-12*-4","36","-12*-4","3","1","3","1","9","6","17","6","8","4","0.25","5.545177444479562","12.25","5.545177444479562","1","1","-1","1","0","3","0 \n\"sum(7%i0)\" [2, 3, 4] null 3\n\"sum(7%ij)\" [2, 3, 4] null 5\n\"sum(7%i0)\" [2, 3, 4] 2 0","0 \n\"sum(i0%3)\" [2, 3, 4] 0 1\n\"sum(ij%3)\" [2, 3, 4] 2 1\n\"sum(ij-3)\" [1, 2, 4] null -2\n\"sum(ij-3)\" [1, 2, 4] 1 1\n\"sum(3-ij)\" [1, 2, 4] null 2\n\"sum(3-ij)\" [1, 2, 4] 1 -1\n\"sum(3-ij-ij)\" [1, 2, 4] 1 -2"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Test scalar results of various Function instances. [20]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A new Function instance which is detached! 
(no autograd support)","code":["Function f = Function.of(equation, false)"]}, + + {"kind":"expect","text":"Calling the function will yield the expected result.","code":["if ( index!=null ) assert f.derive( inputs, index ) == expected","else assert f.call( inputs )==expected"]}, + + {"kind":"where","text":"The following parameters are used :","code":{"equation":["\"6/2*(1+2)\"","\"sumJs(Ij)\"","\"prod(Ij)\"","\"prod(prod(Ij))\"","\"I3/i[1]-I0+2+i2\"","\"i3*i1/(i4-i0-2)-sig(0)+tanh(0)\"","\"(i0*i1)*i2\"","\"softplus(i0*i1)*i2\"","\"prod(ij)\"","\"relu(prod(ij))\"","\"relu(prod(ij))\"","\"quad(prod(ij)+6)\"","\"quad(prod(ij)+6)\"","\"quad(abs(prod(ij))-6)\"","\"quad(abs(prod(ij))-6)\"","\"sumJs(ij)\"","\"sumJs(ij)\"","\"sumJs(ij**1)\"","\"sumJs(ij**1)\"","\"I[1]**2\"","\"I[1]**2\"","\"sumJs(ij**2)\"","\"sumJs(ij**2)\"","\"2**I[1]\"","\"2**I[0]\"","\"2**I[2]\"","\"2**I[1]\"","\"sumJs(2**I[j])\"","\"sumJs(2**I[j])\"","\"I[1]%2\"","\"I[1]%2\"","\"I[2]%2\"","\"I[2]%2\"","\"I[2]%2\"","\"7%I[1]\"","\"7%I[1]\"","\"sum(7%ij)\""],"inputs":["[]","[2, 3.2, 6]","[0.5, 0.5, 100]","[0.5, 0.5, 10]","[5, 4, 3, 12]","[-4, -2, 6, -3, -8]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -5]","[2, 3, -5]","[2, 3, -5]","[2, 4, -5]","[2, 4, -5]","[2, 3, 4]"],"index":["null","null","null","null","null","null","0","0","1","null","1","null","1","null","1","null","1","null","1","null","1","null","1","null","null","null","1","null","1","null","1","null","2","1","null","1","2"],"expected":["9","11.2","25","(2.5 * 2.5 * 2.5)","3","-1.5","-6","-5.985164261060192","-4","-0.12","-0.04","36","-12*-4","36","-12*-4","3","1","3","1","9","6","17","6","8","4","0.25","5.545177444479562","12.25","5.545177444479562","1","1","-1","1","0","3","0 \n\"sum(7%i0)\" [2, 3, 4] null 3\n\"sum(7%ij)\" [2, 3, 4] null 5\n\"sum(7%i0)\" [2, 3, 4] 2 0","0 \n\"sum(i0%3)\" [2, 3, 4] 0 1\n\"sum(ij%3)\" [2, 3, 4] 2 1\n\"sum(ij-3)\" [1, 2, 4] null -2\n\"sum(ij-3)\" [1, 2, 4] 1 1\n\"sum(3-ij)\" [1, 2, 4] null 2\n\"sum(3-ij)\" [1, 2, 4] 1 -1\n\"sum(3-ij-ij)\" [1, 2, 4] 1 -2"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Test scalar results of various Function instances. [21]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A new Function instance which is detached! 
(no autograd support)","code":["Function f = Function.of(equation, false)"]}, + + {"kind":"expect","text":"Calling the function will yield the expected result.","code":["if ( index!=null ) assert f.derive( inputs, index ) == expected","else assert f.call( inputs )==expected"]}, + + {"kind":"where","text":"The following parameters are used :","code":{"equation":["\"6/2*(1+2)\"","\"sumJs(Ij)\"","\"prod(Ij)\"","\"prod(prod(Ij))\"","\"I3/i[1]-I0+2+i2\"","\"i3*i1/(i4-i0-2)-sig(0)+tanh(0)\"","\"(i0*i1)*i2\"","\"softplus(i0*i1)*i2\"","\"prod(ij)\"","\"relu(prod(ij))\"","\"relu(prod(ij))\"","\"quad(prod(ij)+6)\"","\"quad(prod(ij)+6)\"","\"quad(abs(prod(ij))-6)\"","\"quad(abs(prod(ij))-6)\"","\"sumJs(ij)\"","\"sumJs(ij)\"","\"sumJs(ij**1)\"","\"sumJs(ij**1)\"","\"I[1]**2\"","\"I[1]**2\"","\"sumJs(ij**2)\"","\"sumJs(ij**2)\"","\"2**I[1]\"","\"2**I[0]\"","\"2**I[2]\"","\"2**I[1]\"","\"sumJs(2**I[j])\"","\"sumJs(2**I[j])\"","\"I[1]%2\"","\"I[1]%2\"","\"I[2]%2\"","\"I[2]%2\"","\"I[2]%2\"","\"7%I[1]\"","\"7%I[1]\"","\"sum(7%ij)\""],"inputs":["[]","[2, 3.2, 6]","[0.5, 0.5, 100]","[0.5, 0.5, 10]","[5, 4, 3, 12]","[-4, -2, 6, -3, -8]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -5]","[2, 3, -5]","[2, 3, -5]","[2, 4, -5]","[2, 4, -5]","[2, 3, 4]"],"index":["null","null","null","null","null","null","0","0","1","null","1","null","1","null","1","null","1","null","1","null","1","null","1","null","null","null","1","null","1","null","1","null","2","1","null","1","2"],"expected":["9","11.2","25","(2.5 * 2.5 * 2.5)","3","-1.5","-6","-5.985164261060192","-4","-0.12","-0.04","36","-12*-4","36","-12*-4","3","1","3","1","9","6","17","6","8","4","0.25","5.545177444479562","12.25","5.545177444479562","1","1","-1","1","0","3","0 \n\"sum(7%i0)\" [2, 3, 4] null 3\n\"sum(7%ij)\" [2, 3, 4] null 5\n\"sum(7%i0)\" [2, 3, 4] 2 0","0 \n\"sum(i0%3)\" [2, 3, 4] 0 1\n\"sum(ij%3)\" [2, 3, 4] 2 1\n\"sum(ij-3)\" [1, 2, 4] null -2\n\"sum(ij-3)\" [1, 2, 4] 1 1\n\"sum(3-ij)\" [1, 2, 4] null 2\n\"sum(3-ij)\" [1, 2, 4] 1 -1\n\"sum(3-ij-ij)\" [1, 2, 4] 1 -2"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Test scalar results of various Function instances. [22]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A new Function instance which is detached! 
(no autograd support)","code":["Function f = Function.of(equation, false)"]}, + + {"kind":"expect","text":"Calling the function will yield the expected result.","code":["if ( index!=null ) assert f.derive( inputs, index ) == expected","else assert f.call( inputs )==expected"]}, + + {"kind":"where","text":"The following parameters are used :","code":{"equation":["\"6/2*(1+2)\"","\"sumJs(Ij)\"","\"prod(Ij)\"","\"prod(prod(Ij))\"","\"I3/i[1]-I0+2+i2\"","\"i3*i1/(i4-i0-2)-sig(0)+tanh(0)\"","\"(i0*i1)*i2\"","\"softplus(i0*i1)*i2\"","\"prod(ij)\"","\"relu(prod(ij))\"","\"relu(prod(ij))\"","\"quad(prod(ij)+6)\"","\"quad(prod(ij)+6)\"","\"quad(abs(prod(ij))-6)\"","\"quad(abs(prod(ij))-6)\"","\"sumJs(ij)\"","\"sumJs(ij)\"","\"sumJs(ij**1)\"","\"sumJs(ij**1)\"","\"I[1]**2\"","\"I[1]**2\"","\"sumJs(ij**2)\"","\"sumJs(ij**2)\"","\"2**I[1]\"","\"2**I[0]\"","\"2**I[2]\"","\"2**I[1]\"","\"sumJs(2**I[j])\"","\"sumJs(2**I[j])\"","\"I[1]%2\"","\"I[1]%2\"","\"I[2]%2\"","\"I[2]%2\"","\"I[2]%2\"","\"7%I[1]\"","\"7%I[1]\"","\"sum(7%ij)\""],"inputs":["[]","[2, 3.2, 6]","[0.5, 0.5, 100]","[0.5, 0.5, 10]","[5, 4, 3, 12]","[-4, -2, 6, -3, -8]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -5]","[2, 3, -5]","[2, 3, -5]","[2, 4, -5]","[2, 4, -5]","[2, 3, 4]"],"index":["null","null","null","null","null","null","0","0","1","null","1","null","1","null","1","null","1","null","1","null","1","null","1","null","null","null","1","null","1","null","1","null","2","1","null","1","2"],"expected":["9","11.2","25","(2.5 * 2.5 * 2.5)","3","-1.5","-6","-5.985164261060192","-4","-0.12","-0.04","36","-12*-4","36","-12*-4","3","1","3","1","9","6","17","6","8","4","0.25","5.545177444479562","12.25","5.545177444479562","1","1","-1","1","0","3","0 \n\"sum(7%i0)\" [2, 3, 4] null 3\n\"sum(7%ij)\" [2, 3, 4] null 5\n\"sum(7%i0)\" [2, 3, 4] 2 0","0 \n\"sum(i0%3)\" [2, 3, 4] 0 1\n\"sum(ij%3)\" [2, 3, 4] 2 1\n\"sum(ij-3)\" [1, 2, 4] null -2\n\"sum(ij-3)\" [1, 2, 4] 1 1\n\"sum(3-ij)\" [1, 2, 4] null 2\n\"sum(3-ij)\" [1, 2, 4] 1 -1\n\"sum(3-ij-ij)\" [1, 2, 4] 1 -2"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Test scalar results of various Function instances. [23]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A new Function instance which is detached! 
(no autograd support)","code":["Function f = Function.of(equation, false)"]}, + + {"kind":"expect","text":"Calling the function will yield the expected result.","code":["if ( index!=null ) assert f.derive( inputs, index ) == expected","else assert f.call( inputs )==expected"]}, + + {"kind":"where","text":"The following parameters are used :","code":{"equation":["\"6/2*(1+2)\"","\"sumJs(Ij)\"","\"prod(Ij)\"","\"prod(prod(Ij))\"","\"I3/i[1]-I0+2+i2\"","\"i3*i1/(i4-i0-2)-sig(0)+tanh(0)\"","\"(i0*i1)*i2\"","\"softplus(i0*i1)*i2\"","\"prod(ij)\"","\"relu(prod(ij))\"","\"relu(prod(ij))\"","\"quad(prod(ij)+6)\"","\"quad(prod(ij)+6)\"","\"quad(abs(prod(ij))-6)\"","\"quad(abs(prod(ij))-6)\"","\"sumJs(ij)\"","\"sumJs(ij)\"","\"sumJs(ij**1)\"","\"sumJs(ij**1)\"","\"I[1]**2\"","\"I[1]**2\"","\"sumJs(ij**2)\"","\"sumJs(ij**2)\"","\"2**I[1]\"","\"2**I[0]\"","\"2**I[2]\"","\"2**I[1]\"","\"sumJs(2**I[j])\"","\"sumJs(2**I[j])\"","\"I[1]%2\"","\"I[1]%2\"","\"I[2]%2\"","\"I[2]%2\"","\"I[2]%2\"","\"7%I[1]\"","\"7%I[1]\"","\"sum(7%ij)\""],"inputs":["[]","[2, 3.2, 6]","[0.5, 0.5, 100]","[0.5, 0.5, 10]","[5, 4, 3, 12]","[-4, -2, 6, -3, -8]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -5]","[2, 3, -5]","[2, 3, -5]","[2, 4, -5]","[2, 4, -5]","[2, 3, 4]"],"index":["null","null","null","null","null","null","0","0","1","null","1","null","1","null","1","null","1","null","1","null","1","null","1","null","null","null","1","null","1","null","1","null","2","1","null","1","2"],"expected":["9","11.2","25","(2.5 * 2.5 * 2.5)","3","-1.5","-6","-5.985164261060192","-4","-0.12","-0.04","36","-12*-4","36","-12*-4","3","1","3","1","9","6","17","6","8","4","0.25","5.545177444479562","12.25","5.545177444479562","1","1","-1","1","0","3","0 \n\"sum(7%i0)\" [2, 3, 4] null 3\n\"sum(7%ij)\" [2, 3, 4] null 5\n\"sum(7%i0)\" [2, 3, 4] 2 0","0 \n\"sum(i0%3)\" [2, 3, 4] 0 1\n\"sum(ij%3)\" [2, 3, 4] 2 1\n\"sum(ij-3)\" [1, 2, 4] null -2\n\"sum(ij-3)\" [1, 2, 4] 1 1\n\"sum(3-ij)\" [1, 2, 4] null 2\n\"sum(3-ij)\" [1, 2, 4] 1 -1\n\"sum(3-ij-ij)\" [1, 2, 4] 1 -2"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Test scalar results of various Function instances. [24]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A new Function instance which is detached! 
(no autograd support)","code":["Function f = Function.of(equation, false)"]}, + + {"kind":"expect","text":"Calling the function will yield the expected result.","code":["if ( index!=null ) assert f.derive( inputs, index ) == expected","else assert f.call( inputs )==expected"]}, + + {"kind":"where","text":"The following parameters are used :","code":{"equation":["\"6/2*(1+2)\"","\"sumJs(Ij)\"","\"prod(Ij)\"","\"prod(prod(Ij))\"","\"I3/i[1]-I0+2+i2\"","\"i3*i1/(i4-i0-2)-sig(0)+tanh(0)\"","\"(i0*i1)*i2\"","\"softplus(i0*i1)*i2\"","\"prod(ij)\"","\"relu(prod(ij))\"","\"relu(prod(ij))\"","\"quad(prod(ij)+6)\"","\"quad(prod(ij)+6)\"","\"quad(abs(prod(ij))-6)\"","\"quad(abs(prod(ij))-6)\"","\"sumJs(ij)\"","\"sumJs(ij)\"","\"sumJs(ij**1)\"","\"sumJs(ij**1)\"","\"I[1]**2\"","\"I[1]**2\"","\"sumJs(ij**2)\"","\"sumJs(ij**2)\"","\"2**I[1]\"","\"2**I[0]\"","\"2**I[2]\"","\"2**I[1]\"","\"sumJs(2**I[j])\"","\"sumJs(2**I[j])\"","\"I[1]%2\"","\"I[1]%2\"","\"I[2]%2\"","\"I[2]%2\"","\"I[2]%2\"","\"7%I[1]\"","\"7%I[1]\"","\"sum(7%ij)\""],"inputs":["[]","[2, 3.2, 6]","[0.5, 0.5, 100]","[0.5, 0.5, 10]","[5, 4, 3, 12]","[-4, -2, 6, -3, -8]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -5]","[2, 3, -5]","[2, 3, -5]","[2, 4, -5]","[2, 4, -5]","[2, 3, 4]"],"index":["null","null","null","null","null","null","0","0","1","null","1","null","1","null","1","null","1","null","1","null","1","null","1","null","null","null","1","null","1","null","1","null","2","1","null","1","2"],"expected":["9","11.2","25","(2.5 * 2.5 * 2.5)","3","-1.5","-6","-5.985164261060192","-4","-0.12","-0.04","36","-12*-4","36","-12*-4","3","1","3","1","9","6","17","6","8","4","0.25","5.545177444479562","12.25","5.545177444479562","1","1","-1","1","0","3","0 \n\"sum(7%i0)\" [2, 3, 4] null 3\n\"sum(7%ij)\" [2, 3, 4] null 5\n\"sum(7%i0)\" [2, 3, 4] 2 0","0 \n\"sum(i0%3)\" [2, 3, 4] 0 1\n\"sum(ij%3)\" [2, 3, 4] 2 1\n\"sum(ij-3)\" [1, 2, 4] null -2\n\"sum(ij-3)\" [1, 2, 4] 1 1\n\"sum(3-ij)\" [1, 2, 4] null 2\n\"sum(3-ij)\" [1, 2, 4] 1 -1\n\"sum(3-ij-ij)\" [1, 2, 4] 1 -2"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Test scalar results of various Function instances. [25]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A new Function instance which is detached! 
(no autograd support)","code":["Function f = Function.of(equation, false)"]}, + + {"kind":"expect","text":"Calling the function will yield the expected result.","code":["if ( index!=null ) assert f.derive( inputs, index ) == expected","else assert f.call( inputs )==expected"]}, + + {"kind":"where","text":"The following parameters are used :","code":{"equation":["\"6/2*(1+2)\"","\"sumJs(Ij)\"","\"prod(Ij)\"","\"prod(prod(Ij))\"","\"I3/i[1]-I0+2+i2\"","\"i3*i1/(i4-i0-2)-sig(0)+tanh(0)\"","\"(i0*i1)*i2\"","\"softplus(i0*i1)*i2\"","\"prod(ij)\"","\"relu(prod(ij))\"","\"relu(prod(ij))\"","\"quad(prod(ij)+6)\"","\"quad(prod(ij)+6)\"","\"quad(abs(prod(ij))-6)\"","\"quad(abs(prod(ij))-6)\"","\"sumJs(ij)\"","\"sumJs(ij)\"","\"sumJs(ij**1)\"","\"sumJs(ij**1)\"","\"I[1]**2\"","\"I[1]**2\"","\"sumJs(ij**2)\"","\"sumJs(ij**2)\"","\"2**I[1]\"","\"2**I[0]\"","\"2**I[2]\"","\"2**I[1]\"","\"sumJs(2**I[j])\"","\"sumJs(2**I[j])\"","\"I[1]%2\"","\"I[1]%2\"","\"I[2]%2\"","\"I[2]%2\"","\"I[2]%2\"","\"7%I[1]\"","\"7%I[1]\"","\"sum(7%ij)\""],"inputs":["[]","[2, 3.2, 6]","[0.5, 0.5, 100]","[0.5, 0.5, 10]","[5, 4, 3, 12]","[-4, -2, 6, -3, -8]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -5]","[2, 3, -5]","[2, 3, -5]","[2, 4, -5]","[2, 4, -5]","[2, 3, 4]"],"index":["null","null","null","null","null","null","0","0","1","null","1","null","1","null","1","null","1","null","1","null","1","null","1","null","null","null","1","null","1","null","1","null","2","1","null","1","2"],"expected":["9","11.2","25","(2.5 * 2.5 * 2.5)","3","-1.5","-6","-5.985164261060192","-4","-0.12","-0.04","36","-12*-4","36","-12*-4","3","1","3","1","9","6","17","6","8","4","0.25","5.545177444479562","12.25","5.545177444479562","1","1","-1","1","0","3","0 \n\"sum(7%i0)\" [2, 3, 4] null 3\n\"sum(7%ij)\" [2, 3, 4] null 5\n\"sum(7%i0)\" [2, 3, 4] 2 0","0 \n\"sum(i0%3)\" [2, 3, 4] 0 1\n\"sum(ij%3)\" [2, 3, 4] 2 1\n\"sum(ij-3)\" [1, 2, 4] null -2\n\"sum(ij-3)\" [1, 2, 4] 1 1\n\"sum(3-ij)\" [1, 2, 4] null 2\n\"sum(3-ij)\" [1, 2, 4] 1 -1\n\"sum(3-ij-ij)\" [1, 2, 4] 1 -2"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Test scalar results of various Function instances. [26]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A new Function instance which is detached! 
(no autograd support)","code":["Function f = Function.of(equation, false)"]}, + + {"kind":"expect","text":"Calling the function will yield the expected result.","code":["if ( index!=null ) assert f.derive( inputs, index ) == expected","else assert f.call( inputs )==expected"]}, + + {"kind":"where","text":"The following parameters are used :","code":{"equation":["\"6/2*(1+2)\"","\"sumJs(Ij)\"","\"prod(Ij)\"","\"prod(prod(Ij))\"","\"I3/i[1]-I0+2+i2\"","\"i3*i1/(i4-i0-2)-sig(0)+tanh(0)\"","\"(i0*i1)*i2\"","\"softplus(i0*i1)*i2\"","\"prod(ij)\"","\"relu(prod(ij))\"","\"relu(prod(ij))\"","\"quad(prod(ij)+6)\"","\"quad(prod(ij)+6)\"","\"quad(abs(prod(ij))-6)\"","\"quad(abs(prod(ij))-6)\"","\"sumJs(ij)\"","\"sumJs(ij)\"","\"sumJs(ij**1)\"","\"sumJs(ij**1)\"","\"I[1]**2\"","\"I[1]**2\"","\"sumJs(ij**2)\"","\"sumJs(ij**2)\"","\"2**I[1]\"","\"2**I[0]\"","\"2**I[2]\"","\"2**I[1]\"","\"sumJs(2**I[j])\"","\"sumJs(2**I[j])\"","\"I[1]%2\"","\"I[1]%2\"","\"I[2]%2\"","\"I[2]%2\"","\"I[2]%2\"","\"7%I[1]\"","\"7%I[1]\"","\"sum(7%ij)\""],"inputs":["[]","[2, 3.2, 6]","[0.5, 0.5, 100]","[0.5, 0.5, 10]","[5, 4, 3, 12]","[-4, -2, 6, -3, -8]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -5]","[2, 3, -5]","[2, 3, -5]","[2, 4, -5]","[2, 4, -5]","[2, 3, 4]"],"index":["null","null","null","null","null","null","0","0","1","null","1","null","1","null","1","null","1","null","1","null","1","null","1","null","null","null","1","null","1","null","1","null","2","1","null","1","2"],"expected":["9","11.2","25","(2.5 * 2.5 * 2.5)","3","-1.5","-6","-5.985164261060192","-4","-0.12","-0.04","36","-12*-4","36","-12*-4","3","1","3","1","9","6","17","6","8","4","0.25","5.545177444479562","12.25","5.545177444479562","1","1","-1","1","0","3","0 \n\"sum(7%i0)\" [2, 3, 4] null 3\n\"sum(7%ij)\" [2, 3, 4] null 5\n\"sum(7%i0)\" [2, 3, 4] 2 0","0 \n\"sum(i0%3)\" [2, 3, 4] 0 1\n\"sum(ij%3)\" [2, 3, 4] 2 1\n\"sum(ij-3)\" [1, 2, 4] null -2\n\"sum(ij-3)\" [1, 2, 4] 1 1\n\"sum(3-ij)\" [1, 2, 4] null 2\n\"sum(3-ij)\" [1, 2, 4] 1 -1\n\"sum(3-ij-ij)\" [1, 2, 4] 1 -2"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Test scalar results of various Function instances. [27]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A new Function instance which is detached! 
(no autograd support)","code":["Function f = Function.of(equation, false)"]}, + + {"kind":"expect","text":"Calling the function will yield the expected result.","code":["if ( index!=null ) assert f.derive( inputs, index ) == expected","else assert f.call( inputs )==expected"]}, + + {"kind":"where","text":"The following parameters are used :","code":{"equation":["\"6/2*(1+2)\"","\"sumJs(Ij)\"","\"prod(Ij)\"","\"prod(prod(Ij))\"","\"I3/i[1]-I0+2+i2\"","\"i3*i1/(i4-i0-2)-sig(0)+tanh(0)\"","\"(i0*i1)*i2\"","\"softplus(i0*i1)*i2\"","\"prod(ij)\"","\"relu(prod(ij))\"","\"relu(prod(ij))\"","\"quad(prod(ij)+6)\"","\"quad(prod(ij)+6)\"","\"quad(abs(prod(ij))-6)\"","\"quad(abs(prod(ij))-6)\"","\"sumJs(ij)\"","\"sumJs(ij)\"","\"sumJs(ij**1)\"","\"sumJs(ij**1)\"","\"I[1]**2\"","\"I[1]**2\"","\"sumJs(ij**2)\"","\"sumJs(ij**2)\"","\"2**I[1]\"","\"2**I[0]\"","\"2**I[2]\"","\"2**I[1]\"","\"sumJs(2**I[j])\"","\"sumJs(2**I[j])\"","\"I[1]%2\"","\"I[1]%2\"","\"I[2]%2\"","\"I[2]%2\"","\"I[2]%2\"","\"7%I[1]\"","\"7%I[1]\"","\"sum(7%ij)\""],"inputs":["[]","[2, 3.2, 6]","[0.5, 0.5, 100]","[0.5, 0.5, 10]","[5, 4, 3, 12]","[-4, -2, 6, -3, -8]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -5]","[2, 3, -5]","[2, 3, -5]","[2, 4, -5]","[2, 4, -5]","[2, 3, 4]"],"index":["null","null","null","null","null","null","0","0","1","null","1","null","1","null","1","null","1","null","1","null","1","null","1","null","null","null","1","null","1","null","1","null","2","1","null","1","2"],"expected":["9","11.2","25","(2.5 * 2.5 * 2.5)","3","-1.5","-6","-5.985164261060192","-4","-0.12","-0.04","36","-12*-4","36","-12*-4","3","1","3","1","9","6","17","6","8","4","0.25","5.545177444479562","12.25","5.545177444479562","1","1","-1","1","0","3","0 \n\"sum(7%i0)\" [2, 3, 4] null 3\n\"sum(7%ij)\" [2, 3, 4] null 5\n\"sum(7%i0)\" [2, 3, 4] 2 0","0 \n\"sum(i0%3)\" [2, 3, 4] 0 1\n\"sum(ij%3)\" [2, 3, 4] 2 1\n\"sum(ij-3)\" [1, 2, 4] null -2\n\"sum(ij-3)\" [1, 2, 4] 1 1\n\"sum(3-ij)\" [1, 2, 4] null 2\n\"sum(3-ij)\" [1, 2, 4] 1 -1\n\"sum(3-ij-ij)\" [1, 2, 4] 1 -2"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Test scalar results of various Function instances. [28]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A new Function instance which is detached! 
(no autograd support)","code":["Function f = Function.of(equation, false)"]}, + + {"kind":"expect","text":"Calling the function will yield the expected result.","code":["if ( index!=null ) assert f.derive( inputs, index ) == expected","else assert f.call( inputs )==expected"]}, + + {"kind":"where","text":"The following parameters are used :","code":{"equation":["\"6/2*(1+2)\"","\"sumJs(Ij)\"","\"prod(Ij)\"","\"prod(prod(Ij))\"","\"I3/i[1]-I0+2+i2\"","\"i3*i1/(i4-i0-2)-sig(0)+tanh(0)\"","\"(i0*i1)*i2\"","\"softplus(i0*i1)*i2\"","\"prod(ij)\"","\"relu(prod(ij))\"","\"relu(prod(ij))\"","\"quad(prod(ij)+6)\"","\"quad(prod(ij)+6)\"","\"quad(abs(prod(ij))-6)\"","\"quad(abs(prod(ij))-6)\"","\"sumJs(ij)\"","\"sumJs(ij)\"","\"sumJs(ij**1)\"","\"sumJs(ij**1)\"","\"I[1]**2\"","\"I[1]**2\"","\"sumJs(ij**2)\"","\"sumJs(ij**2)\"","\"2**I[1]\"","\"2**I[0]\"","\"2**I[2]\"","\"2**I[1]\"","\"sumJs(2**I[j])\"","\"sumJs(2**I[j])\"","\"I[1]%2\"","\"I[1]%2\"","\"I[2]%2\"","\"I[2]%2\"","\"I[2]%2\"","\"7%I[1]\"","\"7%I[1]\"","\"sum(7%ij)\""],"inputs":["[]","[2, 3.2, 6]","[0.5, 0.5, 100]","[0.5, 0.5, 10]","[5, 4, 3, 12]","[-4, -2, 6, -3, -8]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -5]","[2, 3, -5]","[2, 3, -5]","[2, 4, -5]","[2, 4, -5]","[2, 3, 4]"],"index":["null","null","null","null","null","null","0","0","1","null","1","null","1","null","1","null","1","null","1","null","1","null","1","null","null","null","1","null","1","null","1","null","2","1","null","1","2"],"expected":["9","11.2","25","(2.5 * 2.5 * 2.5)","3","-1.5","-6","-5.985164261060192","-4","-0.12","-0.04","36","-12*-4","36","-12*-4","3","1","3","1","9","6","17","6","8","4","0.25","5.545177444479562","12.25","5.545177444479562","1","1","-1","1","0","3","0 \n\"sum(7%i0)\" [2, 3, 4] null 3\n\"sum(7%ij)\" [2, 3, 4] null 5\n\"sum(7%i0)\" [2, 3, 4] 2 0","0 \n\"sum(i0%3)\" [2, 3, 4] 0 1\n\"sum(ij%3)\" [2, 3, 4] 2 1\n\"sum(ij-3)\" [1, 2, 4] null -2\n\"sum(ij-3)\" [1, 2, 4] 1 1\n\"sum(3-ij)\" [1, 2, 4] null 2\n\"sum(3-ij)\" [1, 2, 4] 1 -1\n\"sum(3-ij-ij)\" [1, 2, 4] 1 -2"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Test scalar results of various Function instances. [29]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A new Function instance which is detached! 
(no autograd support)","code":["Function f = Function.of(equation, false)"]}, + + {"kind":"expect","text":"Calling the function will yield the expected result.","code":["if ( index!=null ) assert f.derive( inputs, index ) == expected","else assert f.call( inputs )==expected"]}, + + {"kind":"where","text":"The following parameters are used :","code":{"equation":["\"6/2*(1+2)\"","\"sumJs(Ij)\"","\"prod(Ij)\"","\"prod(prod(Ij))\"","\"I3/i[1]-I0+2+i2\"","\"i3*i1/(i4-i0-2)-sig(0)+tanh(0)\"","\"(i0*i1)*i2\"","\"softplus(i0*i1)*i2\"","\"prod(ij)\"","\"relu(prod(ij))\"","\"relu(prod(ij))\"","\"quad(prod(ij)+6)\"","\"quad(prod(ij)+6)\"","\"quad(abs(prod(ij))-6)\"","\"quad(abs(prod(ij))-6)\"","\"sumJs(ij)\"","\"sumJs(ij)\"","\"sumJs(ij**1)\"","\"sumJs(ij**1)\"","\"I[1]**2\"","\"I[1]**2\"","\"sumJs(ij**2)\"","\"sumJs(ij**2)\"","\"2**I[1]\"","\"2**I[0]\"","\"2**I[2]\"","\"2**I[1]\"","\"sumJs(2**I[j])\"","\"sumJs(2**I[j])\"","\"I[1]%2\"","\"I[1]%2\"","\"I[2]%2\"","\"I[2]%2\"","\"I[2]%2\"","\"7%I[1]\"","\"7%I[1]\"","\"sum(7%ij)\""],"inputs":["[]","[2, 3.2, 6]","[0.5, 0.5, 100]","[0.5, 0.5, 10]","[5, 4, 3, 12]","[-4, -2, 6, -3, -8]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -5]","[2, 3, -5]","[2, 3, -5]","[2, 4, -5]","[2, 4, -5]","[2, 3, 4]"],"index":["null","null","null","null","null","null","0","0","1","null","1","null","1","null","1","null","1","null","1","null","1","null","1","null","null","null","1","null","1","null","1","null","2","1","null","1","2"],"expected":["9","11.2","25","(2.5 * 2.5 * 2.5)","3","-1.5","-6","-5.985164261060192","-4","-0.12","-0.04","36","-12*-4","36","-12*-4","3","1","3","1","9","6","17","6","8","4","0.25","5.545177444479562","12.25","5.545177444479562","1","1","-1","1","0","3","0 \n\"sum(7%i0)\" [2, 3, 4] null 3\n\"sum(7%ij)\" [2, 3, 4] null 5\n\"sum(7%i0)\" [2, 3, 4] 2 0","0 \n\"sum(i0%3)\" [2, 3, 4] 0 1\n\"sum(ij%3)\" [2, 3, 4] 2 1\n\"sum(ij-3)\" [1, 2, 4] null -2\n\"sum(ij-3)\" [1, 2, 4] 1 1\n\"sum(3-ij)\" [1, 2, 4] null 2\n\"sum(3-ij)\" [1, 2, 4] 1 -1\n\"sum(3-ij-ij)\" [1, 2, 4] 1 -2"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Test scalar results of various Function instances. [30]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A new Function instance which is detached! 
(no autograd support)","code":["Function f = Function.of(equation, false)"]}, + + {"kind":"expect","text":"Calling the function will yield the expected result.","code":["if ( index!=null ) assert f.derive( inputs, index ) == expected","else assert f.call( inputs )==expected"]}, + + {"kind":"where","text":"The following parameters are used :","code":{"equation":["\"6/2*(1+2)\"","\"sumJs(Ij)\"","\"prod(Ij)\"","\"prod(prod(Ij))\"","\"I3/i[1]-I0+2+i2\"","\"i3*i1/(i4-i0-2)-sig(0)+tanh(0)\"","\"(i0*i1)*i2\"","\"softplus(i0*i1)*i2\"","\"prod(ij)\"","\"relu(prod(ij))\"","\"relu(prod(ij))\"","\"quad(prod(ij)+6)\"","\"quad(prod(ij)+6)\"","\"quad(abs(prod(ij))-6)\"","\"quad(abs(prod(ij))-6)\"","\"sumJs(ij)\"","\"sumJs(ij)\"","\"sumJs(ij**1)\"","\"sumJs(ij**1)\"","\"I[1]**2\"","\"I[1]**2\"","\"sumJs(ij**2)\"","\"sumJs(ij**2)\"","\"2**I[1]\"","\"2**I[0]\"","\"2**I[2]\"","\"2**I[1]\"","\"sumJs(2**I[j])\"","\"sumJs(2**I[j])\"","\"I[1]%2\"","\"I[1]%2\"","\"I[2]%2\"","\"I[2]%2\"","\"I[2]%2\"","\"7%I[1]\"","\"7%I[1]\"","\"sum(7%ij)\""],"inputs":["[]","[2, 3.2, 6]","[0.5, 0.5, 100]","[0.5, 0.5, 10]","[5, 4, 3, 12]","[-4, -2, 6, -3, -8]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -5]","[2, 3, -5]","[2, 3, -5]","[2, 4, -5]","[2, 4, -5]","[2, 3, 4]"],"index":["null","null","null","null","null","null","0","0","1","null","1","null","1","null","1","null","1","null","1","null","1","null","1","null","null","null","1","null","1","null","1","null","2","1","null","1","2"],"expected":["9","11.2","25","(2.5 * 2.5 * 2.5)","3","-1.5","-6","-5.985164261060192","-4","-0.12","-0.04","36","-12*-4","36","-12*-4","3","1","3","1","9","6","17","6","8","4","0.25","5.545177444479562","12.25","5.545177444479562","1","1","-1","1","0","3","0 \n\"sum(7%i0)\" [2, 3, 4] null 3\n\"sum(7%ij)\" [2, 3, 4] null 5\n\"sum(7%i0)\" [2, 3, 4] 2 0","0 \n\"sum(i0%3)\" [2, 3, 4] 0 1\n\"sum(ij%3)\" [2, 3, 4] 2 1\n\"sum(ij-3)\" [1, 2, 4] null -2\n\"sum(ij-3)\" [1, 2, 4] 1 1\n\"sum(3-ij)\" [1, 2, 4] null 2\n\"sum(3-ij)\" [1, 2, 4] 1 -1\n\"sum(3-ij-ij)\" [1, 2, 4] 1 -2"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Test scalar results of various Function instances. [31]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A new Function instance which is detached! 
(no autograd support)","code":["Function f = Function.of(equation, false)"]}, + + {"kind":"expect","text":"Calling the function will yield the expected result.","code":["if ( index!=null ) assert f.derive( inputs, index ) == expected","else assert f.call( inputs )==expected"]}, + + {"kind":"where","text":"The following parameters are used :","code":{"equation":["\"6/2*(1+2)\"","\"sumJs(Ij)\"","\"prod(Ij)\"","\"prod(prod(Ij))\"","\"I3/i[1]-I0+2+i2\"","\"i3*i1/(i4-i0-2)-sig(0)+tanh(0)\"","\"(i0*i1)*i2\"","\"softplus(i0*i1)*i2\"","\"prod(ij)\"","\"relu(prod(ij))\"","\"relu(prod(ij))\"","\"quad(prod(ij)+6)\"","\"quad(prod(ij)+6)\"","\"quad(abs(prod(ij))-6)\"","\"quad(abs(prod(ij))-6)\"","\"sumJs(ij)\"","\"sumJs(ij)\"","\"sumJs(ij**1)\"","\"sumJs(ij**1)\"","\"I[1]**2\"","\"I[1]**2\"","\"sumJs(ij**2)\"","\"sumJs(ij**2)\"","\"2**I[1]\"","\"2**I[0]\"","\"2**I[2]\"","\"2**I[1]\"","\"sumJs(2**I[j])\"","\"sumJs(2**I[j])\"","\"I[1]%2\"","\"I[1]%2\"","\"I[2]%2\"","\"I[2]%2\"","\"I[2]%2\"","\"7%I[1]\"","\"7%I[1]\"","\"sum(7%ij)\""],"inputs":["[]","[2, 3.2, 6]","[0.5, 0.5, 100]","[0.5, 0.5, 10]","[5, 4, 3, 12]","[-4, -2, 6, -3, -8]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -5]","[2, 3, -5]","[2, 3, -5]","[2, 4, -5]","[2, 4, -5]","[2, 3, 4]"],"index":["null","null","null","null","null","null","0","0","1","null","1","null","1","null","1","null","1","null","1","null","1","null","1","null","null","null","1","null","1","null","1","null","2","1","null","1","2"],"expected":["9","11.2","25","(2.5 * 2.5 * 2.5)","3","-1.5","-6","-5.985164261060192","-4","-0.12","-0.04","36","-12*-4","36","-12*-4","3","1","3","1","9","6","17","6","8","4","0.25","5.545177444479562","12.25","5.545177444479562","1","1","-1","1","0","3","0 \n\"sum(7%i0)\" [2, 3, 4] null 3\n\"sum(7%ij)\" [2, 3, 4] null 5\n\"sum(7%i0)\" [2, 3, 4] 2 0","0 \n\"sum(i0%3)\" [2, 3, 4] 0 1\n\"sum(ij%3)\" [2, 3, 4] 2 1\n\"sum(ij-3)\" [1, 2, 4] null -2\n\"sum(ij-3)\" [1, 2, 4] 1 1\n\"sum(3-ij)\" [1, 2, 4] null 2\n\"sum(3-ij)\" [1, 2, 4] 1 -1\n\"sum(3-ij-ij)\" [1, 2, 4] 1 -2"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Test scalar results of various Function instances. [32]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A new Function instance which is detached! 
(no autograd support)","code":["Function f = Function.of(equation, false)"]}, + + {"kind":"expect","text":"Calling the function will yield the expected result.","code":["if ( index!=null ) assert f.derive( inputs, index ) == expected","else assert f.call( inputs )==expected"]}, + + {"kind":"where","text":"The following parameters are used :","code":{"equation":["\"6/2*(1+2)\"","\"sumJs(Ij)\"","\"prod(Ij)\"","\"prod(prod(Ij))\"","\"I3/i[1]-I0+2+i2\"","\"i3*i1/(i4-i0-2)-sig(0)+tanh(0)\"","\"(i0*i1)*i2\"","\"softplus(i0*i1)*i2\"","\"prod(ij)\"","\"relu(prod(ij))\"","\"relu(prod(ij))\"","\"quad(prod(ij)+6)\"","\"quad(prod(ij)+6)\"","\"quad(abs(prod(ij))-6)\"","\"quad(abs(prod(ij))-6)\"","\"sumJs(ij)\"","\"sumJs(ij)\"","\"sumJs(ij**1)\"","\"sumJs(ij**1)\"","\"I[1]**2\"","\"I[1]**2\"","\"sumJs(ij**2)\"","\"sumJs(ij**2)\"","\"2**I[1]\"","\"2**I[0]\"","\"2**I[2]\"","\"2**I[1]\"","\"sumJs(2**I[j])\"","\"sumJs(2**I[j])\"","\"I[1]%2\"","\"I[1]%2\"","\"I[2]%2\"","\"I[2]%2\"","\"I[2]%2\"","\"7%I[1]\"","\"7%I[1]\"","\"sum(7%ij)\""],"inputs":["[]","[2, 3.2, 6]","[0.5, 0.5, 100]","[0.5, 0.5, 10]","[5, 4, 3, 12]","[-4, -2, 6, -3, -8]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -5]","[2, 3, -5]","[2, 3, -5]","[2, 4, -5]","[2, 4, -5]","[2, 3, 4]"],"index":["null","null","null","null","null","null","0","0","1","null","1","null","1","null","1","null","1","null","1","null","1","null","1","null","null","null","1","null","1","null","1","null","2","1","null","1","2"],"expected":["9","11.2","25","(2.5 * 2.5 * 2.5)","3","-1.5","-6","-5.985164261060192","-4","-0.12","-0.04","36","-12*-4","36","-12*-4","3","1","3","1","9","6","17","6","8","4","0.25","5.545177444479562","12.25","5.545177444479562","1","1","-1","1","0","3","0 \n\"sum(7%i0)\" [2, 3, 4] null 3\n\"sum(7%ij)\" [2, 3, 4] null 5\n\"sum(7%i0)\" [2, 3, 4] 2 0","0 \n\"sum(i0%3)\" [2, 3, 4] 0 1\n\"sum(ij%3)\" [2, 3, 4] 2 1\n\"sum(ij-3)\" [1, 2, 4] null -2\n\"sum(ij-3)\" [1, 2, 4] 1 1\n\"sum(3-ij)\" [1, 2, 4] null 2\n\"sum(3-ij)\" [1, 2, 4] 1 -1\n\"sum(3-ij-ij)\" [1, 2, 4] 1 -2"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Test scalar results of various Function instances. [33]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A new Function instance which is detached! 
(no autograd support)","code":["Function f = Function.of(equation, false)"]}, + + {"kind":"expect","text":"Calling the function will yield the expected result.","code":["if ( index!=null ) assert f.derive( inputs, index ) == expected","else assert f.call( inputs )==expected"]}, + + {"kind":"where","text":"The following parameters are used :","code":{"equation":["\"6/2*(1+2)\"","\"sumJs(Ij)\"","\"prod(Ij)\"","\"prod(prod(Ij))\"","\"I3/i[1]-I0+2+i2\"","\"i3*i1/(i4-i0-2)-sig(0)+tanh(0)\"","\"(i0*i1)*i2\"","\"softplus(i0*i1)*i2\"","\"prod(ij)\"","\"relu(prod(ij))\"","\"relu(prod(ij))\"","\"quad(prod(ij)+6)\"","\"quad(prod(ij)+6)\"","\"quad(abs(prod(ij))-6)\"","\"quad(abs(prod(ij))-6)\"","\"sumJs(ij)\"","\"sumJs(ij)\"","\"sumJs(ij**1)\"","\"sumJs(ij**1)\"","\"I[1]**2\"","\"I[1]**2\"","\"sumJs(ij**2)\"","\"sumJs(ij**2)\"","\"2**I[1]\"","\"2**I[0]\"","\"2**I[2]\"","\"2**I[1]\"","\"sumJs(2**I[j])\"","\"sumJs(2**I[j])\"","\"I[1]%2\"","\"I[1]%2\"","\"I[2]%2\"","\"I[2]%2\"","\"I[2]%2\"","\"7%I[1]\"","\"7%I[1]\"","\"sum(7%ij)\""],"inputs":["[]","[2, 3.2, 6]","[0.5, 0.5, 100]","[0.5, 0.5, 10]","[5, 4, 3, 12]","[-4, -2, 6, -3, -8]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -5]","[2, 3, -5]","[2, 3, -5]","[2, 4, -5]","[2, 4, -5]","[2, 3, 4]"],"index":["null","null","null","null","null","null","0","0","1","null","1","null","1","null","1","null","1","null","1","null","1","null","1","null","null","null","1","null","1","null","1","null","2","1","null","1","2"],"expected":["9","11.2","25","(2.5 * 2.5 * 2.5)","3","-1.5","-6","-5.985164261060192","-4","-0.12","-0.04","36","-12*-4","36","-12*-4","3","1","3","1","9","6","17","6","8","4","0.25","5.545177444479562","12.25","5.545177444479562","1","1","-1","1","0","3","0 \n\"sum(7%i0)\" [2, 3, 4] null 3\n\"sum(7%ij)\" [2, 3, 4] null 5\n\"sum(7%i0)\" [2, 3, 4] 2 0","0 \n\"sum(i0%3)\" [2, 3, 4] 0 1\n\"sum(ij%3)\" [2, 3, 4] 2 1\n\"sum(ij-3)\" [1, 2, 4] null -2\n\"sum(ij-3)\" [1, 2, 4] 1 1\n\"sum(3-ij)\" [1, 2, 4] null 2\n\"sum(3-ij)\" [1, 2, 4] 1 -1\n\"sum(3-ij-ij)\" [1, 2, 4] 1 -2"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Test scalar results of various Function instances. [34]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A new Function instance which is detached! 
(no autograd support)","code":["Function f = Function.of(equation, false)"]}, + + {"kind":"expect","text":"Calling the function will yield the expected result.","code":["if ( index!=null ) assert f.derive( inputs, index ) == expected","else assert f.call( inputs )==expected"]}, + + {"kind":"where","text":"The following parameters are used :","code":{"equation":["\"6/2*(1+2)\"","\"sumJs(Ij)\"","\"prod(Ij)\"","\"prod(prod(Ij))\"","\"I3/i[1]-I0+2+i2\"","\"i3*i1/(i4-i0-2)-sig(0)+tanh(0)\"","\"(i0*i1)*i2\"","\"softplus(i0*i1)*i2\"","\"prod(ij)\"","\"relu(prod(ij))\"","\"relu(prod(ij))\"","\"quad(prod(ij)+6)\"","\"quad(prod(ij)+6)\"","\"quad(abs(prod(ij))-6)\"","\"quad(abs(prod(ij))-6)\"","\"sumJs(ij)\"","\"sumJs(ij)\"","\"sumJs(ij**1)\"","\"sumJs(ij**1)\"","\"I[1]**2\"","\"I[1]**2\"","\"sumJs(ij**2)\"","\"sumJs(ij**2)\"","\"2**I[1]\"","\"2**I[0]\"","\"2**I[2]\"","\"2**I[1]\"","\"sumJs(2**I[j])\"","\"sumJs(2**I[j])\"","\"I[1]%2\"","\"I[1]%2\"","\"I[2]%2\"","\"I[2]%2\"","\"I[2]%2\"","\"7%I[1]\"","\"7%I[1]\"","\"sum(7%ij)\""],"inputs":["[]","[2, 3.2, 6]","[0.5, 0.5, 100]","[0.5, 0.5, 10]","[5, 4, 3, 12]","[-4, -2, 6, -3, -8]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -5]","[2, 3, -5]","[2, 3, -5]","[2, 4, -5]","[2, 4, -5]","[2, 3, 4]"],"index":["null","null","null","null","null","null","0","0","1","null","1","null","1","null","1","null","1","null","1","null","1","null","1","null","null","null","1","null","1","null","1","null","2","1","null","1","2"],"expected":["9","11.2","25","(2.5 * 2.5 * 2.5)","3","-1.5","-6","-5.985164261060192","-4","-0.12","-0.04","36","-12*-4","36","-12*-4","3","1","3","1","9","6","17","6","8","4","0.25","5.545177444479562","12.25","5.545177444479562","1","1","-1","1","0","3","0 \n\"sum(7%i0)\" [2, 3, 4] null 3\n\"sum(7%ij)\" [2, 3, 4] null 5\n\"sum(7%i0)\" [2, 3, 4] 2 0","0 \n\"sum(i0%3)\" [2, 3, 4] 0 1\n\"sum(ij%3)\" [2, 3, 4] 2 1\n\"sum(ij-3)\" [1, 2, 4] null -2\n\"sum(ij-3)\" [1, 2, 4] 1 1\n\"sum(3-ij)\" [1, 2, 4] null 2\n\"sum(3-ij)\" [1, 2, 4] 1 -1\n\"sum(3-ij-ij)\" [1, 2, 4] 1 -2"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Test scalar results of various Function instances. [35]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A new Function instance which is detached! 
(no autograd support)","code":["Function f = Function.of(equation, false)"]}, + + {"kind":"expect","text":"Calling the function will yield the expected result.","code":["if ( index!=null ) assert f.derive( inputs, index ) == expected","else assert f.call( inputs )==expected"]}, + + {"kind":"where","text":"The following parameters are used :","code":{"equation":["\"6/2*(1+2)\"","\"sumJs(Ij)\"","\"prod(Ij)\"","\"prod(prod(Ij))\"","\"I3/i[1]-I0+2+i2\"","\"i3*i1/(i4-i0-2)-sig(0)+tanh(0)\"","\"(i0*i1)*i2\"","\"softplus(i0*i1)*i2\"","\"prod(ij)\"","\"relu(prod(ij))\"","\"relu(prod(ij))\"","\"quad(prod(ij)+6)\"","\"quad(prod(ij)+6)\"","\"quad(abs(prod(ij))-6)\"","\"quad(abs(prod(ij))-6)\"","\"sumJs(ij)\"","\"sumJs(ij)\"","\"sumJs(ij**1)\"","\"sumJs(ij**1)\"","\"I[1]**2\"","\"I[1]**2\"","\"sumJs(ij**2)\"","\"sumJs(ij**2)\"","\"2**I[1]\"","\"2**I[0]\"","\"2**I[2]\"","\"2**I[1]\"","\"sumJs(2**I[j])\"","\"sumJs(2**I[j])\"","\"I[1]%2\"","\"I[1]%2\"","\"I[2]%2\"","\"I[2]%2\"","\"I[2]%2\"","\"7%I[1]\"","\"7%I[1]\"","\"sum(7%ij)\""],"inputs":["[]","[2, 3.2, 6]","[0.5, 0.5, 100]","[0.5, 0.5, 10]","[5, 4, 3, 12]","[-4, -2, 6, -3, -8]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -5]","[2, 3, -5]","[2, 3, -5]","[2, 4, -5]","[2, 4, -5]","[2, 3, 4]"],"index":["null","null","null","null","null","null","0","0","1","null","1","null","1","null","1","null","1","null","1","null","1","null","1","null","null","null","1","null","1","null","1","null","2","1","null","1","2"],"expected":["9","11.2","25","(2.5 * 2.5 * 2.5)","3","-1.5","-6","-5.985164261060192","-4","-0.12","-0.04","36","-12*-4","36","-12*-4","3","1","3","1","9","6","17","6","8","4","0.25","5.545177444479562","12.25","5.545177444479562","1","1","-1","1","0","3","0 \n\"sum(7%i0)\" [2, 3, 4] null 3\n\"sum(7%ij)\" [2, 3, 4] null 5\n\"sum(7%i0)\" [2, 3, 4] 2 0","0 \n\"sum(i0%3)\" [2, 3, 4] 0 1\n\"sum(ij%3)\" [2, 3, 4] 2 1\n\"sum(ij-3)\" [1, 2, 4] null -2\n\"sum(ij-3)\" [1, 2, 4] 1 1\n\"sum(3-ij)\" [1, 2, 4] null 2\n\"sum(3-ij)\" [1, 2, 4] 1 -1\n\"sum(3-ij-ij)\" [1, 2, 4] 1 -2"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Test scalar results of various Function instances. [36]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A new Function instance which is detached! 
(no autograd support)","code":["Function f = Function.of(equation, false)"]}, + + {"kind":"expect","text":"Calling the function will yield the expected result.","code":["if ( index!=null ) assert f.derive( inputs, index ) == expected","else assert f.call( inputs )==expected"]}, + + {"kind":"where","text":"The following parameters are used :","code":{"equation":["\"6/2*(1+2)\"","\"sumJs(Ij)\"","\"prod(Ij)\"","\"prod(prod(Ij))\"","\"I3/i[1]-I0+2+i2\"","\"i3*i1/(i4-i0-2)-sig(0)+tanh(0)\"","\"(i0*i1)*i2\"","\"softplus(i0*i1)*i2\"","\"prod(ij)\"","\"relu(prod(ij))\"","\"relu(prod(ij))\"","\"quad(prod(ij)+6)\"","\"quad(prod(ij)+6)\"","\"quad(abs(prod(ij))-6)\"","\"quad(abs(prod(ij))-6)\"","\"sumJs(ij)\"","\"sumJs(ij)\"","\"sumJs(ij**1)\"","\"sumJs(ij**1)\"","\"I[1]**2\"","\"I[1]**2\"","\"sumJs(ij**2)\"","\"sumJs(ij**2)\"","\"2**I[1]\"","\"2**I[0]\"","\"2**I[2]\"","\"2**I[1]\"","\"sumJs(2**I[j])\"","\"sumJs(2**I[j])\"","\"I[1]%2\"","\"I[1]%2\"","\"I[2]%2\"","\"I[2]%2\"","\"I[2]%2\"","\"7%I[1]\"","\"7%I[1]\"","\"sum(7%ij)\""],"inputs":["[]","[2, 3.2, 6]","[0.5, 0.5, 100]","[0.5, 0.5, 10]","[5, 4, 3, 12]","[-4, -2, 6, -3, -8]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -5]","[2, 3, -5]","[2, 3, -5]","[2, 4, -5]","[2, 4, -5]","[2, 3, 4]"],"index":["null","null","null","null","null","null","0","0","1","null","1","null","1","null","1","null","1","null","1","null","1","null","1","null","null","null","1","null","1","null","1","null","2","1","null","1","2"],"expected":["9","11.2","25","(2.5 * 2.5 * 2.5)","3","-1.5","-6","-5.985164261060192","-4","-0.12","-0.04","36","-12*-4","36","-12*-4","3","1","3","1","9","6","17","6","8","4","0.25","5.545177444479562","12.25","5.545177444479562","1","1","-1","1","0","3","0 \n\"sum(7%i0)\" [2, 3, 4] null 3\n\"sum(7%ij)\" [2, 3, 4] null 5\n\"sum(7%i0)\" [2, 3, 4] 2 0","0 \n\"sum(i0%3)\" [2, 3, 4] 0 1\n\"sum(ij%3)\" [2, 3, 4] 2 1\n\"sum(ij-3)\" [1, 2, 4] null -2\n\"sum(ij-3)\" [1, 2, 4] 1 1\n\"sum(3-ij)\" [1, 2, 4] null 2\n\"sum(3-ij)\" [1, 2, 4] 1 -1\n\"sum(3-ij-ij)\" [1, 2, 4] 1 -2"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Test scalar results of various Function instances. [37]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A new Function instance which is detached! 
(no autograd support)","code":["Function f = Function.of(equation, false)"]}, + + {"kind":"expect","text":"Calling the function will yield the expected result.","code":["if ( index!=null ) assert f.derive( inputs, index ) == expected","else assert f.call( inputs )==expected"]}, + + {"kind":"where","text":"The following parameters are used :","code":{"equation":["\"6/2*(1+2)\"","\"sumJs(Ij)\"","\"prod(Ij)\"","\"prod(prod(Ij))\"","\"I3/i[1]-I0+2+i2\"","\"i3*i1/(i4-i0-2)-sig(0)+tanh(0)\"","\"(i0*i1)*i2\"","\"softplus(i0*i1)*i2\"","\"prod(ij)\"","\"relu(prod(ij))\"","\"relu(prod(ij))\"","\"quad(prod(ij)+6)\"","\"quad(prod(ij)+6)\"","\"quad(abs(prod(ij))-6)\"","\"quad(abs(prod(ij))-6)\"","\"sumJs(ij)\"","\"sumJs(ij)\"","\"sumJs(ij**1)\"","\"sumJs(ij**1)\"","\"I[1]**2\"","\"I[1]**2\"","\"sumJs(ij**2)\"","\"sumJs(ij**2)\"","\"2**I[1]\"","\"2**I[0]\"","\"2**I[2]\"","\"2**I[1]\"","\"sumJs(2**I[j])\"","\"sumJs(2**I[j])\"","\"I[1]%2\"","\"I[1]%2\"","\"I[2]%2\"","\"I[2]%2\"","\"I[2]%2\"","\"7%I[1]\"","\"7%I[1]\"","\"sum(7%ij)\""],"inputs":["[]","[2, 3.2, 6]","[0.5, 0.5, 100]","[0.5, 0.5, 10]","[5, 4, 3, 12]","[-4, -2, 6, -3, -8]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -5]","[2, 3, -5]","[2, 3, -5]","[2, 4, -5]","[2, 4, -5]","[2, 3, 4]"],"index":["null","null","null","null","null","null","0","0","1","null","1","null","1","null","1","null","1","null","1","null","1","null","1","null","null","null","1","null","1","null","1","null","2","1","null","1","2"],"expected":["9","11.2","25","(2.5 * 2.5 * 2.5)","3","-1.5","-6","-5.985164261060192","-4","-0.12","-0.04","36","-12*-4","36","-12*-4","3","1","3","1","9","6","17","6","8","4","0.25","5.545177444479562","12.25","5.545177444479562","1","1","-1","1","0","3","0 \n\"sum(7%i0)\" [2, 3, 4] null 3\n\"sum(7%ij)\" [2, 3, 4] null 5\n\"sum(7%i0)\" [2, 3, 4] 2 0","0 \n\"sum(i0%3)\" [2, 3, 4] 0 1\n\"sum(ij%3)\" [2, 3, 4] 2 1\n\"sum(ij-3)\" [1, 2, 4] null -2\n\"sum(ij-3)\" [1, 2, 4] 1 1\n\"sum(3-ij)\" [1, 2, 4] null 2\n\"sum(3-ij)\" [1, 2, 4] 1 -1\n\"sum(3-ij-ij)\" [1, 2, 4] 1 -2"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Test scalar results of various Function instances. [38]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A new Function instance which is detached! 
(no autograd support)","code":["Function f = Function.of(equation, false)"]}, + + {"kind":"expect","text":"Calling the function will yield the expected result.","code":["if ( index!=null ) assert f.derive( inputs, index ) == expected","else assert f.call( inputs )==expected"]}, + + {"kind":"where","text":"The following parameters are used :","code":{"equation":["\"6/2*(1+2)\"","\"sumJs(Ij)\"","\"prod(Ij)\"","\"prod(prod(Ij))\"","\"I3/i[1]-I0+2+i2\"","\"i3*i1/(i4-i0-2)-sig(0)+tanh(0)\"","\"(i0*i1)*i2\"","\"softplus(i0*i1)*i2\"","\"prod(ij)\"","\"relu(prod(ij))\"","\"relu(prod(ij))\"","\"quad(prod(ij)+6)\"","\"quad(prod(ij)+6)\"","\"quad(abs(prod(ij))-6)\"","\"quad(abs(prod(ij))-6)\"","\"sumJs(ij)\"","\"sumJs(ij)\"","\"sumJs(ij**1)\"","\"sumJs(ij**1)\"","\"I[1]**2\"","\"I[1]**2\"","\"sumJs(ij**2)\"","\"sumJs(ij**2)\"","\"2**I[1]\"","\"2**I[0]\"","\"2**I[2]\"","\"2**I[1]\"","\"sumJs(2**I[j])\"","\"sumJs(2**I[j])\"","\"I[1]%2\"","\"I[1]%2\"","\"I[2]%2\"","\"I[2]%2\"","\"I[2]%2\"","\"7%I[1]\"","\"7%I[1]\"","\"sum(7%ij)\""],"inputs":["[]","[2, 3.2, 6]","[0.5, 0.5, 100]","[0.5, 0.5, 10]","[5, 4, 3, 12]","[-4, -2, 6, -3, -8]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -5]","[2, 3, -5]","[2, 3, -5]","[2, 4, -5]","[2, 4, -5]","[2, 3, 4]"],"index":["null","null","null","null","null","null","0","0","1","null","1","null","1","null","1","null","1","null","1","null","1","null","1","null","null","null","1","null","1","null","1","null","2","1","null","1","2"],"expected":["9","11.2","25","(2.5 * 2.5 * 2.5)","3","-1.5","-6","-5.985164261060192","-4","-0.12","-0.04","36","-12*-4","36","-12*-4","3","1","3","1","9","6","17","6","8","4","0.25","5.545177444479562","12.25","5.545177444479562","1","1","-1","1","0","3","0 \n\"sum(7%i0)\" [2, 3, 4] null 3\n\"sum(7%ij)\" [2, 3, 4] null 5\n\"sum(7%i0)\" [2, 3, 4] 2 0","0 \n\"sum(i0%3)\" [2, 3, 4] 0 1\n\"sum(ij%3)\" [2, 3, 4] 2 1\n\"sum(ij-3)\" [1, 2, 4] null -2\n\"sum(ij-3)\" [1, 2, 4] 1 1\n\"sum(3-ij)\" [1, 2, 4] null 2\n\"sum(3-ij)\" [1, 2, 4] 1 -1\n\"sum(3-ij-ij)\" [1, 2, 4] 1 -2"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Test scalar results of various Function instances. [39]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A new Function instance which is detached! 
(no autograd support)","code":["Function f = Function.of(equation, false)"]}, + + {"kind":"expect","text":"Calling the function will yield the expected result.","code":["if ( index!=null ) assert f.derive( inputs, index ) == expected","else assert f.call( inputs )==expected"]}, + + {"kind":"where","text":"The following parameters are used :","code":{"equation":["\"6/2*(1+2)\"","\"sumJs(Ij)\"","\"prod(Ij)\"","\"prod(prod(Ij))\"","\"I3/i[1]-I0+2+i2\"","\"i3*i1/(i4-i0-2)-sig(0)+tanh(0)\"","\"(i0*i1)*i2\"","\"softplus(i0*i1)*i2\"","\"prod(ij)\"","\"relu(prod(ij))\"","\"relu(prod(ij))\"","\"quad(prod(ij)+6)\"","\"quad(prod(ij)+6)\"","\"quad(abs(prod(ij))-6)\"","\"quad(abs(prod(ij))-6)\"","\"sumJs(ij)\"","\"sumJs(ij)\"","\"sumJs(ij**1)\"","\"sumJs(ij**1)\"","\"I[1]**2\"","\"I[1]**2\"","\"sumJs(ij**2)\"","\"sumJs(ij**2)\"","\"2**I[1]\"","\"2**I[0]\"","\"2**I[2]\"","\"2**I[1]\"","\"sumJs(2**I[j])\"","\"sumJs(2**I[j])\"","\"I[1]%2\"","\"I[1]%2\"","\"I[2]%2\"","\"I[2]%2\"","\"I[2]%2\"","\"7%I[1]\"","\"7%I[1]\"","\"sum(7%ij)\""],"inputs":["[]","[2, 3.2, 6]","[0.5, 0.5, 100]","[0.5, 0.5, 10]","[5, 4, 3, 12]","[-4, -2, 6, -3, -8]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -5]","[2, 3, -5]","[2, 3, -5]","[2, 4, -5]","[2, 4, -5]","[2, 3, 4]"],"index":["null","null","null","null","null","null","0","0","1","null","1","null","1","null","1","null","1","null","1","null","1","null","1","null","null","null","1","null","1","null","1","null","2","1","null","1","2"],"expected":["9","11.2","25","(2.5 * 2.5 * 2.5)","3","-1.5","-6","-5.985164261060192","-4","-0.12","-0.04","36","-12*-4","36","-12*-4","3","1","3","1","9","6","17","6","8","4","0.25","5.545177444479562","12.25","5.545177444479562","1","1","-1","1","0","3","0 \n\"sum(7%i0)\" [2, 3, 4] null 3\n\"sum(7%ij)\" [2, 3, 4] null 5\n\"sum(7%i0)\" [2, 3, 4] 2 0","0 \n\"sum(i0%3)\" [2, 3, 4] 0 1\n\"sum(ij%3)\" [2, 3, 4] 2 1\n\"sum(ij-3)\" [1, 2, 4] null -2\n\"sum(ij-3)\" [1, 2, 4] 1 1\n\"sum(3-ij)\" [1, 2, 4] null 2\n\"sum(3-ij)\" [1, 2, 4] 1 -1\n\"sum(3-ij-ij)\" [1, 2, 4] 1 -2"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Test scalar results of various Function instances. [40]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A new Function instance which is detached! 
(no autograd support)","code":["Function f = Function.of(equation, false)"]}, + + {"kind":"expect","text":"Calling the function will yield the expected result.","code":["if ( index!=null ) assert f.derive( inputs, index ) == expected","else assert f.call( inputs )==expected"]}, + + {"kind":"where","text":"The following parameters are used :","code":{"equation":["\"6/2*(1+2)\"","\"sumJs(Ij)\"","\"prod(Ij)\"","\"prod(prod(Ij))\"","\"I3/i[1]-I0+2+i2\"","\"i3*i1/(i4-i0-2)-sig(0)+tanh(0)\"","\"(i0*i1)*i2\"","\"softplus(i0*i1)*i2\"","\"prod(ij)\"","\"relu(prod(ij))\"","\"relu(prod(ij))\"","\"quad(prod(ij)+6)\"","\"quad(prod(ij)+6)\"","\"quad(abs(prod(ij))-6)\"","\"quad(abs(prod(ij))-6)\"","\"sumJs(ij)\"","\"sumJs(ij)\"","\"sumJs(ij**1)\"","\"sumJs(ij**1)\"","\"I[1]**2\"","\"I[1]**2\"","\"sumJs(ij**2)\"","\"sumJs(ij**2)\"","\"2**I[1]\"","\"2**I[0]\"","\"2**I[2]\"","\"2**I[1]\"","\"sumJs(2**I[j])\"","\"sumJs(2**I[j])\"","\"I[1]%2\"","\"I[1]%2\"","\"I[2]%2\"","\"I[2]%2\"","\"I[2]%2\"","\"7%I[1]\"","\"7%I[1]\"","\"sum(7%ij)\""],"inputs":["[]","[2, 3.2, 6]","[0.5, 0.5, 100]","[0.5, 0.5, 10]","[5, 4, 3, 12]","[-4, -2, 6, -3, -8]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -5]","[2, 3, -5]","[2, 3, -5]","[2, 4, -5]","[2, 4, -5]","[2, 3, 4]"],"index":["null","null","null","null","null","null","0","0","1","null","1","null","1","null","1","null","1","null","1","null","1","null","1","null","null","null","1","null","1","null","1","null","2","1","null","1","2"],"expected":["9","11.2","25","(2.5 * 2.5 * 2.5)","3","-1.5","-6","-5.985164261060192","-4","-0.12","-0.04","36","-12*-4","36","-12*-4","3","1","3","1","9","6","17","6","8","4","0.25","5.545177444479562","12.25","5.545177444479562","1","1","-1","1","0","3","0 \n\"sum(7%i0)\" [2, 3, 4] null 3\n\"sum(7%ij)\" [2, 3, 4] null 5\n\"sum(7%i0)\" [2, 3, 4] 2 0","0 \n\"sum(i0%3)\" [2, 3, 4] 0 1\n\"sum(ij%3)\" [2, 3, 4] 2 1\n\"sum(ij-3)\" [1, 2, 4] null -2\n\"sum(ij-3)\" [1, 2, 4] 1 1\n\"sum(3-ij)\" [1, 2, 4] null 2\n\"sum(3-ij)\" [1, 2, 4] 1 -1\n\"sum(3-ij-ij)\" [1, 2, 4] 1 -2"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Test scalar results of various Function instances. [41]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A new Function instance which is detached! 
(no autograd support)","code":["Function f = Function.of(equation, false)"]}, + + {"kind":"expect","text":"Calling the function will yield the expected result.","code":["if ( index!=null ) assert f.derive( inputs, index ) == expected","else assert f.call( inputs )==expected"]}, + + {"kind":"where","text":"The following parameters are used :","code":{"equation":["\"6/2*(1+2)\"","\"sumJs(Ij)\"","\"prod(Ij)\"","\"prod(prod(Ij))\"","\"I3/i[1]-I0+2+i2\"","\"i3*i1/(i4-i0-2)-sig(0)+tanh(0)\"","\"(i0*i1)*i2\"","\"softplus(i0*i1)*i2\"","\"prod(ij)\"","\"relu(prod(ij))\"","\"relu(prod(ij))\"","\"quad(prod(ij)+6)\"","\"quad(prod(ij)+6)\"","\"quad(abs(prod(ij))-6)\"","\"quad(abs(prod(ij))-6)\"","\"sumJs(ij)\"","\"sumJs(ij)\"","\"sumJs(ij**1)\"","\"sumJs(ij**1)\"","\"I[1]**2\"","\"I[1]**2\"","\"sumJs(ij**2)\"","\"sumJs(ij**2)\"","\"2**I[1]\"","\"2**I[0]\"","\"2**I[2]\"","\"2**I[1]\"","\"sumJs(2**I[j])\"","\"sumJs(2**I[j])\"","\"I[1]%2\"","\"I[1]%2\"","\"I[2]%2\"","\"I[2]%2\"","\"I[2]%2\"","\"7%I[1]\"","\"7%I[1]\"","\"sum(7%ij)\""],"inputs":["[]","[2, 3.2, 6]","[0.5, 0.5, 100]","[0.5, 0.5, 10]","[5, 4, 3, 12]","[-4, -2, 6, -3, -8]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -5]","[2, 3, -5]","[2, 3, -5]","[2, 4, -5]","[2, 4, -5]","[2, 3, 4]"],"index":["null","null","null","null","null","null","0","0","1","null","1","null","1","null","1","null","1","null","1","null","1","null","1","null","null","null","1","null","1","null","1","null","2","1","null","1","2"],"expected":["9","11.2","25","(2.5 * 2.5 * 2.5)","3","-1.5","-6","-5.985164261060192","-4","-0.12","-0.04","36","-12*-4","36","-12*-4","3","1","3","1","9","6","17","6","8","4","0.25","5.545177444479562","12.25","5.545177444479562","1","1","-1","1","0","3","0 \n\"sum(7%i0)\" [2, 3, 4] null 3\n\"sum(7%ij)\" [2, 3, 4] null 5\n\"sum(7%i0)\" [2, 3, 4] 2 0","0 \n\"sum(i0%3)\" [2, 3, 4] 0 1\n\"sum(ij%3)\" [2, 3, 4] 2 1\n\"sum(ij-3)\" [1, 2, 4] null -2\n\"sum(ij-3)\" [1, 2, 4] 1 1\n\"sum(3-ij)\" [1, 2, 4] null 2\n\"sum(3-ij)\" [1, 2, 4] 1 -1\n\"sum(3-ij-ij)\" [1, 2, 4] 1 -2"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Test scalar results of various Function instances. [42]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A new Function instance which is detached! 
(no autograd support)","code":["Function f = Function.of(equation, false)"]}, + + {"kind":"expect","text":"Calling the function will yield the expected result.","code":["if ( index!=null ) assert f.derive( inputs, index ) == expected","else assert f.call( inputs )==expected"]}, + + {"kind":"where","text":"The following parameters are used :","code":{"equation":["\"6/2*(1+2)\"","\"sumJs(Ij)\"","\"prod(Ij)\"","\"prod(prod(Ij))\"","\"I3/i[1]-I0+2+i2\"","\"i3*i1/(i4-i0-2)-sig(0)+tanh(0)\"","\"(i0*i1)*i2\"","\"softplus(i0*i1)*i2\"","\"prod(ij)\"","\"relu(prod(ij))\"","\"relu(prod(ij))\"","\"quad(prod(ij)+6)\"","\"quad(prod(ij)+6)\"","\"quad(abs(prod(ij))-6)\"","\"quad(abs(prod(ij))-6)\"","\"sumJs(ij)\"","\"sumJs(ij)\"","\"sumJs(ij**1)\"","\"sumJs(ij**1)\"","\"I[1]**2\"","\"I[1]**2\"","\"sumJs(ij**2)\"","\"sumJs(ij**2)\"","\"2**I[1]\"","\"2**I[0]\"","\"2**I[2]\"","\"2**I[1]\"","\"sumJs(2**I[j])\"","\"sumJs(2**I[j])\"","\"I[1]%2\"","\"I[1]%2\"","\"I[2]%2\"","\"I[2]%2\"","\"I[2]%2\"","\"7%I[1]\"","\"7%I[1]\"","\"sum(7%ij)\""],"inputs":["[]","[2, 3.2, 6]","[0.5, 0.5, 100]","[0.5, 0.5, 10]","[5, 4, 3, 12]","[-4, -2, 6, -3, -8]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -5]","[2, 3, -5]","[2, 3, -5]","[2, 4, -5]","[2, 4, -5]","[2, 3, 4]"],"index":["null","null","null","null","null","null","0","0","1","null","1","null","1","null","1","null","1","null","1","null","1","null","1","null","null","null","1","null","1","null","1","null","2","1","null","1","2"],"expected":["9","11.2","25","(2.5 * 2.5 * 2.5)","3","-1.5","-6","-5.985164261060192","-4","-0.12","-0.04","36","-12*-4","36","-12*-4","3","1","3","1","9","6","17","6","8","4","0.25","5.545177444479562","12.25","5.545177444479562","1","1","-1","1","0","3","0 \n\"sum(7%i0)\" [2, 3, 4] null 3\n\"sum(7%ij)\" [2, 3, 4] null 5\n\"sum(7%i0)\" [2, 3, 4] 2 0","0 \n\"sum(i0%3)\" [2, 3, 4] 0 1\n\"sum(ij%3)\" [2, 3, 4] 2 1\n\"sum(ij-3)\" [1, 2, 4] null -2\n\"sum(ij-3)\" [1, 2, 4] 1 1\n\"sum(3-ij)\" [1, 2, 4] null 2\n\"sum(3-ij)\" [1, 2, 4] 1 -1\n\"sum(3-ij-ij)\" [1, 2, 4] 1 -2"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Test scalar results of various Function instances. [43]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A new Function instance which is detached! 
(no autograd support)","code":["Function f = Function.of(equation, false)"]}, + + {"kind":"expect","text":"Calling the function will yield the expected result.","code":["if ( index!=null ) assert f.derive( inputs, index ) == expected","else assert f.call( inputs )==expected"]}, + + {"kind":"where","text":"The following parameters are used :","code":{"equation":["\"6/2*(1+2)\"","\"sumJs(Ij)\"","\"prod(Ij)\"","\"prod(prod(Ij))\"","\"I3/i[1]-I0+2+i2\"","\"i3*i1/(i4-i0-2)-sig(0)+tanh(0)\"","\"(i0*i1)*i2\"","\"softplus(i0*i1)*i2\"","\"prod(ij)\"","\"relu(prod(ij))\"","\"relu(prod(ij))\"","\"quad(prod(ij)+6)\"","\"quad(prod(ij)+6)\"","\"quad(abs(prod(ij))-6)\"","\"quad(abs(prod(ij))-6)\"","\"sumJs(ij)\"","\"sumJs(ij)\"","\"sumJs(ij**1)\"","\"sumJs(ij**1)\"","\"I[1]**2\"","\"I[1]**2\"","\"sumJs(ij**2)\"","\"sumJs(ij**2)\"","\"2**I[1]\"","\"2**I[0]\"","\"2**I[2]\"","\"2**I[1]\"","\"sumJs(2**I[j])\"","\"sumJs(2**I[j])\"","\"I[1]%2\"","\"I[1]%2\"","\"I[2]%2\"","\"I[2]%2\"","\"I[2]%2\"","\"7%I[1]\"","\"7%I[1]\"","\"sum(7%ij)\""],"inputs":["[]","[2, 3.2, 6]","[0.5, 0.5, 100]","[0.5, 0.5, 10]","[5, 4, 3, 12]","[-4, -2, 6, -3, -8]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -5]","[2, 3, -5]","[2, 3, -5]","[2, 4, -5]","[2, 4, -5]","[2, 3, 4]"],"index":["null","null","null","null","null","null","0","0","1","null","1","null","1","null","1","null","1","null","1","null","1","null","1","null","null","null","1","null","1","null","1","null","2","1","null","1","2"],"expected":["9","11.2","25","(2.5 * 2.5 * 2.5)","3","-1.5","-6","-5.985164261060192","-4","-0.12","-0.04","36","-12*-4","36","-12*-4","3","1","3","1","9","6","17","6","8","4","0.25","5.545177444479562","12.25","5.545177444479562","1","1","-1","1","0","3","0 \n\"sum(7%i0)\" [2, 3, 4] null 3\n\"sum(7%ij)\" [2, 3, 4] null 5\n\"sum(7%i0)\" [2, 3, 4] 2 0","0 \n\"sum(i0%3)\" [2, 3, 4] 0 1\n\"sum(ij%3)\" [2, 3, 4] 2 1\n\"sum(ij-3)\" [1, 2, 4] null -2\n\"sum(ij-3)\" [1, 2, 4] 1 1\n\"sum(3-ij)\" [1, 2, 4] null 2\n\"sum(3-ij)\" [1, 2, 4] 1 -1\n\"sum(3-ij-ij)\" [1, 2, 4] 1 -2"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Test scalar results of various Function instances. [44]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A new Function instance which is detached! 
(no autograd support)","code":["Function f = Function.of(equation, false)"]}, + + {"kind":"expect","text":"Calling the function will yield the expected result.","code":["if ( index!=null ) assert f.derive( inputs, index ) == expected","else assert f.call( inputs )==expected"]}, + + {"kind":"where","text":"The following parameters are used :","code":{"equation":["\"6/2*(1+2)\"","\"sumJs(Ij)\"","\"prod(Ij)\"","\"prod(prod(Ij))\"","\"I3/i[1]-I0+2+i2\"","\"i3*i1/(i4-i0-2)-sig(0)+tanh(0)\"","\"(i0*i1)*i2\"","\"softplus(i0*i1)*i2\"","\"prod(ij)\"","\"relu(prod(ij))\"","\"relu(prod(ij))\"","\"quad(prod(ij)+6)\"","\"quad(prod(ij)+6)\"","\"quad(abs(prod(ij))-6)\"","\"quad(abs(prod(ij))-6)\"","\"sumJs(ij)\"","\"sumJs(ij)\"","\"sumJs(ij**1)\"","\"sumJs(ij**1)\"","\"I[1]**2\"","\"I[1]**2\"","\"sumJs(ij**2)\"","\"sumJs(ij**2)\"","\"2**I[1]\"","\"2**I[0]\"","\"2**I[2]\"","\"2**I[1]\"","\"sumJs(2**I[j])\"","\"sumJs(2**I[j])\"","\"I[1]%2\"","\"I[1]%2\"","\"I[2]%2\"","\"I[2]%2\"","\"I[2]%2\"","\"7%I[1]\"","\"7%I[1]\"","\"sum(7%ij)\""],"inputs":["[]","[2, 3.2, 6]","[0.5, 0.5, 100]","[0.5, 0.5, 10]","[5, 4, 3, 12]","[-4, -2, 6, -3, -8]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -5]","[2, 3, -5]","[2, 3, -5]","[2, 4, -5]","[2, 4, -5]","[2, 3, 4]"],"index":["null","null","null","null","null","null","0","0","1","null","1","null","1","null","1","null","1","null","1","null","1","null","1","null","null","null","1","null","1","null","1","null","2","1","null","1","2"],"expected":["9","11.2","25","(2.5 * 2.5 * 2.5)","3","-1.5","-6","-5.985164261060192","-4","-0.12","-0.04","36","-12*-4","36","-12*-4","3","1","3","1","9","6","17","6","8","4","0.25","5.545177444479562","12.25","5.545177444479562","1","1","-1","1","0","3","0 \n\"sum(7%i0)\" [2, 3, 4] null 3\n\"sum(7%ij)\" [2, 3, 4] null 5\n\"sum(7%i0)\" [2, 3, 4] 2 0","0 \n\"sum(i0%3)\" [2, 3, 4] 0 1\n\"sum(ij%3)\" [2, 3, 4] 2 1\n\"sum(ij-3)\" [1, 2, 4] null -2\n\"sum(ij-3)\" [1, 2, 4] 1 1\n\"sum(3-ij)\" [1, 2, 4] null 2\n\"sum(3-ij)\" [1, 2, 4] 1 -1\n\"sum(3-ij-ij)\" [1, 2, 4] 1 -2"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Test scalar results of various Function instances. [45]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A new Function instance which is detached! 
(no autograd support)","code":["Function f = Function.of(equation, false)"]}, + + {"kind":"expect","text":"Calling the function will yield the expected result.","code":["if ( index!=null ) assert f.derive( inputs, index ) == expected","else assert f.call( inputs )==expected"]}, + + {"kind":"where","text":"The following parameters are used :","code":{"equation":["\"6/2*(1+2)\"","\"sumJs(Ij)\"","\"prod(Ij)\"","\"prod(prod(Ij))\"","\"I3/i[1]-I0+2+i2\"","\"i3*i1/(i4-i0-2)-sig(0)+tanh(0)\"","\"(i0*i1)*i2\"","\"softplus(i0*i1)*i2\"","\"prod(ij)\"","\"relu(prod(ij))\"","\"relu(prod(ij))\"","\"quad(prod(ij)+6)\"","\"quad(prod(ij)+6)\"","\"quad(abs(prod(ij))-6)\"","\"quad(abs(prod(ij))-6)\"","\"sumJs(ij)\"","\"sumJs(ij)\"","\"sumJs(ij**1)\"","\"sumJs(ij**1)\"","\"I[1]**2\"","\"I[1]**2\"","\"sumJs(ij**2)\"","\"sumJs(ij**2)\"","\"2**I[1]\"","\"2**I[0]\"","\"2**I[2]\"","\"2**I[1]\"","\"sumJs(2**I[j])\"","\"sumJs(2**I[j])\"","\"I[1]%2\"","\"I[1]%2\"","\"I[2]%2\"","\"I[2]%2\"","\"I[2]%2\"","\"7%I[1]\"","\"7%I[1]\"","\"sum(7%ij)\""],"inputs":["[]","[2, 3.2, 6]","[0.5, 0.5, 100]","[0.5, 0.5, 10]","[5, 4, 3, 12]","[-4, -2, 6, -3, -8]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -2]","[2, 3, -5]","[2, 3, -5]","[2, 3, -5]","[2, 4, -5]","[2, 4, -5]","[2, 3, 4]"],"index":["null","null","null","null","null","null","0","0","1","null","1","null","1","null","1","null","1","null","1","null","1","null","1","null","null","null","1","null","1","null","1","null","2","1","null","1","2"],"expected":["9","11.2","25","(2.5 * 2.5 * 2.5)","3","-1.5","-6","-5.985164261060192","-4","-0.12","-0.04","36","-12*-4","36","-12*-4","3","1","3","1","9","6","17","6","8","4","0.25","5.545177444479562","12.25","5.545177444479562","1","1","-1","1","0","3","0 \n\"sum(7%i0)\" [2, 3, 4] null 3\n\"sum(7%ij)\" [2, 3, 4] null 5\n\"sum(7%i0)\" [2, 3, 4] 2 0","0 \n\"sum(i0%3)\" [2, 3, 4] 0 1\n\"sum(ij%3)\" [2, 3, 4] 2 1\n\"sum(ij-3)\" [1, 2, 4] null -2\n\"sum(ij-3)\" [1, 2, 4] 1 1\n\"sum(3-ij)\" [1, 2, 4] null 2\n\"sum(3-ij)\" [1, 2, 4] 1 -1\n\"sum(3-ij-ij)\" [1, 2, 4] 1 -2"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Test scalar results of various Function instances. [46]", "result":"PASS", "duration":"0", "iterations":{ diff --git a/docs/spock/reports/ut.math.Function_Spec.json b/docs/spock/reports/ut.math.Function_Spec.json index 123974395..7a9509260 100644 --- a/docs/spock/reports/ut.math.Function_Spec.json +++ b/docs/spock/reports/ut.math.Function_Spec.json @@ -4,19 +4,19 @@ "narrative":"This specification tests the default methods on functions\n through a simple dummy implementation of the Function interface.", "subjects":["java.util.function.Function"], "statistics":{ - "runs":"4", + "runs":"40", "successRate":"100.0%", "failures":"0", "errors":"0", "skipped":"0", - "duration":"0.023 seconds" + "duration":"0.046 seconds" }, "headers":[],"tags":{},"see":[], "features":[ { - "id":"Function implementations ensure that internally created tensors are flagged as \"intermediate\" initially!", + "id":"Function implementations ensure that internally created tensors are flagged as \"intermediate\" initially! 
[0]", "result":"PASS", - "duration":"0.002 seconds", + "duration":"0.007 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -43,12 +43,70 @@ }, { - "id":"Function implementations ensure that outputs which are input members are not flagged as \"intermediate\"!", + "id":"Function implementations ensure that internally created tensors are flagged as \"intermediate\" initially! [1]", "result":"PASS", "duration":"0", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, + "blocks":[ + {"kind":"given","text":"","code":["var fun1 = new DummyFunction((Args args, Tensor[] tensors) -> {"," var outputs = [Tensor.of(1d)]"," tensors.length.times { outputs.add(tensors[it]) }"," return outputs[0]"," })"]}, + + {"kind":"and","text":"","code":["var fun2 = new DummyFunction((Args args, Tensor[] tensors) -> {"," var outputs = [Tensor.of(1d)]"," tensors.length.times { outputs.add(tensors[it]) }"," return outputs[0].mut.setIsIntermediate(true)"," })"]}, + + {"kind":"and","text":"","code":["var a = Tensor.of(3d)","var b = Tensor.of(-2.5)"]}, + + {"kind":"expect","text":"","code":["!a.isIntermediate()","!b.isIntermediate()"]}, + + {"kind":"when","text":"","code":["caller(a, b, fun1)"]}, + + {"kind":"then","text":"","code":["thrown(IllegalStateException)"]}, + + {"kind":"when","text":"","code":["caller(a, b, fun2)"]}, + + {"kind":"then","text":"","code":["noExceptionThrown()"]}, + + {"kind":"where","text":"","code":{"caller":["{t1, t2, fun -> fun.call(t1, t2)}","{t1, t2, fun -> fun.invoke(t1, t2)}","{t1, t2, fun -> fun.execute(t1, t2)}"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Function implementations ensure that internally created tensors are flagged as \"intermediate\" initially! [2]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["var fun1 = new DummyFunction((Args args, Tensor[] tensors) -> {"," var outputs = [Tensor.of(1d)]"," tensors.length.times { outputs.add(tensors[it]) }"," return outputs[0]"," })"]}, + + {"kind":"and","text":"","code":["var fun2 = new DummyFunction((Args args, Tensor[] tensors) -> {"," var outputs = [Tensor.of(1d)]"," tensors.length.times { outputs.add(tensors[it]) }"," return outputs[0].mut.setIsIntermediate(true)"," })"]}, + + {"kind":"and","text":"","code":["var a = Tensor.of(3d)","var b = Tensor.of(-2.5)"]}, + + {"kind":"expect","text":"","code":["!a.isIntermediate()","!b.isIntermediate()"]}, + + {"kind":"when","text":"","code":["caller(a, b, fun1)"]}, + + {"kind":"then","text":"","code":["thrown(IllegalStateException)"]}, + + {"kind":"when","text":"","code":["caller(a, b, fun2)"]}, + + {"kind":"then","text":"","code":["noExceptionThrown()"]}, + + {"kind":"where","text":"","code":{"caller":["{t1, t2, fun -> fun.call(t1, t2)}","{t1, t2, fun -> fun.invoke(t1, t2)}","{t1, t2, fun -> fun.execute(t1, t2)}"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Function implementations ensure that outputs which are input members are not flagged as \"intermediate\"! 
[0]", + "result":"PASS", + "duration":"0.003 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, "blocks":[ {"kind":"given","text":"","code":["var fun1 = new DummyFunction((Args args, Tensor[] tensors) -> tensors[0] )"]}, @@ -72,12 +130,70 @@ }, { - "id":"Function implementations will ensure the \"call\" and \"invoke\" does not return tensors flagged as \"intermediate\".", + "id":"Function implementations ensure that outputs which are input members are not flagged as \"intermediate\"! [1]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["var fun1 = new DummyFunction((Args args, Tensor[] tensors) -> tensors[0] )"]}, + + {"kind":"and","text":"","code":["var fun2 = new DummyFunction((Args args, Tensor[] tensors) -> {"," return tensors[0].mut.setIsIntermediate( true ) // This should fail!"," })"]}, + + {"kind":"and","text":"","code":["var a = Tensor.of(3.0)","var b = Tensor.of(-2.5)"]}, + + {"kind":"expect","text":"","code":["!a.isIntermediate()","!b.isIntermediate()"]}, + + {"kind":"when","text":"","code":["caller(a, b, fun1)"]}, + + {"kind":"then","text":"","code":["noExceptionThrown()"]}, + + {"kind":"when","text":"","code":["caller(a, b, fun2)"]}, + + {"kind":"then","text":"","code":["thrown(IllegalStateException)"]}, + + {"kind":"where","text":"","code":{"caller":["{t1, t2, fun -> fun.call(t1, t2)}","{t1, t2, fun -> fun.invoke(t1, t2)}","{t1, t2, fun -> fun.execute(t1, t2)}"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Function implementations ensure that outputs which are input members are not flagged as \"intermediate\"! [2]", "result":"PASS", "duration":"0", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, + "blocks":[ + {"kind":"given","text":"","code":["var fun1 = new DummyFunction((Args args, Tensor[] tensors) -> tensors[0] )"]}, + + {"kind":"and","text":"","code":["var fun2 = new DummyFunction((Args args, Tensor[] tensors) -> {"," return tensors[0].mut.setIsIntermediate( true ) // This should fail!"," })"]}, + + {"kind":"and","text":"","code":["var a = Tensor.of(3.0)","var b = Tensor.of(-2.5)"]}, + + {"kind":"expect","text":"","code":["!a.isIntermediate()","!b.isIntermediate()"]}, + + {"kind":"when","text":"","code":["caller(a, b, fun1)"]}, + + {"kind":"then","text":"","code":["noExceptionThrown()"]}, + + {"kind":"when","text":"","code":["caller(a, b, fun2)"]}, + + {"kind":"then","text":"","code":["thrown(IllegalStateException)"]}, + + {"kind":"where","text":"","code":{"caller":["{t1, t2, fun -> fun.call(t1, t2)}","{t1, t2, fun -> fun.invoke(t1, t2)}","{t1, t2, fun -> fun.execute(t1, t2)}"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Function implementations will ensure the \"call\" and \"invoke\" does not return tensors flagged as \"intermediate\".", + "result":"PASS", + "duration":"0.003 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, "blocks":[ {"kind":"given","text":"","code":["var fun = new DummyFunction((Args args, Tensor[] tensors) -> {"," return Tensor.of(42f).mut.setIsIntermediate(true)"," })"]}, @@ -93,9 +209,553 @@ }, { - "id":"The library context exposes a set of useful functions.", + "id":"The library context exposes a set of useful functions. 
[0]", + "result":"PASS", + "duration":"0.003 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"","code":["fun.apply(Neureka.get().backend.function).toString() == expected","!fun.apply(Neureka.get().backend.function).isDoingAD()"]}, + + {"kind":"and","text":"","code":["fun.apply(Neureka.get().backend.autogradFunction).toString() == expected","fun.apply(Neureka.get().backend.autogradFunction).isDoingAD()"]}, + + {"kind":"where","text":"","code":{"expected":["'ln(I[0])'","'ln(I[0])'","'gaus(I[0])'","'gaus(I[0])'","'fast_gaus(I[0])'","'fast_gaus(I[0])'","'sig(I[0])'","'sig(I[0])'","'tanh(I[0])'","'tanh(I[0])'","'fast_tanh(I[0])'","'fast_tanh(I[0])'","'softsign(I[0])'","'softsign(I[0])'","'softsign(I[0])'","'quad(I[0])'","'quad(I[0])'","'relu(I[0])'","'relu(I[0])'","'abs(I[0])'","'abs(I[0])'","'sin(I[0])'","'sin(I[0])'","'cos(I[0])'","'cos(I[0])'","'softplus(I[0])'","'softplus(I[0])'","'silu(I[0])'","'silu(I[0])'","'gelu(I[0])'","'gelu(I[0])'","'selu(I[0])'","'selu(I[0])'"],"fun":["{Functions it -> it.ln }","{Functions it -> it.ln() }","{Functions it -> it.gaus }","{Functions it -> it.gaus() }","{Functions it -> it.fastGaus }","{Functions it -> it.fastGaus() }","{Functions it -> it.sigmoid }","{Functions it -> it.sigmoid() }","{Functions it -> it.tanh }","{Functions it -> it.tanh() }","{Functions it -> it.fastTanh }","{Functions it -> it.fastTanh() }","{Functions it -> it.softsign }","{Functions it -> it.softsign() }","{Functions it -> it.softsign }","{Functions it -> it.quad() }","{Functions it -> it.quad }","{Functions it -> it.relu() }","{Functions it -> it.relu }","{Functions it -> it.abs() }","{Functions it -> it.abs }","{Functions it -> it.sin() }","{Functions it -> it.sin }","{Functions it -> it.cos() }","{Functions it -> it.cos }","{Functions it -> it.softplus() }","{Functions it -> it.softplus }","{Functions it -> it.silu() }","{Functions it -> it.silu }","{Functions it -> it.gelu() }","{Functions it -> it.gelu }","{Functions it -> it.selu() }","{Functions it -> it.selu }"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The library context exposes a set of useful functions. 
[1]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"","code":["fun.apply(Neureka.get().backend.function).toString() == expected","!fun.apply(Neureka.get().backend.function).isDoingAD()"]}, + + {"kind":"and","text":"","code":["fun.apply(Neureka.get().backend.autogradFunction).toString() == expected","fun.apply(Neureka.get().backend.autogradFunction).isDoingAD()"]}, + + {"kind":"where","text":"","code":{"expected":["'ln(I[0])'","'ln(I[0])'","'gaus(I[0])'","'gaus(I[0])'","'fast_gaus(I[0])'","'fast_gaus(I[0])'","'sig(I[0])'","'sig(I[0])'","'tanh(I[0])'","'tanh(I[0])'","'fast_tanh(I[0])'","'fast_tanh(I[0])'","'softsign(I[0])'","'softsign(I[0])'","'softsign(I[0])'","'quad(I[0])'","'quad(I[0])'","'relu(I[0])'","'relu(I[0])'","'abs(I[0])'","'abs(I[0])'","'sin(I[0])'","'sin(I[0])'","'cos(I[0])'","'cos(I[0])'","'softplus(I[0])'","'softplus(I[0])'","'silu(I[0])'","'silu(I[0])'","'gelu(I[0])'","'gelu(I[0])'","'selu(I[0])'","'selu(I[0])'"],"fun":["{Functions it -> it.ln }","{Functions it -> it.ln() }","{Functions it -> it.gaus }","{Functions it -> it.gaus() }","{Functions it -> it.fastGaus }","{Functions it -> it.fastGaus() }","{Functions it -> it.sigmoid }","{Functions it -> it.sigmoid() }","{Functions it -> it.tanh }","{Functions it -> it.tanh() }","{Functions it -> it.fastTanh }","{Functions it -> it.fastTanh() }","{Functions it -> it.softsign }","{Functions it -> it.softsign() }","{Functions it -> it.softsign }","{Functions it -> it.quad() }","{Functions it -> it.quad }","{Functions it -> it.relu() }","{Functions it -> it.relu }","{Functions it -> it.abs() }","{Functions it -> it.abs }","{Functions it -> it.sin() }","{Functions it -> it.sin }","{Functions it -> it.cos() }","{Functions it -> it.cos }","{Functions it -> it.softplus() }","{Functions it -> it.softplus }","{Functions it -> it.silu() }","{Functions it -> it.silu }","{Functions it -> it.gelu() }","{Functions it -> it.gelu }","{Functions it -> it.selu() }","{Functions it -> it.selu }"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The library context exposes a set of useful functions. 
[2]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"","code":["fun.apply(Neureka.get().backend.function).toString() == expected","!fun.apply(Neureka.get().backend.function).isDoingAD()"]}, + + {"kind":"and","text":"","code":["fun.apply(Neureka.get().backend.autogradFunction).toString() == expected","fun.apply(Neureka.get().backend.autogradFunction).isDoingAD()"]}, + + {"kind":"where","text":"","code":{"expected":["'ln(I[0])'","'ln(I[0])'","'gaus(I[0])'","'gaus(I[0])'","'fast_gaus(I[0])'","'fast_gaus(I[0])'","'sig(I[0])'","'sig(I[0])'","'tanh(I[0])'","'tanh(I[0])'","'fast_tanh(I[0])'","'fast_tanh(I[0])'","'softsign(I[0])'","'softsign(I[0])'","'softsign(I[0])'","'quad(I[0])'","'quad(I[0])'","'relu(I[0])'","'relu(I[0])'","'abs(I[0])'","'abs(I[0])'","'sin(I[0])'","'sin(I[0])'","'cos(I[0])'","'cos(I[0])'","'softplus(I[0])'","'softplus(I[0])'","'silu(I[0])'","'silu(I[0])'","'gelu(I[0])'","'gelu(I[0])'","'selu(I[0])'","'selu(I[0])'"],"fun":["{Functions it -> it.ln }","{Functions it -> it.ln() }","{Functions it -> it.gaus }","{Functions it -> it.gaus() }","{Functions it -> it.fastGaus }","{Functions it -> it.fastGaus() }","{Functions it -> it.sigmoid }","{Functions it -> it.sigmoid() }","{Functions it -> it.tanh }","{Functions it -> it.tanh() }","{Functions it -> it.fastTanh }","{Functions it -> it.fastTanh() }","{Functions it -> it.softsign }","{Functions it -> it.softsign() }","{Functions it -> it.softsign }","{Functions it -> it.quad() }","{Functions it -> it.quad }","{Functions it -> it.relu() }","{Functions it -> it.relu }","{Functions it -> it.abs() }","{Functions it -> it.abs }","{Functions it -> it.sin() }","{Functions it -> it.sin }","{Functions it -> it.cos() }","{Functions it -> it.cos }","{Functions it -> it.softplus() }","{Functions it -> it.softplus }","{Functions it -> it.silu() }","{Functions it -> it.silu }","{Functions it -> it.gelu() }","{Functions it -> it.gelu }","{Functions it -> it.selu() }","{Functions it -> it.selu }"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The library context exposes a set of useful functions. 
[3]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"","code":["fun.apply(Neureka.get().backend.function).toString() == expected","!fun.apply(Neureka.get().backend.function).isDoingAD()"]}, + + {"kind":"and","text":"","code":["fun.apply(Neureka.get().backend.autogradFunction).toString() == expected","fun.apply(Neureka.get().backend.autogradFunction).isDoingAD()"]}, + + {"kind":"where","text":"","code":{"expected":["'ln(I[0])'","'ln(I[0])'","'gaus(I[0])'","'gaus(I[0])'","'fast_gaus(I[0])'","'fast_gaus(I[0])'","'sig(I[0])'","'sig(I[0])'","'tanh(I[0])'","'tanh(I[0])'","'fast_tanh(I[0])'","'fast_tanh(I[0])'","'softsign(I[0])'","'softsign(I[0])'","'softsign(I[0])'","'quad(I[0])'","'quad(I[0])'","'relu(I[0])'","'relu(I[0])'","'abs(I[0])'","'abs(I[0])'","'sin(I[0])'","'sin(I[0])'","'cos(I[0])'","'cos(I[0])'","'softplus(I[0])'","'softplus(I[0])'","'silu(I[0])'","'silu(I[0])'","'gelu(I[0])'","'gelu(I[0])'","'selu(I[0])'","'selu(I[0])'"],"fun":["{Functions it -> it.ln }","{Functions it -> it.ln() }","{Functions it -> it.gaus }","{Functions it -> it.gaus() }","{Functions it -> it.fastGaus }","{Functions it -> it.fastGaus() }","{Functions it -> it.sigmoid }","{Functions it -> it.sigmoid() }","{Functions it -> it.tanh }","{Functions it -> it.tanh() }","{Functions it -> it.fastTanh }","{Functions it -> it.fastTanh() }","{Functions it -> it.softsign }","{Functions it -> it.softsign() }","{Functions it -> it.softsign }","{Functions it -> it.quad() }","{Functions it -> it.quad }","{Functions it -> it.relu() }","{Functions it -> it.relu }","{Functions it -> it.abs() }","{Functions it -> it.abs }","{Functions it -> it.sin() }","{Functions it -> it.sin }","{Functions it -> it.cos() }","{Functions it -> it.cos }","{Functions it -> it.softplus() }","{Functions it -> it.softplus }","{Functions it -> it.silu() }","{Functions it -> it.silu }","{Functions it -> it.gelu() }","{Functions it -> it.gelu }","{Functions it -> it.selu() }","{Functions it -> it.selu }"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The library context exposes a set of useful functions. 
[4]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"","code":["fun.apply(Neureka.get().backend.function).toString() == expected","!fun.apply(Neureka.get().backend.function).isDoingAD()"]}, + + {"kind":"and","text":"","code":["fun.apply(Neureka.get().backend.autogradFunction).toString() == expected","fun.apply(Neureka.get().backend.autogradFunction).isDoingAD()"]}, + + {"kind":"where","text":"","code":{"expected":["'ln(I[0])'","'ln(I[0])'","'gaus(I[0])'","'gaus(I[0])'","'fast_gaus(I[0])'","'fast_gaus(I[0])'","'sig(I[0])'","'sig(I[0])'","'tanh(I[0])'","'tanh(I[0])'","'fast_tanh(I[0])'","'fast_tanh(I[0])'","'softsign(I[0])'","'softsign(I[0])'","'softsign(I[0])'","'quad(I[0])'","'quad(I[0])'","'relu(I[0])'","'relu(I[0])'","'abs(I[0])'","'abs(I[0])'","'sin(I[0])'","'sin(I[0])'","'cos(I[0])'","'cos(I[0])'","'softplus(I[0])'","'softplus(I[0])'","'silu(I[0])'","'silu(I[0])'","'gelu(I[0])'","'gelu(I[0])'","'selu(I[0])'","'selu(I[0])'"],"fun":["{Functions it -> it.ln }","{Functions it -> it.ln() }","{Functions it -> it.gaus }","{Functions it -> it.gaus() }","{Functions it -> it.fastGaus }","{Functions it -> it.fastGaus() }","{Functions it -> it.sigmoid }","{Functions it -> it.sigmoid() }","{Functions it -> it.tanh }","{Functions it -> it.tanh() }","{Functions it -> it.fastTanh }","{Functions it -> it.fastTanh() }","{Functions it -> it.softsign }","{Functions it -> it.softsign() }","{Functions it -> it.softsign }","{Functions it -> it.quad() }","{Functions it -> it.quad }","{Functions it -> it.relu() }","{Functions it -> it.relu }","{Functions it -> it.abs() }","{Functions it -> it.abs }","{Functions it -> it.sin() }","{Functions it -> it.sin }","{Functions it -> it.cos() }","{Functions it -> it.cos }","{Functions it -> it.softplus() }","{Functions it -> it.softplus }","{Functions it -> it.silu() }","{Functions it -> it.silu }","{Functions it -> it.gelu() }","{Functions it -> it.gelu }","{Functions it -> it.selu() }","{Functions it -> it.selu }"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The library context exposes a set of useful functions. 
[5]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"","code":["fun.apply(Neureka.get().backend.function).toString() == expected","!fun.apply(Neureka.get().backend.function).isDoingAD()"]}, + + {"kind":"and","text":"","code":["fun.apply(Neureka.get().backend.autogradFunction).toString() == expected","fun.apply(Neureka.get().backend.autogradFunction).isDoingAD()"]}, + + {"kind":"where","text":"","code":{"expected":["'ln(I[0])'","'ln(I[0])'","'gaus(I[0])'","'gaus(I[0])'","'fast_gaus(I[0])'","'fast_gaus(I[0])'","'sig(I[0])'","'sig(I[0])'","'tanh(I[0])'","'tanh(I[0])'","'fast_tanh(I[0])'","'fast_tanh(I[0])'","'softsign(I[0])'","'softsign(I[0])'","'softsign(I[0])'","'quad(I[0])'","'quad(I[0])'","'relu(I[0])'","'relu(I[0])'","'abs(I[0])'","'abs(I[0])'","'sin(I[0])'","'sin(I[0])'","'cos(I[0])'","'cos(I[0])'","'softplus(I[0])'","'softplus(I[0])'","'silu(I[0])'","'silu(I[0])'","'gelu(I[0])'","'gelu(I[0])'","'selu(I[0])'","'selu(I[0])'"],"fun":["{Functions it -> it.ln }","{Functions it -> it.ln() }","{Functions it -> it.gaus }","{Functions it -> it.gaus() }","{Functions it -> it.fastGaus }","{Functions it -> it.fastGaus() }","{Functions it -> it.sigmoid }","{Functions it -> it.sigmoid() }","{Functions it -> it.tanh }","{Functions it -> it.tanh() }","{Functions it -> it.fastTanh }","{Functions it -> it.fastTanh() }","{Functions it -> it.softsign }","{Functions it -> it.softsign() }","{Functions it -> it.softsign }","{Functions it -> it.quad() }","{Functions it -> it.quad }","{Functions it -> it.relu() }","{Functions it -> it.relu }","{Functions it -> it.abs() }","{Functions it -> it.abs }","{Functions it -> it.sin() }","{Functions it -> it.sin }","{Functions it -> it.cos() }","{Functions it -> it.cos }","{Functions it -> it.softplus() }","{Functions it -> it.softplus }","{Functions it -> it.silu() }","{Functions it -> it.silu }","{Functions it -> it.gelu() }","{Functions it -> it.gelu }","{Functions it -> it.selu() }","{Functions it -> it.selu }"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The library context exposes a set of useful functions. 
[6]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"","code":["fun.apply(Neureka.get().backend.function).toString() == expected","!fun.apply(Neureka.get().backend.function).isDoingAD()"]}, + + {"kind":"and","text":"","code":["fun.apply(Neureka.get().backend.autogradFunction).toString() == expected","fun.apply(Neureka.get().backend.autogradFunction).isDoingAD()"]}, + + {"kind":"where","text":"","code":{"expected":["'ln(I[0])'","'ln(I[0])'","'gaus(I[0])'","'gaus(I[0])'","'fast_gaus(I[0])'","'fast_gaus(I[0])'","'sig(I[0])'","'sig(I[0])'","'tanh(I[0])'","'tanh(I[0])'","'fast_tanh(I[0])'","'fast_tanh(I[0])'","'softsign(I[0])'","'softsign(I[0])'","'softsign(I[0])'","'quad(I[0])'","'quad(I[0])'","'relu(I[0])'","'relu(I[0])'","'abs(I[0])'","'abs(I[0])'","'sin(I[0])'","'sin(I[0])'","'cos(I[0])'","'cos(I[0])'","'softplus(I[0])'","'softplus(I[0])'","'silu(I[0])'","'silu(I[0])'","'gelu(I[0])'","'gelu(I[0])'","'selu(I[0])'","'selu(I[0])'"],"fun":["{Functions it -> it.ln }","{Functions it -> it.ln() }","{Functions it -> it.gaus }","{Functions it -> it.gaus() }","{Functions it -> it.fastGaus }","{Functions it -> it.fastGaus() }","{Functions it -> it.sigmoid }","{Functions it -> it.sigmoid() }","{Functions it -> it.tanh }","{Functions it -> it.tanh() }","{Functions it -> it.fastTanh }","{Functions it -> it.fastTanh() }","{Functions it -> it.softsign }","{Functions it -> it.softsign() }","{Functions it -> it.softsign }","{Functions it -> it.quad() }","{Functions it -> it.quad }","{Functions it -> it.relu() }","{Functions it -> it.relu }","{Functions it -> it.abs() }","{Functions it -> it.abs }","{Functions it -> it.sin() }","{Functions it -> it.sin }","{Functions it -> it.cos() }","{Functions it -> it.cos }","{Functions it -> it.softplus() }","{Functions it -> it.softplus }","{Functions it -> it.silu() }","{Functions it -> it.silu }","{Functions it -> it.gelu() }","{Functions it -> it.gelu }","{Functions it -> it.selu() }","{Functions it -> it.selu }"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The library context exposes a set of useful functions. 
[7]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"","code":["fun.apply(Neureka.get().backend.function).toString() == expected","!fun.apply(Neureka.get().backend.function).isDoingAD()"]}, + + {"kind":"and","text":"","code":["fun.apply(Neureka.get().backend.autogradFunction).toString() == expected","fun.apply(Neureka.get().backend.autogradFunction).isDoingAD()"]}, + + {"kind":"where","text":"","code":{"expected":["'ln(I[0])'","'ln(I[0])'","'gaus(I[0])'","'gaus(I[0])'","'fast_gaus(I[0])'","'fast_gaus(I[0])'","'sig(I[0])'","'sig(I[0])'","'tanh(I[0])'","'tanh(I[0])'","'fast_tanh(I[0])'","'fast_tanh(I[0])'","'softsign(I[0])'","'softsign(I[0])'","'softsign(I[0])'","'quad(I[0])'","'quad(I[0])'","'relu(I[0])'","'relu(I[0])'","'abs(I[0])'","'abs(I[0])'","'sin(I[0])'","'sin(I[0])'","'cos(I[0])'","'cos(I[0])'","'softplus(I[0])'","'softplus(I[0])'","'silu(I[0])'","'silu(I[0])'","'gelu(I[0])'","'gelu(I[0])'","'selu(I[0])'","'selu(I[0])'"],"fun":["{Functions it -> it.ln }","{Functions it -> it.ln() }","{Functions it -> it.gaus }","{Functions it -> it.gaus() }","{Functions it -> it.fastGaus }","{Functions it -> it.fastGaus() }","{Functions it -> it.sigmoid }","{Functions it -> it.sigmoid() }","{Functions it -> it.tanh }","{Functions it -> it.tanh() }","{Functions it -> it.fastTanh }","{Functions it -> it.fastTanh() }","{Functions it -> it.softsign }","{Functions it -> it.softsign() }","{Functions it -> it.softsign }","{Functions it -> it.quad() }","{Functions it -> it.quad }","{Functions it -> it.relu() }","{Functions it -> it.relu }","{Functions it -> it.abs() }","{Functions it -> it.abs }","{Functions it -> it.sin() }","{Functions it -> it.sin }","{Functions it -> it.cos() }","{Functions it -> it.cos }","{Functions it -> it.softplus() }","{Functions it -> it.softplus }","{Functions it -> it.silu() }","{Functions it -> it.silu }","{Functions it -> it.gelu() }","{Functions it -> it.gelu }","{Functions it -> it.selu() }","{Functions it -> it.selu }"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The library context exposes a set of useful functions. 
[8]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"","code":["fun.apply(Neureka.get().backend.function).toString() == expected","!fun.apply(Neureka.get().backend.function).isDoingAD()"]}, + + {"kind":"and","text":"","code":["fun.apply(Neureka.get().backend.autogradFunction).toString() == expected","fun.apply(Neureka.get().backend.autogradFunction).isDoingAD()"]}, + + {"kind":"where","text":"","code":{"expected":["'ln(I[0])'","'ln(I[0])'","'gaus(I[0])'","'gaus(I[0])'","'fast_gaus(I[0])'","'fast_gaus(I[0])'","'sig(I[0])'","'sig(I[0])'","'tanh(I[0])'","'tanh(I[0])'","'fast_tanh(I[0])'","'fast_tanh(I[0])'","'softsign(I[0])'","'softsign(I[0])'","'softsign(I[0])'","'quad(I[0])'","'quad(I[0])'","'relu(I[0])'","'relu(I[0])'","'abs(I[0])'","'abs(I[0])'","'sin(I[0])'","'sin(I[0])'","'cos(I[0])'","'cos(I[0])'","'softplus(I[0])'","'softplus(I[0])'","'silu(I[0])'","'silu(I[0])'","'gelu(I[0])'","'gelu(I[0])'","'selu(I[0])'","'selu(I[0])'"],"fun":["{Functions it -> it.ln }","{Functions it -> it.ln() }","{Functions it -> it.gaus }","{Functions it -> it.gaus() }","{Functions it -> it.fastGaus }","{Functions it -> it.fastGaus() }","{Functions it -> it.sigmoid }","{Functions it -> it.sigmoid() }","{Functions it -> it.tanh }","{Functions it -> it.tanh() }","{Functions it -> it.fastTanh }","{Functions it -> it.fastTanh() }","{Functions it -> it.softsign }","{Functions it -> it.softsign() }","{Functions it -> it.softsign }","{Functions it -> it.quad() }","{Functions it -> it.quad }","{Functions it -> it.relu() }","{Functions it -> it.relu }","{Functions it -> it.abs() }","{Functions it -> it.abs }","{Functions it -> it.sin() }","{Functions it -> it.sin }","{Functions it -> it.cos() }","{Functions it -> it.cos }","{Functions it -> it.softplus() }","{Functions it -> it.softplus }","{Functions it -> it.silu() }","{Functions it -> it.silu }","{Functions it -> it.gelu() }","{Functions it -> it.gelu }","{Functions it -> it.selu() }","{Functions it -> it.selu }"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The library context exposes a set of useful functions. 
[9]", "result":"PASS", - "duration":"0.001 seconds", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"","code":["fun.apply(Neureka.get().backend.function).toString() == expected","!fun.apply(Neureka.get().backend.function).isDoingAD()"]}, + + {"kind":"and","text":"","code":["fun.apply(Neureka.get().backend.autogradFunction).toString() == expected","fun.apply(Neureka.get().backend.autogradFunction).isDoingAD()"]}, + + {"kind":"where","text":"","code":{"expected":["'ln(I[0])'","'ln(I[0])'","'gaus(I[0])'","'gaus(I[0])'","'fast_gaus(I[0])'","'fast_gaus(I[0])'","'sig(I[0])'","'sig(I[0])'","'tanh(I[0])'","'tanh(I[0])'","'fast_tanh(I[0])'","'fast_tanh(I[0])'","'softsign(I[0])'","'softsign(I[0])'","'softsign(I[0])'","'quad(I[0])'","'quad(I[0])'","'relu(I[0])'","'relu(I[0])'","'abs(I[0])'","'abs(I[0])'","'sin(I[0])'","'sin(I[0])'","'cos(I[0])'","'cos(I[0])'","'softplus(I[0])'","'softplus(I[0])'","'silu(I[0])'","'silu(I[0])'","'gelu(I[0])'","'gelu(I[0])'","'selu(I[0])'","'selu(I[0])'"],"fun":["{Functions it -> it.ln }","{Functions it -> it.ln() }","{Functions it -> it.gaus }","{Functions it -> it.gaus() }","{Functions it -> it.fastGaus }","{Functions it -> it.fastGaus() }","{Functions it -> it.sigmoid }","{Functions it -> it.sigmoid() }","{Functions it -> it.tanh }","{Functions it -> it.tanh() }","{Functions it -> it.fastTanh }","{Functions it -> it.fastTanh() }","{Functions it -> it.softsign }","{Functions it -> it.softsign() }","{Functions it -> it.softsign }","{Functions it -> it.quad() }","{Functions it -> it.quad }","{Functions it -> it.relu() }","{Functions it -> it.relu }","{Functions it -> it.abs() }","{Functions it -> it.abs }","{Functions it -> it.sin() }","{Functions it -> it.sin }","{Functions it -> it.cos() }","{Functions it -> it.cos }","{Functions it -> it.softplus() }","{Functions it -> it.softplus }","{Functions it -> it.silu() }","{Functions it -> it.silu }","{Functions it -> it.gelu() }","{Functions it -> it.gelu }","{Functions it -> it.selu() }","{Functions it -> it.selu }"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The library context exposes a set of useful functions. 
[10]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"","code":["fun.apply(Neureka.get().backend.function).toString() == expected","!fun.apply(Neureka.get().backend.function).isDoingAD()"]}, + + {"kind":"and","text":"","code":["fun.apply(Neureka.get().backend.autogradFunction).toString() == expected","fun.apply(Neureka.get().backend.autogradFunction).isDoingAD()"]}, + + {"kind":"where","text":"","code":{"expected":["'ln(I[0])'","'ln(I[0])'","'gaus(I[0])'","'gaus(I[0])'","'fast_gaus(I[0])'","'fast_gaus(I[0])'","'sig(I[0])'","'sig(I[0])'","'tanh(I[0])'","'tanh(I[0])'","'fast_tanh(I[0])'","'fast_tanh(I[0])'","'softsign(I[0])'","'softsign(I[0])'","'softsign(I[0])'","'quad(I[0])'","'quad(I[0])'","'relu(I[0])'","'relu(I[0])'","'abs(I[0])'","'abs(I[0])'","'sin(I[0])'","'sin(I[0])'","'cos(I[0])'","'cos(I[0])'","'softplus(I[0])'","'softplus(I[0])'","'silu(I[0])'","'silu(I[0])'","'gelu(I[0])'","'gelu(I[0])'","'selu(I[0])'","'selu(I[0])'"],"fun":["{Functions it -> it.ln }","{Functions it -> it.ln() }","{Functions it -> it.gaus }","{Functions it -> it.gaus() }","{Functions it -> it.fastGaus }","{Functions it -> it.fastGaus() }","{Functions it -> it.sigmoid }","{Functions it -> it.sigmoid() }","{Functions it -> it.tanh }","{Functions it -> it.tanh() }","{Functions it -> it.fastTanh }","{Functions it -> it.fastTanh() }","{Functions it -> it.softsign }","{Functions it -> it.softsign() }","{Functions it -> it.softsign }","{Functions it -> it.quad() }","{Functions it -> it.quad }","{Functions it -> it.relu() }","{Functions it -> it.relu }","{Functions it -> it.abs() }","{Functions it -> it.abs }","{Functions it -> it.sin() }","{Functions it -> it.sin }","{Functions it -> it.cos() }","{Functions it -> it.cos }","{Functions it -> it.softplus() }","{Functions it -> it.softplus }","{Functions it -> it.silu() }","{Functions it -> it.silu }","{Functions it -> it.gelu() }","{Functions it -> it.gelu }","{Functions it -> it.selu() }","{Functions it -> it.selu }"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The library context exposes a set of useful functions. 
[11]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"","code":["fun.apply(Neureka.get().backend.function).toString() == expected","!fun.apply(Neureka.get().backend.function).isDoingAD()"]}, + + {"kind":"and","text":"","code":["fun.apply(Neureka.get().backend.autogradFunction).toString() == expected","fun.apply(Neureka.get().backend.autogradFunction).isDoingAD()"]}, + + {"kind":"where","text":"","code":{"expected":["'ln(I[0])'","'ln(I[0])'","'gaus(I[0])'","'gaus(I[0])'","'fast_gaus(I[0])'","'fast_gaus(I[0])'","'sig(I[0])'","'sig(I[0])'","'tanh(I[0])'","'tanh(I[0])'","'fast_tanh(I[0])'","'fast_tanh(I[0])'","'softsign(I[0])'","'softsign(I[0])'","'softsign(I[0])'","'quad(I[0])'","'quad(I[0])'","'relu(I[0])'","'relu(I[0])'","'abs(I[0])'","'abs(I[0])'","'sin(I[0])'","'sin(I[0])'","'cos(I[0])'","'cos(I[0])'","'softplus(I[0])'","'softplus(I[0])'","'silu(I[0])'","'silu(I[0])'","'gelu(I[0])'","'gelu(I[0])'","'selu(I[0])'","'selu(I[0])'"],"fun":["{Functions it -> it.ln }","{Functions it -> it.ln() }","{Functions it -> it.gaus }","{Functions it -> it.gaus() }","{Functions it -> it.fastGaus }","{Functions it -> it.fastGaus() }","{Functions it -> it.sigmoid }","{Functions it -> it.sigmoid() }","{Functions it -> it.tanh }","{Functions it -> it.tanh() }","{Functions it -> it.fastTanh }","{Functions it -> it.fastTanh() }","{Functions it -> it.softsign }","{Functions it -> it.softsign() }","{Functions it -> it.softsign }","{Functions it -> it.quad() }","{Functions it -> it.quad }","{Functions it -> it.relu() }","{Functions it -> it.relu }","{Functions it -> it.abs() }","{Functions it -> it.abs }","{Functions it -> it.sin() }","{Functions it -> it.sin }","{Functions it -> it.cos() }","{Functions it -> it.cos }","{Functions it -> it.softplus() }","{Functions it -> it.softplus }","{Functions it -> it.silu() }","{Functions it -> it.silu }","{Functions it -> it.gelu() }","{Functions it -> it.gelu }","{Functions it -> it.selu() }","{Functions it -> it.selu }"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The library context exposes a set of useful functions. 
[12]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"","code":["fun.apply(Neureka.get().backend.function).toString() == expected","!fun.apply(Neureka.get().backend.function).isDoingAD()"]}, + + {"kind":"and","text":"","code":["fun.apply(Neureka.get().backend.autogradFunction).toString() == expected","fun.apply(Neureka.get().backend.autogradFunction).isDoingAD()"]}, + + {"kind":"where","text":"","code":{"expected":["'ln(I[0])'","'ln(I[0])'","'gaus(I[0])'","'gaus(I[0])'","'fast_gaus(I[0])'","'fast_gaus(I[0])'","'sig(I[0])'","'sig(I[0])'","'tanh(I[0])'","'tanh(I[0])'","'fast_tanh(I[0])'","'fast_tanh(I[0])'","'softsign(I[0])'","'softsign(I[0])'","'softsign(I[0])'","'quad(I[0])'","'quad(I[0])'","'relu(I[0])'","'relu(I[0])'","'abs(I[0])'","'abs(I[0])'","'sin(I[0])'","'sin(I[0])'","'cos(I[0])'","'cos(I[0])'","'softplus(I[0])'","'softplus(I[0])'","'silu(I[0])'","'silu(I[0])'","'gelu(I[0])'","'gelu(I[0])'","'selu(I[0])'","'selu(I[0])'"],"fun":["{Functions it -> it.ln }","{Functions it -> it.ln() }","{Functions it -> it.gaus }","{Functions it -> it.gaus() }","{Functions it -> it.fastGaus }","{Functions it -> it.fastGaus() }","{Functions it -> it.sigmoid }","{Functions it -> it.sigmoid() }","{Functions it -> it.tanh }","{Functions it -> it.tanh() }","{Functions it -> it.fastTanh }","{Functions it -> it.fastTanh() }","{Functions it -> it.softsign }","{Functions it -> it.softsign() }","{Functions it -> it.softsign }","{Functions it -> it.quad() }","{Functions it -> it.quad }","{Functions it -> it.relu() }","{Functions it -> it.relu }","{Functions it -> it.abs() }","{Functions it -> it.abs }","{Functions it -> it.sin() }","{Functions it -> it.sin }","{Functions it -> it.cos() }","{Functions it -> it.cos }","{Functions it -> it.softplus() }","{Functions it -> it.softplus }","{Functions it -> it.silu() }","{Functions it -> it.silu }","{Functions it -> it.gelu() }","{Functions it -> it.gelu }","{Functions it -> it.selu() }","{Functions it -> it.selu }"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The library context exposes a set of useful functions. 
[13]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"","code":["fun.apply(Neureka.get().backend.function).toString() == expected","!fun.apply(Neureka.get().backend.function).isDoingAD()"]}, + + {"kind":"and","text":"","code":["fun.apply(Neureka.get().backend.autogradFunction).toString() == expected","fun.apply(Neureka.get().backend.autogradFunction).isDoingAD()"]}, + + {"kind":"where","text":"","code":{"expected":["'ln(I[0])'","'ln(I[0])'","'gaus(I[0])'","'gaus(I[0])'","'fast_gaus(I[0])'","'fast_gaus(I[0])'","'sig(I[0])'","'sig(I[0])'","'tanh(I[0])'","'tanh(I[0])'","'fast_tanh(I[0])'","'fast_tanh(I[0])'","'softsign(I[0])'","'softsign(I[0])'","'softsign(I[0])'","'quad(I[0])'","'quad(I[0])'","'relu(I[0])'","'relu(I[0])'","'abs(I[0])'","'abs(I[0])'","'sin(I[0])'","'sin(I[0])'","'cos(I[0])'","'cos(I[0])'","'softplus(I[0])'","'softplus(I[0])'","'silu(I[0])'","'silu(I[0])'","'gelu(I[0])'","'gelu(I[0])'","'selu(I[0])'","'selu(I[0])'"],"fun":["{Functions it -> it.ln }","{Functions it -> it.ln() }","{Functions it -> it.gaus }","{Functions it -> it.gaus() }","{Functions it -> it.fastGaus }","{Functions it -> it.fastGaus() }","{Functions it -> it.sigmoid }","{Functions it -> it.sigmoid() }","{Functions it -> it.tanh }","{Functions it -> it.tanh() }","{Functions it -> it.fastTanh }","{Functions it -> it.fastTanh() }","{Functions it -> it.softsign }","{Functions it -> it.softsign() }","{Functions it -> it.softsign }","{Functions it -> it.quad() }","{Functions it -> it.quad }","{Functions it -> it.relu() }","{Functions it -> it.relu }","{Functions it -> it.abs() }","{Functions it -> it.abs }","{Functions it -> it.sin() }","{Functions it -> it.sin }","{Functions it -> it.cos() }","{Functions it -> it.cos }","{Functions it -> it.softplus() }","{Functions it -> it.softplus }","{Functions it -> it.silu() }","{Functions it -> it.silu }","{Functions it -> it.gelu() }","{Functions it -> it.gelu }","{Functions it -> it.selu() }","{Functions it -> it.selu }"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The library context exposes a set of useful functions. 
[14]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"","code":["fun.apply(Neureka.get().backend.function).toString() == expected","!fun.apply(Neureka.get().backend.function).isDoingAD()"]}, + + {"kind":"and","text":"","code":["fun.apply(Neureka.get().backend.autogradFunction).toString() == expected","fun.apply(Neureka.get().backend.autogradFunction).isDoingAD()"]}, + + {"kind":"where","text":"","code":{"expected":["'ln(I[0])'","'ln(I[0])'","'gaus(I[0])'","'gaus(I[0])'","'fast_gaus(I[0])'","'fast_gaus(I[0])'","'sig(I[0])'","'sig(I[0])'","'tanh(I[0])'","'tanh(I[0])'","'fast_tanh(I[0])'","'fast_tanh(I[0])'","'softsign(I[0])'","'softsign(I[0])'","'softsign(I[0])'","'quad(I[0])'","'quad(I[0])'","'relu(I[0])'","'relu(I[0])'","'abs(I[0])'","'abs(I[0])'","'sin(I[0])'","'sin(I[0])'","'cos(I[0])'","'cos(I[0])'","'softplus(I[0])'","'softplus(I[0])'","'silu(I[0])'","'silu(I[0])'","'gelu(I[0])'","'gelu(I[0])'","'selu(I[0])'","'selu(I[0])'"],"fun":["{Functions it -> it.ln }","{Functions it -> it.ln() }","{Functions it -> it.gaus }","{Functions it -> it.gaus() }","{Functions it -> it.fastGaus }","{Functions it -> it.fastGaus() }","{Functions it -> it.sigmoid }","{Functions it -> it.sigmoid() }","{Functions it -> it.tanh }","{Functions it -> it.tanh() }","{Functions it -> it.fastTanh }","{Functions it -> it.fastTanh() }","{Functions it -> it.softsign }","{Functions it -> it.softsign() }","{Functions it -> it.softsign }","{Functions it -> it.quad() }","{Functions it -> it.quad }","{Functions it -> it.relu() }","{Functions it -> it.relu }","{Functions it -> it.abs() }","{Functions it -> it.abs }","{Functions it -> it.sin() }","{Functions it -> it.sin }","{Functions it -> it.cos() }","{Functions it -> it.cos }","{Functions it -> it.softplus() }","{Functions it -> it.softplus }","{Functions it -> it.silu() }","{Functions it -> it.silu }","{Functions it -> it.gelu() }","{Functions it -> it.gelu }","{Functions it -> it.selu() }","{Functions it -> it.selu }"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The library context exposes a set of useful functions. 
[15]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"","code":["fun.apply(Neureka.get().backend.function).toString() == expected","!fun.apply(Neureka.get().backend.function).isDoingAD()"]}, + + {"kind":"and","text":"","code":["fun.apply(Neureka.get().backend.autogradFunction).toString() == expected","fun.apply(Neureka.get().backend.autogradFunction).isDoingAD()"]}, + + {"kind":"where","text":"","code":{"expected":["'ln(I[0])'","'ln(I[0])'","'gaus(I[0])'","'gaus(I[0])'","'fast_gaus(I[0])'","'fast_gaus(I[0])'","'sig(I[0])'","'sig(I[0])'","'tanh(I[0])'","'tanh(I[0])'","'fast_tanh(I[0])'","'fast_tanh(I[0])'","'softsign(I[0])'","'softsign(I[0])'","'softsign(I[0])'","'quad(I[0])'","'quad(I[0])'","'relu(I[0])'","'relu(I[0])'","'abs(I[0])'","'abs(I[0])'","'sin(I[0])'","'sin(I[0])'","'cos(I[0])'","'cos(I[0])'","'softplus(I[0])'","'softplus(I[0])'","'silu(I[0])'","'silu(I[0])'","'gelu(I[0])'","'gelu(I[0])'","'selu(I[0])'","'selu(I[0])'"],"fun":["{Functions it -> it.ln }","{Functions it -> it.ln() }","{Functions it -> it.gaus }","{Functions it -> it.gaus() }","{Functions it -> it.fastGaus }","{Functions it -> it.fastGaus() }","{Functions it -> it.sigmoid }","{Functions it -> it.sigmoid() }","{Functions it -> it.tanh }","{Functions it -> it.tanh() }","{Functions it -> it.fastTanh }","{Functions it -> it.fastTanh() }","{Functions it -> it.softsign }","{Functions it -> it.softsign() }","{Functions it -> it.softsign }","{Functions it -> it.quad() }","{Functions it -> it.quad }","{Functions it -> it.relu() }","{Functions it -> it.relu }","{Functions it -> it.abs() }","{Functions it -> it.abs }","{Functions it -> it.sin() }","{Functions it -> it.sin }","{Functions it -> it.cos() }","{Functions it -> it.cos }","{Functions it -> it.softplus() }","{Functions it -> it.softplus }","{Functions it -> it.silu() }","{Functions it -> it.silu }","{Functions it -> it.gelu() }","{Functions it -> it.gelu }","{Functions it -> it.selu() }","{Functions it -> it.selu }"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The library context exposes a set of useful functions. 
[16]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"","code":["fun.apply(Neureka.get().backend.function).toString() == expected","!fun.apply(Neureka.get().backend.function).isDoingAD()"]}, + + {"kind":"and","text":"","code":["fun.apply(Neureka.get().backend.autogradFunction).toString() == expected","fun.apply(Neureka.get().backend.autogradFunction).isDoingAD()"]}, + + {"kind":"where","text":"","code":{"expected":["'ln(I[0])'","'ln(I[0])'","'gaus(I[0])'","'gaus(I[0])'","'fast_gaus(I[0])'","'fast_gaus(I[0])'","'sig(I[0])'","'sig(I[0])'","'tanh(I[0])'","'tanh(I[0])'","'fast_tanh(I[0])'","'fast_tanh(I[0])'","'softsign(I[0])'","'softsign(I[0])'","'softsign(I[0])'","'quad(I[0])'","'quad(I[0])'","'relu(I[0])'","'relu(I[0])'","'abs(I[0])'","'abs(I[0])'","'sin(I[0])'","'sin(I[0])'","'cos(I[0])'","'cos(I[0])'","'softplus(I[0])'","'softplus(I[0])'","'silu(I[0])'","'silu(I[0])'","'gelu(I[0])'","'gelu(I[0])'","'selu(I[0])'","'selu(I[0])'"],"fun":["{Functions it -> it.ln }","{Functions it -> it.ln() }","{Functions it -> it.gaus }","{Functions it -> it.gaus() }","{Functions it -> it.fastGaus }","{Functions it -> it.fastGaus() }","{Functions it -> it.sigmoid }","{Functions it -> it.sigmoid() }","{Functions it -> it.tanh }","{Functions it -> it.tanh() }","{Functions it -> it.fastTanh }","{Functions it -> it.fastTanh() }","{Functions it -> it.softsign }","{Functions it -> it.softsign() }","{Functions it -> it.softsign }","{Functions it -> it.quad() }","{Functions it -> it.quad }","{Functions it -> it.relu() }","{Functions it -> it.relu }","{Functions it -> it.abs() }","{Functions it -> it.abs }","{Functions it -> it.sin() }","{Functions it -> it.sin }","{Functions it -> it.cos() }","{Functions it -> it.cos }","{Functions it -> it.softplus() }","{Functions it -> it.softplus }","{Functions it -> it.silu() }","{Functions it -> it.silu }","{Functions it -> it.gelu() }","{Functions it -> it.gelu }","{Functions it -> it.selu() }","{Functions it -> it.selu }"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The library context exposes a set of useful functions. 
[17]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"","code":["fun.apply(Neureka.get().backend.function).toString() == expected","!fun.apply(Neureka.get().backend.function).isDoingAD()"]}, + + {"kind":"and","text":"","code":["fun.apply(Neureka.get().backend.autogradFunction).toString() == expected","fun.apply(Neureka.get().backend.autogradFunction).isDoingAD()"]}, + + {"kind":"where","text":"","code":{"expected":["'ln(I[0])'","'ln(I[0])'","'gaus(I[0])'","'gaus(I[0])'","'fast_gaus(I[0])'","'fast_gaus(I[0])'","'sig(I[0])'","'sig(I[0])'","'tanh(I[0])'","'tanh(I[0])'","'fast_tanh(I[0])'","'fast_tanh(I[0])'","'softsign(I[0])'","'softsign(I[0])'","'softsign(I[0])'","'quad(I[0])'","'quad(I[0])'","'relu(I[0])'","'relu(I[0])'","'abs(I[0])'","'abs(I[0])'","'sin(I[0])'","'sin(I[0])'","'cos(I[0])'","'cos(I[0])'","'softplus(I[0])'","'softplus(I[0])'","'silu(I[0])'","'silu(I[0])'","'gelu(I[0])'","'gelu(I[0])'","'selu(I[0])'","'selu(I[0])'"],"fun":["{Functions it -> it.ln }","{Functions it -> it.ln() }","{Functions it -> it.gaus }","{Functions it -> it.gaus() }","{Functions it -> it.fastGaus }","{Functions it -> it.fastGaus() }","{Functions it -> it.sigmoid }","{Functions it -> it.sigmoid() }","{Functions it -> it.tanh }","{Functions it -> it.tanh() }","{Functions it -> it.fastTanh }","{Functions it -> it.fastTanh() }","{Functions it -> it.softsign }","{Functions it -> it.softsign() }","{Functions it -> it.softsign }","{Functions it -> it.quad() }","{Functions it -> it.quad }","{Functions it -> it.relu() }","{Functions it -> it.relu }","{Functions it -> it.abs() }","{Functions it -> it.abs }","{Functions it -> it.sin() }","{Functions it -> it.sin }","{Functions it -> it.cos() }","{Functions it -> it.cos }","{Functions it -> it.softplus() }","{Functions it -> it.softplus }","{Functions it -> it.silu() }","{Functions it -> it.silu }","{Functions it -> it.gelu() }","{Functions it -> it.gelu }","{Functions it -> it.selu() }","{Functions it -> it.selu }"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The library context exposes a set of useful functions. 
[18]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"","code":["fun.apply(Neureka.get().backend.function).toString() == expected","!fun.apply(Neureka.get().backend.function).isDoingAD()"]}, + + {"kind":"and","text":"","code":["fun.apply(Neureka.get().backend.autogradFunction).toString() == expected","fun.apply(Neureka.get().backend.autogradFunction).isDoingAD()"]}, + + {"kind":"where","text":"","code":{"expected":["'ln(I[0])'","'ln(I[0])'","'gaus(I[0])'","'gaus(I[0])'","'fast_gaus(I[0])'","'fast_gaus(I[0])'","'sig(I[0])'","'sig(I[0])'","'tanh(I[0])'","'tanh(I[0])'","'fast_tanh(I[0])'","'fast_tanh(I[0])'","'softsign(I[0])'","'softsign(I[0])'","'softsign(I[0])'","'quad(I[0])'","'quad(I[0])'","'relu(I[0])'","'relu(I[0])'","'abs(I[0])'","'abs(I[0])'","'sin(I[0])'","'sin(I[0])'","'cos(I[0])'","'cos(I[0])'","'softplus(I[0])'","'softplus(I[0])'","'silu(I[0])'","'silu(I[0])'","'gelu(I[0])'","'gelu(I[0])'","'selu(I[0])'","'selu(I[0])'"],"fun":["{Functions it -> it.ln }","{Functions it -> it.ln() }","{Functions it -> it.gaus }","{Functions it -> it.gaus() }","{Functions it -> it.fastGaus }","{Functions it -> it.fastGaus() }","{Functions it -> it.sigmoid }","{Functions it -> it.sigmoid() }","{Functions it -> it.tanh }","{Functions it -> it.tanh() }","{Functions it -> it.fastTanh }","{Functions it -> it.fastTanh() }","{Functions it -> it.softsign }","{Functions it -> it.softsign() }","{Functions it -> it.softsign }","{Functions it -> it.quad() }","{Functions it -> it.quad }","{Functions it -> it.relu() }","{Functions it -> it.relu }","{Functions it -> it.abs() }","{Functions it -> it.abs }","{Functions it -> it.sin() }","{Functions it -> it.sin }","{Functions it -> it.cos() }","{Functions it -> it.cos }","{Functions it -> it.softplus() }","{Functions it -> it.softplus }","{Functions it -> it.silu() }","{Functions it -> it.silu }","{Functions it -> it.gelu() }","{Functions it -> it.gelu }","{Functions it -> it.selu() }","{Functions it -> it.selu }"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The library context exposes a set of useful functions. 
[19]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"","code":["fun.apply(Neureka.get().backend.function).toString() == expected","!fun.apply(Neureka.get().backend.function).isDoingAD()"]}, + + {"kind":"and","text":"","code":["fun.apply(Neureka.get().backend.autogradFunction).toString() == expected","fun.apply(Neureka.get().backend.autogradFunction).isDoingAD()"]}, + + {"kind":"where","text":"","code":{"expected":["'ln(I[0])'","'ln(I[0])'","'gaus(I[0])'","'gaus(I[0])'","'fast_gaus(I[0])'","'fast_gaus(I[0])'","'sig(I[0])'","'sig(I[0])'","'tanh(I[0])'","'tanh(I[0])'","'fast_tanh(I[0])'","'fast_tanh(I[0])'","'softsign(I[0])'","'softsign(I[0])'","'softsign(I[0])'","'quad(I[0])'","'quad(I[0])'","'relu(I[0])'","'relu(I[0])'","'abs(I[0])'","'abs(I[0])'","'sin(I[0])'","'sin(I[0])'","'cos(I[0])'","'cos(I[0])'","'softplus(I[0])'","'softplus(I[0])'","'silu(I[0])'","'silu(I[0])'","'gelu(I[0])'","'gelu(I[0])'","'selu(I[0])'","'selu(I[0])'"],"fun":["{Functions it -> it.ln }","{Functions it -> it.ln() }","{Functions it -> it.gaus }","{Functions it -> it.gaus() }","{Functions it -> it.fastGaus }","{Functions it -> it.fastGaus() }","{Functions it -> it.sigmoid }","{Functions it -> it.sigmoid() }","{Functions it -> it.tanh }","{Functions it -> it.tanh() }","{Functions it -> it.fastTanh }","{Functions it -> it.fastTanh() }","{Functions it -> it.softsign }","{Functions it -> it.softsign() }","{Functions it -> it.softsign }","{Functions it -> it.quad() }","{Functions it -> it.quad }","{Functions it -> it.relu() }","{Functions it -> it.relu }","{Functions it -> it.abs() }","{Functions it -> it.abs }","{Functions it -> it.sin() }","{Functions it -> it.sin }","{Functions it -> it.cos() }","{Functions it -> it.cos }","{Functions it -> it.softplus() }","{Functions it -> it.softplus }","{Functions it -> it.silu() }","{Functions it -> it.silu }","{Functions it -> it.gelu() }","{Functions it -> it.gelu }","{Functions it -> it.selu() }","{Functions it -> it.selu }"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The library context exposes a set of useful functions. 
[20]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"","code":["fun.apply(Neureka.get().backend.function).toString() == expected","!fun.apply(Neureka.get().backend.function).isDoingAD()"]}, + + {"kind":"and","text":"","code":["fun.apply(Neureka.get().backend.autogradFunction).toString() == expected","fun.apply(Neureka.get().backend.autogradFunction).isDoingAD()"]}, + + {"kind":"where","text":"","code":{"expected":["'ln(I[0])'","'ln(I[0])'","'gaus(I[0])'","'gaus(I[0])'","'fast_gaus(I[0])'","'fast_gaus(I[0])'","'sig(I[0])'","'sig(I[0])'","'tanh(I[0])'","'tanh(I[0])'","'fast_tanh(I[0])'","'fast_tanh(I[0])'","'softsign(I[0])'","'softsign(I[0])'","'softsign(I[0])'","'quad(I[0])'","'quad(I[0])'","'relu(I[0])'","'relu(I[0])'","'abs(I[0])'","'abs(I[0])'","'sin(I[0])'","'sin(I[0])'","'cos(I[0])'","'cos(I[0])'","'softplus(I[0])'","'softplus(I[0])'","'silu(I[0])'","'silu(I[0])'","'gelu(I[0])'","'gelu(I[0])'","'selu(I[0])'","'selu(I[0])'"],"fun":["{Functions it -> it.ln }","{Functions it -> it.ln() }","{Functions it -> it.gaus }","{Functions it -> it.gaus() }","{Functions it -> it.fastGaus }","{Functions it -> it.fastGaus() }","{Functions it -> it.sigmoid }","{Functions it -> it.sigmoid() }","{Functions it -> it.tanh }","{Functions it -> it.tanh() }","{Functions it -> it.fastTanh }","{Functions it -> it.fastTanh() }","{Functions it -> it.softsign }","{Functions it -> it.softsign() }","{Functions it -> it.softsign }","{Functions it -> it.quad() }","{Functions it -> it.quad }","{Functions it -> it.relu() }","{Functions it -> it.relu }","{Functions it -> it.abs() }","{Functions it -> it.abs }","{Functions it -> it.sin() }","{Functions it -> it.sin }","{Functions it -> it.cos() }","{Functions it -> it.cos }","{Functions it -> it.softplus() }","{Functions it -> it.softplus }","{Functions it -> it.silu() }","{Functions it -> it.silu }","{Functions it -> it.gelu() }","{Functions it -> it.gelu }","{Functions it -> it.selu() }","{Functions it -> it.selu }"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The library context exposes a set of useful functions. 
[21]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"","code":["fun.apply(Neureka.get().backend.function).toString() == expected","!fun.apply(Neureka.get().backend.function).isDoingAD()"]}, + + {"kind":"and","text":"","code":["fun.apply(Neureka.get().backend.autogradFunction).toString() == expected","fun.apply(Neureka.get().backend.autogradFunction).isDoingAD()"]}, + + {"kind":"where","text":"","code":{"expected":["'ln(I[0])'","'ln(I[0])'","'gaus(I[0])'","'gaus(I[0])'","'fast_gaus(I[0])'","'fast_gaus(I[0])'","'sig(I[0])'","'sig(I[0])'","'tanh(I[0])'","'tanh(I[0])'","'fast_tanh(I[0])'","'fast_tanh(I[0])'","'softsign(I[0])'","'softsign(I[0])'","'softsign(I[0])'","'quad(I[0])'","'quad(I[0])'","'relu(I[0])'","'relu(I[0])'","'abs(I[0])'","'abs(I[0])'","'sin(I[0])'","'sin(I[0])'","'cos(I[0])'","'cos(I[0])'","'softplus(I[0])'","'softplus(I[0])'","'silu(I[0])'","'silu(I[0])'","'gelu(I[0])'","'gelu(I[0])'","'selu(I[0])'","'selu(I[0])'"],"fun":["{Functions it -> it.ln }","{Functions it -> it.ln() }","{Functions it -> it.gaus }","{Functions it -> it.gaus() }","{Functions it -> it.fastGaus }","{Functions it -> it.fastGaus() }","{Functions it -> it.sigmoid }","{Functions it -> it.sigmoid() }","{Functions it -> it.tanh }","{Functions it -> it.tanh() }","{Functions it -> it.fastTanh }","{Functions it -> it.fastTanh() }","{Functions it -> it.softsign }","{Functions it -> it.softsign() }","{Functions it -> it.softsign }","{Functions it -> it.quad() }","{Functions it -> it.quad }","{Functions it -> it.relu() }","{Functions it -> it.relu }","{Functions it -> it.abs() }","{Functions it -> it.abs }","{Functions it -> it.sin() }","{Functions it -> it.sin }","{Functions it -> it.cos() }","{Functions it -> it.cos }","{Functions it -> it.softplus() }","{Functions it -> it.softplus }","{Functions it -> it.silu() }","{Functions it -> it.silu }","{Functions it -> it.gelu() }","{Functions it -> it.gelu }","{Functions it -> it.selu() }","{Functions it -> it.selu }"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The library context exposes a set of useful functions. 
[22]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"","code":["fun.apply(Neureka.get().backend.function).toString() == expected","!fun.apply(Neureka.get().backend.function).isDoingAD()"]}, + + {"kind":"and","text":"","code":["fun.apply(Neureka.get().backend.autogradFunction).toString() == expected","fun.apply(Neureka.get().backend.autogradFunction).isDoingAD()"]}, + + {"kind":"where","text":"","code":{"expected":["'ln(I[0])'","'ln(I[0])'","'gaus(I[0])'","'gaus(I[0])'","'fast_gaus(I[0])'","'fast_gaus(I[0])'","'sig(I[0])'","'sig(I[0])'","'tanh(I[0])'","'tanh(I[0])'","'fast_tanh(I[0])'","'fast_tanh(I[0])'","'softsign(I[0])'","'softsign(I[0])'","'softsign(I[0])'","'quad(I[0])'","'quad(I[0])'","'relu(I[0])'","'relu(I[0])'","'abs(I[0])'","'abs(I[0])'","'sin(I[0])'","'sin(I[0])'","'cos(I[0])'","'cos(I[0])'","'softplus(I[0])'","'softplus(I[0])'","'silu(I[0])'","'silu(I[0])'","'gelu(I[0])'","'gelu(I[0])'","'selu(I[0])'","'selu(I[0])'"],"fun":["{Functions it -> it.ln }","{Functions it -> it.ln() }","{Functions it -> it.gaus }","{Functions it -> it.gaus() }","{Functions it -> it.fastGaus }","{Functions it -> it.fastGaus() }","{Functions it -> it.sigmoid }","{Functions it -> it.sigmoid() }","{Functions it -> it.tanh }","{Functions it -> it.tanh() }","{Functions it -> it.fastTanh }","{Functions it -> it.fastTanh() }","{Functions it -> it.softsign }","{Functions it -> it.softsign() }","{Functions it -> it.softsign }","{Functions it -> it.quad() }","{Functions it -> it.quad }","{Functions it -> it.relu() }","{Functions it -> it.relu }","{Functions it -> it.abs() }","{Functions it -> it.abs }","{Functions it -> it.sin() }","{Functions it -> it.sin }","{Functions it -> it.cos() }","{Functions it -> it.cos }","{Functions it -> it.softplus() }","{Functions it -> it.softplus }","{Functions it -> it.silu() }","{Functions it -> it.silu }","{Functions it -> it.gelu() }","{Functions it -> it.gelu }","{Functions it -> it.selu() }","{Functions it -> it.selu }"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The library context exposes a set of useful functions. 
[23]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"","code":["fun.apply(Neureka.get().backend.function).toString() == expected","!fun.apply(Neureka.get().backend.function).isDoingAD()"]}, + + {"kind":"and","text":"","code":["fun.apply(Neureka.get().backend.autogradFunction).toString() == expected","fun.apply(Neureka.get().backend.autogradFunction).isDoingAD()"]}, + + {"kind":"where","text":"","code":{"expected":["'ln(I[0])'","'ln(I[0])'","'gaus(I[0])'","'gaus(I[0])'","'fast_gaus(I[0])'","'fast_gaus(I[0])'","'sig(I[0])'","'sig(I[0])'","'tanh(I[0])'","'tanh(I[0])'","'fast_tanh(I[0])'","'fast_tanh(I[0])'","'softsign(I[0])'","'softsign(I[0])'","'softsign(I[0])'","'quad(I[0])'","'quad(I[0])'","'relu(I[0])'","'relu(I[0])'","'abs(I[0])'","'abs(I[0])'","'sin(I[0])'","'sin(I[0])'","'cos(I[0])'","'cos(I[0])'","'softplus(I[0])'","'softplus(I[0])'","'silu(I[0])'","'silu(I[0])'","'gelu(I[0])'","'gelu(I[0])'","'selu(I[0])'","'selu(I[0])'"],"fun":["{Functions it -> it.ln }","{Functions it -> it.ln() }","{Functions it -> it.gaus }","{Functions it -> it.gaus() }","{Functions it -> it.fastGaus }","{Functions it -> it.fastGaus() }","{Functions it -> it.sigmoid }","{Functions it -> it.sigmoid() }","{Functions it -> it.tanh }","{Functions it -> it.tanh() }","{Functions it -> it.fastTanh }","{Functions it -> it.fastTanh() }","{Functions it -> it.softsign }","{Functions it -> it.softsign() }","{Functions it -> it.softsign }","{Functions it -> it.quad() }","{Functions it -> it.quad }","{Functions it -> it.relu() }","{Functions it -> it.relu }","{Functions it -> it.abs() }","{Functions it -> it.abs }","{Functions it -> it.sin() }","{Functions it -> it.sin }","{Functions it -> it.cos() }","{Functions it -> it.cos }","{Functions it -> it.softplus() }","{Functions it -> it.softplus }","{Functions it -> it.silu() }","{Functions it -> it.silu }","{Functions it -> it.gelu() }","{Functions it -> it.gelu }","{Functions it -> it.selu() }","{Functions it -> it.selu }"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The library context exposes a set of useful functions. 
[24]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"","code":["fun.apply(Neureka.get().backend.function).toString() == expected","!fun.apply(Neureka.get().backend.function).isDoingAD()"]}, + + {"kind":"and","text":"","code":["fun.apply(Neureka.get().backend.autogradFunction).toString() == expected","fun.apply(Neureka.get().backend.autogradFunction).isDoingAD()"]}, + + {"kind":"where","text":"","code":{"expected":["'ln(I[0])'","'ln(I[0])'","'gaus(I[0])'","'gaus(I[0])'","'fast_gaus(I[0])'","'fast_gaus(I[0])'","'sig(I[0])'","'sig(I[0])'","'tanh(I[0])'","'tanh(I[0])'","'fast_tanh(I[0])'","'fast_tanh(I[0])'","'softsign(I[0])'","'softsign(I[0])'","'softsign(I[0])'","'quad(I[0])'","'quad(I[0])'","'relu(I[0])'","'relu(I[0])'","'abs(I[0])'","'abs(I[0])'","'sin(I[0])'","'sin(I[0])'","'cos(I[0])'","'cos(I[0])'","'softplus(I[0])'","'softplus(I[0])'","'silu(I[0])'","'silu(I[0])'","'gelu(I[0])'","'gelu(I[0])'","'selu(I[0])'","'selu(I[0])'"],"fun":["{Functions it -> it.ln }","{Functions it -> it.ln() }","{Functions it -> it.gaus }","{Functions it -> it.gaus() }","{Functions it -> it.fastGaus }","{Functions it -> it.fastGaus() }","{Functions it -> it.sigmoid }","{Functions it -> it.sigmoid() }","{Functions it -> it.tanh }","{Functions it -> it.tanh() }","{Functions it -> it.fastTanh }","{Functions it -> it.fastTanh() }","{Functions it -> it.softsign }","{Functions it -> it.softsign() }","{Functions it -> it.softsign }","{Functions it -> it.quad() }","{Functions it -> it.quad }","{Functions it -> it.relu() }","{Functions it -> it.relu }","{Functions it -> it.abs() }","{Functions it -> it.abs }","{Functions it -> it.sin() }","{Functions it -> it.sin }","{Functions it -> it.cos() }","{Functions it -> it.cos }","{Functions it -> it.softplus() }","{Functions it -> it.softplus }","{Functions it -> it.silu() }","{Functions it -> it.silu }","{Functions it -> it.gelu() }","{Functions it -> it.gelu }","{Functions it -> it.selu() }","{Functions it -> it.selu }"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The library context exposes a set of useful functions. 
[25]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"","code":["fun.apply(Neureka.get().backend.function).toString() == expected","!fun.apply(Neureka.get().backend.function).isDoingAD()"]}, + + {"kind":"and","text":"","code":["fun.apply(Neureka.get().backend.autogradFunction).toString() == expected","fun.apply(Neureka.get().backend.autogradFunction).isDoingAD()"]}, + + {"kind":"where","text":"","code":{"expected":["'ln(I[0])'","'ln(I[0])'","'gaus(I[0])'","'gaus(I[0])'","'fast_gaus(I[0])'","'fast_gaus(I[0])'","'sig(I[0])'","'sig(I[0])'","'tanh(I[0])'","'tanh(I[0])'","'fast_tanh(I[0])'","'fast_tanh(I[0])'","'softsign(I[0])'","'softsign(I[0])'","'softsign(I[0])'","'quad(I[0])'","'quad(I[0])'","'relu(I[0])'","'relu(I[0])'","'abs(I[0])'","'abs(I[0])'","'sin(I[0])'","'sin(I[0])'","'cos(I[0])'","'cos(I[0])'","'softplus(I[0])'","'softplus(I[0])'","'silu(I[0])'","'silu(I[0])'","'gelu(I[0])'","'gelu(I[0])'","'selu(I[0])'","'selu(I[0])'"],"fun":["{Functions it -> it.ln }","{Functions it -> it.ln() }","{Functions it -> it.gaus }","{Functions it -> it.gaus() }","{Functions it -> it.fastGaus }","{Functions it -> it.fastGaus() }","{Functions it -> it.sigmoid }","{Functions it -> it.sigmoid() }","{Functions it -> it.tanh }","{Functions it -> it.tanh() }","{Functions it -> it.fastTanh }","{Functions it -> it.fastTanh() }","{Functions it -> it.softsign }","{Functions it -> it.softsign() }","{Functions it -> it.softsign }","{Functions it -> it.quad() }","{Functions it -> it.quad }","{Functions it -> it.relu() }","{Functions it -> it.relu }","{Functions it -> it.abs() }","{Functions it -> it.abs }","{Functions it -> it.sin() }","{Functions it -> it.sin }","{Functions it -> it.cos() }","{Functions it -> it.cos }","{Functions it -> it.softplus() }","{Functions it -> it.softplus }","{Functions it -> it.silu() }","{Functions it -> it.silu }","{Functions it -> it.gelu() }","{Functions it -> it.gelu }","{Functions it -> it.selu() }","{Functions it -> it.selu }"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The library context exposes a set of useful functions. 
[26]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"","code":["fun.apply(Neureka.get().backend.function).toString() == expected","!fun.apply(Neureka.get().backend.function).isDoingAD()"]}, + + {"kind":"and","text":"","code":["fun.apply(Neureka.get().backend.autogradFunction).toString() == expected","fun.apply(Neureka.get().backend.autogradFunction).isDoingAD()"]}, + + {"kind":"where","text":"","code":{"expected":["'ln(I[0])'","'ln(I[0])'","'gaus(I[0])'","'gaus(I[0])'","'fast_gaus(I[0])'","'fast_gaus(I[0])'","'sig(I[0])'","'sig(I[0])'","'tanh(I[0])'","'tanh(I[0])'","'fast_tanh(I[0])'","'fast_tanh(I[0])'","'softsign(I[0])'","'softsign(I[0])'","'softsign(I[0])'","'quad(I[0])'","'quad(I[0])'","'relu(I[0])'","'relu(I[0])'","'abs(I[0])'","'abs(I[0])'","'sin(I[0])'","'sin(I[0])'","'cos(I[0])'","'cos(I[0])'","'softplus(I[0])'","'softplus(I[0])'","'silu(I[0])'","'silu(I[0])'","'gelu(I[0])'","'gelu(I[0])'","'selu(I[0])'","'selu(I[0])'"],"fun":["{Functions it -> it.ln }","{Functions it -> it.ln() }","{Functions it -> it.gaus }","{Functions it -> it.gaus() }","{Functions it -> it.fastGaus }","{Functions it -> it.fastGaus() }","{Functions it -> it.sigmoid }","{Functions it -> it.sigmoid() }","{Functions it -> it.tanh }","{Functions it -> it.tanh() }","{Functions it -> it.fastTanh }","{Functions it -> it.fastTanh() }","{Functions it -> it.softsign }","{Functions it -> it.softsign() }","{Functions it -> it.softsign }","{Functions it -> it.quad() }","{Functions it -> it.quad }","{Functions it -> it.relu() }","{Functions it -> it.relu }","{Functions it -> it.abs() }","{Functions it -> it.abs }","{Functions it -> it.sin() }","{Functions it -> it.sin }","{Functions it -> it.cos() }","{Functions it -> it.cos }","{Functions it -> it.softplus() }","{Functions it -> it.softplus }","{Functions it -> it.silu() }","{Functions it -> it.silu }","{Functions it -> it.gelu() }","{Functions it -> it.gelu }","{Functions it -> it.selu() }","{Functions it -> it.selu }"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The library context exposes a set of useful functions. 
[27]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"","code":["fun.apply(Neureka.get().backend.function).toString() == expected","!fun.apply(Neureka.get().backend.function).isDoingAD()"]}, + + {"kind":"and","text":"","code":["fun.apply(Neureka.get().backend.autogradFunction).toString() == expected","fun.apply(Neureka.get().backend.autogradFunction).isDoingAD()"]}, + + {"kind":"where","text":"","code":{"expected":["'ln(I[0])'","'ln(I[0])'","'gaus(I[0])'","'gaus(I[0])'","'fast_gaus(I[0])'","'fast_gaus(I[0])'","'sig(I[0])'","'sig(I[0])'","'tanh(I[0])'","'tanh(I[0])'","'fast_tanh(I[0])'","'fast_tanh(I[0])'","'softsign(I[0])'","'softsign(I[0])'","'softsign(I[0])'","'quad(I[0])'","'quad(I[0])'","'relu(I[0])'","'relu(I[0])'","'abs(I[0])'","'abs(I[0])'","'sin(I[0])'","'sin(I[0])'","'cos(I[0])'","'cos(I[0])'","'softplus(I[0])'","'softplus(I[0])'","'silu(I[0])'","'silu(I[0])'","'gelu(I[0])'","'gelu(I[0])'","'selu(I[0])'","'selu(I[0])'"],"fun":["{Functions it -> it.ln }","{Functions it -> it.ln() }","{Functions it -> it.gaus }","{Functions it -> it.gaus() }","{Functions it -> it.fastGaus }","{Functions it -> it.fastGaus() }","{Functions it -> it.sigmoid }","{Functions it -> it.sigmoid() }","{Functions it -> it.tanh }","{Functions it -> it.tanh() }","{Functions it -> it.fastTanh }","{Functions it -> it.fastTanh() }","{Functions it -> it.softsign }","{Functions it -> it.softsign() }","{Functions it -> it.softsign }","{Functions it -> it.quad() }","{Functions it -> it.quad }","{Functions it -> it.relu() }","{Functions it -> it.relu }","{Functions it -> it.abs() }","{Functions it -> it.abs }","{Functions it -> it.sin() }","{Functions it -> it.sin }","{Functions it -> it.cos() }","{Functions it -> it.cos }","{Functions it -> it.softplus() }","{Functions it -> it.softplus }","{Functions it -> it.silu() }","{Functions it -> it.silu }","{Functions it -> it.gelu() }","{Functions it -> it.gelu }","{Functions it -> it.selu() }","{Functions it -> it.selu }"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The library context exposes a set of useful functions. 
[28]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"","code":["fun.apply(Neureka.get().backend.function).toString() == expected","!fun.apply(Neureka.get().backend.function).isDoingAD()"]}, + + {"kind":"and","text":"","code":["fun.apply(Neureka.get().backend.autogradFunction).toString() == expected","fun.apply(Neureka.get().backend.autogradFunction).isDoingAD()"]}, + + {"kind":"where","text":"","code":{"expected":["'ln(I[0])'","'ln(I[0])'","'gaus(I[0])'","'gaus(I[0])'","'fast_gaus(I[0])'","'fast_gaus(I[0])'","'sig(I[0])'","'sig(I[0])'","'tanh(I[0])'","'tanh(I[0])'","'fast_tanh(I[0])'","'fast_tanh(I[0])'","'softsign(I[0])'","'softsign(I[0])'","'softsign(I[0])'","'quad(I[0])'","'quad(I[0])'","'relu(I[0])'","'relu(I[0])'","'abs(I[0])'","'abs(I[0])'","'sin(I[0])'","'sin(I[0])'","'cos(I[0])'","'cos(I[0])'","'softplus(I[0])'","'softplus(I[0])'","'silu(I[0])'","'silu(I[0])'","'gelu(I[0])'","'gelu(I[0])'","'selu(I[0])'","'selu(I[0])'"],"fun":["{Functions it -> it.ln }","{Functions it -> it.ln() }","{Functions it -> it.gaus }","{Functions it -> it.gaus() }","{Functions it -> it.fastGaus }","{Functions it -> it.fastGaus() }","{Functions it -> it.sigmoid }","{Functions it -> it.sigmoid() }","{Functions it -> it.tanh }","{Functions it -> it.tanh() }","{Functions it -> it.fastTanh }","{Functions it -> it.fastTanh() }","{Functions it -> it.softsign }","{Functions it -> it.softsign() }","{Functions it -> it.softsign }","{Functions it -> it.quad() }","{Functions it -> it.quad }","{Functions it -> it.relu() }","{Functions it -> it.relu }","{Functions it -> it.abs() }","{Functions it -> it.abs }","{Functions it -> it.sin() }","{Functions it -> it.sin }","{Functions it -> it.cos() }","{Functions it -> it.cos }","{Functions it -> it.softplus() }","{Functions it -> it.softplus }","{Functions it -> it.silu() }","{Functions it -> it.silu }","{Functions it -> it.gelu() }","{Functions it -> it.gelu }","{Functions it -> it.selu() }","{Functions it -> it.selu }"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The library context exposes a set of useful functions. 
[29]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"","code":["fun.apply(Neureka.get().backend.function).toString() == expected","!fun.apply(Neureka.get().backend.function).isDoingAD()"]}, + + {"kind":"and","text":"","code":["fun.apply(Neureka.get().backend.autogradFunction).toString() == expected","fun.apply(Neureka.get().backend.autogradFunction).isDoingAD()"]}, + + {"kind":"where","text":"","code":{"expected":["'ln(I[0])'","'ln(I[0])'","'gaus(I[0])'","'gaus(I[0])'","'fast_gaus(I[0])'","'fast_gaus(I[0])'","'sig(I[0])'","'sig(I[0])'","'tanh(I[0])'","'tanh(I[0])'","'fast_tanh(I[0])'","'fast_tanh(I[0])'","'softsign(I[0])'","'softsign(I[0])'","'softsign(I[0])'","'quad(I[0])'","'quad(I[0])'","'relu(I[0])'","'relu(I[0])'","'abs(I[0])'","'abs(I[0])'","'sin(I[0])'","'sin(I[0])'","'cos(I[0])'","'cos(I[0])'","'softplus(I[0])'","'softplus(I[0])'","'silu(I[0])'","'silu(I[0])'","'gelu(I[0])'","'gelu(I[0])'","'selu(I[0])'","'selu(I[0])'"],"fun":["{Functions it -> it.ln }","{Functions it -> it.ln() }","{Functions it -> it.gaus }","{Functions it -> it.gaus() }","{Functions it -> it.fastGaus }","{Functions it -> it.fastGaus() }","{Functions it -> it.sigmoid }","{Functions it -> it.sigmoid() }","{Functions it -> it.tanh }","{Functions it -> it.tanh() }","{Functions it -> it.fastTanh }","{Functions it -> it.fastTanh() }","{Functions it -> it.softsign }","{Functions it -> it.softsign() }","{Functions it -> it.softsign }","{Functions it -> it.quad() }","{Functions it -> it.quad }","{Functions it -> it.relu() }","{Functions it -> it.relu }","{Functions it -> it.abs() }","{Functions it -> it.abs }","{Functions it -> it.sin() }","{Functions it -> it.sin }","{Functions it -> it.cos() }","{Functions it -> it.cos }","{Functions it -> it.softplus() }","{Functions it -> it.softplus }","{Functions it -> it.silu() }","{Functions it -> it.silu }","{Functions it -> it.gelu() }","{Functions it -> it.gelu }","{Functions it -> it.selu() }","{Functions it -> it.selu }"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The library context exposes a set of useful functions. 
[30]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"","code":["fun.apply(Neureka.get().backend.function).toString() == expected","!fun.apply(Neureka.get().backend.function).isDoingAD()"]}, + + {"kind":"and","text":"","code":["fun.apply(Neureka.get().backend.autogradFunction).toString() == expected","fun.apply(Neureka.get().backend.autogradFunction).isDoingAD()"]}, + + {"kind":"where","text":"","code":{"expected":["'ln(I[0])'","'ln(I[0])'","'gaus(I[0])'","'gaus(I[0])'","'fast_gaus(I[0])'","'fast_gaus(I[0])'","'sig(I[0])'","'sig(I[0])'","'tanh(I[0])'","'tanh(I[0])'","'fast_tanh(I[0])'","'fast_tanh(I[0])'","'softsign(I[0])'","'softsign(I[0])'","'softsign(I[0])'","'quad(I[0])'","'quad(I[0])'","'relu(I[0])'","'relu(I[0])'","'abs(I[0])'","'abs(I[0])'","'sin(I[0])'","'sin(I[0])'","'cos(I[0])'","'cos(I[0])'","'softplus(I[0])'","'softplus(I[0])'","'silu(I[0])'","'silu(I[0])'","'gelu(I[0])'","'gelu(I[0])'","'selu(I[0])'","'selu(I[0])'"],"fun":["{Functions it -> it.ln }","{Functions it -> it.ln() }","{Functions it -> it.gaus }","{Functions it -> it.gaus() }","{Functions it -> it.fastGaus }","{Functions it -> it.fastGaus() }","{Functions it -> it.sigmoid }","{Functions it -> it.sigmoid() }","{Functions it -> it.tanh }","{Functions it -> it.tanh() }","{Functions it -> it.fastTanh }","{Functions it -> it.fastTanh() }","{Functions it -> it.softsign }","{Functions it -> it.softsign() }","{Functions it -> it.softsign }","{Functions it -> it.quad() }","{Functions it -> it.quad }","{Functions it -> it.relu() }","{Functions it -> it.relu }","{Functions it -> it.abs() }","{Functions it -> it.abs }","{Functions it -> it.sin() }","{Functions it -> it.sin }","{Functions it -> it.cos() }","{Functions it -> it.cos }","{Functions it -> it.softplus() }","{Functions it -> it.softplus }","{Functions it -> it.silu() }","{Functions it -> it.silu }","{Functions it -> it.gelu() }","{Functions it -> it.gelu }","{Functions it -> it.selu() }","{Functions it -> it.selu }"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The library context exposes a set of useful functions. 
[31]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"","code":["fun.apply(Neureka.get().backend.function).toString() == expected","!fun.apply(Neureka.get().backend.function).isDoingAD()"]}, + + {"kind":"and","text":"","code":["fun.apply(Neureka.get().backend.autogradFunction).toString() == expected","fun.apply(Neureka.get().backend.autogradFunction).isDoingAD()"]}, + + {"kind":"where","text":"","code":{"expected":["'ln(I[0])'","'ln(I[0])'","'gaus(I[0])'","'gaus(I[0])'","'fast_gaus(I[0])'","'fast_gaus(I[0])'","'sig(I[0])'","'sig(I[0])'","'tanh(I[0])'","'tanh(I[0])'","'fast_tanh(I[0])'","'fast_tanh(I[0])'","'softsign(I[0])'","'softsign(I[0])'","'softsign(I[0])'","'quad(I[0])'","'quad(I[0])'","'relu(I[0])'","'relu(I[0])'","'abs(I[0])'","'abs(I[0])'","'sin(I[0])'","'sin(I[0])'","'cos(I[0])'","'cos(I[0])'","'softplus(I[0])'","'softplus(I[0])'","'silu(I[0])'","'silu(I[0])'","'gelu(I[0])'","'gelu(I[0])'","'selu(I[0])'","'selu(I[0])'"],"fun":["{Functions it -> it.ln }","{Functions it -> it.ln() }","{Functions it -> it.gaus }","{Functions it -> it.gaus() }","{Functions it -> it.fastGaus }","{Functions it -> it.fastGaus() }","{Functions it -> it.sigmoid }","{Functions it -> it.sigmoid() }","{Functions it -> it.tanh }","{Functions it -> it.tanh() }","{Functions it -> it.fastTanh }","{Functions it -> it.fastTanh() }","{Functions it -> it.softsign }","{Functions it -> it.softsign() }","{Functions it -> it.softsign }","{Functions it -> it.quad() }","{Functions it -> it.quad }","{Functions it -> it.relu() }","{Functions it -> it.relu }","{Functions it -> it.abs() }","{Functions it -> it.abs }","{Functions it -> it.sin() }","{Functions it -> it.sin }","{Functions it -> it.cos() }","{Functions it -> it.cos }","{Functions it -> it.softplus() }","{Functions it -> it.softplus }","{Functions it -> it.silu() }","{Functions it -> it.silu }","{Functions it -> it.gelu() }","{Functions it -> it.gelu }","{Functions it -> it.selu() }","{Functions it -> it.selu }"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The library context exposes a set of useful functions. 
[32]", + "result":"PASS", + "duration":"0", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, diff --git a/docs/spock/reports/ut.math.Tensor_Function_Spec.json b/docs/spock/reports/ut.math.Tensor_Function_Spec.json index c2563cf38..bc5e8b516 100644 --- a/docs/spock/reports/ut.math.Tensor_Function_Spec.json +++ b/docs/spock/reports/ut.math.Tensor_Function_Spec.json @@ -4,19 +4,19 @@ "narrative":"A tensor would be nothing without being able to apply operations on them.\n However, calling operations manually in order to process your\n tensors can be a verbose and error prone task.\n This is where functions come into play.\n Neureka's functions are composed of operations forming an abstract syntax tree.\n Passing tensors to a function will route them trough this tree and apply\n all of the operations on the tensors for you.", "subjects":["neureka.Tensor","neureka.math.Function"], "statistics":{ - "runs":"10", + "runs":"52", "successRate":"100.0%", "failures":"0", "errors":"0", "skipped":"0", - "duration":"0.080 seconds" + "duration":"0.179 seconds" }, "headers":[" \n This specification ensures that tensors supplied\n to functions are executed successfully and produce the expected results.\n "],"tags":{},"see":[], "features":[ { "id":"The tensor API has built-in methods for applying functions.", "result":"PASS", - "duration":"0.004 seconds", + "duration":"0.008 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -33,7 +33,7 @@ { "id":"The softmax function can be applied to tensors with more than one dimension.", "result":"PASS", - "duration":"0.001 seconds", + "duration":"0.003 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -52,7 +52,7 @@ { "id":"The softmax can be calculated for a particular axis.", "result":"PASS", - "duration":"0.002 seconds", + "duration":"0.008 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -77,7 +77,7 @@ { "id":"The softmax can be calculated alongside multiple axes.", "result":"PASS", - "duration":"0.001 seconds", + "duration":"0.007 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -96,7 +96,7 @@ { "id":"The optimization function for the SGD algorithm produces the expected result", "result":"PASS", - "duration":"0", + "duration":"0.002 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -117,9 +117,9 @@ }, { - "id":"Tensor results of various Function instances return expected results.", + "id":"Tensor results of various Function instances return expected results. [0]", "result":"PASS", - "duration":"0.019 seconds", + "duration":"0.006 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -142,83 +142,1133 @@ }, { - "id":"Reshaping on 3D tensors works by instantiate a Function instance built from a String.", + "id":"Tensor results of various Function instances return expected results. 
[1]", "result":"PASS", "duration":"0", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, "blocks":[ - {"kind":"given","text":"","code":["Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(true)","var f = Function.of(\"[2, 0, 1]:(I[0])\")"]}, + {"kind":"given","text":"We set the experimental \"autoConvertToFloat\" flag to true.","code":["Neureka.get().backend().find(CLBackend).ifPresent({ it.settings.autoConvertToFloat=true })"]}, - {"kind":"when","text":"","code":["when : var t = Tensor.of([3, 4, 2], 1d..5d)"]}, + {"kind":"and","text":"","code":["and : \"A new Function instance created from ${equation}.\"","Function f = Function.of(equation, true) // TODO : test with 'doAD' : false!"]}, - {"kind":"then","text":"","code":["then : t.toString().contains(\"[3x4x2]:(1.0, 2.0, 3.0, 4.0, 5.0, 1.0, 2.0, 3.0, 4.0, 5.0, 1.0, 2.0, 3.0, 4.0, 5.0, 1.0, 2.0, 3.0, 4.0, 5.0, 1.0, 2.0, 3.0, 4.0)\")"]}, + {"kind":"and","text":"","code":["inputs.each {it.to(Device.get(device))}"]}, - {"kind":"when","text":"","code":["when : var r = f(t)"]}, + {"kind":"and","text":"The result is being calculated by invoking the Function instance.","code":["Tensor result = ( index != null ? f.derive( inputs, index ) : f.call( inputs ) )","List value = result.getItemsAs(double[].class) as List"]}, - {"kind":"then","text":"","code":["r.toString().contains(\"[2x3x4]\")","r.toString().contains(\"[2x3x4]:(1.0, 3.0, 5.0, 2.0, 4.0, 1.0, 3.0, 5.0, 2.0, 4.0, 1.0, 3.0, 2.0, 4.0, 1.0, 3.0, 5.0, 2.0, 4.0, 1.0, 3.0, 5.0, 2.0, 4.0)\")"]} + {"kind":"expect","text":"","code":["expect : \"The calculated result ${result} should be (ruffly) equal to expected ${expected}.\"","(0.. result = ( index != null ? f.derive( inputs, index ) : f.call( inputs ) )","List value = result.getItemsAs(double[].class) as List"]}, + + {"kind":"expect","text":"","code":["expect : \"The calculated result ${result} should be (ruffly) equal to expected ${expected}.\"","(0..d(\")"]}, + {"kind":"and","text":"","code":["inputs.each {it.to(Device.get(device))}"]}, - {"kind":"when","text":"","code":["var back = trimmed.backward()"]}, + {"kind":"and","text":"The result is being calculated by invoking the Function instance.","code":["Tensor result = ( index != null ? f.derive( inputs, index ) : f.call( inputs ) )","List value = result.getItemsAs(double[].class) as List"]}, - {"kind":"then","text":"","code":["back == trimmed"]}, + {"kind":"expect","text":"","code":["expect : \"The calculated result ${result} should be (ruffly) equal to expected ${expected}.\"","(0.. result = ( index != null ? f.derive( inputs, index ) : f.call( inputs ) )","List value = result.getItemsAs(double[].class) as List"]}, + + {"kind":"expect","text":"","code":["expect : \"The calculated result ${result} should be (ruffly) equal to expected ${expected}.\"","(0.. result = ( index != null ? f.derive( inputs, index ) : f.call( inputs ) )","List value = result.getItemsAs(double[].class) as List"]}, + + {"kind":"expect","text":"","code":["expect : \"The calculated result ${result} should be (ruffly) equal to expected ${expected}.\"","(0.. result = ( index != null ? f.derive( inputs, index ) : f.call( inputs ) )","List value = result.getItemsAs(double[].class) as List"]}, - {"kind":"then","text":"The \"call\" method will not return an intermediate result.","code":["!result1.isIntermediate()"]}, + {"kind":"expect","text":"","code":["expect : \"The calculated result ${result} should be (ruffly) equal to expected ${expected}.\"","(0.. result = ( index != null ? 
f.derive( inputs, index ) : f.call( inputs ) )","List value = result.getItemsAs(double[].class) as List"]}, + + {"kind":"expect","text":"","code":["expect : \"The calculated result ${result} should be (ruffly) equal to expected ${expected}.\"","(0.. result = ( index != null ? f.derive( inputs, index ) : f.call( inputs ) )","List value = result.getItemsAs(double[].class) as List"]}, + + {"kind":"expect","text":"","code":["expect : \"The calculated result ${result} should be (ruffly) equal to expected ${expected}.\"","(0.. result = ( index != null ? f.derive( inputs, index ) : f.call( inputs ) )","List value = result.getItemsAs(double[].class) as List"]}, + + {"kind":"expect","text":"","code":["expect : \"The calculated result ${result} should be (ruffly) equal to expected ${expected}.\"","(0.. result = ( index != null ? f.derive( inputs, index ) : f.call( inputs ) )","List value = result.getItemsAs(double[].class) as List"]}, + + {"kind":"expect","text":"","code":["expect : \"The calculated result ${result} should be (ruffly) equal to expected ${expected}.\"","(0.. result = ( index != null ? f.derive( inputs, index ) : f.call( inputs ) )","List value = result.getItemsAs(double[].class) as List"]}, + + {"kind":"expect","text":"","code":["expect : \"The calculated result ${result} should be (ruffly) equal to expected ${expected}.\"","(0.. result = ( index != null ? f.derive( inputs, index ) : f.call( inputs ) )","List value = result.getItemsAs(double[].class) as List"]}, + + {"kind":"expect","text":"","code":["expect : \"The calculated result ${result} should be (ruffly) equal to expected ${expected}.\"","(0.. result = ( index != null ? f.derive( inputs, index ) : f.call( inputs ) )","List value = result.getItemsAs(double[].class) as List"]}, + + {"kind":"expect","text":"","code":["expect : \"The calculated result ${result} should be (ruffly) equal to expected ${expected}.\"","(0.. result = ( index != null ? f.derive( inputs, index ) : f.call( inputs ) )","List value = result.getItemsAs(double[].class) as List"]}, + + {"kind":"expect","text":"","code":["expect : \"The calculated result ${result} should be (ruffly) equal to expected ${expected}.\"","(0.. result = ( index != null ? f.derive( inputs, index ) : f.call( inputs ) )","List value = result.getItemsAs(double[].class) as List"]}, + + {"kind":"expect","text":"","code":["expect : \"The calculated result ${result} should be (ruffly) equal to expected ${expected}.\"","(0.. result = ( index != null ? f.derive( inputs, index ) : f.call( inputs ) )","List value = result.getItemsAs(double[].class) as List"]}, + + {"kind":"expect","text":"","code":["expect : \"The calculated result ${result} should be (ruffly) equal to expected ${expected}.\"","(0.. result = ( index != null ? f.derive( inputs, index ) : f.call( inputs ) )","List value = result.getItemsAs(double[].class) as List"]}, + + {"kind":"expect","text":"","code":["expect : \"The calculated result ${result} should be (ruffly) equal to expected ${expected}.\"","(0.. result = ( index != null ? f.derive( inputs, index ) : f.call( inputs ) )","List value = result.getItemsAs(double[].class) as List"]}, + + {"kind":"expect","text":"","code":["expect : \"The calculated result ${result} should be (ruffly) equal to expected ${expected}.\"","(0.. result = ( index != null ? 
f.derive( inputs, index ) : f.call( inputs ) )","List value = result.getItemsAs(double[].class) as List"]}, + + {"kind":"expect","text":"","code":["expect : \"The calculated result ${result} should be (ruffly) equal to expected ${expected}.\"","(0.. result = ( index != null ? f.derive( inputs, index ) : f.call( inputs ) )","List value = result.getItemsAs(double[].class) as List"]}, + + {"kind":"expect","text":"","code":["expect : \"The calculated result ${result} should be (ruffly) equal to expected ${expected}.\"","(0.. result = ( index != null ? f.derive( inputs, index ) : f.call( inputs ) )","List value = result.getItemsAs(double[].class) as List"]}, + + {"kind":"expect","text":"","code":["expect : \"The calculated result ${result} should be (ruffly) equal to expected ${expected}.\"","(0.. result = ( index != null ? f.derive( inputs, index ) : f.call( inputs ) )","List value = result.getItemsAs(double[].class) as List"]}, + + {"kind":"expect","text":"","code":["expect : \"The calculated result ${result} should be (ruffly) equal to expected ${expected}.\"","(0.. result = ( index != null ? f.derive( inputs, index ) : f.call( inputs ) )","List value = result.getItemsAs(double[].class) as List"]}, + + {"kind":"expect","text":"","code":["expect : \"The calculated result ${result} should be (ruffly) equal to expected ${expected}.\"","(0.. result = ( index != null ? f.derive( inputs, index ) : f.call( inputs ) )","List value = result.getItemsAs(double[].class) as List"]}, + + {"kind":"expect","text":"","code":["expect : \"The calculated result ${result} should be (ruffly) equal to expected ${expected}.\"","(0.. result = ( index != null ? f.derive( inputs, index ) : f.call( inputs ) )","List value = result.getItemsAs(double[].class) as List"]}, + + {"kind":"expect","text":"","code":["expect : \"The calculated result ${result} should be (ruffly) equal to expected ${expected}.\"","(0.. result = ( index != null ? f.derive( inputs, index ) : f.call( inputs ) )","List value = result.getItemsAs(double[].class) as List"]}, + + {"kind":"expect","text":"","code":["expect : \"The calculated result ${result} should be (ruffly) equal to expected ${expected}.\"","(0.. result = ( index != null ? f.derive( inputs, index ) : f.call( inputs ) )","List value = result.getItemsAs(double[].class) as List"]}, + + {"kind":"expect","text":"","code":["expect : \"The calculated result ${result} should be (ruffly) equal to expected ${expected}.\"","(0.. result = ( index != null ? f.derive( inputs, index ) : f.call( inputs ) )","List value = result.getItemsAs(double[].class) as List"]}, + + {"kind":"expect","text":"","code":["expect : \"The calculated result ${result} should be (ruffly) equal to expected ${expected}.\"","(0.. result = ( index != null ? f.derive( inputs, index ) : f.call( inputs ) )","List value = result.getItemsAs(double[].class) as List"]}, + + {"kind":"expect","text":"","code":["expect : \"The calculated result ${result} should be (ruffly) equal to expected ${expected}.\"","(0.. result = ( index != null ? f.derive( inputs, index ) : f.call( inputs ) )","List value = result.getItemsAs(double[].class) as List"]}, + + {"kind":"expect","text":"","code":["expect : \"The calculated result ${result} should be (ruffly) equal to expected ${expected}.\"","(0.. result = ( index != null ? 
f.derive( inputs, index ) : f.call( inputs ) )","List value = result.getItemsAs(double[].class) as List"]}, + + {"kind":"expect","text":"","code":["expect : \"The calculated result ${result} should be (ruffly) equal to expected ${expected}.\"","(0.. result = ( index != null ? f.derive( inputs, index ) : f.call( inputs ) )","List value = result.getItemsAs(double[].class) as List"]}, + + {"kind":"expect","text":"","code":["expect : \"The calculated result ${result} should be (ruffly) equal to expected ${expected}.\"","(0.. result = ( index != null ? f.derive( inputs, index ) : f.call( inputs ) )","List value = result.getItemsAs(double[].class) as List"]}, + + {"kind":"expect","text":"","code":["expect : \"The calculated result ${result} should be (ruffly) equal to expected ${expected}.\"","(0.. result = ( index != null ? f.derive( inputs, index ) : f.call( inputs ) )","List value = result.getItemsAs(double[].class) as List"]}, + + {"kind":"expect","text":"","code":["expect : \"The calculated result ${result} should be (ruffly) equal to expected ${expected}.\"","(0.. result = ( index != null ? f.derive( inputs, index ) : f.call( inputs ) )","List value = result.getItemsAs(double[].class) as List"]}, + + {"kind":"expect","text":"","code":["expect : \"The calculated result ${result} should be (ruffly) equal to expected ${expected}.\"","(0.. result = ( index != null ? f.derive( inputs, index ) : f.call( inputs ) )","List value = result.getItemsAs(double[].class) as List"]}, + + {"kind":"expect","text":"","code":["expect : \"The calculated result ${result} should be (ruffly) equal to expected ${expected}.\"","(0.. result = ( index != null ? f.derive( inputs, index ) : f.call( inputs ) )","List value = result.getItemsAs(double[].class) as List"]}, + + {"kind":"expect","text":"","code":["expect : \"The calculated result ${result} should be (ruffly) equal to expected ${expected}.\"","(0.. result = ( index != null ? f.derive( inputs, index ) : f.call( inputs ) )","List value = result.getItemsAs(double[].class) as List"]}, + + {"kind":"expect","text":"","code":["expect : \"The calculated result ${result} should be (ruffly) equal to expected ${expected}.\"","(0.. result = ( index != null ? f.derive( inputs, index ) : f.call( inputs ) )","List value = result.getItemsAs(double[].class) as List"]}, + + {"kind":"expect","text":"","code":["expect : \"The calculated result ${result} should be (ruffly) equal to expected ${expected}.\"","(0.. result = ( index != null ? f.derive( inputs, index ) : f.call( inputs ) )","List value = result.getItemsAs(double[].class) as List"]}, + + {"kind":"expect","text":"","code":["expect : \"The calculated result ${result} should be (ruffly) equal to expected ${expected}.\"","(0.. result = ( index != null ? f.derive( inputs, index ) : f.call( inputs ) )","List value = result.getItemsAs(double[].class) as List"]}, + + {"kind":"expect","text":"","code":["expect : \"The calculated result ${result} should be (ruffly) equal to expected ${expected}.\"","(0.. result = ( index != null ? 
f.derive( inputs, index ) : f.call( inputs ) )","List value = result.getItemsAs(double[].class) as List"]}, + + {"kind":"expect","text":"","code":["expect : \"The calculated result ${result} should be (ruffly) equal to expected ${expected}.\"","(0..d(\")"]}, + + {"kind":"when","text":"","code":["var back = trimmed.backward()"]}, + + {"kind":"then","text":"","code":["back == trimmed"]}, + + {"kind":"and","text":"","code":["t.gradient.get().toString() == \"(1x1x3x2x1):[1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\""]} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Executed tensors are intermediate tensors.", + "result":"PASS", + "duration":"0.006 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":["\n Functions expose different kinds of methods for different kinds of\n purposes, however there is one species of methods with a very important role\n in ensuring memory efficiency.\n These types of methods are the `execute` methods which \n distinguish themselves in that the tensors returned by \n these methods are flagged as \"intermediate\".\n If a tensor is an intermediate one, it becomes eligible \n for deletion when consumed by another function.\n Note that internally every function is usually a composite\n of other functions forming a syntax tree which will process\n input tensors through the execute methods, which causes\n intermediate results to be deleted automatically.\n When executing a function as a user of Neureka\n one should generally avoid using the `execute` method in order to avoid\n accidental deletion of results.\n This is mostly relevant for when designing custom operations.\n "] + }, + "blocks":[ + {"kind":"given","text":"We create a simple function taking one input.","code":["var fun = Function.of('i0 * relu(i0) + 1')"]}, + + {"kind":"and","text":"A vector tensor of 5 float numbers","code":["var t = Tensor.of(1f, -5f, -3f, 2f, 8f)"]}, + + {"kind":"expect","text":"Both the tensor as well as the function were created successfully.","code":["t.itemType == Float","fun.toString() == \"((I[0] * relu(I[0])) + 1.0)\""]}, + + {"kind":"when","text":"We try different kinds of ways of passing the tensor to the function...","code":["var result1 = fun.call(t)","var result2 = fun.invoke(t)","var result3 = fun.execute(t)"]}, + + {"kind":"then","text":"The \"call\" method will not return an intermediate result.","code":["!result1.isIntermediate()"]}, + + {"kind":"and","text":"The functionally identical synonym method \"invoke\" will also yield a non-intermediate result.","code":["!result2.isIntermediate()"]}, + + {"kind":"and","text":"As expected, the tensor of the \"execute\" method is indeed intermediate.","code":["result3.isIntermediate()"]}, + + {"kind":"and","text":"Otherwise all 3 tensors are basically the same.","code":["result1.toString() == \"(5):[2.0, 1.25, 1.09, 5.0, 65.0]\"","result2.toString() == \"(5):[2.0, 1.25, 1.09, 5.0, 65.0]\"","result3.toString() == \"(5):[2.0, 1.25, 1.09, 5.0, 65.0]\""]} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can collect a stream into a tensor.", + "result":"PASS", + "duration":"0.004 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, "blocks":[ {"kind":"given","text":"We create a stream of integers.","code":["var stream = Stream.of(1, 2, 3, 4, 5, 6)"]}, diff --git a/docs/spock/reports/ut.miscellaneous.Weired_NN_Spec.json b/docs/spock/reports/ut.miscellaneous.Weired_NN_Spec.json index 68cf2a58b..93e44631a 100644 --- a/docs/spock/reports/ut.miscellaneous.Weired_NN_Spec.json +++ 
b/docs/spock/reports/ut.miscellaneous.Weired_NN_Spec.json @@ -9,14 +9,14 @@ "failures":"0", "errors":"0", "skipped":"0", - "duration":"0.019 seconds" + "duration":"0.036 seconds" }, "headers":[],"tags":{},"see":[], "features":[ { "id":"Dot based feed forward and activation produces expected result.", "result":"PASS", - "duration":"0.017 seconds", + "duration":"0.034 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, diff --git a/docs/spock/reports/ut.ndas.Nda_Assign_Spec.json b/docs/spock/reports/ut.ndas.Nda_Assign_Spec.json index 65baab9ad..fe23b2131 100644 --- a/docs/spock/reports/ut.ndas.Nda_Assign_Spec.json +++ b/docs/spock/reports/ut.ndas.Nda_Assign_Spec.json @@ -9,14 +9,14 @@ "failures":"0", "errors":"0", "skipped":"0", - "duration":"0.002 seconds" + "duration":"0.007 seconds" }, "headers":[],"tags":{},"see":[], "features":[ { "id":"We can use the \"mut\" API to assign the contents of one nd-array into another one.", "result":"PASS", - "duration":"0", + "duration":"0.001 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -33,7 +33,7 @@ { "id":"Assignment can be easily achieved through subscription operators.", "result":"PASS", - "duration":"0", + "duration":"0.001 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -52,7 +52,7 @@ { "id":"We can assign one slice into another one.", "result":"PASS", - "duration":"0", + "duration":"0.002 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":["\n Using the 'assign' operation on slices should be handled with care,\n since the operation has side effects on the underlying data array\n which is shared by both the slice and its parent.\n Use the 'copy' operation on slices if you want to avoid this.\n "] }, diff --git a/docs/spock/reports/ut.ndas.Nda_Framing_Spec.json b/docs/spock/reports/ut.ndas.Nda_Framing_Spec.json index 5118d38fc..6d8f0a0ba 100644 --- a/docs/spock/reports/ut.ndas.Nda_Framing_Spec.json +++ b/docs/spock/reports/ut.ndas.Nda_Framing_Spec.json @@ -1,7 +1,7 @@ { "className":"ut.ndas.Nda_Framing_Spec", "title":"Nda framing", - "narrative":"Immutability is a core concept of the Neureka library.\n This means that the Nda API does not expose mutability directly.\n Instead, the API exposes methods that return new instances of Nda\n that are derived from the original instance.\n\n This is also true for labeling operations,\n meaning that the Nda API does not directly expose methods that mutate labels of an Nda\n but instead provides methods that return new instances of Nda\n with different labels.\n\n Don't be concerned about the performance implications of this,\n because in the vast majority of cases the new instance will be backed by the same data array\n as the original instance!", + "narrative":"Immutability is a core concept of the Neureka library.\n This means that the Nda API does not expose mutability directly.\n Instead, the API exposes methods that return new instances of Nda\n that are derived from the original instance.\n\n This is also true for labeling operations, \n meaning that the Nda API does not directly expose methods that mutate labels of an Nda\n but instead provides methods that return new instances of Nda\n with different labels.\n\n Don't be concerned about the performance implications of this,\n because in the vast majority of cases the new instance will be backed by the same data array\n as the original instance!", "subjects":["neureka.Nda"], "statistics":{ "runs":"6", @@ -9,14 +9,14 @@ "failures":"0", "errors":"0", "skipped":"0", - "duration":"0.015 seconds" + 
"duration":"0.028 seconds" }, "headers":[],"tags":{},"see":[], "features":[ { "id":"An Nda can be labeled.", "result":"PASS", - "duration":"0", + "duration":"0.001 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -35,7 +35,7 @@ { "id":"We can label the columns of a rank 2 nd-array.", "result":"PASS", - "duration":"0.001 seconds", + "duration":"0.003 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -52,7 +52,7 @@ { "id":"We can label the columns and rows of a rank 3 nd-array.", "result":"PASS", - "duration":"0", + "duration":"0.004 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -69,7 +69,7 @@ { "id":"We can use labels as selectors for slicing.", "result":"PASS", - "duration":"0.001 seconds", + "duration":"0.005 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -90,7 +90,7 @@ { "id":"The slice of a labeled vector is labeled too.", "result":"PASS", - "duration":"0.001 seconds", + "duration":"0.002 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -111,7 +111,7 @@ { "id":"Concatenating 2 labeled nd-arrays will produce a nd-array which is also labeled.", "result":"PASS", - "duration":"0.002 seconds", + "duration":"0.007 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, diff --git a/docs/spock/reports/ut.ndas.Nda_Inplace_Framing_Spec.json b/docs/spock/reports/ut.ndas.Nda_Inplace_Framing_Spec.json index e8b6229c8..4e486603d 100644 --- a/docs/spock/reports/ut.ndas.Nda_Inplace_Framing_Spec.json +++ b/docs/spock/reports/ut.ndas.Nda_Inplace_Framing_Spec.json @@ -9,14 +9,14 @@ "failures":"0", "errors":"0", "skipped":"0", - "duration":"0.005 seconds" + "duration":"0.023 seconds" }, "headers":[],"tags":{},"see":[], "features":[ { "id":"We can label the columns of a rank 2 nd-array.", "result":"PASS", - "duration":"0", + "duration":"0.002 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -33,7 +33,7 @@ { "id":"We can label the columns and rows of a rank 3 nd-array.", "result":"PASS", - "duration":"0", + "duration":"0.001 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -50,7 +50,7 @@ { "id":"We can use labels as selectors for slicing.", "result":"PASS", - "duration":"0", + "duration":"0.003 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -71,7 +71,7 @@ { "id":"The slice of a labeled vector is labeled too.", "result":"PASS", - "duration":"0", + "duration":"0.002 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -92,7 +92,7 @@ { "id":"Concatenating 2 labeled nd-arrays will produce a nd-array which is also labeled.", "result":"PASS", - "duration":"0", + "duration":"0.003 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -113,7 +113,7 @@ { "id":"We can concatenate more than 2 nd-arrays.", "result":"PASS", - "duration":"0", + "duration":"0.005 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, diff --git a/docs/spock/reports/ut.ndas.Nda_Instantiation_Spec.json b/docs/spock/reports/ut.ndas.Nda_Instantiation_Spec.json index 0fa1eadc1..60f15dfb2 100644 --- a/docs/spock/reports/ut.ndas.Nda_Instantiation_Spec.json +++ b/docs/spock/reports/ut.ndas.Nda_Instantiation_Spec.json @@ -4,19 +4,19 @@ "narrative":"In this specification we cover how ND-arrays can be instantiated.", "subjects":["neureka.Nda"], "statistics":{ - "runs":"3", + "runs":"8", "successRate":"100.0%", "failures":"0", "errors":"0", "skipped":"0", - "duration":"0.004 seconds" + "duration":"0.016 seconds" }, "headers":[],"tags":{},"see":[], "features":[ { "id":"A vector can be 
created from an array of values through the \"of\" method.", "result":"PASS", - "duration":"0", + "duration":"0.004 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -37,11 +37,34 @@ }, { - "id":"ND-arrays can be created fluently.", + "id":"ND-arrays can be created fluently. [0]", + "result":"PASS", + "duration":"0.002 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":["This feature is based on a fluent builder API!"] + }, + "blocks":[ + {"kind":"given","text":"We create a new homogeneously filled Nda instance using a fluent builder API.","code":["Nda t = Nda.of( type )"," .withShape( 3, 2 )"," .all( value )"]}, + + {"kind":"expect","text":"This new instance will have the expected data type!","code":["t.itemType() == type"]}, + + {"kind":"and","text":"...all items of the array will have the same value, which is the one we passed to the fluent builder.","code":["t.every((Predicate){ it == value })"]}, + + {"kind":"and","text":"The nd-array will have the shape we passed to the builder.","code":["t.shape == [3, 2]"]}, + + {"kind":"and","text":"The size of the tensor will be the product of all shape entries: 2 * 3 = 6.","code":["t.size == 6"]}, + + {"kind":"where","text":"The following data is being used to populate the builder API:","code":{"type":["Integer","Double","Float","Long","Boolean","Character"],"value":["42 as int","4.0 as double","4f as float","42L as Long","false","'°' as char"],"data":["new int[] { 42 }","new double[]{ 4.0 }","new float[] { 4f }","new long[] { 42L }","new boolean[] { false }","new char[] { '°' as char }"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"ND-arrays can be created fluently. [1]", "result":"PASS", "duration":"0", "iterations":{ - "tags":{},"see":[],"extraInfo":["This feature is based on a fluent builder API!","This feature is based on a fluent builder API!","This feature is based on a fluent builder API!","This feature is based on a fluent builder API!","This feature is based on a fluent builder API!","This feature is based on a fluent builder API!"] + "tags":{},"see":[],"extraInfo":["This feature is based on a fluent builder API!"] }, "blocks":[ {"kind":"given","text":"We create a new homogeneously filled Nda instance using a fluent builder API.","code":["Nda t = Nda.of( type )"," .withShape( 3, 2 )"," .all( value )"]}, @@ -60,10 +83,102 @@ }, { - "id":"Common types of nd-arrays are best instantiated using type specific convenience methods.", + "id":"ND-arrays can be created fluently. 
[2]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":["This feature is based on a fluent builder API!"] + }, + "blocks":[ + {"kind":"given","text":"We create a new homogeneously filled Nda instance using a fluent builder API.","code":["Nda t = Nda.of( type )"," .withShape( 3, 2 )"," .all( value )"]}, + + {"kind":"expect","text":"This new instance will have the expected data type!","code":["t.itemType() == type"]}, + + {"kind":"and","text":"...all items of the array will have the same value, which is the one we passed to the fluent builder.","code":["t.every((Predicate){ it == value })"]}, + + {"kind":"and","text":"The nd-array will have the shape we passed to the builder.","code":["t.shape == [3, 2]"]}, + + {"kind":"and","text":"The size of the tensor will be the product of all shape entries: 2 * 3 = 6.","code":["t.size == 6"]}, + + {"kind":"where","text":"The following data is being used to populate the builder API:","code":{"type":["Integer","Double","Float","Long","Boolean","Character"],"value":["42 as int","4.0 as double","4f as float","42L as Long","false","'°' as char"],"data":["new int[] { 42 }","new double[]{ 4.0 }","new float[] { 4f }","new long[] { 42L }","new boolean[] { false }","new char[] { '°' as char }"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"ND-arrays can be created fluently. [3]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":["This feature is based on a fluent builder API!"] + }, + "blocks":[ + {"kind":"given","text":"We create a new homogeneously filled Nda instance using a fluent builder API.","code":["Nda t = Nda.of( type )"," .withShape( 3, 2 )"," .all( value )"]}, + + {"kind":"expect","text":"This new instance will have the expected data type!","code":["t.itemType() == type"]}, + + {"kind":"and","text":"...all items of the array will have the same value, which is the one we passed to the fluent builder.","code":["t.every((Predicate){ it == value })"]}, + + {"kind":"and","text":"The nd-array will have the shape we passed to the builder.","code":["t.shape == [3, 2]"]}, + + {"kind":"and","text":"The size of the tensor will be the product of all shape entries: 2 * 3 = 6.","code":["t.size == 6"]}, + + {"kind":"where","text":"The following data is being used to populate the builder API:","code":{"type":["Integer","Double","Float","Long","Boolean","Character"],"value":["42 as int","4.0 as double","4f as float","42L as Long","false","'°' as char"],"data":["new int[] { 42 }","new double[]{ 4.0 }","new float[] { 4f }","new long[] { 42L }","new boolean[] { false }","new char[] { '°' as char }"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"ND-arrays can be created fluently. 
[4]", "result":"PASS", "duration":"0", "iterations":{ + "tags":{},"see":[],"extraInfo":["This feature is based on a fluent builder API!"] + }, + "blocks":[ + {"kind":"given","text":"We create a new homogeneously filled Nda instance using a fluent builder API.","code":["Nda t = Nda.of( type )"," .withShape( 3, 2 )"," .all( value )"]}, + + {"kind":"expect","text":"This new instance will have the expected data type!","code":["t.itemType() == type"]}, + + {"kind":"and","text":"...all items of the array will have the same value, which is the one we passed to the fluent builder.","code":["t.every((Predicate){ it == value })"]}, + + {"kind":"and","text":"The nd-array will have the shape we passed to the builder.","code":["t.shape == [3, 2]"]}, + + {"kind":"and","text":"The size of the tensor will be the product of all shape entries: 2 * 3 = 6.","code":["t.size == 6"]}, + + {"kind":"where","text":"The following data is being used to populate the builder API:","code":{"type":["Integer","Double","Float","Long","Boolean","Character"],"value":["42 as int","4.0 as double","4f as float","42L as Long","false","'°' as char"],"data":["new int[] { 42 }","new double[]{ 4.0 }","new float[] { 4f }","new long[] { 42L }","new boolean[] { false }","new char[] { '°' as char }"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"ND-arrays can be created fluently. [5]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":["This feature is based on a fluent builder API!"] + }, + "blocks":[ + {"kind":"given","text":"We create a new homogeneously filled Nda instance using a fluent builder API.","code":["Nda t = Nda.of( type )"," .withShape( 3, 2 )"," .all( value )"]}, + + {"kind":"expect","text":"This new instance will have the expected data type!","code":["t.itemType() == type"]}, + + {"kind":"and","text":"...all items of the array will have the same value, which is the one we passed to the fluent builder.","code":["t.every((Predicate){ it == value })"]}, + + {"kind":"and","text":"The nd-array will have the shape we passed to the builder.","code":["t.shape == [3, 2]"]}, + + {"kind":"and","text":"The size of the tensor will be the product of all shape entries: 2 * 3 = 6.","code":["t.size == 6"]}, + + {"kind":"where","text":"The following data is being used to populate the builder API:","code":{"type":["Integer","Double","Float","Long","Boolean","Character"],"value":["42 as int","4.0 as double","4f as float","42L as Long","false","'°' as char"],"data":["new int[] { 42 }","new double[]{ 4.0 }","new float[] { 4f }","new long[] { 42L }","new boolean[] { false }","new char[] { '°' as char }"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Common types of nd-arrays are best instantiated using type specific convenience methods.", + "result":"PASS", + "duration":"0.005 seconds", + "iterations":{ "tags":{},"see":[],"extraInfo":[] }, "blocks":[ diff --git a/docs/spock/reports/ut.ndas.Nda_Items_Spec.json b/docs/spock/reports/ut.ndas.Nda_Items_Spec.json index 16307b698..4658d04ba 100644 --- a/docs/spock/reports/ut.ndas.Nda_Items_Spec.json +++ b/docs/spock/reports/ut.ndas.Nda_Items_Spec.json @@ -1,7 +1,7 @@ { "className":"ut.ndas.Nda_Items_Spec", "title":"The Nds Items API", - "narrative":"Nd-arrays are collections of items similar to other\n collection types in Java.\n One useful way to access the items of an nd-array is\n to use the items API.\n\n Using the `at` methods we can access an `Item` object\n which is a wrapper around the item's value and its\n 
index in the nd-array.\n\n The `Item` object is a simple data class which\n is very similar to the `Optional` class, meaning\n that it can either be empty or contain a value.", + "narrative":"Nd-arrays are collections of items similar to other\n collection types in Java. \n One useful way to access the items of an nd-array is\n to use the items API.\n\n Using the `at` methods we can access an `Item` object\n which is a wrapper around the item's value and its\n index in the nd-array.\n\n The `Item` object is a simple data class which\n is very similar to the `Optional` class, meaning\n that it can either be empty or contain a value.", "subjects":[], "statistics":{ "runs":"6", @@ -9,14 +9,14 @@ "failures":"0", "errors":"0", "skipped":"0", - "duration":"0.004 seconds" + "duration":"0.010 seconds" }, "headers":[],"tags":{},"see":[], "features":[ { "id":"We can check if items of a tensor is present or not.", "result":"PASS", - "duration":"0", + "duration":"0.002 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -31,7 +31,7 @@ { "id":"We can get the value of an item.", "result":"PASS", - "duration":"0", + "duration":"0.001 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -46,7 +46,7 @@ { "id":"The \"get\" method of an Item object will throw an exception if the item is missing.", "result":"PASS", - "duration":"0", + "duration":"0.001 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":["\n Similar as the `Optional` class, an `Item` object can be empty.\n If we try to get the value of an empty item, an exception will be thrown.\n The reason for this is that we can not be sure that the item is actually\n empty or if it is just not present in the nd-array.\n If you want to get an item's value without throwing an exception \n (but the risk of getting a null value instead) you can use the `orElseNull` method.\n "] }, @@ -82,7 +82,7 @@ { "id":"An item can be converted to an Optional object.", "result":"PASS", - "duration":"0", + "duration":"0.001 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -97,7 +97,7 @@ { "id":"Other than the \"orElse(T)\" method of the Optional class, the same method of an Item will throw an exception if the provided value is null.", "result":"PASS", - "duration":"0", + "duration":"0.001 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":["\n If you want to get an item's value without throwing an exception \n (but the risk of getting a null value instead) you can use the `orElseNull` method.\n The `orElse(T)` method of the `Optional` class will not throw an exception\n if the provided value is null. 
This is not the case for the `orElse(T)` method\n of an `Item` object.\n "] }, diff --git a/docs/spock/reports/ut.ndas.Nda_Mutation_Spec.json b/docs/spock/reports/ut.ndas.Nda_Mutation_Spec.json index 81a96e1f7..81b974bde 100644 --- a/docs/spock/reports/ut.ndas.Nda_Mutation_Spec.json +++ b/docs/spock/reports/ut.ndas.Nda_Mutation_Spec.json @@ -1,7 +1,7 @@ { "className":"ut.ndas.Nda_Mutation_Spec", "title":"Mutating ND-Arrays", - "narrative":"ND-Arrays should be considered immutable, so we should prefer creating new\n ND-Arrays from existing ones using wither methods.\n However this is not always a good idea as it can be expensive to create new\n ND-Arrays, especially if the ND-Array is very large.\n The ability to mutate ND-Arrays is therefore provided, but only\n accessible via the mutation API exposed by the `getMut()` method.", + "narrative":"ND-Arrays should be considered immutable, so we should prefer creating new \n ND-Arrays from existing ones using wither methods.\n However this is not always a good idea as it can be expensive to create new\n ND-Arrays, especially if the ND-Array is very large.\n The ability to mutate ND-Arrays is therefore provided, but only\n accessible via the mutation API exposed by the `getMut()` method.", "subjects":[], "statistics":{ "runs":"6", @@ -9,14 +9,14 @@ "failures":"0", "errors":"0", "skipped":"0", - "duration":"0.004 seconds" + "duration":"0.011 seconds" }, "headers":[],"tags":{},"see":[], "features":[ { "id":"A simple vector ND-Array can be mutated using the \"setItemAt\" method.", "result":"PASS", - "duration":"0", + "duration":"0.001 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -33,7 +33,7 @@ { "id":"A ND-Array can be mutated simply using the \"set\" method.", "result":"PASS", - "duration":"0", + "duration":"0.002 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":["\n This method of mutation is best used in Kotlin where it translates\n to the \"set\" operator.\n So it is possible to write code like this: `nda[2, 3] = 42.0`\n "] }, @@ -50,7 +50,7 @@ { "id":"A simple vector ND-Array can be mutated using the \"at(..).set(..)\" methods.", "result":"PASS", - "duration":"0", + "duration":"0.001 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -67,7 +67,7 @@ { "id":"A ND-Array can be mutated using the \"at(..).set(..)\" methods.", "result":"PASS", - "duration":"0", + "duration":"0.001 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -101,7 +101,7 @@ { "id":"We can use the subscription operator to mutate an ND-Array.", "result":"PASS", - "duration":"0", + "duration":"0.001 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, diff --git a/docs/spock/reports/ut.ndas.Nda_Reshape_Spec.json b/docs/spock/reports/ut.ndas.Nda_Reshape_Spec.json index e6491d8f7..35cc90493 100644 --- a/docs/spock/reports/ut.ndas.Nda_Reshape_Spec.json +++ b/docs/spock/reports/ut.ndas.Nda_Reshape_Spec.json @@ -1,7 +1,7 @@ { "className":"ut.ndas.Nda_Reshape_Spec", "title":"Nda Reshaping", - "narrative":"Immutability is a core concept of the Neureka library.\n This means that the Nda API does not expose mutability directly.\n Instead, the API exposes methods that return new instances of Nda\n that are derived from the original instance.\n\n This is also true for reshaping operations,\n meaning that the Nda API does not expose methods that mutate the shape of an Nda\n but instead provides methods that return new instances of Nda\n with a different shape.\n\n Don't be concerned about the performance implications of this,\n 
because in the vast majority of cases the new instance will be backed by the same data array\n as the original instance!", + "narrative":"Immutability is a core concept of the Neureka library.\n This means that the Nda API does not expose mutability directly.\n Instead, the API exposes methods that return new instances of Nda\n that are derived from the original instance.\n\n This is also true for reshaping operations, \n meaning that the Nda API does not expose methods that mutate the shape of an Nda\n but instead provides methods that return new instances of Nda\n with a different shape.\n\n Don't be concerned about the performance implications of this,\n because in the vast majority of cases the new instance will be backed by the same data array\n as the original instance!", "subjects":["neureka.Nda"], "statistics":{ "runs":"1", @@ -9,14 +9,14 @@ "failures":"0", "errors":"0", "skipped":"0", - "duration":"0.001 seconds" + "duration":"0.003 seconds" }, "headers":[],"tags":{},"see":[], "features":[ { "id":"We can create a new Nda instance with a different shape.", "result":"PASS", - "duration":"0", + "duration":"0.001 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, diff --git a/docs/spock/reports/ut.ndim.NDConfiguration_Spec.json b/docs/spock/reports/ut.ndim.NDConfiguration_Spec.json index 1e676db60..1d918ceb0 100644 --- a/docs/spock/reports/ut.ndim.NDConfiguration_Spec.json +++ b/docs/spock/reports/ut.ndim.NDConfiguration_Spec.json @@ -1,22 +1,573 @@ { "className":"ut.ndim.NDConfiguration_Spec", "title":"Making Arrays N-Dimensional", - "narrative":"Under the hood Neureka implements powerful indexing\n abstractions through the `NDConfiguration` interface and its various implementations.\n This allows for the creation of tensors/nd-arrays with arbitrary dimensions,\n the ability to slice them into smaller tensors/nd-arrays with the same underlying data,\n and finally the ability to permute their axes (like transposing them for example).\n\n This specification however only focuses on the behaviour of the `NDConfiguration` interface\n which translates various types of indices.", + "narrative":"Under the hood Neureka implements powerful indexing \n abstractions through the `NDConfiguration` interface and its various implementations.\n This allows for the creation of tensors/nd-arrays with arbitrary dimensions, \n the ability to slice them into smaller tensors/nd-arrays with the same underlying data,\n and finally the ability to permute their axes (like transposing them for example).\n\n This specification however only focuses on the behaviour of the `NDConfiguration` interface\n which translates various types of indices.", "subjects":["neureka.ndim.config.NDConfiguration"], "statistics":{ - "runs":"1", + "runs":"20", "successRate":"100.0%", "failures":"0", "errors":"0", "skipped":"0", - "duration":"0.024 seconds" + "duration":"0.055 seconds" }, "headers":[],"tags":{},"see":[], "features":[ { - "id":"Various NDConfigurations behave exactly like their general purpose implementation.", + "id":"Various NDConfigurations behave exactly like their general purpose implementation. 
[0]", "result":"PASS", - "duration":"0.008 seconds", + "duration":"0.013 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A general purpose NDConfiguration implementation as well as a specialized one (provided by the factory method).","code":["var ndc1 = SlicedNDConfiguration.construct(shape, translation, indicesMap, spread, offset)","var ndc2 = NDConfiguration.of(shape, translation, indicesMap, spread, offset)"]}, + + {"kind":"and","text":"2 corresponding iterators:","code":["var i1 = NDIterator.of(ndc1, NDIterator.NonVirtual.FALSE)","var i2 = NDIterator.of(ndc2, NDIterator.NonVirtual.FALSE)"]}, + + {"kind":"expect","text":"","code":["ndc2.getClass() == expected","ndc1.rank() == ndc2.rank()","ndc1.size() == ndc2.size()","ndc1.shape() == ndc2.shape()","ndc1.strides() == ndc2.strides()","ndc1.indicesMap() == ndc2.indicesMap()","ndc1.spread() == ndc2.spread()","ndc1.offset() == ndc2.offset()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," ndc1.indicesOfIndex(it) == ndc2.indicesOfIndex(it)","})",".every()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," ndc1.indexOfIndices(ndc1.indicesOfIndex(it)) == ndc2.indexOfIndices(ndc2.indicesOfIndex(it))","})",".every()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," ndc1.indexOfIndex(it) == ndc2.indexOfIndex(it)","})",".every()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," boolean matches = i1.get() == i2.get()"," if ( it < ndc1.size()-1 ) { i1.increment(); i2.increment() }"," return matches","})",".every()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," boolean matches = i1.get() == i2.get()"," i1.decrement(); i2.decrement()"," return matches","})",".every()"]}, + + {"kind":"where","text":"","code":{"shape":["[2,3,8,4]","[2,3,8,4]","[2,3,8,4]","[2,3,8]","[2,3,8]","[2,3,8]","[2,3]","[2,3]","[2,3]","[2,3]","[2,3]","[2,3]","[2,3]","[3]","[42]","[3]","[30]","[2]","[1]","[1]"],"translation":["[96, 32, 4, 1]","[96, 200, 8, 1]","[96, 32, 4, 1]","[24,8,1]","[8,24,7]","[8,24,7]","[3,1]","[1,2]","[1,2]","[81,42]","[1,2]","[3,1]","[3,1]","[1]","[1]","[1]","[8]","[1]","[1]","[1]"],"indicesMap":["[96, 32, 4, 1]","[96, 32, 4, 1]","[96, 92, 4, 1]","[24,8,1]","[1,2,3]","[1,2,3]","[3,1]","[1,2]","[3,1]","[3,99]","[2,1]","[3,1]","[3,1]","[1]","[1]","[2]","[2]","[1]","[1]","[1]"],"spread":["[1,1,1,1]","[1,1,1,1]","[1,4,1,1]","[1, 1, 1]","[1, 1, 1]","[1, 7, 1]","[1, 1]","[1, 1]","[1, 1]","[1, 1]","[7, 2]","[1, 1]","[1, 2]","[1]","[1]","[1]","[1]","[1]","[1]","[1]"],"offset":["[0,0,0,0]","[0,0,0,0]","[0,0,0,0]","[0,0,0]","[0,0,0]","[0,0,0]","[0,0]","[0,0]","[0,0]","[0,0]","[1,8]","[6,0]","[0,0]","[0]","[0]","[0]","[0]","[5]","[0]","[3]"],"expected":["SimpleNDConfiguration","PermutedNDConfiguration","SlicedNDConfiguration","Simple3DConfiguration","Permuted3DConfiguration","Sliced3DConfiguration","Simple2DConfiguration","Permuted2DConfiguration","Permuted2DConfiguration","Permuted2DConfiguration","Sliced2DConfiguration","Sliced2DConfiguration","Sliced2DConfiguration","Simple1DConfiguration","Simple1DConfiguration","Permuted1DConfiguration","Permuted1DConfiguration","Sliced1DConfiguration","Simple0DConfiguration","Sliced0DConfiguration"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Various NDConfigurations behave exactly like their general purpose implementation. 
[1]", + "result":"PASS", + "duration":"0.004 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A general purpose NDConfiguration implementation as well as a specialized one (provided by the factory method).","code":["var ndc1 = SlicedNDConfiguration.construct(shape, translation, indicesMap, spread, offset)","var ndc2 = NDConfiguration.of(shape, translation, indicesMap, spread, offset)"]}, + + {"kind":"and","text":"2 corresponding iterators:","code":["var i1 = NDIterator.of(ndc1, NDIterator.NonVirtual.FALSE)","var i2 = NDIterator.of(ndc2, NDIterator.NonVirtual.FALSE)"]}, + + {"kind":"expect","text":"","code":["ndc2.getClass() == expected","ndc1.rank() == ndc2.rank()","ndc1.size() == ndc2.size()","ndc1.shape() == ndc2.shape()","ndc1.strides() == ndc2.strides()","ndc1.indicesMap() == ndc2.indicesMap()","ndc1.spread() == ndc2.spread()","ndc1.offset() == ndc2.offset()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," ndc1.indicesOfIndex(it) == ndc2.indicesOfIndex(it)","})",".every()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," ndc1.indexOfIndices(ndc1.indicesOfIndex(it)) == ndc2.indexOfIndices(ndc2.indicesOfIndex(it))","})",".every()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," ndc1.indexOfIndex(it) == ndc2.indexOfIndex(it)","})",".every()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," boolean matches = i1.get() == i2.get()"," if ( it < ndc1.size()-1 ) { i1.increment(); i2.increment() }"," return matches","})",".every()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," boolean matches = i1.get() == i2.get()"," i1.decrement(); i2.decrement()"," return matches","})",".every()"]}, + + {"kind":"where","text":"","code":{"shape":["[2,3,8,4]","[2,3,8,4]","[2,3,8,4]","[2,3,8]","[2,3,8]","[2,3,8]","[2,3]","[2,3]","[2,3]","[2,3]","[2,3]","[2,3]","[2,3]","[3]","[42]","[3]","[30]","[2]","[1]","[1]"],"translation":["[96, 32, 4, 1]","[96, 200, 8, 1]","[96, 32, 4, 1]","[24,8,1]","[8,24,7]","[8,24,7]","[3,1]","[1,2]","[1,2]","[81,42]","[1,2]","[3,1]","[3,1]","[1]","[1]","[1]","[8]","[1]","[1]","[1]"],"indicesMap":["[96, 32, 4, 1]","[96, 32, 4, 1]","[96, 92, 4, 1]","[24,8,1]","[1,2,3]","[1,2,3]","[3,1]","[1,2]","[3,1]","[3,99]","[2,1]","[3,1]","[3,1]","[1]","[1]","[2]","[2]","[1]","[1]","[1]"],"spread":["[1,1,1,1]","[1,1,1,1]","[1,4,1,1]","[1, 1, 1]","[1, 1, 1]","[1, 7, 1]","[1, 1]","[1, 1]","[1, 1]","[1, 1]","[7, 2]","[1, 1]","[1, 2]","[1]","[1]","[1]","[1]","[1]","[1]","[1]"],"offset":["[0,0,0,0]","[0,0,0,0]","[0,0,0,0]","[0,0,0]","[0,0,0]","[0,0,0]","[0,0]","[0,0]","[0,0]","[0,0]","[1,8]","[6,0]","[0,0]","[0]","[0]","[0]","[0]","[5]","[0]","[3]"],"expected":["SimpleNDConfiguration","PermutedNDConfiguration","SlicedNDConfiguration","Simple3DConfiguration","Permuted3DConfiguration","Sliced3DConfiguration","Simple2DConfiguration","Permuted2DConfiguration","Permuted2DConfiguration","Permuted2DConfiguration","Sliced2DConfiguration","Sliced2DConfiguration","Sliced2DConfiguration","Simple1DConfiguration","Simple1DConfiguration","Permuted1DConfiguration","Permuted1DConfiguration","Sliced1DConfiguration","Simple0DConfiguration","Sliced0DConfiguration"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Various NDConfigurations behave exactly like their general purpose implementation. 
[2]", + "result":"PASS", + "duration":"0.004 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A general purpose NDConfiguration implementation as well as a specialized one (provided by the factory method).","code":["var ndc1 = SlicedNDConfiguration.construct(shape, translation, indicesMap, spread, offset)","var ndc2 = NDConfiguration.of(shape, translation, indicesMap, spread, offset)"]}, + + {"kind":"and","text":"2 corresponding iterators:","code":["var i1 = NDIterator.of(ndc1, NDIterator.NonVirtual.FALSE)","var i2 = NDIterator.of(ndc2, NDIterator.NonVirtual.FALSE)"]}, + + {"kind":"expect","text":"","code":["ndc2.getClass() == expected","ndc1.rank() == ndc2.rank()","ndc1.size() == ndc2.size()","ndc1.shape() == ndc2.shape()","ndc1.strides() == ndc2.strides()","ndc1.indicesMap() == ndc2.indicesMap()","ndc1.spread() == ndc2.spread()","ndc1.offset() == ndc2.offset()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," ndc1.indicesOfIndex(it) == ndc2.indicesOfIndex(it)","})",".every()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," ndc1.indexOfIndices(ndc1.indicesOfIndex(it)) == ndc2.indexOfIndices(ndc2.indicesOfIndex(it))","})",".every()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," ndc1.indexOfIndex(it) == ndc2.indexOfIndex(it)","})",".every()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," boolean matches = i1.get() == i2.get()"," if ( it < ndc1.size()-1 ) { i1.increment(); i2.increment() }"," return matches","})",".every()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," boolean matches = i1.get() == i2.get()"," i1.decrement(); i2.decrement()"," return matches","})",".every()"]}, + + {"kind":"where","text":"","code":{"shape":["[2,3,8,4]","[2,3,8,4]","[2,3,8,4]","[2,3,8]","[2,3,8]","[2,3,8]","[2,3]","[2,3]","[2,3]","[2,3]","[2,3]","[2,3]","[2,3]","[3]","[42]","[3]","[30]","[2]","[1]","[1]"],"translation":["[96, 32, 4, 1]","[96, 200, 8, 1]","[96, 32, 4, 1]","[24,8,1]","[8,24,7]","[8,24,7]","[3,1]","[1,2]","[1,2]","[81,42]","[1,2]","[3,1]","[3,1]","[1]","[1]","[1]","[8]","[1]","[1]","[1]"],"indicesMap":["[96, 32, 4, 1]","[96, 32, 4, 1]","[96, 92, 4, 1]","[24,8,1]","[1,2,3]","[1,2,3]","[3,1]","[1,2]","[3,1]","[3,99]","[2,1]","[3,1]","[3,1]","[1]","[1]","[2]","[2]","[1]","[1]","[1]"],"spread":["[1,1,1,1]","[1,1,1,1]","[1,4,1,1]","[1, 1, 1]","[1, 1, 1]","[1, 7, 1]","[1, 1]","[1, 1]","[1, 1]","[1, 1]","[7, 2]","[1, 1]","[1, 2]","[1]","[1]","[1]","[1]","[1]","[1]","[1]"],"offset":["[0,0,0,0]","[0,0,0,0]","[0,0,0,0]","[0,0,0]","[0,0,0]","[0,0,0]","[0,0]","[0,0]","[0,0]","[0,0]","[1,8]","[6,0]","[0,0]","[0]","[0]","[0]","[0]","[5]","[0]","[3]"],"expected":["SimpleNDConfiguration","PermutedNDConfiguration","SlicedNDConfiguration","Simple3DConfiguration","Permuted3DConfiguration","Sliced3DConfiguration","Simple2DConfiguration","Permuted2DConfiguration","Permuted2DConfiguration","Permuted2DConfiguration","Sliced2DConfiguration","Sliced2DConfiguration","Sliced2DConfiguration","Simple1DConfiguration","Simple1DConfiguration","Permuted1DConfiguration","Permuted1DConfiguration","Sliced1DConfiguration","Simple0DConfiguration","Sliced0DConfiguration"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Various NDConfigurations behave exactly like their general purpose implementation. 
[3]", + "result":"PASS", + "duration":"0.002 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A general purpose NDConfiguration implementation as well as a specialized one (provided by the factory method).","code":["var ndc1 = SlicedNDConfiguration.construct(shape, translation, indicesMap, spread, offset)","var ndc2 = NDConfiguration.of(shape, translation, indicesMap, spread, offset)"]}, + + {"kind":"and","text":"2 corresponding iterators:","code":["var i1 = NDIterator.of(ndc1, NDIterator.NonVirtual.FALSE)","var i2 = NDIterator.of(ndc2, NDIterator.NonVirtual.FALSE)"]}, + + {"kind":"expect","text":"","code":["ndc2.getClass() == expected","ndc1.rank() == ndc2.rank()","ndc1.size() == ndc2.size()","ndc1.shape() == ndc2.shape()","ndc1.strides() == ndc2.strides()","ndc1.indicesMap() == ndc2.indicesMap()","ndc1.spread() == ndc2.spread()","ndc1.offset() == ndc2.offset()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," ndc1.indicesOfIndex(it) == ndc2.indicesOfIndex(it)","})",".every()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," ndc1.indexOfIndices(ndc1.indicesOfIndex(it)) == ndc2.indexOfIndices(ndc2.indicesOfIndex(it))","})",".every()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," ndc1.indexOfIndex(it) == ndc2.indexOfIndex(it)","})",".every()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," boolean matches = i1.get() == i2.get()"," if ( it < ndc1.size()-1 ) { i1.increment(); i2.increment() }"," return matches","})",".every()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," boolean matches = i1.get() == i2.get()"," i1.decrement(); i2.decrement()"," return matches","})",".every()"]}, + + {"kind":"where","text":"","code":{"shape":["[2,3,8,4]","[2,3,8,4]","[2,3,8,4]","[2,3,8]","[2,3,8]","[2,3,8]","[2,3]","[2,3]","[2,3]","[2,3]","[2,3]","[2,3]","[2,3]","[3]","[42]","[3]","[30]","[2]","[1]","[1]"],"translation":["[96, 32, 4, 1]","[96, 200, 8, 1]","[96, 32, 4, 1]","[24,8,1]","[8,24,7]","[8,24,7]","[3,1]","[1,2]","[1,2]","[81,42]","[1,2]","[3,1]","[3,1]","[1]","[1]","[1]","[8]","[1]","[1]","[1]"],"indicesMap":["[96, 32, 4, 1]","[96, 32, 4, 1]","[96, 92, 4, 1]","[24,8,1]","[1,2,3]","[1,2,3]","[3,1]","[1,2]","[3,1]","[3,99]","[2,1]","[3,1]","[3,1]","[1]","[1]","[2]","[2]","[1]","[1]","[1]"],"spread":["[1,1,1,1]","[1,1,1,1]","[1,4,1,1]","[1, 1, 1]","[1, 1, 1]","[1, 7, 1]","[1, 1]","[1, 1]","[1, 1]","[1, 1]","[7, 2]","[1, 1]","[1, 2]","[1]","[1]","[1]","[1]","[1]","[1]","[1]"],"offset":["[0,0,0,0]","[0,0,0,0]","[0,0,0,0]","[0,0,0]","[0,0,0]","[0,0,0]","[0,0]","[0,0]","[0,0]","[0,0]","[1,8]","[6,0]","[0,0]","[0]","[0]","[0]","[0]","[5]","[0]","[3]"],"expected":["SimpleNDConfiguration","PermutedNDConfiguration","SlicedNDConfiguration","Simple3DConfiguration","Permuted3DConfiguration","Sliced3DConfiguration","Simple2DConfiguration","Permuted2DConfiguration","Permuted2DConfiguration","Permuted2DConfiguration","Sliced2DConfiguration","Sliced2DConfiguration","Sliced2DConfiguration","Simple1DConfiguration","Simple1DConfiguration","Permuted1DConfiguration","Permuted1DConfiguration","Sliced1DConfiguration","Simple0DConfiguration","Sliced0DConfiguration"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Various NDConfigurations behave exactly like their general purpose implementation. 
[4]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A general purpose NDConfiguration implementation as well as a specialized one (provided by the factory method).","code":["var ndc1 = SlicedNDConfiguration.construct(shape, translation, indicesMap, spread, offset)","var ndc2 = NDConfiguration.of(shape, translation, indicesMap, spread, offset)"]}, + + {"kind":"and","text":"2 corresponding iterators:","code":["var i1 = NDIterator.of(ndc1, NDIterator.NonVirtual.FALSE)","var i2 = NDIterator.of(ndc2, NDIterator.NonVirtual.FALSE)"]}, + + {"kind":"expect","text":"","code":["ndc2.getClass() == expected","ndc1.rank() == ndc2.rank()","ndc1.size() == ndc2.size()","ndc1.shape() == ndc2.shape()","ndc1.strides() == ndc2.strides()","ndc1.indicesMap() == ndc2.indicesMap()","ndc1.spread() == ndc2.spread()","ndc1.offset() == ndc2.offset()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," ndc1.indicesOfIndex(it) == ndc2.indicesOfIndex(it)","})",".every()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," ndc1.indexOfIndices(ndc1.indicesOfIndex(it)) == ndc2.indexOfIndices(ndc2.indicesOfIndex(it))","})",".every()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," ndc1.indexOfIndex(it) == ndc2.indexOfIndex(it)","})",".every()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," boolean matches = i1.get() == i2.get()"," if ( it < ndc1.size()-1 ) { i1.increment(); i2.increment() }"," return matches","})",".every()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," boolean matches = i1.get() == i2.get()"," i1.decrement(); i2.decrement()"," return matches","})",".every()"]}, + + {"kind":"where","text":"","code":{"shape":["[2,3,8,4]","[2,3,8,4]","[2,3,8,4]","[2,3,8]","[2,3,8]","[2,3,8]","[2,3]","[2,3]","[2,3]","[2,3]","[2,3]","[2,3]","[2,3]","[3]","[42]","[3]","[30]","[2]","[1]","[1]"],"translation":["[96, 32, 4, 1]","[96, 200, 8, 1]","[96, 32, 4, 1]","[24,8,1]","[8,24,7]","[8,24,7]","[3,1]","[1,2]","[1,2]","[81,42]","[1,2]","[3,1]","[3,1]","[1]","[1]","[1]","[8]","[1]","[1]","[1]"],"indicesMap":["[96, 32, 4, 1]","[96, 32, 4, 1]","[96, 92, 4, 1]","[24,8,1]","[1,2,3]","[1,2,3]","[3,1]","[1,2]","[3,1]","[3,99]","[2,1]","[3,1]","[3,1]","[1]","[1]","[2]","[2]","[1]","[1]","[1]"],"spread":["[1,1,1,1]","[1,1,1,1]","[1,4,1,1]","[1, 1, 1]","[1, 1, 1]","[1, 7, 1]","[1, 1]","[1, 1]","[1, 1]","[1, 1]","[7, 2]","[1, 1]","[1, 2]","[1]","[1]","[1]","[1]","[1]","[1]","[1]"],"offset":["[0,0,0,0]","[0,0,0,0]","[0,0,0,0]","[0,0,0]","[0,0,0]","[0,0,0]","[0,0]","[0,0]","[0,0]","[0,0]","[1,8]","[6,0]","[0,0]","[0]","[0]","[0]","[0]","[5]","[0]","[3]"],"expected":["SimpleNDConfiguration","PermutedNDConfiguration","SlicedNDConfiguration","Simple3DConfiguration","Permuted3DConfiguration","Sliced3DConfiguration","Simple2DConfiguration","Permuted2DConfiguration","Permuted2DConfiguration","Permuted2DConfiguration","Sliced2DConfiguration","Sliced2DConfiguration","Sliced2DConfiguration","Simple1DConfiguration","Simple1DConfiguration","Permuted1DConfiguration","Permuted1DConfiguration","Sliced1DConfiguration","Simple0DConfiguration","Sliced0DConfiguration"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Various NDConfigurations behave exactly like their general purpose implementation. 
[5]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A general purpose NDConfiguration implementation as well as a specialized one (provided by the factory method).","code":["var ndc1 = SlicedNDConfiguration.construct(shape, translation, indicesMap, spread, offset)","var ndc2 = NDConfiguration.of(shape, translation, indicesMap, spread, offset)"]}, + + {"kind":"and","text":"2 corresponding iterators:","code":["var i1 = NDIterator.of(ndc1, NDIterator.NonVirtual.FALSE)","var i2 = NDIterator.of(ndc2, NDIterator.NonVirtual.FALSE)"]}, + + {"kind":"expect","text":"","code":["ndc2.getClass() == expected","ndc1.rank() == ndc2.rank()","ndc1.size() == ndc2.size()","ndc1.shape() == ndc2.shape()","ndc1.strides() == ndc2.strides()","ndc1.indicesMap() == ndc2.indicesMap()","ndc1.spread() == ndc2.spread()","ndc1.offset() == ndc2.offset()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," ndc1.indicesOfIndex(it) == ndc2.indicesOfIndex(it)","})",".every()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," ndc1.indexOfIndices(ndc1.indicesOfIndex(it)) == ndc2.indexOfIndices(ndc2.indicesOfIndex(it))","})",".every()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," ndc1.indexOfIndex(it) == ndc2.indexOfIndex(it)","})",".every()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," boolean matches = i1.get() == i2.get()"," if ( it < ndc1.size()-1 ) { i1.increment(); i2.increment() }"," return matches","})",".every()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," boolean matches = i1.get() == i2.get()"," i1.decrement(); i2.decrement()"," return matches","})",".every()"]}, + + {"kind":"where","text":"","code":{"shape":["[2,3,8,4]","[2,3,8,4]","[2,3,8,4]","[2,3,8]","[2,3,8]","[2,3,8]","[2,3]","[2,3]","[2,3]","[2,3]","[2,3]","[2,3]","[2,3]","[3]","[42]","[3]","[30]","[2]","[1]","[1]"],"translation":["[96, 32, 4, 1]","[96, 200, 8, 1]","[96, 32, 4, 1]","[24,8,1]","[8,24,7]","[8,24,7]","[3,1]","[1,2]","[1,2]","[81,42]","[1,2]","[3,1]","[3,1]","[1]","[1]","[1]","[8]","[1]","[1]","[1]"],"indicesMap":["[96, 32, 4, 1]","[96, 32, 4, 1]","[96, 92, 4, 1]","[24,8,1]","[1,2,3]","[1,2,3]","[3,1]","[1,2]","[3,1]","[3,99]","[2,1]","[3,1]","[3,1]","[1]","[1]","[2]","[2]","[1]","[1]","[1]"],"spread":["[1,1,1,1]","[1,1,1,1]","[1,4,1,1]","[1, 1, 1]","[1, 1, 1]","[1, 7, 1]","[1, 1]","[1, 1]","[1, 1]","[1, 1]","[7, 2]","[1, 1]","[1, 2]","[1]","[1]","[1]","[1]","[1]","[1]","[1]"],"offset":["[0,0,0,0]","[0,0,0,0]","[0,0,0,0]","[0,0,0]","[0,0,0]","[0,0,0]","[0,0]","[0,0]","[0,0]","[0,0]","[1,8]","[6,0]","[0,0]","[0]","[0]","[0]","[0]","[5]","[0]","[3]"],"expected":["SimpleNDConfiguration","PermutedNDConfiguration","SlicedNDConfiguration","Simple3DConfiguration","Permuted3DConfiguration","Sliced3DConfiguration","Simple2DConfiguration","Permuted2DConfiguration","Permuted2DConfiguration","Permuted2DConfiguration","Sliced2DConfiguration","Sliced2DConfiguration","Sliced2DConfiguration","Simple1DConfiguration","Simple1DConfiguration","Permuted1DConfiguration","Permuted1DConfiguration","Sliced1DConfiguration","Simple0DConfiguration","Sliced0DConfiguration"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Various NDConfigurations behave exactly like their general purpose implementation. 
[6]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A general purpose NDConfiguration implementation as well as a specialized one (provided by the factory method).","code":["var ndc1 = SlicedNDConfiguration.construct(shape, translation, indicesMap, spread, offset)","var ndc2 = NDConfiguration.of(shape, translation, indicesMap, spread, offset)"]}, + + {"kind":"and","text":"2 corresponding iterators:","code":["var i1 = NDIterator.of(ndc1, NDIterator.NonVirtual.FALSE)","var i2 = NDIterator.of(ndc2, NDIterator.NonVirtual.FALSE)"]}, + + {"kind":"expect","text":"","code":["ndc2.getClass() == expected","ndc1.rank() == ndc2.rank()","ndc1.size() == ndc2.size()","ndc1.shape() == ndc2.shape()","ndc1.strides() == ndc2.strides()","ndc1.indicesMap() == ndc2.indicesMap()","ndc1.spread() == ndc2.spread()","ndc1.offset() == ndc2.offset()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," ndc1.indicesOfIndex(it) == ndc2.indicesOfIndex(it)","})",".every()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," ndc1.indexOfIndices(ndc1.indicesOfIndex(it)) == ndc2.indexOfIndices(ndc2.indicesOfIndex(it))","})",".every()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," ndc1.indexOfIndex(it) == ndc2.indexOfIndex(it)","})",".every()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," boolean matches = i1.get() == i2.get()"," if ( it < ndc1.size()-1 ) { i1.increment(); i2.increment() }"," return matches","})",".every()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," boolean matches = i1.get() == i2.get()"," i1.decrement(); i2.decrement()"," return matches","})",".every()"]}, + + {"kind":"where","text":"","code":{"shape":["[2,3,8,4]","[2,3,8,4]","[2,3,8,4]","[2,3,8]","[2,3,8]","[2,3,8]","[2,3]","[2,3]","[2,3]","[2,3]","[2,3]","[2,3]","[2,3]","[3]","[42]","[3]","[30]","[2]","[1]","[1]"],"translation":["[96, 32, 4, 1]","[96, 200, 8, 1]","[96, 32, 4, 1]","[24,8,1]","[8,24,7]","[8,24,7]","[3,1]","[1,2]","[1,2]","[81,42]","[1,2]","[3,1]","[3,1]","[1]","[1]","[1]","[8]","[1]","[1]","[1]"],"indicesMap":["[96, 32, 4, 1]","[96, 32, 4, 1]","[96, 92, 4, 1]","[24,8,1]","[1,2,3]","[1,2,3]","[3,1]","[1,2]","[3,1]","[3,99]","[2,1]","[3,1]","[3,1]","[1]","[1]","[2]","[2]","[1]","[1]","[1]"],"spread":["[1,1,1,1]","[1,1,1,1]","[1,4,1,1]","[1, 1, 1]","[1, 1, 1]","[1, 7, 1]","[1, 1]","[1, 1]","[1, 1]","[1, 1]","[7, 2]","[1, 1]","[1, 2]","[1]","[1]","[1]","[1]","[1]","[1]","[1]"],"offset":["[0,0,0,0]","[0,0,0,0]","[0,0,0,0]","[0,0,0]","[0,0,0]","[0,0,0]","[0,0]","[0,0]","[0,0]","[0,0]","[1,8]","[6,0]","[0,0]","[0]","[0]","[0]","[0]","[5]","[0]","[3]"],"expected":["SimpleNDConfiguration","PermutedNDConfiguration","SlicedNDConfiguration","Simple3DConfiguration","Permuted3DConfiguration","Sliced3DConfiguration","Simple2DConfiguration","Permuted2DConfiguration","Permuted2DConfiguration","Permuted2DConfiguration","Sliced2DConfiguration","Sliced2DConfiguration","Sliced2DConfiguration","Simple1DConfiguration","Simple1DConfiguration","Permuted1DConfiguration","Permuted1DConfiguration","Sliced1DConfiguration","Simple0DConfiguration","Sliced0DConfiguration"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Various NDConfigurations behave exactly like their general purpose implementation. 
[7]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A general purpose NDConfiguration implementation as well as a specialized one (provided by the factory method).","code":["var ndc1 = SlicedNDConfiguration.construct(shape, translation, indicesMap, spread, offset)","var ndc2 = NDConfiguration.of(shape, translation, indicesMap, spread, offset)"]}, + + {"kind":"and","text":"2 corresponding iterators:","code":["var i1 = NDIterator.of(ndc1, NDIterator.NonVirtual.FALSE)","var i2 = NDIterator.of(ndc2, NDIterator.NonVirtual.FALSE)"]}, + + {"kind":"expect","text":"","code":["ndc2.getClass() == expected","ndc1.rank() == ndc2.rank()","ndc1.size() == ndc2.size()","ndc1.shape() == ndc2.shape()","ndc1.strides() == ndc2.strides()","ndc1.indicesMap() == ndc2.indicesMap()","ndc1.spread() == ndc2.spread()","ndc1.offset() == ndc2.offset()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," ndc1.indicesOfIndex(it) == ndc2.indicesOfIndex(it)","})",".every()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," ndc1.indexOfIndices(ndc1.indicesOfIndex(it)) == ndc2.indexOfIndices(ndc2.indicesOfIndex(it))","})",".every()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," ndc1.indexOfIndex(it) == ndc2.indexOfIndex(it)","})",".every()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," boolean matches = i1.get() == i2.get()"," if ( it < ndc1.size()-1 ) { i1.increment(); i2.increment() }"," return matches","})",".every()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," boolean matches = i1.get() == i2.get()"," i1.decrement(); i2.decrement()"," return matches","})",".every()"]}, + + {"kind":"where","text":"","code":{"shape":["[2,3,8,4]","[2,3,8,4]","[2,3,8,4]","[2,3,8]","[2,3,8]","[2,3,8]","[2,3]","[2,3]","[2,3]","[2,3]","[2,3]","[2,3]","[2,3]","[3]","[42]","[3]","[30]","[2]","[1]","[1]"],"translation":["[96, 32, 4, 1]","[96, 200, 8, 1]","[96, 32, 4, 1]","[24,8,1]","[8,24,7]","[8,24,7]","[3,1]","[1,2]","[1,2]","[81,42]","[1,2]","[3,1]","[3,1]","[1]","[1]","[1]","[8]","[1]","[1]","[1]"],"indicesMap":["[96, 32, 4, 1]","[96, 32, 4, 1]","[96, 92, 4, 1]","[24,8,1]","[1,2,3]","[1,2,3]","[3,1]","[1,2]","[3,1]","[3,99]","[2,1]","[3,1]","[3,1]","[1]","[1]","[2]","[2]","[1]","[1]","[1]"],"spread":["[1,1,1,1]","[1,1,1,1]","[1,4,1,1]","[1, 1, 1]","[1, 1, 1]","[1, 7, 1]","[1, 1]","[1, 1]","[1, 1]","[1, 1]","[7, 2]","[1, 1]","[1, 2]","[1]","[1]","[1]","[1]","[1]","[1]","[1]"],"offset":["[0,0,0,0]","[0,0,0,0]","[0,0,0,0]","[0,0,0]","[0,0,0]","[0,0,0]","[0,0]","[0,0]","[0,0]","[0,0]","[1,8]","[6,0]","[0,0]","[0]","[0]","[0]","[0]","[5]","[0]","[3]"],"expected":["SimpleNDConfiguration","PermutedNDConfiguration","SlicedNDConfiguration","Simple3DConfiguration","Permuted3DConfiguration","Sliced3DConfiguration","Simple2DConfiguration","Permuted2DConfiguration","Permuted2DConfiguration","Permuted2DConfiguration","Sliced2DConfiguration","Sliced2DConfiguration","Sliced2DConfiguration","Simple1DConfiguration","Simple1DConfiguration","Permuted1DConfiguration","Permuted1DConfiguration","Sliced1DConfiguration","Simple0DConfiguration","Sliced0DConfiguration"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Various NDConfigurations behave exactly like their general purpose implementation. 
[8]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A general purpose NDConfiguration implementation as well as a specialized one (provided by the factory method).","code":["var ndc1 = SlicedNDConfiguration.construct(shape, translation, indicesMap, spread, offset)","var ndc2 = NDConfiguration.of(shape, translation, indicesMap, spread, offset)"]}, + + {"kind":"and","text":"2 corresponding iterators:","code":["var i1 = NDIterator.of(ndc1, NDIterator.NonVirtual.FALSE)","var i2 = NDIterator.of(ndc2, NDIterator.NonVirtual.FALSE)"]}, + + {"kind":"expect","text":"","code":["ndc2.getClass() == expected","ndc1.rank() == ndc2.rank()","ndc1.size() == ndc2.size()","ndc1.shape() == ndc2.shape()","ndc1.strides() == ndc2.strides()","ndc1.indicesMap() == ndc2.indicesMap()","ndc1.spread() == ndc2.spread()","ndc1.offset() == ndc2.offset()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," ndc1.indicesOfIndex(it) == ndc2.indicesOfIndex(it)","})",".every()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," ndc1.indexOfIndices(ndc1.indicesOfIndex(it)) == ndc2.indexOfIndices(ndc2.indicesOfIndex(it))","})",".every()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," ndc1.indexOfIndex(it) == ndc2.indexOfIndex(it)","})",".every()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," boolean matches = i1.get() == i2.get()"," if ( it < ndc1.size()-1 ) { i1.increment(); i2.increment() }"," return matches","})",".every()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," boolean matches = i1.get() == i2.get()"," i1.decrement(); i2.decrement()"," return matches","})",".every()"]}, + + {"kind":"where","text":"","code":{"shape":["[2,3,8,4]","[2,3,8,4]","[2,3,8,4]","[2,3,8]","[2,3,8]","[2,3,8]","[2,3]","[2,3]","[2,3]","[2,3]","[2,3]","[2,3]","[2,3]","[3]","[42]","[3]","[30]","[2]","[1]","[1]"],"translation":["[96, 32, 4, 1]","[96, 200, 8, 1]","[96, 32, 4, 1]","[24,8,1]","[8,24,7]","[8,24,7]","[3,1]","[1,2]","[1,2]","[81,42]","[1,2]","[3,1]","[3,1]","[1]","[1]","[1]","[8]","[1]","[1]","[1]"],"indicesMap":["[96, 32, 4, 1]","[96, 32, 4, 1]","[96, 92, 4, 1]","[24,8,1]","[1,2,3]","[1,2,3]","[3,1]","[1,2]","[3,1]","[3,99]","[2,1]","[3,1]","[3,1]","[1]","[1]","[2]","[2]","[1]","[1]","[1]"],"spread":["[1,1,1,1]","[1,1,1,1]","[1,4,1,1]","[1, 1, 1]","[1, 1, 1]","[1, 7, 1]","[1, 1]","[1, 1]","[1, 1]","[1, 1]","[7, 2]","[1, 1]","[1, 2]","[1]","[1]","[1]","[1]","[1]","[1]","[1]"],"offset":["[0,0,0,0]","[0,0,0,0]","[0,0,0,0]","[0,0,0]","[0,0,0]","[0,0,0]","[0,0]","[0,0]","[0,0]","[0,0]","[1,8]","[6,0]","[0,0]","[0]","[0]","[0]","[0]","[5]","[0]","[3]"],"expected":["SimpleNDConfiguration","PermutedNDConfiguration","SlicedNDConfiguration","Simple3DConfiguration","Permuted3DConfiguration","Sliced3DConfiguration","Simple2DConfiguration","Permuted2DConfiguration","Permuted2DConfiguration","Permuted2DConfiguration","Sliced2DConfiguration","Sliced2DConfiguration","Sliced2DConfiguration","Simple1DConfiguration","Simple1DConfiguration","Permuted1DConfiguration","Permuted1DConfiguration","Sliced1DConfiguration","Simple0DConfiguration","Sliced0DConfiguration"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Various NDConfigurations behave exactly like their general purpose implementation. 
[9]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A general purpose NDConfiguration implementation as well as a specialized one (provided by the factory method).","code":["var ndc1 = SlicedNDConfiguration.construct(shape, translation, indicesMap, spread, offset)","var ndc2 = NDConfiguration.of(shape, translation, indicesMap, spread, offset)"]}, + + {"kind":"and","text":"2 corresponding iterators:","code":["var i1 = NDIterator.of(ndc1, NDIterator.NonVirtual.FALSE)","var i2 = NDIterator.of(ndc2, NDIterator.NonVirtual.FALSE)"]}, + + {"kind":"expect","text":"","code":["ndc2.getClass() == expected","ndc1.rank() == ndc2.rank()","ndc1.size() == ndc2.size()","ndc1.shape() == ndc2.shape()","ndc1.strides() == ndc2.strides()","ndc1.indicesMap() == ndc2.indicesMap()","ndc1.spread() == ndc2.spread()","ndc1.offset() == ndc2.offset()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," ndc1.indicesOfIndex(it) == ndc2.indicesOfIndex(it)","})",".every()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," ndc1.indexOfIndices(ndc1.indicesOfIndex(it)) == ndc2.indexOfIndices(ndc2.indicesOfIndex(it))","})",".every()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," ndc1.indexOfIndex(it) == ndc2.indexOfIndex(it)","})",".every()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," boolean matches = i1.get() == i2.get()"," if ( it < ndc1.size()-1 ) { i1.increment(); i2.increment() }"," return matches","})",".every()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," boolean matches = i1.get() == i2.get()"," i1.decrement(); i2.decrement()"," return matches","})",".every()"]}, + + {"kind":"where","text":"","code":{"shape":["[2,3,8,4]","[2,3,8,4]","[2,3,8,4]","[2,3,8]","[2,3,8]","[2,3,8]","[2,3]","[2,3]","[2,3]","[2,3]","[2,3]","[2,3]","[2,3]","[3]","[42]","[3]","[30]","[2]","[1]","[1]"],"translation":["[96, 32, 4, 1]","[96, 200, 8, 1]","[96, 32, 4, 1]","[24,8,1]","[8,24,7]","[8,24,7]","[3,1]","[1,2]","[1,2]","[81,42]","[1,2]","[3,1]","[3,1]","[1]","[1]","[1]","[8]","[1]","[1]","[1]"],"indicesMap":["[96, 32, 4, 1]","[96, 32, 4, 1]","[96, 92, 4, 1]","[24,8,1]","[1,2,3]","[1,2,3]","[3,1]","[1,2]","[3,1]","[3,99]","[2,1]","[3,1]","[3,1]","[1]","[1]","[2]","[2]","[1]","[1]","[1]"],"spread":["[1,1,1,1]","[1,1,1,1]","[1,4,1,1]","[1, 1, 1]","[1, 1, 1]","[1, 7, 1]","[1, 1]","[1, 1]","[1, 1]","[1, 1]","[7, 2]","[1, 1]","[1, 2]","[1]","[1]","[1]","[1]","[1]","[1]","[1]"],"offset":["[0,0,0,0]","[0,0,0,0]","[0,0,0,0]","[0,0,0]","[0,0,0]","[0,0,0]","[0,0]","[0,0]","[0,0]","[0,0]","[1,8]","[6,0]","[0,0]","[0]","[0]","[0]","[0]","[5]","[0]","[3]"],"expected":["SimpleNDConfiguration","PermutedNDConfiguration","SlicedNDConfiguration","Simple3DConfiguration","Permuted3DConfiguration","Sliced3DConfiguration","Simple2DConfiguration","Permuted2DConfiguration","Permuted2DConfiguration","Permuted2DConfiguration","Sliced2DConfiguration","Sliced2DConfiguration","Sliced2DConfiguration","Simple1DConfiguration","Simple1DConfiguration","Permuted1DConfiguration","Permuted1DConfiguration","Sliced1DConfiguration","Simple0DConfiguration","Sliced0DConfiguration"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Various NDConfigurations behave exactly like their general purpose implementation. 
[10]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A general purpose NDConfiguration implementation as well as a specialized one (provided by the factory method).","code":["var ndc1 = SlicedNDConfiguration.construct(shape, translation, indicesMap, spread, offset)","var ndc2 = NDConfiguration.of(shape, translation, indicesMap, spread, offset)"]}, + + {"kind":"and","text":"2 corresponding iterators:","code":["var i1 = NDIterator.of(ndc1, NDIterator.NonVirtual.FALSE)","var i2 = NDIterator.of(ndc2, NDIterator.NonVirtual.FALSE)"]}, + + {"kind":"expect","text":"","code":["ndc2.getClass() == expected","ndc1.rank() == ndc2.rank()","ndc1.size() == ndc2.size()","ndc1.shape() == ndc2.shape()","ndc1.strides() == ndc2.strides()","ndc1.indicesMap() == ndc2.indicesMap()","ndc1.spread() == ndc2.spread()","ndc1.offset() == ndc2.offset()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," ndc1.indicesOfIndex(it) == ndc2.indicesOfIndex(it)","})",".every()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," ndc1.indexOfIndices(ndc1.indicesOfIndex(it)) == ndc2.indexOfIndices(ndc2.indicesOfIndex(it))","})",".every()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," ndc1.indexOfIndex(it) == ndc2.indexOfIndex(it)","})",".every()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," boolean matches = i1.get() == i2.get()"," if ( it < ndc1.size()-1 ) { i1.increment(); i2.increment() }"," return matches","})",".every()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," boolean matches = i1.get() == i2.get()"," i1.decrement(); i2.decrement()"," return matches","})",".every()"]}, + + {"kind":"where","text":"","code":{"shape":["[2,3,8,4]","[2,3,8,4]","[2,3,8,4]","[2,3,8]","[2,3,8]","[2,3,8]","[2,3]","[2,3]","[2,3]","[2,3]","[2,3]","[2,3]","[2,3]","[3]","[42]","[3]","[30]","[2]","[1]","[1]"],"translation":["[96, 32, 4, 1]","[96, 200, 8, 1]","[96, 32, 4, 1]","[24,8,1]","[8,24,7]","[8,24,7]","[3,1]","[1,2]","[1,2]","[81,42]","[1,2]","[3,1]","[3,1]","[1]","[1]","[1]","[8]","[1]","[1]","[1]"],"indicesMap":["[96, 32, 4, 1]","[96, 32, 4, 1]","[96, 92, 4, 1]","[24,8,1]","[1,2,3]","[1,2,3]","[3,1]","[1,2]","[3,1]","[3,99]","[2,1]","[3,1]","[3,1]","[1]","[1]","[2]","[2]","[1]","[1]","[1]"],"spread":["[1,1,1,1]","[1,1,1,1]","[1,4,1,1]","[1, 1, 1]","[1, 1, 1]","[1, 7, 1]","[1, 1]","[1, 1]","[1, 1]","[1, 1]","[7, 2]","[1, 1]","[1, 2]","[1]","[1]","[1]","[1]","[1]","[1]","[1]"],"offset":["[0,0,0,0]","[0,0,0,0]","[0,0,0,0]","[0,0,0]","[0,0,0]","[0,0,0]","[0,0]","[0,0]","[0,0]","[0,0]","[1,8]","[6,0]","[0,0]","[0]","[0]","[0]","[0]","[5]","[0]","[3]"],"expected":["SimpleNDConfiguration","PermutedNDConfiguration","SlicedNDConfiguration","Simple3DConfiguration","Permuted3DConfiguration","Sliced3DConfiguration","Simple2DConfiguration","Permuted2DConfiguration","Permuted2DConfiguration","Permuted2DConfiguration","Sliced2DConfiguration","Sliced2DConfiguration","Sliced2DConfiguration","Simple1DConfiguration","Simple1DConfiguration","Permuted1DConfiguration","Permuted1DConfiguration","Sliced1DConfiguration","Simple0DConfiguration","Sliced0DConfiguration"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Various NDConfigurations behave exactly like their general purpose implementation. 
[11]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A general purpose NDConfiguration implementation as well as a specialized one (provided by the factory method).","code":["var ndc1 = SlicedNDConfiguration.construct(shape, translation, indicesMap, spread, offset)","var ndc2 = NDConfiguration.of(shape, translation, indicesMap, spread, offset)"]}, + + {"kind":"and","text":"2 corresponding iterators:","code":["var i1 = NDIterator.of(ndc1, NDIterator.NonVirtual.FALSE)","var i2 = NDIterator.of(ndc2, NDIterator.NonVirtual.FALSE)"]}, + + {"kind":"expect","text":"","code":["ndc2.getClass() == expected","ndc1.rank() == ndc2.rank()","ndc1.size() == ndc2.size()","ndc1.shape() == ndc2.shape()","ndc1.strides() == ndc2.strides()","ndc1.indicesMap() == ndc2.indicesMap()","ndc1.spread() == ndc2.spread()","ndc1.offset() == ndc2.offset()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," ndc1.indicesOfIndex(it) == ndc2.indicesOfIndex(it)","})",".every()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," ndc1.indexOfIndices(ndc1.indicesOfIndex(it)) == ndc2.indexOfIndices(ndc2.indicesOfIndex(it))","})",".every()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," ndc1.indexOfIndex(it) == ndc2.indexOfIndex(it)","})",".every()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," boolean matches = i1.get() == i2.get()"," if ( it < ndc1.size()-1 ) { i1.increment(); i2.increment() }"," return matches","})",".every()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," boolean matches = i1.get() == i2.get()"," i1.decrement(); i2.decrement()"," return matches","})",".every()"]}, + + {"kind":"where","text":"","code":{"shape":["[2,3,8,4]","[2,3,8,4]","[2,3,8,4]","[2,3,8]","[2,3,8]","[2,3,8]","[2,3]","[2,3]","[2,3]","[2,3]","[2,3]","[2,3]","[2,3]","[3]","[42]","[3]","[30]","[2]","[1]","[1]"],"translation":["[96, 32, 4, 1]","[96, 200, 8, 1]","[96, 32, 4, 1]","[24,8,1]","[8,24,7]","[8,24,7]","[3,1]","[1,2]","[1,2]","[81,42]","[1,2]","[3,1]","[3,1]","[1]","[1]","[1]","[8]","[1]","[1]","[1]"],"indicesMap":["[96, 32, 4, 1]","[96, 32, 4, 1]","[96, 92, 4, 1]","[24,8,1]","[1,2,3]","[1,2,3]","[3,1]","[1,2]","[3,1]","[3,99]","[2,1]","[3,1]","[3,1]","[1]","[1]","[2]","[2]","[1]","[1]","[1]"],"spread":["[1,1,1,1]","[1,1,1,1]","[1,4,1,1]","[1, 1, 1]","[1, 1, 1]","[1, 7, 1]","[1, 1]","[1, 1]","[1, 1]","[1, 1]","[7, 2]","[1, 1]","[1, 2]","[1]","[1]","[1]","[1]","[1]","[1]","[1]"],"offset":["[0,0,0,0]","[0,0,0,0]","[0,0,0,0]","[0,0,0]","[0,0,0]","[0,0,0]","[0,0]","[0,0]","[0,0]","[0,0]","[1,8]","[6,0]","[0,0]","[0]","[0]","[0]","[0]","[5]","[0]","[3]"],"expected":["SimpleNDConfiguration","PermutedNDConfiguration","SlicedNDConfiguration","Simple3DConfiguration","Permuted3DConfiguration","Sliced3DConfiguration","Simple2DConfiguration","Permuted2DConfiguration","Permuted2DConfiguration","Permuted2DConfiguration","Sliced2DConfiguration","Sliced2DConfiguration","Sliced2DConfiguration","Simple1DConfiguration","Simple1DConfiguration","Permuted1DConfiguration","Permuted1DConfiguration","Sliced1DConfiguration","Simple0DConfiguration","Sliced0DConfiguration"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Various NDConfigurations behave exactly like their general purpose implementation. 
[12]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A general purpose NDConfiguration implementation as well as a specialized one (provided by the factory method).","code":["var ndc1 = SlicedNDConfiguration.construct(shape, translation, indicesMap, spread, offset)","var ndc2 = NDConfiguration.of(shape, translation, indicesMap, spread, offset)"]}, + + {"kind":"and","text":"2 corresponding iterators:","code":["var i1 = NDIterator.of(ndc1, NDIterator.NonVirtual.FALSE)","var i2 = NDIterator.of(ndc2, NDIterator.NonVirtual.FALSE)"]}, + + {"kind":"expect","text":"","code":["ndc2.getClass() == expected","ndc1.rank() == ndc2.rank()","ndc1.size() == ndc2.size()","ndc1.shape() == ndc2.shape()","ndc1.strides() == ndc2.strides()","ndc1.indicesMap() == ndc2.indicesMap()","ndc1.spread() == ndc2.spread()","ndc1.offset() == ndc2.offset()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," ndc1.indicesOfIndex(it) == ndc2.indicesOfIndex(it)","})",".every()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," ndc1.indexOfIndices(ndc1.indicesOfIndex(it)) == ndc2.indexOfIndices(ndc2.indicesOfIndex(it))","})",".every()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," ndc1.indexOfIndex(it) == ndc2.indexOfIndex(it)","})",".every()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," boolean matches = i1.get() == i2.get()"," if ( it < ndc1.size()-1 ) { i1.increment(); i2.increment() }"," return matches","})",".every()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," boolean matches = i1.get() == i2.get()"," i1.decrement(); i2.decrement()"," return matches","})",".every()"]}, + + {"kind":"where","text":"","code":{"shape":["[2,3,8,4]","[2,3,8,4]","[2,3,8,4]","[2,3,8]","[2,3,8]","[2,3,8]","[2,3]","[2,3]","[2,3]","[2,3]","[2,3]","[2,3]","[2,3]","[3]","[42]","[3]","[30]","[2]","[1]","[1]"],"translation":["[96, 32, 4, 1]","[96, 200, 8, 1]","[96, 32, 4, 1]","[24,8,1]","[8,24,7]","[8,24,7]","[3,1]","[1,2]","[1,2]","[81,42]","[1,2]","[3,1]","[3,1]","[1]","[1]","[1]","[8]","[1]","[1]","[1]"],"indicesMap":["[96, 32, 4, 1]","[96, 32, 4, 1]","[96, 92, 4, 1]","[24,8,1]","[1,2,3]","[1,2,3]","[3,1]","[1,2]","[3,1]","[3,99]","[2,1]","[3,1]","[3,1]","[1]","[1]","[2]","[2]","[1]","[1]","[1]"],"spread":["[1,1,1,1]","[1,1,1,1]","[1,4,1,1]","[1, 1, 1]","[1, 1, 1]","[1, 7, 1]","[1, 1]","[1, 1]","[1, 1]","[1, 1]","[7, 2]","[1, 1]","[1, 2]","[1]","[1]","[1]","[1]","[1]","[1]","[1]"],"offset":["[0,0,0,0]","[0,0,0,0]","[0,0,0,0]","[0,0,0]","[0,0,0]","[0,0,0]","[0,0]","[0,0]","[0,0]","[0,0]","[1,8]","[6,0]","[0,0]","[0]","[0]","[0]","[0]","[5]","[0]","[3]"],"expected":["SimpleNDConfiguration","PermutedNDConfiguration","SlicedNDConfiguration","Simple3DConfiguration","Permuted3DConfiguration","Sliced3DConfiguration","Simple2DConfiguration","Permuted2DConfiguration","Permuted2DConfiguration","Permuted2DConfiguration","Sliced2DConfiguration","Sliced2DConfiguration","Sliced2DConfiguration","Simple1DConfiguration","Simple1DConfiguration","Permuted1DConfiguration","Permuted1DConfiguration","Sliced1DConfiguration","Simple0DConfiguration","Sliced0DConfiguration"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Various NDConfigurations behave exactly like their general purpose implementation. 
[13]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A general purpose NDConfiguration implementation as well as a specialized one (provided by the factory method).","code":["var ndc1 = SlicedNDConfiguration.construct(shape, translation, indicesMap, spread, offset)","var ndc2 = NDConfiguration.of(shape, translation, indicesMap, spread, offset)"]}, + + {"kind":"and","text":"2 corresponding iterators:","code":["var i1 = NDIterator.of(ndc1, NDIterator.NonVirtual.FALSE)","var i2 = NDIterator.of(ndc2, NDIterator.NonVirtual.FALSE)"]}, + + {"kind":"expect","text":"","code":["ndc2.getClass() == expected","ndc1.rank() == ndc2.rank()","ndc1.size() == ndc2.size()","ndc1.shape() == ndc2.shape()","ndc1.strides() == ndc2.strides()","ndc1.indicesMap() == ndc2.indicesMap()","ndc1.spread() == ndc2.spread()","ndc1.offset() == ndc2.offset()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," ndc1.indicesOfIndex(it) == ndc2.indicesOfIndex(it)","})",".every()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," ndc1.indexOfIndices(ndc1.indicesOfIndex(it)) == ndc2.indexOfIndices(ndc2.indicesOfIndex(it))","})",".every()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," ndc1.indexOfIndex(it) == ndc2.indexOfIndex(it)","})",".every()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," boolean matches = i1.get() == i2.get()"," if ( it < ndc1.size()-1 ) { i1.increment(); i2.increment() }"," return matches","})",".every()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," boolean matches = i1.get() == i2.get()"," i1.decrement(); i2.decrement()"," return matches","})",".every()"]}, + + {"kind":"where","text":"","code":{"shape":["[2,3,8,4]","[2,3,8,4]","[2,3,8,4]","[2,3,8]","[2,3,8]","[2,3,8]","[2,3]","[2,3]","[2,3]","[2,3]","[2,3]","[2,3]","[2,3]","[3]","[42]","[3]","[30]","[2]","[1]","[1]"],"translation":["[96, 32, 4, 1]","[96, 200, 8, 1]","[96, 32, 4, 1]","[24,8,1]","[8,24,7]","[8,24,7]","[3,1]","[1,2]","[1,2]","[81,42]","[1,2]","[3,1]","[3,1]","[1]","[1]","[1]","[8]","[1]","[1]","[1]"],"indicesMap":["[96, 32, 4, 1]","[96, 32, 4, 1]","[96, 92, 4, 1]","[24,8,1]","[1,2,3]","[1,2,3]","[3,1]","[1,2]","[3,1]","[3,99]","[2,1]","[3,1]","[3,1]","[1]","[1]","[2]","[2]","[1]","[1]","[1]"],"spread":["[1,1,1,1]","[1,1,1,1]","[1,4,1,1]","[1, 1, 1]","[1, 1, 1]","[1, 7, 1]","[1, 1]","[1, 1]","[1, 1]","[1, 1]","[7, 2]","[1, 1]","[1, 2]","[1]","[1]","[1]","[1]","[1]","[1]","[1]"],"offset":["[0,0,0,0]","[0,0,0,0]","[0,0,0,0]","[0,0,0]","[0,0,0]","[0,0,0]","[0,0]","[0,0]","[0,0]","[0,0]","[1,8]","[6,0]","[0,0]","[0]","[0]","[0]","[0]","[5]","[0]","[3]"],"expected":["SimpleNDConfiguration","PermutedNDConfiguration","SlicedNDConfiguration","Simple3DConfiguration","Permuted3DConfiguration","Sliced3DConfiguration","Simple2DConfiguration","Permuted2DConfiguration","Permuted2DConfiguration","Permuted2DConfiguration","Sliced2DConfiguration","Sliced2DConfiguration","Sliced2DConfiguration","Simple1DConfiguration","Simple1DConfiguration","Permuted1DConfiguration","Permuted1DConfiguration","Sliced1DConfiguration","Simple0DConfiguration","Sliced0DConfiguration"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Various NDConfigurations behave exactly like their general purpose implementation. 
[14]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A general purpose NDConfiguration implementation as well as a specialized one (provided by the factory method).","code":["var ndc1 = SlicedNDConfiguration.construct(shape, translation, indicesMap, spread, offset)","var ndc2 = NDConfiguration.of(shape, translation, indicesMap, spread, offset)"]}, + + {"kind":"and","text":"2 corresponding iterators:","code":["var i1 = NDIterator.of(ndc1, NDIterator.NonVirtual.FALSE)","var i2 = NDIterator.of(ndc2, NDIterator.NonVirtual.FALSE)"]}, + + {"kind":"expect","text":"","code":["ndc2.getClass() == expected","ndc1.rank() == ndc2.rank()","ndc1.size() == ndc2.size()","ndc1.shape() == ndc2.shape()","ndc1.strides() == ndc2.strides()","ndc1.indicesMap() == ndc2.indicesMap()","ndc1.spread() == ndc2.spread()","ndc1.offset() == ndc2.offset()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," ndc1.indicesOfIndex(it) == ndc2.indicesOfIndex(it)","})",".every()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," ndc1.indexOfIndices(ndc1.indicesOfIndex(it)) == ndc2.indexOfIndices(ndc2.indicesOfIndex(it))","})",".every()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," ndc1.indexOfIndex(it) == ndc2.indexOfIndex(it)","})",".every()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," boolean matches = i1.get() == i2.get()"," if ( it < ndc1.size()-1 ) { i1.increment(); i2.increment() }"," return matches","})",".every()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," boolean matches = i1.get() == i2.get()"," i1.decrement(); i2.decrement()"," return matches","})",".every()"]}, + + {"kind":"where","text":"","code":{"shape":["[2,3,8,4]","[2,3,8,4]","[2,3,8,4]","[2,3,8]","[2,3,8]","[2,3,8]","[2,3]","[2,3]","[2,3]","[2,3]","[2,3]","[2,3]","[2,3]","[3]","[42]","[3]","[30]","[2]","[1]","[1]"],"translation":["[96, 32, 4, 1]","[96, 200, 8, 1]","[96, 32, 4, 1]","[24,8,1]","[8,24,7]","[8,24,7]","[3,1]","[1,2]","[1,2]","[81,42]","[1,2]","[3,1]","[3,1]","[1]","[1]","[1]","[8]","[1]","[1]","[1]"],"indicesMap":["[96, 32, 4, 1]","[96, 32, 4, 1]","[96, 92, 4, 1]","[24,8,1]","[1,2,3]","[1,2,3]","[3,1]","[1,2]","[3,1]","[3,99]","[2,1]","[3,1]","[3,1]","[1]","[1]","[2]","[2]","[1]","[1]","[1]"],"spread":["[1,1,1,1]","[1,1,1,1]","[1,4,1,1]","[1, 1, 1]","[1, 1, 1]","[1, 7, 1]","[1, 1]","[1, 1]","[1, 1]","[1, 1]","[7, 2]","[1, 1]","[1, 2]","[1]","[1]","[1]","[1]","[1]","[1]","[1]"],"offset":["[0,0,0,0]","[0,0,0,0]","[0,0,0,0]","[0,0,0]","[0,0,0]","[0,0,0]","[0,0]","[0,0]","[0,0]","[0,0]","[1,8]","[6,0]","[0,0]","[0]","[0]","[0]","[0]","[5]","[0]","[3]"],"expected":["SimpleNDConfiguration","PermutedNDConfiguration","SlicedNDConfiguration","Simple3DConfiguration","Permuted3DConfiguration","Sliced3DConfiguration","Simple2DConfiguration","Permuted2DConfiguration","Permuted2DConfiguration","Permuted2DConfiguration","Sliced2DConfiguration","Sliced2DConfiguration","Sliced2DConfiguration","Simple1DConfiguration","Simple1DConfiguration","Permuted1DConfiguration","Permuted1DConfiguration","Sliced1DConfiguration","Simple0DConfiguration","Sliced0DConfiguration"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Various NDConfigurations behave exactly like their general purpose implementation. 
[15]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A general purpose NDConfiguration implementation as well as a specialized one (provided by the factory method).","code":["var ndc1 = SlicedNDConfiguration.construct(shape, translation, indicesMap, spread, offset)","var ndc2 = NDConfiguration.of(shape, translation, indicesMap, spread, offset)"]}, + + {"kind":"and","text":"2 corresponding iterators:","code":["var i1 = NDIterator.of(ndc1, NDIterator.NonVirtual.FALSE)","var i2 = NDIterator.of(ndc2, NDIterator.NonVirtual.FALSE)"]}, + + {"kind":"expect","text":"","code":["ndc2.getClass() == expected","ndc1.rank() == ndc2.rank()","ndc1.size() == ndc2.size()","ndc1.shape() == ndc2.shape()","ndc1.strides() == ndc2.strides()","ndc1.indicesMap() == ndc2.indicesMap()","ndc1.spread() == ndc2.spread()","ndc1.offset() == ndc2.offset()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," ndc1.indicesOfIndex(it) == ndc2.indicesOfIndex(it)","})",".every()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," ndc1.indexOfIndices(ndc1.indicesOfIndex(it)) == ndc2.indexOfIndices(ndc2.indicesOfIndex(it))","})",".every()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," ndc1.indexOfIndex(it) == ndc2.indexOfIndex(it)","})",".every()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," boolean matches = i1.get() == i2.get()"," if ( it < ndc1.size()-1 ) { i1.increment(); i2.increment() }"," return matches","})",".every()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," boolean matches = i1.get() == i2.get()"," i1.decrement(); i2.decrement()"," return matches","})",".every()"]}, + + {"kind":"where","text":"","code":{"shape":["[2,3,8,4]","[2,3,8,4]","[2,3,8,4]","[2,3,8]","[2,3,8]","[2,3,8]","[2,3]","[2,3]","[2,3]","[2,3]","[2,3]","[2,3]","[2,3]","[3]","[42]","[3]","[30]","[2]","[1]","[1]"],"translation":["[96, 32, 4, 1]","[96, 200, 8, 1]","[96, 32, 4, 1]","[24,8,1]","[8,24,7]","[8,24,7]","[3,1]","[1,2]","[1,2]","[81,42]","[1,2]","[3,1]","[3,1]","[1]","[1]","[1]","[8]","[1]","[1]","[1]"],"indicesMap":["[96, 32, 4, 1]","[96, 32, 4, 1]","[96, 92, 4, 1]","[24,8,1]","[1,2,3]","[1,2,3]","[3,1]","[1,2]","[3,1]","[3,99]","[2,1]","[3,1]","[3,1]","[1]","[1]","[2]","[2]","[1]","[1]","[1]"],"spread":["[1,1,1,1]","[1,1,1,1]","[1,4,1,1]","[1, 1, 1]","[1, 1, 1]","[1, 7, 1]","[1, 1]","[1, 1]","[1, 1]","[1, 1]","[7, 2]","[1, 1]","[1, 2]","[1]","[1]","[1]","[1]","[1]","[1]","[1]"],"offset":["[0,0,0,0]","[0,0,0,0]","[0,0,0,0]","[0,0,0]","[0,0,0]","[0,0,0]","[0,0]","[0,0]","[0,0]","[0,0]","[1,8]","[6,0]","[0,0]","[0]","[0]","[0]","[0]","[5]","[0]","[3]"],"expected":["SimpleNDConfiguration","PermutedNDConfiguration","SlicedNDConfiguration","Simple3DConfiguration","Permuted3DConfiguration","Sliced3DConfiguration","Simple2DConfiguration","Permuted2DConfiguration","Permuted2DConfiguration","Permuted2DConfiguration","Sliced2DConfiguration","Sliced2DConfiguration","Sliced2DConfiguration","Simple1DConfiguration","Simple1DConfiguration","Permuted1DConfiguration","Permuted1DConfiguration","Sliced1DConfiguration","Simple0DConfiguration","Sliced0DConfiguration"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Various NDConfigurations behave exactly like their general purpose implementation. 
[16]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A general purpose NDConfiguration implementation as well as a specialized one (provided by the factory method).","code":["var ndc1 = SlicedNDConfiguration.construct(shape, translation, indicesMap, spread, offset)","var ndc2 = NDConfiguration.of(shape, translation, indicesMap, spread, offset)"]}, + + {"kind":"and","text":"2 corresponding iterators:","code":["var i1 = NDIterator.of(ndc1, NDIterator.NonVirtual.FALSE)","var i2 = NDIterator.of(ndc2, NDIterator.NonVirtual.FALSE)"]}, + + {"kind":"expect","text":"","code":["ndc2.getClass() == expected","ndc1.rank() == ndc2.rank()","ndc1.size() == ndc2.size()","ndc1.shape() == ndc2.shape()","ndc1.strides() == ndc2.strides()","ndc1.indicesMap() == ndc2.indicesMap()","ndc1.spread() == ndc2.spread()","ndc1.offset() == ndc2.offset()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," ndc1.indicesOfIndex(it) == ndc2.indicesOfIndex(it)","})",".every()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," ndc1.indexOfIndices(ndc1.indicesOfIndex(it)) == ndc2.indexOfIndices(ndc2.indicesOfIndex(it))","})",".every()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," ndc1.indexOfIndex(it) == ndc2.indexOfIndex(it)","})",".every()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," boolean matches = i1.get() == i2.get()"," if ( it < ndc1.size()-1 ) { i1.increment(); i2.increment() }"," return matches","})",".every()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," boolean matches = i1.get() == i2.get()"," i1.decrement(); i2.decrement()"," return matches","})",".every()"]}, + + {"kind":"where","text":"","code":{"shape":["[2,3,8,4]","[2,3,8,4]","[2,3,8,4]","[2,3,8]","[2,3,8]","[2,3,8]","[2,3]","[2,3]","[2,3]","[2,3]","[2,3]","[2,3]","[2,3]","[3]","[42]","[3]","[30]","[2]","[1]","[1]"],"translation":["[96, 32, 4, 1]","[96, 200, 8, 1]","[96, 32, 4, 1]","[24,8,1]","[8,24,7]","[8,24,7]","[3,1]","[1,2]","[1,2]","[81,42]","[1,2]","[3,1]","[3,1]","[1]","[1]","[1]","[8]","[1]","[1]","[1]"],"indicesMap":["[96, 32, 4, 1]","[96, 32, 4, 1]","[96, 92, 4, 1]","[24,8,1]","[1,2,3]","[1,2,3]","[3,1]","[1,2]","[3,1]","[3,99]","[2,1]","[3,1]","[3,1]","[1]","[1]","[2]","[2]","[1]","[1]","[1]"],"spread":["[1,1,1,1]","[1,1,1,1]","[1,4,1,1]","[1, 1, 1]","[1, 1, 1]","[1, 7, 1]","[1, 1]","[1, 1]","[1, 1]","[1, 1]","[7, 2]","[1, 1]","[1, 2]","[1]","[1]","[1]","[1]","[1]","[1]","[1]"],"offset":["[0,0,0,0]","[0,0,0,0]","[0,0,0,0]","[0,0,0]","[0,0,0]","[0,0,0]","[0,0]","[0,0]","[0,0]","[0,0]","[1,8]","[6,0]","[0,0]","[0]","[0]","[0]","[0]","[5]","[0]","[3]"],"expected":["SimpleNDConfiguration","PermutedNDConfiguration","SlicedNDConfiguration","Simple3DConfiguration","Permuted3DConfiguration","Sliced3DConfiguration","Simple2DConfiguration","Permuted2DConfiguration","Permuted2DConfiguration","Permuted2DConfiguration","Sliced2DConfiguration","Sliced2DConfiguration","Sliced2DConfiguration","Simple1DConfiguration","Simple1DConfiguration","Permuted1DConfiguration","Permuted1DConfiguration","Sliced1DConfiguration","Simple0DConfiguration","Sliced0DConfiguration"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Various NDConfigurations behave exactly like their general purpose implementation. 
[17]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A general purpose NDConfiguration implementation as well as a specialized one (provided by the factory method).","code":["var ndc1 = SlicedNDConfiguration.construct(shape, translation, indicesMap, spread, offset)","var ndc2 = NDConfiguration.of(shape, translation, indicesMap, spread, offset)"]}, + + {"kind":"and","text":"2 corresponding iterators:","code":["var i1 = NDIterator.of(ndc1, NDIterator.NonVirtual.FALSE)","var i2 = NDIterator.of(ndc2, NDIterator.NonVirtual.FALSE)"]}, + + {"kind":"expect","text":"","code":["ndc2.getClass() == expected","ndc1.rank() == ndc2.rank()","ndc1.size() == ndc2.size()","ndc1.shape() == ndc2.shape()","ndc1.strides() == ndc2.strides()","ndc1.indicesMap() == ndc2.indicesMap()","ndc1.spread() == ndc2.spread()","ndc1.offset() == ndc2.offset()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," ndc1.indicesOfIndex(it) == ndc2.indicesOfIndex(it)","})",".every()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," ndc1.indexOfIndices(ndc1.indicesOfIndex(it)) == ndc2.indexOfIndices(ndc2.indicesOfIndex(it))","})",".every()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," ndc1.indexOfIndex(it) == ndc2.indexOfIndex(it)","})",".every()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," boolean matches = i1.get() == i2.get()"," if ( it < ndc1.size()-1 ) { i1.increment(); i2.increment() }"," return matches","})",".every()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," boolean matches = i1.get() == i2.get()"," i1.decrement(); i2.decrement()"," return matches","})",".every()"]}, + + {"kind":"where","text":"","code":{"shape":["[2,3,8,4]","[2,3,8,4]","[2,3,8,4]","[2,3,8]","[2,3,8]","[2,3,8]","[2,3]","[2,3]","[2,3]","[2,3]","[2,3]","[2,3]","[2,3]","[3]","[42]","[3]","[30]","[2]","[1]","[1]"],"translation":["[96, 32, 4, 1]","[96, 200, 8, 1]","[96, 32, 4, 1]","[24,8,1]","[8,24,7]","[8,24,7]","[3,1]","[1,2]","[1,2]","[81,42]","[1,2]","[3,1]","[3,1]","[1]","[1]","[1]","[8]","[1]","[1]","[1]"],"indicesMap":["[96, 32, 4, 1]","[96, 32, 4, 1]","[96, 92, 4, 1]","[24,8,1]","[1,2,3]","[1,2,3]","[3,1]","[1,2]","[3,1]","[3,99]","[2,1]","[3,1]","[3,1]","[1]","[1]","[2]","[2]","[1]","[1]","[1]"],"spread":["[1,1,1,1]","[1,1,1,1]","[1,4,1,1]","[1, 1, 1]","[1, 1, 1]","[1, 7, 1]","[1, 1]","[1, 1]","[1, 1]","[1, 1]","[7, 2]","[1, 1]","[1, 2]","[1]","[1]","[1]","[1]","[1]","[1]","[1]"],"offset":["[0,0,0,0]","[0,0,0,0]","[0,0,0,0]","[0,0,0]","[0,0,0]","[0,0,0]","[0,0]","[0,0]","[0,0]","[0,0]","[1,8]","[6,0]","[0,0]","[0]","[0]","[0]","[0]","[5]","[0]","[3]"],"expected":["SimpleNDConfiguration","PermutedNDConfiguration","SlicedNDConfiguration","Simple3DConfiguration","Permuted3DConfiguration","Sliced3DConfiguration","Simple2DConfiguration","Permuted2DConfiguration","Permuted2DConfiguration","Permuted2DConfiguration","Sliced2DConfiguration","Sliced2DConfiguration","Sliced2DConfiguration","Simple1DConfiguration","Simple1DConfiguration","Permuted1DConfiguration","Permuted1DConfiguration","Sliced1DConfiguration","Simple0DConfiguration","Sliced0DConfiguration"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Various NDConfigurations behave exactly like their general purpose implementation. 
[18]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A general purpose NDConfiguration implementation as well as a specialized one (provided by the factory method).","code":["var ndc1 = SlicedNDConfiguration.construct(shape, translation, indicesMap, spread, offset)","var ndc2 = NDConfiguration.of(shape, translation, indicesMap, spread, offset)"]}, + + {"kind":"and","text":"2 corresponding iterators:","code":["var i1 = NDIterator.of(ndc1, NDIterator.NonVirtual.FALSE)","var i2 = NDIterator.of(ndc2, NDIterator.NonVirtual.FALSE)"]}, + + {"kind":"expect","text":"","code":["ndc2.getClass() == expected","ndc1.rank() == ndc2.rank()","ndc1.size() == ndc2.size()","ndc1.shape() == ndc2.shape()","ndc1.strides() == ndc2.strides()","ndc1.indicesMap() == ndc2.indicesMap()","ndc1.spread() == ndc2.spread()","ndc1.offset() == ndc2.offset()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," ndc1.indicesOfIndex(it) == ndc2.indicesOfIndex(it)","})",".every()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," ndc1.indexOfIndices(ndc1.indicesOfIndex(it)) == ndc2.indexOfIndices(ndc2.indicesOfIndex(it))","})",".every()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," ndc1.indexOfIndex(it) == ndc2.indexOfIndex(it)","})",".every()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," boolean matches = i1.get() == i2.get()"," if ( it < ndc1.size()-1 ) { i1.increment(); i2.increment() }"," return matches","})",".every()"]}, + + {"kind":"and","text":"","code":["(0..ndc1.size()-1).collect({"," boolean matches = i1.get() == i2.get()"," i1.decrement(); i2.decrement()"," return matches","})",".every()"]}, + + {"kind":"where","text":"","code":{"shape":["[2,3,8,4]","[2,3,8,4]","[2,3,8,4]","[2,3,8]","[2,3,8]","[2,3,8]","[2,3]","[2,3]","[2,3]","[2,3]","[2,3]","[2,3]","[2,3]","[3]","[42]","[3]","[30]","[2]","[1]","[1]"],"translation":["[96, 32, 4, 1]","[96, 200, 8, 1]","[96, 32, 4, 1]","[24,8,1]","[8,24,7]","[8,24,7]","[3,1]","[1,2]","[1,2]","[81,42]","[1,2]","[3,1]","[3,1]","[1]","[1]","[1]","[8]","[1]","[1]","[1]"],"indicesMap":["[96, 32, 4, 1]","[96, 32, 4, 1]","[96, 92, 4, 1]","[24,8,1]","[1,2,3]","[1,2,3]","[3,1]","[1,2]","[3,1]","[3,99]","[2,1]","[3,1]","[3,1]","[1]","[1]","[2]","[2]","[1]","[1]","[1]"],"spread":["[1,1,1,1]","[1,1,1,1]","[1,4,1,1]","[1, 1, 1]","[1, 1, 1]","[1, 7, 1]","[1, 1]","[1, 1]","[1, 1]","[1, 1]","[7, 2]","[1, 1]","[1, 2]","[1]","[1]","[1]","[1]","[1]","[1]","[1]"],"offset":["[0,0,0,0]","[0,0,0,0]","[0,0,0,0]","[0,0,0]","[0,0,0]","[0,0,0]","[0,0]","[0,0]","[0,0]","[0,0]","[1,8]","[6,0]","[0,0]","[0]","[0]","[0]","[0]","[5]","[0]","[3]"],"expected":["SimpleNDConfiguration","PermutedNDConfiguration","SlicedNDConfiguration","Simple3DConfiguration","Permuted3DConfiguration","Sliced3DConfiguration","Simple2DConfiguration","Permuted2DConfiguration","Permuted2DConfiguration","Permuted2DConfiguration","Sliced2DConfiguration","Sliced2DConfiguration","Sliced2DConfiguration","Simple1DConfiguration","Simple1DConfiguration","Permuted1DConfiguration","Permuted1DConfiguration","Sliced1DConfiguration","Simple0DConfiguration","Sliced0DConfiguration"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Various NDConfigurations behave exactly like their general purpose implementation. 
[19]", + "result":"PASS", + "duration":"0", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, diff --git a/docs/spock/reports/ut.ndim.Nda_Permute_Spec.json b/docs/spock/reports/ut.ndim.Nda_Permute_Spec.json index 8e8845c5b..d40ba5575 100644 --- a/docs/spock/reports/ut.ndim.Nda_Permute_Spec.json +++ b/docs/spock/reports/ut.ndim.Nda_Permute_Spec.json @@ -1,7 +1,7 @@ { "className":"ut.ndim.Nda_Permute_Spec", "title":"Reshaping Nd-Arrays", - "narrative":"Permuting an N-dimensional array means rearranging the dimensions/axes of the N-dimensional array.\n It returns a new nd-array with the same data as the original nd-array,\n but with the specified dimensions rearranged.\n It is very useful for example when you want to\n change the order of dimensions, for example, if you have a nd-array with dimensions (batch_size, channels, height, width),\n you can use permute() to rearrange the dimensions to (batch_size, height, width, channels).\n Another useful application of permute() is transposing a matrix.\n For example, if you have a matrix with dimensions (rows, columns),\n you can use permute() to rearrange the dimensions to (columns, rows).\n\n Permuting is a very cheap operation because it does not copy any data but merely\n creates a new view on the same data with a different access pattern.", + "narrative":"Permuting an N-dimensional array means rearranging the dimensions/axes of the N-dimensional array.\n It returns a new nd-array with the same data as the original nd-array, \n but with the specified dimensions rearranged. \n It is very useful for example when you want to\n change the order of dimensions, for example, if you have a nd-array with dimensions (batch_size, channels, height, width), \n you can use permute() to rearrange the dimensions to (batch_size, height, width, channels).\n Another useful application of permute() is transposing a matrix.\n For example, if you have a matrix with dimensions (rows, columns), \n you can use permute() to rearrange the dimensions to (columns, rows).\n\n Permuting is a very cheap operation because it does not copy any data but merely\n creates a new view on the same data with a different access pattern.", "subjects":["neureka.Nda"], "statistics":{ "runs":"2", @@ -9,14 +9,14 @@ "failures":"0", "errors":"0", "skipped":"0", - "duration":"0.003 seconds" + "duration":"0.006 seconds" }, "headers":[],"tags":{},"see":[], "features":[ { "id":"We can use the \"permute\" method to rearrange the dimensions of an nd-array.", "result":"PASS", - "duration":"0", + "duration":"0.002 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":["\n In Neureka `Nda::permute(int...)` rearranges the original nd-array according to the desired \n ordering and returns a new multidimensional rotated nd-array. 
\n The size of the returned nd-array remains the same as that of the original.\n "] }, @@ -33,7 +33,7 @@ { "id":"We can use the \"transpose\" method to transpose swap 2 dimensions.", "result":"PASS", - "duration":"0", + "duration":"0.001 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":["\n The `transpose` method is a special case of the `permute` method\n which only swaps 2 dimensions (instead of all of them).\n It is based on the algorithm of the `permute` method.\n "] }, diff --git a/docs/spock/reports/ut.ndim.Shape_Spec.json b/docs/spock/reports/ut.ndim.Shape_Spec.json index 0c2d38bc1..c47d2baff 100644 --- a/docs/spock/reports/ut.ndim.Shape_Spec.json +++ b/docs/spock/reports/ut.ndim.Shape_Spec.json @@ -9,14 +9,14 @@ "failures":"0", "errors":"0", "skipped":"0", - "duration":"0.003 seconds" + "duration":"0.019 seconds" }, "headers":[],"tags":{},"see":[], "features":[ { "id":"A shape can be created from a list of integers.", "result":"PASS", - "duration":"0", + "duration":"0.001 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -31,7 +31,7 @@ { "id":"A shape can be created from a stream of ints.", "result":"PASS", - "duration":"0", + "duration":"0.001 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -46,7 +46,7 @@ { "id":"A shape can be created from an iterable.", "result":"PASS", - "duration":"0", + "duration":"0.001 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -61,7 +61,7 @@ { "id":"A shape can be mapped to a new shape.", "result":"PASS", - "duration":"0", + "duration":"0.002 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":["\n Note that as a tuple, the shape is immutable so you cannot change its values.\n But as a monad, the shape can be mapped to a new shape\n using the \"map\" method. 
:)\n "] }, @@ -78,7 +78,7 @@ { "id":"A shape can be sliced.", "result":"PASS", - "duration":"0", + "duration":"0.001 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":["\n This is similar as the \"subList\" method of the java.util.List interface.\n It returns a new shape which is a slice of the original shape\n starting at the given index and ending at the given index.\n "] }, @@ -95,7 +95,7 @@ { "id":"Use the \"any\" or \"every\" method to check if a predicate holds for any or every value of the shape.", "result":"PASS", - "duration":"0", + "duration":"0.003 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":["\n The `Shape` class allows you to check if a condition holds for any or every value of the shape\n in a functional way by simply passing a predicate to the \"any\" or \"every\" method.\n This allows for much more readable code than using a for-loop.\n "] }, @@ -116,7 +116,7 @@ { "id":"You can use the \"count(Predicate)\" method to count the number of values that satisfy a predicate.", "result":"PASS", - "duration":"0", + "duration":"0.002 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, diff --git a/docs/spock/reports/ut.ndim.Tensor_NDConfiguration_Spec.json b/docs/spock/reports/ut.ndim.Tensor_NDConfiguration_Spec.json index 9cff1f6b9..82f5b7ba2 100644 --- a/docs/spock/reports/ut.ndim.Tensor_NDConfiguration_Spec.json +++ b/docs/spock/reports/ut.ndim.Tensor_NDConfiguration_Spec.json @@ -9,14 +9,14 @@ "failures":"0", "errors":"0", "skipped":"0", - "duration":"0.003 seconds" + "duration":"0.011 seconds" }, "headers":["\n For certain situations the \"Tensor\" class should use the correct \n implementations of said interface as configuration for internal index mapping...\n\n "],"tags":{},"see":[], "features":[ { "id":"NDConfiguration instances of tensors have expected state.", "result":"PASS", - "duration":"0", + "duration":"0.002 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -35,7 +35,7 @@ { "id":"NDConfiguration instances of tensors have expected state and behaviour.", "result":"PASS", - "duration":"0.001 seconds", + "duration":"0.006 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, diff --git a/docs/spock/reports/ut.ndim.Tensor_Permute_Spec.json b/docs/spock/reports/ut.ndim.Tensor_Permute_Spec.json index 7249e2b02..5431ef048 100644 --- a/docs/spock/reports/ut.ndim.Tensor_Permute_Spec.json +++ b/docs/spock/reports/ut.ndim.Tensor_Permute_Spec.json @@ -1,7 +1,7 @@ { "className":"ut.ndim.Tensor_Permute_Spec", "title":"Reshaping Tensors", - "narrative":"Permuting an N-dimensional array means rearranging the dimensions/axes of the N-dimensional array.\n It produces a new tensor with the same data as the original tensor,\n but with the specified dimensions rearranged.\n\n This is very useful for example when you want to\n change the order of dimensions, for example, if you have a tensor with dimensions (batch_size, channels, height, width),\n you can use permute() to rearrange the dimensions to (batch_size, height, width, channels).\n Another useful application of permute() is transposing a matrix.\n For example, if you have a matrix with dimensions (rows, columns),\n you can use permute() to rearrange the dimensions to (columns, rows).\n\n Permuting is a very cheap operation because it does not copy any data but merely\n creates a new view on the same data with a different access pattern.", + "narrative":"Permuting an N-dimensional array means rearranging the dimensions/axes of the N-dimensional array.\n It produces a new tensor with 
the same data as the original tensor, \n but with the specified dimensions rearranged. \n\n This is very useful for example when you want to\n change the order of dimensions, for example, if you have a tensor with dimensions (batch_size, channels, height, width), \n you can use permute() to rearrange the dimensions to (batch_size, height, width, channels).\n Another useful application of permute() is transposing a matrix.\n For example, if you have a matrix with dimensions (rows, columns), \n you can use permute() to rearrange the dimensions to (columns, rows).\n\n Permuting is a very cheap operation because it does not copy any data but merely\n creates a new view on the same data with a different access pattern.", "subjects":["neureka.Tensor"], "statistics":{ "runs":"2", @@ -9,14 +9,14 @@ "failures":"0", "errors":"0", "skipped":"0", - "duration":"0.003 seconds" + "duration":"0.009 seconds" }, "headers":[],"tags":{},"see":[], "features":[ { "id":"We can use the \"permute\" method to rearrange the dimensions of a tensor.", "result":"PASS", - "duration":"0.002 seconds", + "duration":"0.005 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":["\n In Neureka `Tensor::permute(int...)` rearranges the original tensor according to the desired \n ordering and returns a new multidimensional rotated tensor. \n The size of the returned tensor remains the same as that of the original.\n "] }, @@ -39,7 +39,7 @@ { "id":"When matrices are transpose, they will change their layout type as expected.", "result":"PASS", - "duration":"0", + "duration":"0.002 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, diff --git a/docs/spock/reports/ut.ndim.Tensor_Slice_Permute_Spec.json b/docs/spock/reports/ut.ndim.Tensor_Slice_Permute_Spec.json index f296f46f1..02b5cb151 100644 --- a/docs/spock/reports/ut.ndim.Tensor_Slice_Permute_Spec.json +++ b/docs/spock/reports/ut.ndim.Tensor_Slice_Permute_Spec.json @@ -1,7 +1,7 @@ { "className":"ut.ndim.Tensor_Slice_Permute_Spec", "title":"Permuting Slices of Tensors", - "narrative":"Neureka provides a convenient way to permuting tensors\n even if they are slices of other tensors sharing the same underlying data.\n This is possible because of the under the hood indexing\n abstractions provided by the `NDConfiguration` interface and its various implementations.", + "narrative":"Neureka provides a convenient way to permuting tensors\n even if they are slices of other tensors sharing the same underlying data.\n This is possible because of the under the hood indexing \n abstractions provided by the `NDConfiguration` interface and its various implementations.", "subjects":["neureka.Tensor"], "statistics":{ "runs":"3", @@ -9,14 +9,14 @@ "failures":"0", "errors":"0", "skipped":"0", - "duration":"0.005 seconds" + "duration":"0.011 seconds" }, "headers":[],"tags":{},"see":[], "features":[ { "id":"A slice of a tensor changes as expected when reshaping it.", "result":"PASS", - "duration":"0.001 seconds", + "duration":"0.003 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -45,7 +45,7 @@ { "id":"Two slices of one big tensor perform matrix multiplication flawless.", "result":"PASS", - "duration":"0.001 seconds", + "duration":"0.003 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -66,7 +66,7 @@ { "id":"Reshaping a slice works as expected.", "result":"PASS", - "duration":"0", + "duration":"0.002 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, diff --git a/docs/spock/reports/ut.neureka.Neureka_Spec.json 
b/docs/spock/reports/ut.neureka.Neureka_Spec.json index 0193f3580..e4f97362d 100644 --- a/docs/spock/reports/ut.neureka.Neureka_Spec.json +++ b/docs/spock/reports/ut.neureka.Neureka_Spec.json @@ -1,22 +1,22 @@ { "className":"ut.neureka.Neureka_Spec", "title":"The Neureka context can be used and configured as expected.", - "narrative":"This specification covers the behavior of the Neureka class which\n exposes a global API for configuring thread local contexts and library settings.\n The purpose of this is to assert that the API exposed by the Neureka class\n is both thread local and configurable.\n This specification also exists to cover standards for the Neureka library in general.", + "narrative":"This specification covers the behavior of the Neureka class which\n exposes a global API for configuring thread local contexts and library settings.\n The purpose of this is to assert that the API exposed by the Neureka class \n is both thread local and configurable.\n This specification also exists to cover standards for the Neureka library in general.", "subjects":["neureka.Neureka"], "statistics":{ - "runs":"6", + "runs":"82", "successRate":"100.0%", "failures":"0", "errors":"0", - "skipped":"0", - "duration":"8.598 seconds" + "skipped":"1", + "duration":"4.898 seconds" }, "headers":["\n This specification defines what types of settings are exposed by\n Neureka as well as more general things like how string representations\n of various library types should look like... \n "],"tags":{},"see":[], "features":[ { "id":"Neureka class instance has expected behaviour.", "result":"PASS", - "duration":"0.049 seconds", + "duration":"0.084 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -27,15 +27,294 @@ {"kind":"then","text":"This setting change applies!","code":["Neureka.get().settings().autograd().isApplyingGradientWhenTensorIsUsed()","!Neureka.get().settings().autograd().isRetainingPendingErrorForJITProp()"]}, - {"kind":"and","text":"The version number is as expected!","code":["Neureka.version()==\"1.0.0\"//version"]} + {"kind":"and","text":"The version number is as expected!","code":["Neureka.version()==\"1.0.1\"//version"]} ], "problems":{"dataValues":[], "errors":[]} }, { - "id":"Neureka settings class can be locked causing its properties to be immutable.", + "id":"Neureka settings class can be locked causing its properties to be immutable. 
[0]", "result":"PASS", - "duration":"0.337 seconds", + "duration":"0.057 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"Something used to set a property and something to get the property.","code":["def set = { it -> setter(Neureka.get().settings(), it) }","def get = { getter(Neureka.get().settings()) }"]}, + + {"kind":"expect","text":"Initially the property has the expected value.","code":["get() == value"]}, + + {"kind":"when","text":"We lock the settings object...","code":["Neureka.get().settings().setIsLocked(true)"]}, + + {"kind":"and","text":"We try to set the property to another value...","code":["set(!value)"]}, + + {"kind":"then","text":"The property is not changed!","code":["get() == value"]}, + + {"kind":"when","text":"We unlock the settings object...","code":["Neureka.get().settings().setIsLocked(false)"]}, + + {"kind":"and","text":"Again we try to set the property to another value...","code":["set(!value)"]}, + + {"kind":"then","text":"The property is changed!","code":["get() != value"]}, + + {"kind":"cleanup","text":"We reset the settings object to its original state.","code":["set(value)"]}, + + {"kind":"where","text":"The properties used are boolean types.","code":{"value":["false","true","false","true","false","false","true","false","false","false"],"getter":["{ Neureka.Settings it -> it.view().getNDPrintSettings().getIsLegacy()}","{ Neureka.Settings it -> it.view().getNDPrintSettings().getHasGradient()}","{ Neureka.Settings it -> it.view().getNDPrintSettings().getHasSlimNumbers()}","{ Neureka.Settings it -> it.view().getNDPrintSettings().getIsScientific()}","{ Neureka.Settings it -> it.ndim().isOnlyUsingDefaultNDConfiguration()}","{ Neureka.Settings it -> it.debug().isKeepingDerivativeTargetPayloads()}","{ Neureka.Settings it -> it.autograd().isPreventingInlineOperations()}","{ Neureka.Settings it -> it.autograd().isRetainingPendingErrorForJITProp()}","{ Neureka.Settings it -> it.autograd().isApplyingGradientWhenTensorIsUsed()}","{ Neureka.Settings it -> it.autograd().isApplyingGradientWhenRequested()}"],"setter":["{ Neureka.Settings s, v -> s.view().getNDPrintSettings().setIsLegacy(v)}","{ Neureka.Settings s, v -> s.view().getNDPrintSettings().setHasGradient(v)}","{ Neureka.Settings s, v -> s.view().getNDPrintSettings().setHasSlimNumbers(v)}","{ Neureka.Settings s, v -> s.view().getNDPrintSettings().setIsScientific(v)}","{ Neureka.Settings s, v -> s.ndim().setIsOnlyUsingDefaultNDConfiguration(v)}","{ Neureka.Settings s, v -> s.debug().setIsKeepingDerivativeTargetPayloads(v)}","{ Neureka.Settings s, v -> s.autograd().setIsPreventingInlineOperations(v)}","{ Neureka.Settings s, v -> s.autograd().setIsRetainingPendingErrorForJITProp(v)}","{ Neureka.Settings s, v -> s.autograd().setIsApplyingGradientWhenTensorIsUsed(v)}","{ Neureka.Settings s, v -> s.autograd().setIsApplyingGradientWhenRequested(v)}"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Neureka settings class can be locked causing its properties to be immutable. 
[1]", + "result":"PASS", + "duration":"0.077 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"Something used to set a property and something to get the property.","code":["def set = { it -> setter(Neureka.get().settings(), it) }","def get = { getter(Neureka.get().settings()) }"]}, + + {"kind":"expect","text":"Initially the property has the expected value.","code":["get() == value"]}, + + {"kind":"when","text":"We lock the settings object...","code":["Neureka.get().settings().setIsLocked(true)"]}, + + {"kind":"and","text":"We try to set the property to another value...","code":["set(!value)"]}, + + {"kind":"then","text":"The property is not changed!","code":["get() == value"]}, + + {"kind":"when","text":"We unlock the settings object...","code":["Neureka.get().settings().setIsLocked(false)"]}, + + {"kind":"and","text":"Again we try to set the property to another value...","code":["set(!value)"]}, + + {"kind":"then","text":"The property is changed!","code":["get() != value"]}, + + {"kind":"cleanup","text":"We reset the settings object to its original state.","code":["set(value)"]}, + + {"kind":"where","text":"The properties used are boolean types.","code":{"value":["false","true","false","true","false","false","true","false","false","false"],"getter":["{ Neureka.Settings it -> it.view().getNDPrintSettings().getIsLegacy()}","{ Neureka.Settings it -> it.view().getNDPrintSettings().getHasGradient()}","{ Neureka.Settings it -> it.view().getNDPrintSettings().getHasSlimNumbers()}","{ Neureka.Settings it -> it.view().getNDPrintSettings().getIsScientific()}","{ Neureka.Settings it -> it.ndim().isOnlyUsingDefaultNDConfiguration()}","{ Neureka.Settings it -> it.debug().isKeepingDerivativeTargetPayloads()}","{ Neureka.Settings it -> it.autograd().isPreventingInlineOperations()}","{ Neureka.Settings it -> it.autograd().isRetainingPendingErrorForJITProp()}","{ Neureka.Settings it -> it.autograd().isApplyingGradientWhenTensorIsUsed()}","{ Neureka.Settings it -> it.autograd().isApplyingGradientWhenRequested()}"],"setter":["{ Neureka.Settings s, v -> s.view().getNDPrintSettings().setIsLegacy(v)}","{ Neureka.Settings s, v -> s.view().getNDPrintSettings().setHasGradient(v)}","{ Neureka.Settings s, v -> s.view().getNDPrintSettings().setHasSlimNumbers(v)}","{ Neureka.Settings s, v -> s.view().getNDPrintSettings().setIsScientific(v)}","{ Neureka.Settings s, v -> s.ndim().setIsOnlyUsingDefaultNDConfiguration(v)}","{ Neureka.Settings s, v -> s.debug().setIsKeepingDerivativeTargetPayloads(v)}","{ Neureka.Settings s, v -> s.autograd().setIsPreventingInlineOperations(v)}","{ Neureka.Settings s, v -> s.autograd().setIsRetainingPendingErrorForJITProp(v)}","{ Neureka.Settings s, v -> s.autograd().setIsApplyingGradientWhenTensorIsUsed(v)}","{ Neureka.Settings s, v -> s.autograd().setIsApplyingGradientWhenRequested(v)}"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Neureka settings class can be locked causing its properties to be immutable. 
[2]", + "result":"PASS", + "duration":"0.055 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"Something used to set a property and something to get the property.","code":["def set = { it -> setter(Neureka.get().settings(), it) }","def get = { getter(Neureka.get().settings()) }"]}, + + {"kind":"expect","text":"Initially the property has the expected value.","code":["get() == value"]}, + + {"kind":"when","text":"We lock the settings object...","code":["Neureka.get().settings().setIsLocked(true)"]}, + + {"kind":"and","text":"We try to set the property to another value...","code":["set(!value)"]}, + + {"kind":"then","text":"The property is not changed!","code":["get() == value"]}, + + {"kind":"when","text":"We unlock the settings object...","code":["Neureka.get().settings().setIsLocked(false)"]}, + + {"kind":"and","text":"Again we try to set the property to another value...","code":["set(!value)"]}, + + {"kind":"then","text":"The property is changed!","code":["get() != value"]}, + + {"kind":"cleanup","text":"We reset the settings object to its original state.","code":["set(value)"]}, + + {"kind":"where","text":"The properties used are boolean types.","code":{"value":["false","true","false","true","false","false","true","false","false","false"],"getter":["{ Neureka.Settings it -> it.view().getNDPrintSettings().getIsLegacy()}","{ Neureka.Settings it -> it.view().getNDPrintSettings().getHasGradient()}","{ Neureka.Settings it -> it.view().getNDPrintSettings().getHasSlimNumbers()}","{ Neureka.Settings it -> it.view().getNDPrintSettings().getIsScientific()}","{ Neureka.Settings it -> it.ndim().isOnlyUsingDefaultNDConfiguration()}","{ Neureka.Settings it -> it.debug().isKeepingDerivativeTargetPayloads()}","{ Neureka.Settings it -> it.autograd().isPreventingInlineOperations()}","{ Neureka.Settings it -> it.autograd().isRetainingPendingErrorForJITProp()}","{ Neureka.Settings it -> it.autograd().isApplyingGradientWhenTensorIsUsed()}","{ Neureka.Settings it -> it.autograd().isApplyingGradientWhenRequested()}"],"setter":["{ Neureka.Settings s, v -> s.view().getNDPrintSettings().setIsLegacy(v)}","{ Neureka.Settings s, v -> s.view().getNDPrintSettings().setHasGradient(v)}","{ Neureka.Settings s, v -> s.view().getNDPrintSettings().setHasSlimNumbers(v)}","{ Neureka.Settings s, v -> s.view().getNDPrintSettings().setIsScientific(v)}","{ Neureka.Settings s, v -> s.ndim().setIsOnlyUsingDefaultNDConfiguration(v)}","{ Neureka.Settings s, v -> s.debug().setIsKeepingDerivativeTargetPayloads(v)}","{ Neureka.Settings s, v -> s.autograd().setIsPreventingInlineOperations(v)}","{ Neureka.Settings s, v -> s.autograd().setIsRetainingPendingErrorForJITProp(v)}","{ Neureka.Settings s, v -> s.autograd().setIsApplyingGradientWhenTensorIsUsed(v)}","{ Neureka.Settings s, v -> s.autograd().setIsApplyingGradientWhenRequested(v)}"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Neureka settings class can be locked causing its properties to be immutable. 
[3]", + "result":"PASS", + "duration":"0.055 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"Something used to set a property and something to get the property.","code":["def set = { it -> setter(Neureka.get().settings(), it) }","def get = { getter(Neureka.get().settings()) }"]}, + + {"kind":"expect","text":"Initially the property has the expected value.","code":["get() == value"]}, + + {"kind":"when","text":"We lock the settings object...","code":["Neureka.get().settings().setIsLocked(true)"]}, + + {"kind":"and","text":"We try to set the property to another value...","code":["set(!value)"]}, + + {"kind":"then","text":"The property is not changed!","code":["get() == value"]}, + + {"kind":"when","text":"We unlock the settings object...","code":["Neureka.get().settings().setIsLocked(false)"]}, + + {"kind":"and","text":"Again we try to set the property to another value...","code":["set(!value)"]}, + + {"kind":"then","text":"The property is changed!","code":["get() != value"]}, + + {"kind":"cleanup","text":"We reset the settings object to its original state.","code":["set(value)"]}, + + {"kind":"where","text":"The properties used are boolean types.","code":{"value":["false","true","false","true","false","false","true","false","false","false"],"getter":["{ Neureka.Settings it -> it.view().getNDPrintSettings().getIsLegacy()}","{ Neureka.Settings it -> it.view().getNDPrintSettings().getHasGradient()}","{ Neureka.Settings it -> it.view().getNDPrintSettings().getHasSlimNumbers()}","{ Neureka.Settings it -> it.view().getNDPrintSettings().getIsScientific()}","{ Neureka.Settings it -> it.ndim().isOnlyUsingDefaultNDConfiguration()}","{ Neureka.Settings it -> it.debug().isKeepingDerivativeTargetPayloads()}","{ Neureka.Settings it -> it.autograd().isPreventingInlineOperations()}","{ Neureka.Settings it -> it.autograd().isRetainingPendingErrorForJITProp()}","{ Neureka.Settings it -> it.autograd().isApplyingGradientWhenTensorIsUsed()}","{ Neureka.Settings it -> it.autograd().isApplyingGradientWhenRequested()}"],"setter":["{ Neureka.Settings s, v -> s.view().getNDPrintSettings().setIsLegacy(v)}","{ Neureka.Settings s, v -> s.view().getNDPrintSettings().setHasGradient(v)}","{ Neureka.Settings s, v -> s.view().getNDPrintSettings().setHasSlimNumbers(v)}","{ Neureka.Settings s, v -> s.view().getNDPrintSettings().setIsScientific(v)}","{ Neureka.Settings s, v -> s.ndim().setIsOnlyUsingDefaultNDConfiguration(v)}","{ Neureka.Settings s, v -> s.debug().setIsKeepingDerivativeTargetPayloads(v)}","{ Neureka.Settings s, v -> s.autograd().setIsPreventingInlineOperations(v)}","{ Neureka.Settings s, v -> s.autograd().setIsRetainingPendingErrorForJITProp(v)}","{ Neureka.Settings s, v -> s.autograd().setIsApplyingGradientWhenTensorIsUsed(v)}","{ Neureka.Settings s, v -> s.autograd().setIsApplyingGradientWhenRequested(v)}"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Neureka settings class can be locked causing its properties to be immutable. 
[4]", + "result":"PASS", + "duration":"0.054 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"Something used to set a property and something to get the property.","code":["def set = { it -> setter(Neureka.get().settings(), it) }","def get = { getter(Neureka.get().settings()) }"]}, + + {"kind":"expect","text":"Initially the property has the expected value.","code":["get() == value"]}, + + {"kind":"when","text":"We lock the settings object...","code":["Neureka.get().settings().setIsLocked(true)"]}, + + {"kind":"and","text":"We try to set the property to another value...","code":["set(!value)"]}, + + {"kind":"then","text":"The property is not changed!","code":["get() == value"]}, + + {"kind":"when","text":"We unlock the settings object...","code":["Neureka.get().settings().setIsLocked(false)"]}, + + {"kind":"and","text":"Again we try to set the property to another value...","code":["set(!value)"]}, + + {"kind":"then","text":"The property is changed!","code":["get() != value"]}, + + {"kind":"cleanup","text":"We reset the settings object to its original state.","code":["set(value)"]}, + + {"kind":"where","text":"The properties used are boolean types.","code":{"value":["false","true","false","true","false","false","true","false","false","false"],"getter":["{ Neureka.Settings it -> it.view().getNDPrintSettings().getIsLegacy()}","{ Neureka.Settings it -> it.view().getNDPrintSettings().getHasGradient()}","{ Neureka.Settings it -> it.view().getNDPrintSettings().getHasSlimNumbers()}","{ Neureka.Settings it -> it.view().getNDPrintSettings().getIsScientific()}","{ Neureka.Settings it -> it.ndim().isOnlyUsingDefaultNDConfiguration()}","{ Neureka.Settings it -> it.debug().isKeepingDerivativeTargetPayloads()}","{ Neureka.Settings it -> it.autograd().isPreventingInlineOperations()}","{ Neureka.Settings it -> it.autograd().isRetainingPendingErrorForJITProp()}","{ Neureka.Settings it -> it.autograd().isApplyingGradientWhenTensorIsUsed()}","{ Neureka.Settings it -> it.autograd().isApplyingGradientWhenRequested()}"],"setter":["{ Neureka.Settings s, v -> s.view().getNDPrintSettings().setIsLegacy(v)}","{ Neureka.Settings s, v -> s.view().getNDPrintSettings().setHasGradient(v)}","{ Neureka.Settings s, v -> s.view().getNDPrintSettings().setHasSlimNumbers(v)}","{ Neureka.Settings s, v -> s.view().getNDPrintSettings().setIsScientific(v)}","{ Neureka.Settings s, v -> s.ndim().setIsOnlyUsingDefaultNDConfiguration(v)}","{ Neureka.Settings s, v -> s.debug().setIsKeepingDerivativeTargetPayloads(v)}","{ Neureka.Settings s, v -> s.autograd().setIsPreventingInlineOperations(v)}","{ Neureka.Settings s, v -> s.autograd().setIsRetainingPendingErrorForJITProp(v)}","{ Neureka.Settings s, v -> s.autograd().setIsApplyingGradientWhenTensorIsUsed(v)}","{ Neureka.Settings s, v -> s.autograd().setIsApplyingGradientWhenRequested(v)}"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Neureka settings class can be locked causing its properties to be immutable. 
[5]", + "result":"PASS", + "duration":"0.055 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"Something used to set a property and something to get the property.","code":["def set = { it -> setter(Neureka.get().settings(), it) }","def get = { getter(Neureka.get().settings()) }"]}, + + {"kind":"expect","text":"Initially the property has the expected value.","code":["get() == value"]}, + + {"kind":"when","text":"We lock the settings object...","code":["Neureka.get().settings().setIsLocked(true)"]}, + + {"kind":"and","text":"We try to set the property to another value...","code":["set(!value)"]}, + + {"kind":"then","text":"The property is not changed!","code":["get() == value"]}, + + {"kind":"when","text":"We unlock the settings object...","code":["Neureka.get().settings().setIsLocked(false)"]}, + + {"kind":"and","text":"Again we try to set the property to another value...","code":["set(!value)"]}, + + {"kind":"then","text":"The property is changed!","code":["get() != value"]}, + + {"kind":"cleanup","text":"We reset the settings object to its original state.","code":["set(value)"]}, + + {"kind":"where","text":"The properties used are boolean types.","code":{"value":["false","true","false","true","false","false","true","false","false","false"],"getter":["{ Neureka.Settings it -> it.view().getNDPrintSettings().getIsLegacy()}","{ Neureka.Settings it -> it.view().getNDPrintSettings().getHasGradient()}","{ Neureka.Settings it -> it.view().getNDPrintSettings().getHasSlimNumbers()}","{ Neureka.Settings it -> it.view().getNDPrintSettings().getIsScientific()}","{ Neureka.Settings it -> it.ndim().isOnlyUsingDefaultNDConfiguration()}","{ Neureka.Settings it -> it.debug().isKeepingDerivativeTargetPayloads()}","{ Neureka.Settings it -> it.autograd().isPreventingInlineOperations()}","{ Neureka.Settings it -> it.autograd().isRetainingPendingErrorForJITProp()}","{ Neureka.Settings it -> it.autograd().isApplyingGradientWhenTensorIsUsed()}","{ Neureka.Settings it -> it.autograd().isApplyingGradientWhenRequested()}"],"setter":["{ Neureka.Settings s, v -> s.view().getNDPrintSettings().setIsLegacy(v)}","{ Neureka.Settings s, v -> s.view().getNDPrintSettings().setHasGradient(v)}","{ Neureka.Settings s, v -> s.view().getNDPrintSettings().setHasSlimNumbers(v)}","{ Neureka.Settings s, v -> s.view().getNDPrintSettings().setIsScientific(v)}","{ Neureka.Settings s, v -> s.ndim().setIsOnlyUsingDefaultNDConfiguration(v)}","{ Neureka.Settings s, v -> s.debug().setIsKeepingDerivativeTargetPayloads(v)}","{ Neureka.Settings s, v -> s.autograd().setIsPreventingInlineOperations(v)}","{ Neureka.Settings s, v -> s.autograd().setIsRetainingPendingErrorForJITProp(v)}","{ Neureka.Settings s, v -> s.autograd().setIsApplyingGradientWhenTensorIsUsed(v)}","{ Neureka.Settings s, v -> s.autograd().setIsApplyingGradientWhenRequested(v)}"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Neureka settings class can be locked causing its properties to be immutable. 
[6]", + "result":"PASS", + "duration":"0.054 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"Something used to set a property and something to get the property.","code":["def set = { it -> setter(Neureka.get().settings(), it) }","def get = { getter(Neureka.get().settings()) }"]}, + + {"kind":"expect","text":"Initially the property has the expected value.","code":["get() == value"]}, + + {"kind":"when","text":"We lock the settings object...","code":["Neureka.get().settings().setIsLocked(true)"]}, + + {"kind":"and","text":"We try to set the property to another value...","code":["set(!value)"]}, + + {"kind":"then","text":"The property is not changed!","code":["get() == value"]}, + + {"kind":"when","text":"We unlock the settings object...","code":["Neureka.get().settings().setIsLocked(false)"]}, + + {"kind":"and","text":"Again we try to set the property to another value...","code":["set(!value)"]}, + + {"kind":"then","text":"The property is changed!","code":["get() != value"]}, + + {"kind":"cleanup","text":"We reset the settings object to its original state.","code":["set(value)"]}, + + {"kind":"where","text":"The properties used are boolean types.","code":{"value":["false","true","false","true","false","false","true","false","false","false"],"getter":["{ Neureka.Settings it -> it.view().getNDPrintSettings().getIsLegacy()}","{ Neureka.Settings it -> it.view().getNDPrintSettings().getHasGradient()}","{ Neureka.Settings it -> it.view().getNDPrintSettings().getHasSlimNumbers()}","{ Neureka.Settings it -> it.view().getNDPrintSettings().getIsScientific()}","{ Neureka.Settings it -> it.ndim().isOnlyUsingDefaultNDConfiguration()}","{ Neureka.Settings it -> it.debug().isKeepingDerivativeTargetPayloads()}","{ Neureka.Settings it -> it.autograd().isPreventingInlineOperations()}","{ Neureka.Settings it -> it.autograd().isRetainingPendingErrorForJITProp()}","{ Neureka.Settings it -> it.autograd().isApplyingGradientWhenTensorIsUsed()}","{ Neureka.Settings it -> it.autograd().isApplyingGradientWhenRequested()}"],"setter":["{ Neureka.Settings s, v -> s.view().getNDPrintSettings().setIsLegacy(v)}","{ Neureka.Settings s, v -> s.view().getNDPrintSettings().setHasGradient(v)}","{ Neureka.Settings s, v -> s.view().getNDPrintSettings().setHasSlimNumbers(v)}","{ Neureka.Settings s, v -> s.view().getNDPrintSettings().setIsScientific(v)}","{ Neureka.Settings s, v -> s.ndim().setIsOnlyUsingDefaultNDConfiguration(v)}","{ Neureka.Settings s, v -> s.debug().setIsKeepingDerivativeTargetPayloads(v)}","{ Neureka.Settings s, v -> s.autograd().setIsPreventingInlineOperations(v)}","{ Neureka.Settings s, v -> s.autograd().setIsRetainingPendingErrorForJITProp(v)}","{ Neureka.Settings s, v -> s.autograd().setIsApplyingGradientWhenTensorIsUsed(v)}","{ Neureka.Settings s, v -> s.autograd().setIsApplyingGradientWhenRequested(v)}"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Neureka settings class can be locked causing its properties to be immutable. 
[7]", + "result":"PASS", + "duration":"0.055 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"Something used to set a property and something to get the property.","code":["def set = { it -> setter(Neureka.get().settings(), it) }","def get = { getter(Neureka.get().settings()) }"]}, + + {"kind":"expect","text":"Initially the property has the expected value.","code":["get() == value"]}, + + {"kind":"when","text":"We lock the settings object...","code":["Neureka.get().settings().setIsLocked(true)"]}, + + {"kind":"and","text":"We try to set the property to another value...","code":["set(!value)"]}, + + {"kind":"then","text":"The property is not changed!","code":["get() == value"]}, + + {"kind":"when","text":"We unlock the settings object...","code":["Neureka.get().settings().setIsLocked(false)"]}, + + {"kind":"and","text":"Again we try to set the property to another value...","code":["set(!value)"]}, + + {"kind":"then","text":"The property is changed!","code":["get() != value"]}, + + {"kind":"cleanup","text":"We reset the settings object to its original state.","code":["set(value)"]}, + + {"kind":"where","text":"The properties used are boolean types.","code":{"value":["false","true","false","true","false","false","true","false","false","false"],"getter":["{ Neureka.Settings it -> it.view().getNDPrintSettings().getIsLegacy()}","{ Neureka.Settings it -> it.view().getNDPrintSettings().getHasGradient()}","{ Neureka.Settings it -> it.view().getNDPrintSettings().getHasSlimNumbers()}","{ Neureka.Settings it -> it.view().getNDPrintSettings().getIsScientific()}","{ Neureka.Settings it -> it.ndim().isOnlyUsingDefaultNDConfiguration()}","{ Neureka.Settings it -> it.debug().isKeepingDerivativeTargetPayloads()}","{ Neureka.Settings it -> it.autograd().isPreventingInlineOperations()}","{ Neureka.Settings it -> it.autograd().isRetainingPendingErrorForJITProp()}","{ Neureka.Settings it -> it.autograd().isApplyingGradientWhenTensorIsUsed()}","{ Neureka.Settings it -> it.autograd().isApplyingGradientWhenRequested()}"],"setter":["{ Neureka.Settings s, v -> s.view().getNDPrintSettings().setIsLegacy(v)}","{ Neureka.Settings s, v -> s.view().getNDPrintSettings().setHasGradient(v)}","{ Neureka.Settings s, v -> s.view().getNDPrintSettings().setHasSlimNumbers(v)}","{ Neureka.Settings s, v -> s.view().getNDPrintSettings().setIsScientific(v)}","{ Neureka.Settings s, v -> s.ndim().setIsOnlyUsingDefaultNDConfiguration(v)}","{ Neureka.Settings s, v -> s.debug().setIsKeepingDerivativeTargetPayloads(v)}","{ Neureka.Settings s, v -> s.autograd().setIsPreventingInlineOperations(v)}","{ Neureka.Settings s, v -> s.autograd().setIsRetainingPendingErrorForJITProp(v)}","{ Neureka.Settings s, v -> s.autograd().setIsApplyingGradientWhenTensorIsUsed(v)}","{ Neureka.Settings s, v -> s.autograd().setIsApplyingGradientWhenRequested(v)}"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Neureka settings class can be locked causing its properties to be immutable. 
[8]", + "result":"PASS", + "duration":"0.055 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"Something used to set a property and something to get the property.","code":["def set = { it -> setter(Neureka.get().settings(), it) }","def get = { getter(Neureka.get().settings()) }"]}, + + {"kind":"expect","text":"Initially the property has the expected value.","code":["get() == value"]}, + + {"kind":"when","text":"We lock the settings object...","code":["Neureka.get().settings().setIsLocked(true)"]}, + + {"kind":"and","text":"We try to set the property to another value...","code":["set(!value)"]}, + + {"kind":"then","text":"The property is not changed!","code":["get() == value"]}, + + {"kind":"when","text":"We unlock the settings object...","code":["Neureka.get().settings().setIsLocked(false)"]}, + + {"kind":"and","text":"Again we try to set the property to another value...","code":["set(!value)"]}, + + {"kind":"then","text":"The property is changed!","code":["get() != value"]}, + + {"kind":"cleanup","text":"We reset the settings object to its original state.","code":["set(value)"]}, + + {"kind":"where","text":"The properties used are boolean types.","code":{"value":["false","true","false","true","false","false","true","false","false","false"],"getter":["{ Neureka.Settings it -> it.view().getNDPrintSettings().getIsLegacy()}","{ Neureka.Settings it -> it.view().getNDPrintSettings().getHasGradient()}","{ Neureka.Settings it -> it.view().getNDPrintSettings().getHasSlimNumbers()}","{ Neureka.Settings it -> it.view().getNDPrintSettings().getIsScientific()}","{ Neureka.Settings it -> it.ndim().isOnlyUsingDefaultNDConfiguration()}","{ Neureka.Settings it -> it.debug().isKeepingDerivativeTargetPayloads()}","{ Neureka.Settings it -> it.autograd().isPreventingInlineOperations()}","{ Neureka.Settings it -> it.autograd().isRetainingPendingErrorForJITProp()}","{ Neureka.Settings it -> it.autograd().isApplyingGradientWhenTensorIsUsed()}","{ Neureka.Settings it -> it.autograd().isApplyingGradientWhenRequested()}"],"setter":["{ Neureka.Settings s, v -> s.view().getNDPrintSettings().setIsLegacy(v)}","{ Neureka.Settings s, v -> s.view().getNDPrintSettings().setHasGradient(v)}","{ Neureka.Settings s, v -> s.view().getNDPrintSettings().setHasSlimNumbers(v)}","{ Neureka.Settings s, v -> s.view().getNDPrintSettings().setIsScientific(v)}","{ Neureka.Settings s, v -> s.ndim().setIsOnlyUsingDefaultNDConfiguration(v)}","{ Neureka.Settings s, v -> s.debug().setIsKeepingDerivativeTargetPayloads(v)}","{ Neureka.Settings s, v -> s.autograd().setIsPreventingInlineOperations(v)}","{ Neureka.Settings s, v -> s.autograd().setIsRetainingPendingErrorForJITProp(v)}","{ Neureka.Settings s, v -> s.autograd().setIsApplyingGradientWhenTensorIsUsed(v)}","{ Neureka.Settings s, v -> s.autograd().setIsApplyingGradientWhenRequested(v)}"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Neureka settings class can be locked causing its properties to be immutable. 
[9]", + "result":"PASS", + "duration":"0.054 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -66,58 +345,1170 @@ { "id":"Every Thread instance has their own Neureka instance.", "result":"PASS", - "duration":"5.725 seconds", + "duration":"0.067 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A map containing entries for Neureka instances.","code":["def map = ['instance 1':null, 'instance 2':null]"]}, + + {"kind":"when","text":"Two newly instantiated tensors store their Neureka instances in the map.","code":["def t1 = new Thread({ map['instance 1'] = Neureka.get() })","def t2 = new Thread({ map['instance 2'] = Neureka.get() })"]}, + + {"kind":"and","text":"The tensors are being started and joined.","code":["t1.start()","t2.start()","t1.join()","t2.join()"]}, + + {"kind":"then","text":"The map entries will no longer be filled with null.","code":["map['instance 1'] != null","map['instance 2'] != null"]}, + + {"kind":"and","text":"The Neureka instances stored in the map will be different objects.","code":["map['instance 1'] != map['instance 2']"]} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Various library objects adhere to the same toString formatting convention! [0]", + "result":"PASS", + "duration":"0.059 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"The provided object matches the following pattern defining a common standard!","code":["toStringStandard.matcher(neurekaObject.toString()).matches()"]}, + + {"kind":"where","text":"The following objects are being used..","code":{"neurekaObject":["CPU.get()","DataType.of(String)","Relation.newParentToChildren()","new JITProp<>(] as Set)","Neureka.get()","Neureka.get().settings()","Neureka.get().settings().autograd()","Neureka.get().settings().debug()","Neureka.get().settings().dtype()","Neureka.get().settings().ndim()","Neureka.get().settings().view()","Neureka.get().backend().getAutogradFunction()","Neureka.get().backend().getFunction()","Neureka.get().backend()","Neureka.get().backend().getFunctionCache()","ExecutionCall.of(Tensor.of(3d)).running(Neureka.get().backend().getOperation(\"+\")).on(CPU.get())","new CustomDeviceCleaner()","(Tensor.of(2d).setRqsGradient(true)* Tensor.of(-2d)).graphNode.get()","FileDevice.at('.')","NDConfiguration.of((int])2,3,8,4],(int])96, 32, 4, 1],(int])96, 32, 4, 1],(int])1,1,1,1],(int])0,0,0,0])","NDConfiguration.of((int])2,3,8,4],(int])96, 200, 8, 1],(int])96, 32, 4, 1],(int])1,1,1,1],(int])0,0,0,0])","NDConfiguration.of((int])2,3,8,4],(int])96, 32, 4, 1],(int])96, 92, 4, 1],(int])1,4,1,1],(int])0,0,0,0])","NDConfiguration.of((int])2,3,8],(int])24,8,1],(int])24,8,1],(int])1, 1, 1],(int])0,0,0])"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Various library objects adhere to the same toString formatting convention! 
[1]", + "result":"PASS", + "duration":"0.055 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"The provided object matches the following pattern defining a common standard!","code":["toStringStandard.matcher(neurekaObject.toString()).matches()"]}, + + {"kind":"where","text":"The following objects are being used..","code":{"neurekaObject":["CPU.get()","DataType.of(String)","Relation.newParentToChildren()","new JITProp<>(] as Set)","Neureka.get()","Neureka.get().settings()","Neureka.get().settings().autograd()","Neureka.get().settings().debug()","Neureka.get().settings().dtype()","Neureka.get().settings().ndim()","Neureka.get().settings().view()","Neureka.get().backend().getAutogradFunction()","Neureka.get().backend().getFunction()","Neureka.get().backend()","Neureka.get().backend().getFunctionCache()","ExecutionCall.of(Tensor.of(3d)).running(Neureka.get().backend().getOperation(\"+\")).on(CPU.get())","new CustomDeviceCleaner()","(Tensor.of(2d).setRqsGradient(true)* Tensor.of(-2d)).graphNode.get()","FileDevice.at('.')","NDConfiguration.of((int])2,3,8,4],(int])96, 32, 4, 1],(int])96, 32, 4, 1],(int])1,1,1,1],(int])0,0,0,0])","NDConfiguration.of((int])2,3,8,4],(int])96, 200, 8, 1],(int])96, 32, 4, 1],(int])1,1,1,1],(int])0,0,0,0])","NDConfiguration.of((int])2,3,8,4],(int])96, 32, 4, 1],(int])96, 92, 4, 1],(int])1,4,1,1],(int])0,0,0,0])","NDConfiguration.of((int])2,3,8],(int])24,8,1],(int])24,8,1],(int])1, 1, 1],(int])0,0,0])"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Various library objects adhere to the same toString formatting convention! [2]", + "result":"PASS", + "duration":"0.053 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"The provided object matches the following pattern defining a common standard!","code":["toStringStandard.matcher(neurekaObject.toString()).matches()"]}, + + {"kind":"where","text":"The following objects are being used..","code":{"neurekaObject":["CPU.get()","DataType.of(String)","Relation.newParentToChildren()","new JITProp<>(] as Set)","Neureka.get()","Neureka.get().settings()","Neureka.get().settings().autograd()","Neureka.get().settings().debug()","Neureka.get().settings().dtype()","Neureka.get().settings().ndim()","Neureka.get().settings().view()","Neureka.get().backend().getAutogradFunction()","Neureka.get().backend().getFunction()","Neureka.get().backend()","Neureka.get().backend().getFunctionCache()","ExecutionCall.of(Tensor.of(3d)).running(Neureka.get().backend().getOperation(\"+\")).on(CPU.get())","new CustomDeviceCleaner()","(Tensor.of(2d).setRqsGradient(true)* Tensor.of(-2d)).graphNode.get()","FileDevice.at('.')","NDConfiguration.of((int])2,3,8,4],(int])96, 32, 4, 1],(int])96, 32, 4, 1],(int])1,1,1,1],(int])0,0,0,0])","NDConfiguration.of((int])2,3,8,4],(int])96, 200, 8, 1],(int])96, 32, 4, 1],(int])1,1,1,1],(int])0,0,0,0])","NDConfiguration.of((int])2,3,8,4],(int])96, 32, 4, 1],(int])96, 92, 4, 1],(int])1,4,1,1],(int])0,0,0,0])","NDConfiguration.of((int])2,3,8],(int])24,8,1],(int])24,8,1],(int])1, 1, 1],(int])0,0,0])"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Various library objects adhere to the same toString formatting convention! 
[3]", + "result":"PASS", + "duration":"0.052 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"The provided object matches the following pattern defining a common standard!","code":["toStringStandard.matcher(neurekaObject.toString()).matches()"]}, + + {"kind":"where","text":"The following objects are being used..","code":{"neurekaObject":["CPU.get()","DataType.of(String)","Relation.newParentToChildren()","new JITProp<>(] as Set)","Neureka.get()","Neureka.get().settings()","Neureka.get().settings().autograd()","Neureka.get().settings().debug()","Neureka.get().settings().dtype()","Neureka.get().settings().ndim()","Neureka.get().settings().view()","Neureka.get().backend().getAutogradFunction()","Neureka.get().backend().getFunction()","Neureka.get().backend()","Neureka.get().backend().getFunctionCache()","ExecutionCall.of(Tensor.of(3d)).running(Neureka.get().backend().getOperation(\"+\")).on(CPU.get())","new CustomDeviceCleaner()","(Tensor.of(2d).setRqsGradient(true)* Tensor.of(-2d)).graphNode.get()","FileDevice.at('.')","NDConfiguration.of((int])2,3,8,4],(int])96, 32, 4, 1],(int])96, 32, 4, 1],(int])1,1,1,1],(int])0,0,0,0])","NDConfiguration.of((int])2,3,8,4],(int])96, 200, 8, 1],(int])96, 32, 4, 1],(int])1,1,1,1],(int])0,0,0,0])","NDConfiguration.of((int])2,3,8,4],(int])96, 32, 4, 1],(int])96, 92, 4, 1],(int])1,4,1,1],(int])0,0,0,0])","NDConfiguration.of((int])2,3,8],(int])24,8,1],(int])24,8,1],(int])1, 1, 1],(int])0,0,0])"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Various library objects adhere to the same toString formatting convention! [4]", + "result":"PASS", + "duration":"0.053 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"The provided object matches the following pattern defining a common standard!","code":["toStringStandard.matcher(neurekaObject.toString()).matches()"]}, + + {"kind":"where","text":"The following objects are being used..","code":{"neurekaObject":["CPU.get()","DataType.of(String)","Relation.newParentToChildren()","new JITProp<>(] as Set)","Neureka.get()","Neureka.get().settings()","Neureka.get().settings().autograd()","Neureka.get().settings().debug()","Neureka.get().settings().dtype()","Neureka.get().settings().ndim()","Neureka.get().settings().view()","Neureka.get().backend().getAutogradFunction()","Neureka.get().backend().getFunction()","Neureka.get().backend()","Neureka.get().backend().getFunctionCache()","ExecutionCall.of(Tensor.of(3d)).running(Neureka.get().backend().getOperation(\"+\")).on(CPU.get())","new CustomDeviceCleaner()","(Tensor.of(2d).setRqsGradient(true)* Tensor.of(-2d)).graphNode.get()","FileDevice.at('.')","NDConfiguration.of((int])2,3,8,4],(int])96, 32, 4, 1],(int])96, 32, 4, 1],(int])1,1,1,1],(int])0,0,0,0])","NDConfiguration.of((int])2,3,8,4],(int])96, 200, 8, 1],(int])96, 32, 4, 1],(int])1,1,1,1],(int])0,0,0,0])","NDConfiguration.of((int])2,3,8,4],(int])96, 32, 4, 1],(int])96, 92, 4, 1],(int])1,4,1,1],(int])0,0,0,0])","NDConfiguration.of((int])2,3,8],(int])24,8,1],(int])24,8,1],(int])1, 1, 1],(int])0,0,0])"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Various library objects adhere to the same toString formatting convention! 
[5]", + "result":"PASS", + "duration":"0.052 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"The provided object matches the following pattern defining a common standard!","code":["toStringStandard.matcher(neurekaObject.toString()).matches()"]}, + + {"kind":"where","text":"The following objects are being used..","code":{"neurekaObject":["CPU.get()","DataType.of(String)","Relation.newParentToChildren()","new JITProp<>(] as Set)","Neureka.get()","Neureka.get().settings()","Neureka.get().settings().autograd()","Neureka.get().settings().debug()","Neureka.get().settings().dtype()","Neureka.get().settings().ndim()","Neureka.get().settings().view()","Neureka.get().backend().getAutogradFunction()","Neureka.get().backend().getFunction()","Neureka.get().backend()","Neureka.get().backend().getFunctionCache()","ExecutionCall.of(Tensor.of(3d)).running(Neureka.get().backend().getOperation(\"+\")).on(CPU.get())","new CustomDeviceCleaner()","(Tensor.of(2d).setRqsGradient(true)* Tensor.of(-2d)).graphNode.get()","FileDevice.at('.')","NDConfiguration.of((int])2,3,8,4],(int])96, 32, 4, 1],(int])96, 32, 4, 1],(int])1,1,1,1],(int])0,0,0,0])","NDConfiguration.of((int])2,3,8,4],(int])96, 200, 8, 1],(int])96, 32, 4, 1],(int])1,1,1,1],(int])0,0,0,0])","NDConfiguration.of((int])2,3,8,4],(int])96, 32, 4, 1],(int])96, 92, 4, 1],(int])1,4,1,1],(int])0,0,0,0])","NDConfiguration.of((int])2,3,8],(int])24,8,1],(int])24,8,1],(int])1, 1, 1],(int])0,0,0])"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Various library objects adhere to the same toString formatting convention! [6]", + "result":"PASS", + "duration":"0.053 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"The provided object matches the following pattern defining a common standard!","code":["toStringStandard.matcher(neurekaObject.toString()).matches()"]}, + + {"kind":"where","text":"The following objects are being used..","code":{"neurekaObject":["CPU.get()","DataType.of(String)","Relation.newParentToChildren()","new JITProp<>(] as Set)","Neureka.get()","Neureka.get().settings()","Neureka.get().settings().autograd()","Neureka.get().settings().debug()","Neureka.get().settings().dtype()","Neureka.get().settings().ndim()","Neureka.get().settings().view()","Neureka.get().backend().getAutogradFunction()","Neureka.get().backend().getFunction()","Neureka.get().backend()","Neureka.get().backend().getFunctionCache()","ExecutionCall.of(Tensor.of(3d)).running(Neureka.get().backend().getOperation(\"+\")).on(CPU.get())","new CustomDeviceCleaner()","(Tensor.of(2d).setRqsGradient(true)* Tensor.of(-2d)).graphNode.get()","FileDevice.at('.')","NDConfiguration.of((int])2,3,8,4],(int])96, 32, 4, 1],(int])96, 32, 4, 1],(int])1,1,1,1],(int])0,0,0,0])","NDConfiguration.of((int])2,3,8,4],(int])96, 200, 8, 1],(int])96, 32, 4, 1],(int])1,1,1,1],(int])0,0,0,0])","NDConfiguration.of((int])2,3,8,4],(int])96, 32, 4, 1],(int])96, 92, 4, 1],(int])1,4,1,1],(int])0,0,0,0])","NDConfiguration.of((int])2,3,8],(int])24,8,1],(int])24,8,1],(int])1, 1, 1],(int])0,0,0])"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Various library objects adhere to the same toString formatting convention! 
[7]", + "result":"PASS", + "duration":"0.053 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"The provided object matches the following pattern defining a common standard!","code":["toStringStandard.matcher(neurekaObject.toString()).matches()"]}, + + {"kind":"where","text":"The following objects are being used..","code":{"neurekaObject":["CPU.get()","DataType.of(String)","Relation.newParentToChildren()","new JITProp<>(] as Set)","Neureka.get()","Neureka.get().settings()","Neureka.get().settings().autograd()","Neureka.get().settings().debug()","Neureka.get().settings().dtype()","Neureka.get().settings().ndim()","Neureka.get().settings().view()","Neureka.get().backend().getAutogradFunction()","Neureka.get().backend().getFunction()","Neureka.get().backend()","Neureka.get().backend().getFunctionCache()","ExecutionCall.of(Tensor.of(3d)).running(Neureka.get().backend().getOperation(\"+\")).on(CPU.get())","new CustomDeviceCleaner()","(Tensor.of(2d).setRqsGradient(true)* Tensor.of(-2d)).graphNode.get()","FileDevice.at('.')","NDConfiguration.of((int])2,3,8,4],(int])96, 32, 4, 1],(int])96, 32, 4, 1],(int])1,1,1,1],(int])0,0,0,0])","NDConfiguration.of((int])2,3,8,4],(int])96, 200, 8, 1],(int])96, 32, 4, 1],(int])1,1,1,1],(int])0,0,0,0])","NDConfiguration.of((int])2,3,8,4],(int])96, 32, 4, 1],(int])96, 92, 4, 1],(int])1,4,1,1],(int])0,0,0,0])","NDConfiguration.of((int])2,3,8],(int])24,8,1],(int])24,8,1],(int])1, 1, 1],(int])0,0,0])"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Various library objects adhere to the same toString formatting convention! [8]", + "result":"PASS", + "duration":"0.054 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"The provided object matches the following pattern defining a common standard!","code":["toStringStandard.matcher(neurekaObject.toString()).matches()"]}, + + {"kind":"where","text":"The following objects are being used..","code":{"neurekaObject":["CPU.get()","DataType.of(String)","Relation.newParentToChildren()","new JITProp<>(] as Set)","Neureka.get()","Neureka.get().settings()","Neureka.get().settings().autograd()","Neureka.get().settings().debug()","Neureka.get().settings().dtype()","Neureka.get().settings().ndim()","Neureka.get().settings().view()","Neureka.get().backend().getAutogradFunction()","Neureka.get().backend().getFunction()","Neureka.get().backend()","Neureka.get().backend().getFunctionCache()","ExecutionCall.of(Tensor.of(3d)).running(Neureka.get().backend().getOperation(\"+\")).on(CPU.get())","new CustomDeviceCleaner()","(Tensor.of(2d).setRqsGradient(true)* Tensor.of(-2d)).graphNode.get()","FileDevice.at('.')","NDConfiguration.of((int])2,3,8,4],(int])96, 32, 4, 1],(int])96, 32, 4, 1],(int])1,1,1,1],(int])0,0,0,0])","NDConfiguration.of((int])2,3,8,4],(int])96, 200, 8, 1],(int])96, 32, 4, 1],(int])1,1,1,1],(int])0,0,0,0])","NDConfiguration.of((int])2,3,8,4],(int])96, 32, 4, 1],(int])96, 92, 4, 1],(int])1,4,1,1],(int])0,0,0,0])","NDConfiguration.of((int])2,3,8],(int])24,8,1],(int])24,8,1],(int])1, 1, 1],(int])0,0,0])"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Various library objects adhere to the same toString formatting convention! 
[9]", + "result":"PASS", + "duration":"0.054 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"The provided object matches the following pattern defining a common standard!","code":["toStringStandard.matcher(neurekaObject.toString()).matches()"]}, + + {"kind":"where","text":"The following objects are being used..","code":{"neurekaObject":["CPU.get()","DataType.of(String)","Relation.newParentToChildren()","new JITProp<>(] as Set)","Neureka.get()","Neureka.get().settings()","Neureka.get().settings().autograd()","Neureka.get().settings().debug()","Neureka.get().settings().dtype()","Neureka.get().settings().ndim()","Neureka.get().settings().view()","Neureka.get().backend().getAutogradFunction()","Neureka.get().backend().getFunction()","Neureka.get().backend()","Neureka.get().backend().getFunctionCache()","ExecutionCall.of(Tensor.of(3d)).running(Neureka.get().backend().getOperation(\"+\")).on(CPU.get())","new CustomDeviceCleaner()","(Tensor.of(2d).setRqsGradient(true)* Tensor.of(-2d)).graphNode.get()","FileDevice.at('.')","NDConfiguration.of((int])2,3,8,4],(int])96, 32, 4, 1],(int])96, 32, 4, 1],(int])1,1,1,1],(int])0,0,0,0])","NDConfiguration.of((int])2,3,8,4],(int])96, 200, 8, 1],(int])96, 32, 4, 1],(int])1,1,1,1],(int])0,0,0,0])","NDConfiguration.of((int])2,3,8,4],(int])96, 32, 4, 1],(int])96, 92, 4, 1],(int])1,4,1,1],(int])0,0,0,0])","NDConfiguration.of((int])2,3,8],(int])24,8,1],(int])24,8,1],(int])1, 1, 1],(int])0,0,0])"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Various library objects adhere to the same toString formatting convention! [10]", + "result":"PASS", + "duration":"0.051 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"The provided object matches the following pattern defining a common standard!","code":["toStringStandard.matcher(neurekaObject.toString()).matches()"]}, + + {"kind":"where","text":"The following objects are being used..","code":{"neurekaObject":["CPU.get()","DataType.of(String)","Relation.newParentToChildren()","new JITProp<>(] as Set)","Neureka.get()","Neureka.get().settings()","Neureka.get().settings().autograd()","Neureka.get().settings().debug()","Neureka.get().settings().dtype()","Neureka.get().settings().ndim()","Neureka.get().settings().view()","Neureka.get().backend().getAutogradFunction()","Neureka.get().backend().getFunction()","Neureka.get().backend()","Neureka.get().backend().getFunctionCache()","ExecutionCall.of(Tensor.of(3d)).running(Neureka.get().backend().getOperation(\"+\")).on(CPU.get())","new CustomDeviceCleaner()","(Tensor.of(2d).setRqsGradient(true)* Tensor.of(-2d)).graphNode.get()","FileDevice.at('.')","NDConfiguration.of((int])2,3,8,4],(int])96, 32, 4, 1],(int])96, 32, 4, 1],(int])1,1,1,1],(int])0,0,0,0])","NDConfiguration.of((int])2,3,8,4],(int])96, 200, 8, 1],(int])96, 32, 4, 1],(int])1,1,1,1],(int])0,0,0,0])","NDConfiguration.of((int])2,3,8,4],(int])96, 32, 4, 1],(int])96, 92, 4, 1],(int])1,4,1,1],(int])0,0,0,0])","NDConfiguration.of((int])2,3,8],(int])24,8,1],(int])24,8,1],(int])1, 1, 1],(int])0,0,0])"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Various library objects adhere to the same toString formatting convention! 
[11]", + "result":"PASS", + "duration":"0.054 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"The provided object matches the following pattern defining a common standard!","code":["toStringStandard.matcher(neurekaObject.toString()).matches()"]}, + + {"kind":"where","text":"The following objects are being used..","code":{"neurekaObject":["CPU.get()","DataType.of(String)","Relation.newParentToChildren()","new JITProp<>(] as Set)","Neureka.get()","Neureka.get().settings()","Neureka.get().settings().autograd()","Neureka.get().settings().debug()","Neureka.get().settings().dtype()","Neureka.get().settings().ndim()","Neureka.get().settings().view()","Neureka.get().backend().getAutogradFunction()","Neureka.get().backend().getFunction()","Neureka.get().backend()","Neureka.get().backend().getFunctionCache()","ExecutionCall.of(Tensor.of(3d)).running(Neureka.get().backend().getOperation(\"+\")).on(CPU.get())","new CustomDeviceCleaner()","(Tensor.of(2d).setRqsGradient(true)* Tensor.of(-2d)).graphNode.get()","FileDevice.at('.')","NDConfiguration.of((int])2,3,8,4],(int])96, 32, 4, 1],(int])96, 32, 4, 1],(int])1,1,1,1],(int])0,0,0,0])","NDConfiguration.of((int])2,3,8,4],(int])96, 200, 8, 1],(int])96, 32, 4, 1],(int])1,1,1,1],(int])0,0,0,0])","NDConfiguration.of((int])2,3,8,4],(int])96, 32, 4, 1],(int])96, 92, 4, 1],(int])1,4,1,1],(int])0,0,0,0])","NDConfiguration.of((int])2,3,8],(int])24,8,1],(int])24,8,1],(int])1, 1, 1],(int])0,0,0])"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Various library objects adhere to the same toString formatting convention! [12]", + "result":"PASS", + "duration":"0.053 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"The provided object matches the following pattern defining a common standard!","code":["toStringStandard.matcher(neurekaObject.toString()).matches()"]}, + + {"kind":"where","text":"The following objects are being used..","code":{"neurekaObject":["CPU.get()","DataType.of(String)","Relation.newParentToChildren()","new JITProp<>(] as Set)","Neureka.get()","Neureka.get().settings()","Neureka.get().settings().autograd()","Neureka.get().settings().debug()","Neureka.get().settings().dtype()","Neureka.get().settings().ndim()","Neureka.get().settings().view()","Neureka.get().backend().getAutogradFunction()","Neureka.get().backend().getFunction()","Neureka.get().backend()","Neureka.get().backend().getFunctionCache()","ExecutionCall.of(Tensor.of(3d)).running(Neureka.get().backend().getOperation(\"+\")).on(CPU.get())","new CustomDeviceCleaner()","(Tensor.of(2d).setRqsGradient(true)* Tensor.of(-2d)).graphNode.get()","FileDevice.at('.')","NDConfiguration.of((int])2,3,8,4],(int])96, 32, 4, 1],(int])96, 32, 4, 1],(int])1,1,1,1],(int])0,0,0,0])","NDConfiguration.of((int])2,3,8,4],(int])96, 200, 8, 1],(int])96, 32, 4, 1],(int])1,1,1,1],(int])0,0,0,0])","NDConfiguration.of((int])2,3,8,4],(int])96, 32, 4, 1],(int])96, 92, 4, 1],(int])1,4,1,1],(int])0,0,0,0])","NDConfiguration.of((int])2,3,8],(int])24,8,1],(int])24,8,1],(int])1, 1, 1],(int])0,0,0])"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Various library objects adhere to the same toString formatting convention! 
[13]", + "result":"PASS", + "duration":"0.053 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"The provided object matches the following pattern defining a common standard!","code":["toStringStandard.matcher(neurekaObject.toString()).matches()"]}, + + {"kind":"where","text":"The following objects are being used..","code":{"neurekaObject":["CPU.get()","DataType.of(String)","Relation.newParentToChildren()","new JITProp<>(] as Set)","Neureka.get()","Neureka.get().settings()","Neureka.get().settings().autograd()","Neureka.get().settings().debug()","Neureka.get().settings().dtype()","Neureka.get().settings().ndim()","Neureka.get().settings().view()","Neureka.get().backend().getAutogradFunction()","Neureka.get().backend().getFunction()","Neureka.get().backend()","Neureka.get().backend().getFunctionCache()","ExecutionCall.of(Tensor.of(3d)).running(Neureka.get().backend().getOperation(\"+\")).on(CPU.get())","new CustomDeviceCleaner()","(Tensor.of(2d).setRqsGradient(true)* Tensor.of(-2d)).graphNode.get()","FileDevice.at('.')","NDConfiguration.of((int])2,3,8,4],(int])96, 32, 4, 1],(int])96, 32, 4, 1],(int])1,1,1,1],(int])0,0,0,0])","NDConfiguration.of((int])2,3,8,4],(int])96, 200, 8, 1],(int])96, 32, 4, 1],(int])1,1,1,1],(int])0,0,0,0])","NDConfiguration.of((int])2,3,8,4],(int])96, 32, 4, 1],(int])96, 92, 4, 1],(int])1,4,1,1],(int])0,0,0,0])","NDConfiguration.of((int])2,3,8],(int])24,8,1],(int])24,8,1],(int])1, 1, 1],(int])0,0,0])"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Various library objects adhere to the same toString formatting convention! [14]", + "result":"PASS", + "duration":"0.055 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"The provided object matches the following pattern defining a common standard!","code":["toStringStandard.matcher(neurekaObject.toString()).matches()"]}, + + {"kind":"where","text":"The following objects are being used..","code":{"neurekaObject":["CPU.get()","DataType.of(String)","Relation.newParentToChildren()","new JITProp<>(] as Set)","Neureka.get()","Neureka.get().settings()","Neureka.get().settings().autograd()","Neureka.get().settings().debug()","Neureka.get().settings().dtype()","Neureka.get().settings().ndim()","Neureka.get().settings().view()","Neureka.get().backend().getAutogradFunction()","Neureka.get().backend().getFunction()","Neureka.get().backend()","Neureka.get().backend().getFunctionCache()","ExecutionCall.of(Tensor.of(3d)).running(Neureka.get().backend().getOperation(\"+\")).on(CPU.get())","new CustomDeviceCleaner()","(Tensor.of(2d).setRqsGradient(true)* Tensor.of(-2d)).graphNode.get()","FileDevice.at('.')","NDConfiguration.of((int])2,3,8,4],(int])96, 32, 4, 1],(int])96, 32, 4, 1],(int])1,1,1,1],(int])0,0,0,0])","NDConfiguration.of((int])2,3,8,4],(int])96, 200, 8, 1],(int])96, 32, 4, 1],(int])1,1,1,1],(int])0,0,0,0])","NDConfiguration.of((int])2,3,8,4],(int])96, 32, 4, 1],(int])96, 92, 4, 1],(int])1,4,1,1],(int])0,0,0,0])","NDConfiguration.of((int])2,3,8],(int])24,8,1],(int])24,8,1],(int])1, 1, 1],(int])0,0,0])"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Various library objects adhere to the same toString formatting convention! 
[15]", + "result":"PASS", + "duration":"0.091 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"The provided object matches the following pattern defining a common standard!","code":["toStringStandard.matcher(neurekaObject.toString()).matches()"]}, + + {"kind":"where","text":"The following objects are being used..","code":{"neurekaObject":["CPU.get()","DataType.of(String)","Relation.newParentToChildren()","new JITProp<>(] as Set)","Neureka.get()","Neureka.get().settings()","Neureka.get().settings().autograd()","Neureka.get().settings().debug()","Neureka.get().settings().dtype()","Neureka.get().settings().ndim()","Neureka.get().settings().view()","Neureka.get().backend().getAutogradFunction()","Neureka.get().backend().getFunction()","Neureka.get().backend()","Neureka.get().backend().getFunctionCache()","ExecutionCall.of(Tensor.of(3d)).running(Neureka.get().backend().getOperation(\"+\")).on(CPU.get())","new CustomDeviceCleaner()","(Tensor.of(2d).setRqsGradient(true)* Tensor.of(-2d)).graphNode.get()","FileDevice.at('.')","NDConfiguration.of((int])2,3,8,4],(int])96, 32, 4, 1],(int])96, 32, 4, 1],(int])1,1,1,1],(int])0,0,0,0])","NDConfiguration.of((int])2,3,8,4],(int])96, 200, 8, 1],(int])96, 32, 4, 1],(int])1,1,1,1],(int])0,0,0,0])","NDConfiguration.of((int])2,3,8,4],(int])96, 32, 4, 1],(int])96, 92, 4, 1],(int])1,4,1,1],(int])0,0,0,0])","NDConfiguration.of((int])2,3,8],(int])24,8,1],(int])24,8,1],(int])1, 1, 1],(int])0,0,0])"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Various library objects adhere to the same toString formatting convention! [16]", + "result":"PASS", + "duration":"0.051 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"The provided object matches the following pattern defining a common standard!","code":["toStringStandard.matcher(neurekaObject.toString()).matches()"]}, + + {"kind":"where","text":"The following objects are being used..","code":{"neurekaObject":["CPU.get()","DataType.of(String)","Relation.newParentToChildren()","new JITProp<>(] as Set)","Neureka.get()","Neureka.get().settings()","Neureka.get().settings().autograd()","Neureka.get().settings().debug()","Neureka.get().settings().dtype()","Neureka.get().settings().ndim()","Neureka.get().settings().view()","Neureka.get().backend().getAutogradFunction()","Neureka.get().backend().getFunction()","Neureka.get().backend()","Neureka.get().backend().getFunctionCache()","ExecutionCall.of(Tensor.of(3d)).running(Neureka.get().backend().getOperation(\"+\")).on(CPU.get())","new CustomDeviceCleaner()","(Tensor.of(2d).setRqsGradient(true)* Tensor.of(-2d)).graphNode.get()","FileDevice.at('.')","NDConfiguration.of((int])2,3,8,4],(int])96, 32, 4, 1],(int])96, 32, 4, 1],(int])1,1,1,1],(int])0,0,0,0])","NDConfiguration.of((int])2,3,8,4],(int])96, 200, 8, 1],(int])96, 32, 4, 1],(int])1,1,1,1],(int])0,0,0,0])","NDConfiguration.of((int])2,3,8,4],(int])96, 32, 4, 1],(int])96, 92, 4, 1],(int])1,4,1,1],(int])0,0,0,0])","NDConfiguration.of((int])2,3,8],(int])24,8,1],(int])24,8,1],(int])1, 1, 1],(int])0,0,0])"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Various library objects adhere to the same toString formatting convention! 
[17]", + "result":"PASS", + "duration":"0.052 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"The provided object matches the following pattern defining a common standard!","code":["toStringStandard.matcher(neurekaObject.toString()).matches()"]}, + + {"kind":"where","text":"The following objects are being used..","code":{"neurekaObject":["CPU.get()","DataType.of(String)","Relation.newParentToChildren()","new JITProp<>(] as Set)","Neureka.get()","Neureka.get().settings()","Neureka.get().settings().autograd()","Neureka.get().settings().debug()","Neureka.get().settings().dtype()","Neureka.get().settings().ndim()","Neureka.get().settings().view()","Neureka.get().backend().getAutogradFunction()","Neureka.get().backend().getFunction()","Neureka.get().backend()","Neureka.get().backend().getFunctionCache()","ExecutionCall.of(Tensor.of(3d)).running(Neureka.get().backend().getOperation(\"+\")).on(CPU.get())","new CustomDeviceCleaner()","(Tensor.of(2d).setRqsGradient(true)* Tensor.of(-2d)).graphNode.get()","FileDevice.at('.')","NDConfiguration.of((int])2,3,8,4],(int])96, 32, 4, 1],(int])96, 32, 4, 1],(int])1,1,1,1],(int])0,0,0,0])","NDConfiguration.of((int])2,3,8,4],(int])96, 200, 8, 1],(int])96, 32, 4, 1],(int])1,1,1,1],(int])0,0,0,0])","NDConfiguration.of((int])2,3,8,4],(int])96, 32, 4, 1],(int])96, 92, 4, 1],(int])1,4,1,1],(int])0,0,0,0])","NDConfiguration.of((int])2,3,8],(int])24,8,1],(int])24,8,1],(int])1, 1, 1],(int])0,0,0])"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Various library objects adhere to the same toString formatting convention! [18]", + "result":"PASS", + "duration":"0.052 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"The provided object matches the following pattern defining a common standard!","code":["toStringStandard.matcher(neurekaObject.toString()).matches()"]}, + + {"kind":"where","text":"The following objects are being used..","code":{"neurekaObject":["CPU.get()","DataType.of(String)","Relation.newParentToChildren()","new JITProp<>(] as Set)","Neureka.get()","Neureka.get().settings()","Neureka.get().settings().autograd()","Neureka.get().settings().debug()","Neureka.get().settings().dtype()","Neureka.get().settings().ndim()","Neureka.get().settings().view()","Neureka.get().backend().getAutogradFunction()","Neureka.get().backend().getFunction()","Neureka.get().backend()","Neureka.get().backend().getFunctionCache()","ExecutionCall.of(Tensor.of(3d)).running(Neureka.get().backend().getOperation(\"+\")).on(CPU.get())","new CustomDeviceCleaner()","(Tensor.of(2d).setRqsGradient(true)* Tensor.of(-2d)).graphNode.get()","FileDevice.at('.')","NDConfiguration.of((int])2,3,8,4],(int])96, 32, 4, 1],(int])96, 32, 4, 1],(int])1,1,1,1],(int])0,0,0,0])","NDConfiguration.of((int])2,3,8,4],(int])96, 200, 8, 1],(int])96, 32, 4, 1],(int])1,1,1,1],(int])0,0,0,0])","NDConfiguration.of((int])2,3,8,4],(int])96, 32, 4, 1],(int])96, 92, 4, 1],(int])1,4,1,1],(int])0,0,0,0])","NDConfiguration.of((int])2,3,8],(int])24,8,1],(int])24,8,1],(int])1, 1, 1],(int])0,0,0])"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Various library objects adhere to the same toString formatting convention! 
[19]", + "result":"PASS", + "duration":"0.051 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"The provided object matches the following pattern defining a common standard!","code":["toStringStandard.matcher(neurekaObject.toString()).matches()"]}, + + {"kind":"where","text":"The following objects are being used..","code":{"neurekaObject":["CPU.get()","DataType.of(String)","Relation.newParentToChildren()","new JITProp<>(] as Set)","Neureka.get()","Neureka.get().settings()","Neureka.get().settings().autograd()","Neureka.get().settings().debug()","Neureka.get().settings().dtype()","Neureka.get().settings().ndim()","Neureka.get().settings().view()","Neureka.get().backend().getAutogradFunction()","Neureka.get().backend().getFunction()","Neureka.get().backend()","Neureka.get().backend().getFunctionCache()","ExecutionCall.of(Tensor.of(3d)).running(Neureka.get().backend().getOperation(\"+\")).on(CPU.get())","new CustomDeviceCleaner()","(Tensor.of(2d).setRqsGradient(true)* Tensor.of(-2d)).graphNode.get()","FileDevice.at('.')","NDConfiguration.of((int])2,3,8,4],(int])96, 32, 4, 1],(int])96, 32, 4, 1],(int])1,1,1,1],(int])0,0,0,0])","NDConfiguration.of((int])2,3,8,4],(int])96, 200, 8, 1],(int])96, 32, 4, 1],(int])1,1,1,1],(int])0,0,0,0])","NDConfiguration.of((int])2,3,8,4],(int])96, 32, 4, 1],(int])96, 92, 4, 1],(int])1,4,1,1],(int])0,0,0,0])","NDConfiguration.of((int])2,3,8],(int])24,8,1],(int])24,8,1],(int])1, 1, 1],(int])0,0,0])"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Various library objects adhere to the same toString formatting convention! [20]", + "result":"PASS", + "duration":"0.052 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"The provided object matches the following pattern defining a common standard!","code":["toStringStandard.matcher(neurekaObject.toString()).matches()"]}, + + {"kind":"where","text":"The following objects are being used..","code":{"neurekaObject":["CPU.get()","DataType.of(String)","Relation.newParentToChildren()","new JITProp<>(] as Set)","Neureka.get()","Neureka.get().settings()","Neureka.get().settings().autograd()","Neureka.get().settings().debug()","Neureka.get().settings().dtype()","Neureka.get().settings().ndim()","Neureka.get().settings().view()","Neureka.get().backend().getAutogradFunction()","Neureka.get().backend().getFunction()","Neureka.get().backend()","Neureka.get().backend().getFunctionCache()","ExecutionCall.of(Tensor.of(3d)).running(Neureka.get().backend().getOperation(\"+\")).on(CPU.get())","new CustomDeviceCleaner()","(Tensor.of(2d).setRqsGradient(true)* Tensor.of(-2d)).graphNode.get()","FileDevice.at('.')","NDConfiguration.of((int])2,3,8,4],(int])96, 32, 4, 1],(int])96, 32, 4, 1],(int])1,1,1,1],(int])0,0,0,0])","NDConfiguration.of((int])2,3,8,4],(int])96, 200, 8, 1],(int])96, 32, 4, 1],(int])1,1,1,1],(int])0,0,0,0])","NDConfiguration.of((int])2,3,8,4],(int])96, 32, 4, 1],(int])96, 92, 4, 1],(int])1,4,1,1],(int])0,0,0,0])","NDConfiguration.of((int])2,3,8],(int])24,8,1],(int])24,8,1],(int])1, 1, 1],(int])0,0,0])"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Various library objects adhere to the same toString formatting convention! 
[21]", + "result":"PASS", + "duration":"0.050 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"The provided object matches the following pattern defining a common standard!","code":["toStringStandard.matcher(neurekaObject.toString()).matches()"]}, + + {"kind":"where","text":"The following objects are being used..","code":{"neurekaObject":["CPU.get()","DataType.of(String)","Relation.newParentToChildren()","new JITProp<>(] as Set)","Neureka.get()","Neureka.get().settings()","Neureka.get().settings().autograd()","Neureka.get().settings().debug()","Neureka.get().settings().dtype()","Neureka.get().settings().ndim()","Neureka.get().settings().view()","Neureka.get().backend().getAutogradFunction()","Neureka.get().backend().getFunction()","Neureka.get().backend()","Neureka.get().backend().getFunctionCache()","ExecutionCall.of(Tensor.of(3d)).running(Neureka.get().backend().getOperation(\"+\")).on(CPU.get())","new CustomDeviceCleaner()","(Tensor.of(2d).setRqsGradient(true)* Tensor.of(-2d)).graphNode.get()","FileDevice.at('.')","NDConfiguration.of((int])2,3,8,4],(int])96, 32, 4, 1],(int])96, 32, 4, 1],(int])1,1,1,1],(int])0,0,0,0])","NDConfiguration.of((int])2,3,8,4],(int])96, 200, 8, 1],(int])96, 32, 4, 1],(int])1,1,1,1],(int])0,0,0,0])","NDConfiguration.of((int])2,3,8,4],(int])96, 32, 4, 1],(int])96, 92, 4, 1],(int])1,4,1,1],(int])0,0,0,0])","NDConfiguration.of((int])2,3,8],(int])24,8,1],(int])24,8,1],(int])1, 1, 1],(int])0,0,0])"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Various library objects adhere to the same toString formatting convention! [22]", + "result":"PASS", + "duration":"0.051 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"The provided object matches the following pattern defining a common standard!","code":["toStringStandard.matcher(neurekaObject.toString()).matches()"]}, + + {"kind":"where","text":"The following objects are being used..","code":{"neurekaObject":["CPU.get()","DataType.of(String)","Relation.newParentToChildren()","new JITProp<>(] as Set)","Neureka.get()","Neureka.get().settings()","Neureka.get().settings().autograd()","Neureka.get().settings().debug()","Neureka.get().settings().dtype()","Neureka.get().settings().ndim()","Neureka.get().settings().view()","Neureka.get().backend().getAutogradFunction()","Neureka.get().backend().getFunction()","Neureka.get().backend()","Neureka.get().backend().getFunctionCache()","ExecutionCall.of(Tensor.of(3d)).running(Neureka.get().backend().getOperation(\"+\")).on(CPU.get())","new CustomDeviceCleaner()","(Tensor.of(2d).setRqsGradient(true)* Tensor.of(-2d)).graphNode.get()","FileDevice.at('.')","NDConfiguration.of((int])2,3,8,4],(int])96, 32, 4, 1],(int])96, 32, 4, 1],(int])1,1,1,1],(int])0,0,0,0])","NDConfiguration.of((int])2,3,8,4],(int])96, 200, 8, 1],(int])96, 32, 4, 1],(int])1,1,1,1],(int])0,0,0,0])","NDConfiguration.of((int])2,3,8,4],(int])96, 32, 4, 1],(int])96, 92, 4, 1],(int])1,4,1,1],(int])0,0,0,0])","NDConfiguration.of((int])2,3,8],(int])24,8,1],(int])24,8,1],(int])1, 1, 1],(int])0,0,0])"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"OpenCL related library objects adhere to the same toString formatting convention!", + "result":"IGNORED", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"The provided object matches the following pattern defining a common 
standard!","code":["toStringStandard.matcher(neurekaCLObject.toString()).matches()"]}, + + {"kind":"where","text":"The following objects are being used..","code":{"neurekaCLObject":["Neureka.get().backend.find(CLBackend).get()","Neureka.get().backend.find(CLBackend).get().platforms0]","Neureka.get().backend.find(CLBackend).get().platforms0].devices0]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Backend related library objects adhere to the same toString formatting convention! [0]", + "result":"PASS", + "duration":"0.054 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"The provided object matches the following pattern defining a common standard!","code":["toStringStandard.matcher(operation.toString()).matches()"]}, + + {"kind":"and","text":"The same criteria should also be met for every algorithm within the current operation.","code":["operation.getAllAlgorithms().every {"," toStringStandard.matcher(it.toString()).matches()","}"]}, + + {"kind":"where","text":"The following operations are being used..","code":{"operation":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Backend related library objects adhere to the same toString formatting convention! [1]", + "result":"PASS", + "duration":"0.051 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"The provided object matches the following pattern defining a common standard!","code":["toStringStandard.matcher(operation.toString()).matches()"]}, + + {"kind":"and","text":"The same criteria should also be met for every algorithm within the current operation.","code":["operation.getAllAlgorithms().every {"," toStringStandard.matcher(it.toString()).matches()","}"]}, + + {"kind":"where","text":"The following operations are being used..","code":{"operation":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Backend related library objects adhere to the same toString formatting convention! [2]", + "result":"PASS", + "duration":"0.052 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"The provided object matches the following pattern defining a common standard!","code":["toStringStandard.matcher(operation.toString()).matches()"]}, + + {"kind":"and","text":"The same criteria should also be met for every algorithm within the current operation.","code":["operation.getAllAlgorithms().every {"," toStringStandard.matcher(it.toString()).matches()","}"]}, + + {"kind":"where","text":"The following operations are being used..","code":{"operation":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Backend related library objects adhere to the same toString formatting convention! 
[3]", + "result":"PASS", + "duration":"0.056 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"The provided object matches the following pattern defining a common standard!","code":["toStringStandard.matcher(operation.toString()).matches()"]}, + + {"kind":"and","text":"The same criteria should also be met for every algorithm within the current operation.","code":["operation.getAllAlgorithms().every {"," toStringStandard.matcher(it.toString()).matches()","}"]}, + + {"kind":"where","text":"The following operations are being used..","code":{"operation":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Backend related library objects adhere to the same toString formatting convention! [4]", + "result":"PASS", + "duration":"0.054 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"The provided object matches the following pattern defining a common standard!","code":["toStringStandard.matcher(operation.toString()).matches()"]}, + + {"kind":"and","text":"The same criteria should also be met for every algorithm within the current operation.","code":["operation.getAllAlgorithms().every {"," toStringStandard.matcher(it.toString()).matches()","}"]}, + + {"kind":"where","text":"The following operations are being used..","code":{"operation":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Backend related library objects adhere to the same toString formatting convention! [5]", + "result":"PASS", + "duration":"0.053 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"The provided object matches the following pattern defining a common standard!","code":["toStringStandard.matcher(operation.toString()).matches()"]}, + + {"kind":"and","text":"The same criteria should also be met for every algorithm within the current operation.","code":["operation.getAllAlgorithms().every {"," toStringStandard.matcher(it.toString()).matches()","}"]}, + + {"kind":"where","text":"The following operations are being used..","code":{"operation":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Backend related library objects adhere to the same toString formatting convention! [6]", + "result":"PASS", + "duration":"0.052 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"The provided object matches the following pattern defining a common standard!","code":["toStringStandard.matcher(operation.toString()).matches()"]}, + + {"kind":"and","text":"The same criteria should also be met for every algorithm within the current operation.","code":["operation.getAllAlgorithms().every {"," toStringStandard.matcher(it.toString()).matches()","}"]}, + + {"kind":"where","text":"The following operations are being used..","code":{"operation":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Backend related library objects adhere to the same toString formatting convention! 
[7]", + "result":"PASS", + "duration":"0.053 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"The provided object matches the following pattern defining a common standard!","code":["toStringStandard.matcher(operation.toString()).matches()"]}, + + {"kind":"and","text":"The same criteria should also be met for every algorithm within the current operation.","code":["operation.getAllAlgorithms().every {"," toStringStandard.matcher(it.toString()).matches()","}"]}, + + {"kind":"where","text":"The following operations are being used..","code":{"operation":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Backend related library objects adhere to the same toString formatting convention! [8]", + "result":"PASS", + "duration":"0.052 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"The provided object matches the following pattern defining a common standard!","code":["toStringStandard.matcher(operation.toString()).matches()"]}, + + {"kind":"and","text":"The same criteria should also be met for every algorithm within the current operation.","code":["operation.getAllAlgorithms().every {"," toStringStandard.matcher(it.toString()).matches()","}"]}, + + {"kind":"where","text":"The following operations are being used..","code":{"operation":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Backend related library objects adhere to the same toString formatting convention! [9]", + "result":"PASS", + "duration":"0.052 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"The provided object matches the following pattern defining a common standard!","code":["toStringStandard.matcher(operation.toString()).matches()"]}, + + {"kind":"and","text":"The same criteria should also be met for every algorithm within the current operation.","code":["operation.getAllAlgorithms().every {"," toStringStandard.matcher(it.toString()).matches()","}"]}, + + {"kind":"where","text":"The following operations are being used..","code":{"operation":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Backend related library objects adhere to the same toString formatting convention! [10]", + "result":"PASS", + "duration":"0.052 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"The provided object matches the following pattern defining a common standard!","code":["toStringStandard.matcher(operation.toString()).matches()"]}, + + {"kind":"and","text":"The same criteria should also be met for every algorithm within the current operation.","code":["operation.getAllAlgorithms().every {"," toStringStandard.matcher(it.toString()).matches()","}"]}, + + {"kind":"where","text":"The following operations are being used..","code":{"operation":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Backend related library objects adhere to the same toString formatting convention! 
[11]", + "result":"PASS", + "duration":"0.051 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"The provided object matches the following pattern defining a common standard!","code":["toStringStandard.matcher(operation.toString()).matches()"]}, + + {"kind":"and","text":"The same criteria should also be met for every algorithm within the current operation.","code":["operation.getAllAlgorithms().every {"," toStringStandard.matcher(it.toString()).matches()","}"]}, + + {"kind":"where","text":"The following operations are being used..","code":{"operation":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Backend related library objects adhere to the same toString formatting convention! [12]", + "result":"PASS", + "duration":"0.053 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"The provided object matches the following pattern defining a common standard!","code":["toStringStandard.matcher(operation.toString()).matches()"]}, + + {"kind":"and","text":"The same criteria should also be met for every algorithm within the current operation.","code":["operation.getAllAlgorithms().every {"," toStringStandard.matcher(it.toString()).matches()","}"]}, + + {"kind":"where","text":"The following operations are being used..","code":{"operation":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Backend related library objects adhere to the same toString formatting convention! [13]", + "result":"PASS", + "duration":"0.053 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"The provided object matches the following pattern defining a common standard!","code":["toStringStandard.matcher(operation.toString()).matches()"]}, + + {"kind":"and","text":"The same criteria should also be met for every algorithm within the current operation.","code":["operation.getAllAlgorithms().every {"," toStringStandard.matcher(it.toString()).matches()","}"]}, + + {"kind":"where","text":"The following operations are being used..","code":{"operation":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Backend related library objects adhere to the same toString formatting convention! [14]", + "result":"PASS", + "duration":"0.054 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"The provided object matches the following pattern defining a common standard!","code":["toStringStandard.matcher(operation.toString()).matches()"]}, + + {"kind":"and","text":"The same criteria should also be met for every algorithm within the current operation.","code":["operation.getAllAlgorithms().every {"," toStringStandard.matcher(it.toString()).matches()","}"]}, + + {"kind":"where","text":"The following operations are being used..","code":{"operation":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Backend related library objects adhere to the same toString formatting convention! 
[15]", + "result":"PASS", + "duration":"0.053 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"The provided object matches the following pattern defining a common standard!","code":["toStringStandard.matcher(operation.toString()).matches()"]}, + + {"kind":"and","text":"The same criteria should also be met for every algorithm within the current operation.","code":["operation.getAllAlgorithms().every {"," toStringStandard.matcher(it.toString()).matches()","}"]}, + + {"kind":"where","text":"The following operations are being used..","code":{"operation":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Backend related library objects adhere to the same toString formatting convention! [16]", + "result":"PASS", + "duration":"0.053 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"The provided object matches the following pattern defining a common standard!","code":["toStringStandard.matcher(operation.toString()).matches()"]}, + + {"kind":"and","text":"The same criteria should also be met for every algorithm within the current operation.","code":["operation.getAllAlgorithms().every {"," toStringStandard.matcher(it.toString()).matches()","}"]}, + + {"kind":"where","text":"The following operations are being used..","code":{"operation":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Backend related library objects adhere to the same toString formatting convention! [17]", + "result":"PASS", + "duration":"0.055 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"The provided object matches the following pattern defining a common standard!","code":["toStringStandard.matcher(operation.toString()).matches()"]}, + + {"kind":"and","text":"The same criteria should also be met for every algorithm within the current operation.","code":["operation.getAllAlgorithms().every {"," toStringStandard.matcher(it.toString()).matches()","}"]}, + + {"kind":"where","text":"The following operations are being used..","code":{"operation":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Backend related library objects adhere to the same toString formatting convention! [18]", + "result":"PASS", + "duration":"0.057 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"The provided object matches the following pattern defining a common standard!","code":["toStringStandard.matcher(operation.toString()).matches()"]}, + + {"kind":"and","text":"The same criteria should also be met for every algorithm within the current operation.","code":["operation.getAllAlgorithms().every {"," toStringStandard.matcher(it.toString()).matches()","}"]}, + + {"kind":"where","text":"The following operations are being used..","code":{"operation":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Backend related library objects adhere to the same toString formatting convention! 
[19]", + "result":"PASS", + "duration":"0.053 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"The provided object matches the following pattern defining a common standard!","code":["toStringStandard.matcher(operation.toString()).matches()"]}, + + {"kind":"and","text":"The same criteria should also be met for every algorithm within the current operation.","code":["operation.getAllAlgorithms().every {"," toStringStandard.matcher(it.toString()).matches()","}"]}, + + {"kind":"where","text":"The following operations are being used..","code":{"operation":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Backend related library objects adhere to the same toString formatting convention! [20]", + "result":"PASS", + "duration":"0.054 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"The provided object matches the following pattern defining a common standard!","code":["toStringStandard.matcher(operation.toString()).matches()"]}, + + {"kind":"and","text":"The same criteria should also be met for every algorithm within the current operation.","code":["operation.getAllAlgorithms().every {"," toStringStandard.matcher(it.toString()).matches()","}"]}, + + {"kind":"where","text":"The following operations are being used..","code":{"operation":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Backend related library objects adhere to the same toString formatting convention! [21]", + "result":"PASS", + "duration":"0.078 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"The provided object matches the following pattern defining a common standard!","code":["toStringStandard.matcher(operation.toString()).matches()"]}, + + {"kind":"and","text":"The same criteria should also be met for every algorithm within the current operation.","code":["operation.getAllAlgorithms().every {"," toStringStandard.matcher(it.toString()).matches()","}"]}, + + {"kind":"where","text":"The following operations are being used..","code":{"operation":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Backend related library objects adhere to the same toString formatting convention! [22]", + "result":"PASS", + "duration":"0.059 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"The provided object matches the following pattern defining a common standard!","code":["toStringStandard.matcher(operation.toString()).matches()"]}, + + {"kind":"and","text":"The same criteria should also be met for every algorithm within the current operation.","code":["operation.getAllAlgorithms().every {"," toStringStandard.matcher(it.toString()).matches()","}"]}, + + {"kind":"where","text":"The following operations are being used..","code":{"operation":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Backend related library objects adhere to the same toString formatting convention! 
[23]", + "result":"PASS", + "duration":"0.055 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"The provided object matches the following pattern defining a common standard!","code":["toStringStandard.matcher(operation.toString()).matches()"]}, + + {"kind":"and","text":"The same criteria should also be met for every algorithm within the current operation.","code":["operation.getAllAlgorithms().every {"," toStringStandard.matcher(it.toString()).matches()","}"]}, + + {"kind":"where","text":"The following operations are being used..","code":{"operation":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Backend related library objects adhere to the same toString formatting convention! [24]", + "result":"PASS", + "duration":"0.094 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"The provided object matches the following pattern defining a common standard!","code":["toStringStandard.matcher(operation.toString()).matches()"]}, + + {"kind":"and","text":"The same criteria should also be met for every algorithm within the current operation.","code":["operation.getAllAlgorithms().every {"," toStringStandard.matcher(it.toString()).matches()","}"]}, + + {"kind":"where","text":"The following operations are being used..","code":{"operation":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Backend related library objects adhere to the same toString formatting convention! [25]", + "result":"PASS", + "duration":"0.054 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"The provided object matches the following pattern defining a common standard!","code":["toStringStandard.matcher(operation.toString()).matches()"]}, + + {"kind":"and","text":"The same criteria should also be met for every algorithm within the current operation.","code":["operation.getAllAlgorithms().every {"," toStringStandard.matcher(it.toString()).matches()","}"]}, + + {"kind":"where","text":"The following operations are being used..","code":{"operation":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Backend related library objects adhere to the same toString formatting convention! [26]", + "result":"PASS", + "duration":"0.053 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"The provided object matches the following pattern defining a common standard!","code":["toStringStandard.matcher(operation.toString()).matches()"]}, + + {"kind":"and","text":"The same criteria should also be met for every algorithm within the current operation.","code":["operation.getAllAlgorithms().every {"," toStringStandard.matcher(it.toString()).matches()","}"]}, + + {"kind":"where","text":"The following operations are being used..","code":{"operation":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Backend related library objects adhere to the same toString formatting convention! 
[27]", + "result":"PASS", + "duration":"0.054 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"The provided object matches the following pattern defining a common standard!","code":["toStringStandard.matcher(operation.toString()).matches()"]}, + + {"kind":"and","text":"The same criteria should also be met for every algorithm within the current operation.","code":["operation.getAllAlgorithms().every {"," toStringStandard.matcher(it.toString()).matches()","}"]}, + + {"kind":"where","text":"The following operations are being used..","code":{"operation":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Backend related library objects adhere to the same toString formatting convention! [28]", + "result":"PASS", + "duration":"0.053 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, "blocks":[ - {"kind":"given","text":"A map containing entries for Neureka instances.","code":["def map = ['instance 1':null, 'instance 2':null]"]}, + {"kind":"expect","text":"The provided object matches the following pattern defining a common standard!","code":["toStringStandard.matcher(operation.toString()).matches()"]}, - {"kind":"when","text":"Two newly instantiated tensors store their Neureka instances in the map.","code":["def t1 = new Thread({ map['instance 1'] = Neureka.get() })","def t2 = new Thread({ map['instance 2'] = Neureka.get() })"]}, + {"kind":"and","text":"The same criteria should also be met for every algorithm within the current operation.","code":["operation.getAllAlgorithms().every {"," toStringStandard.matcher(it.toString()).matches()","}"]}, - {"kind":"and","text":"The tensors are being started and joined.","code":["t1.start()","t2.start()","t1.join()","t2.join()"]}, + {"kind":"where","text":"The following operations are being used..","code":{"operation":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Backend related library objects adhere to the same toString formatting convention! [29]", + "result":"PASS", + "duration":"0.054 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"The provided object matches the following pattern defining a common standard!","code":["toStringStandard.matcher(operation.toString()).matches()"]}, - {"kind":"then","text":"The map entries will no longer be filled with null.","code":["map['instance 1'] != null","map['instance 2'] != null"]}, + {"kind":"and","text":"The same criteria should also be met for every algorithm within the current operation.","code":["operation.getAllAlgorithms().every {"," toStringStandard.matcher(it.toString()).matches()","}"]}, - {"kind":"and","text":"The Neureka instances stored in the map will be different objects.","code":["map['instance 1'] != map['instance 2']"]} + {"kind":"where","text":"The following operations are being used..","code":{"operation":[]}} ], "problems":{"dataValues":[], "errors":[]} }, { - "id":"Various library objects adhere to the same toString formatting convention!", + "id":"Backend related library objects adhere to the same toString formatting convention! 
[30]", "result":"PASS", - "duration":"0.746 seconds", + "duration":"0.057 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, "blocks":[ - {"kind":"expect","text":"The provided object matches the following pattern defining a common standard!","code":["toStringStandard.matcher(neurekaObject.toString()).matches()"]}, + {"kind":"expect","text":"The provided object matches the following pattern defining a common standard!","code":["toStringStandard.matcher(operation.toString()).matches()"]}, - {"kind":"where","text":"The following objects are being used..","code":{"neurekaObject":["CPU.get()","DataType.of(String)","Relation.newParentToChildren()","new JITProp<>(] as Set)","Neureka.get()","Neureka.get().settings()","Neureka.get().settings().autograd()","Neureka.get().settings().debug()","Neureka.get().settings().dtype()","Neureka.get().settings().ndim()","Neureka.get().settings().view()","Neureka.get().backend().getAutogradFunction()","Neureka.get().backend().getFunction()","Neureka.get().backend()","Neureka.get().backend().getFunctionCache()","ExecutionCall.of(Tensor.of(3d)).running(Neureka.get().backend().getOperation(\"+\")).on(CPU.get())","new CustomDeviceCleaner()","(Tensor.of(2d).setRqsGradient(true)* Tensor.of(-2d)).graphNode.get()","FileDevice.at('.')","NDConfiguration.of((int])2,3,8,4],(int])96, 32, 4, 1],(int])96, 32, 4, 1],(int])1,1,1,1],(int])0,0,0,0])","NDConfiguration.of((int])2,3,8,4],(int])96, 200, 8, 1],(int])96, 32, 4, 1],(int])1,1,1,1],(int])0,0,0,0])","NDConfiguration.of((int])2,3,8,4],(int])96, 32, 4, 1],(int])96, 92, 4, 1],(int])1,4,1,1],(int])0,0,0,0])","NDConfiguration.of((int])2,3,8],(int])24,8,1],(int])24,8,1],(int])1, 1, 1],(int])0,0,0])"]}} + {"kind":"and","text":"The same criteria should also be met for every algorithm within the current operation.","code":["operation.getAllAlgorithms().every {"," toStringStandard.matcher(it.toString()).matches()","}"]}, + + {"kind":"where","text":"The following operations are being used..","code":{"operation":[]}} ], "problems":{"dataValues":[], "errors":[]} }, { - "id":"OpenCL related library objects adhere to the same toString formatting convention!", + "id":"Backend related library objects adhere to the same toString formatting convention! [31]", "result":"PASS", - "duration":"0.090 seconds", + "duration":"0.058 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, "blocks":[ - {"kind":"expect","text":"The provided object matches the following pattern defining a common standard!","code":["toStringStandard.matcher(neurekaCLObject.toString()).matches()"]}, + {"kind":"expect","text":"The provided object matches the following pattern defining a common standard!","code":["toStringStandard.matcher(operation.toString()).matches()"]}, - {"kind":"where","text":"The following objects are being used..","code":{"neurekaCLObject":["Neureka.get().backend.find(CLBackend).get()","Neureka.get().backend.find(CLBackend).get().platforms0]","Neureka.get().backend.find(CLBackend).get().platforms0].devices0]"]}} + {"kind":"and","text":"The same criteria should also be met for every algorithm within the current operation.","code":["operation.getAllAlgorithms().every {"," toStringStandard.matcher(it.toString()).matches()","}"]}, + + {"kind":"where","text":"The following operations are being used..","code":{"operation":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Backend related library objects adhere to the same toString formatting convention! 
[32]", + "result":"PASS", + "duration":"0.055 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"The provided object matches the following pattern defining a common standard!","code":["toStringStandard.matcher(operation.toString()).matches()"]}, + + {"kind":"and","text":"The same criteria should also be met for every algorithm within the current operation.","code":["operation.getAllAlgorithms().every {"," toStringStandard.matcher(it.toString()).matches()","}"]}, + + {"kind":"where","text":"The following operations are being used..","code":{"operation":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Backend related library objects adhere to the same toString formatting convention! [33]", + "result":"PASS", + "duration":"0.055 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"The provided object matches the following pattern defining a common standard!","code":["toStringStandard.matcher(operation.toString()).matches()"]}, + + {"kind":"and","text":"The same criteria should also be met for every algorithm within the current operation.","code":["operation.getAllAlgorithms().every {"," toStringStandard.matcher(it.toString()).matches()","}"]}, + + {"kind":"where","text":"The following operations are being used..","code":{"operation":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Backend related library objects adhere to the same toString formatting convention! [34]", + "result":"PASS", + "duration":"0.056 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"The provided object matches the following pattern defining a common standard!","code":["toStringStandard.matcher(operation.toString()).matches()"]}, + + {"kind":"and","text":"The same criteria should also be met for every algorithm within the current operation.","code":["operation.getAllAlgorithms().every {"," toStringStandard.matcher(it.toString()).matches()","}"]}, + + {"kind":"where","text":"The following operations are being used..","code":{"operation":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Backend related library objects adhere to the same toString formatting convention! [35]", + "result":"PASS", + "duration":"0.052 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"The provided object matches the following pattern defining a common standard!","code":["toStringStandard.matcher(operation.toString()).matches()"]}, + + {"kind":"and","text":"The same criteria should also be met for every algorithm within the current operation.","code":["operation.getAllAlgorithms().every {"," toStringStandard.matcher(it.toString()).matches()","}"]}, + + {"kind":"where","text":"The following operations are being used..","code":{"operation":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Backend related library objects adhere to the same toString formatting convention! 
[36]", + "result":"PASS", + "duration":"0.054 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"The provided object matches the following pattern defining a common standard!","code":["toStringStandard.matcher(operation.toString()).matches()"]}, + + {"kind":"and","text":"The same criteria should also be met for every algorithm within the current operation.","code":["operation.getAllAlgorithms().every {"," toStringStandard.matcher(it.toString()).matches()","}"]}, + + {"kind":"where","text":"The following operations are being used..","code":{"operation":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Backend related library objects adhere to the same toString formatting convention! [37]", + "result":"PASS", + "duration":"0.051 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"The provided object matches the following pattern defining a common standard!","code":["toStringStandard.matcher(operation.toString()).matches()"]}, + + {"kind":"and","text":"The same criteria should also be met for every algorithm within the current operation.","code":["operation.getAllAlgorithms().every {"," toStringStandard.matcher(it.toString()).matches()","}"]}, + + {"kind":"where","text":"The following operations are being used..","code":{"operation":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Backend related library objects adhere to the same toString formatting convention! [38]", + "result":"PASS", + "duration":"0.052 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"The provided object matches the following pattern defining a common standard!","code":["toStringStandard.matcher(operation.toString()).matches()"]}, + + {"kind":"and","text":"The same criteria should also be met for every algorithm within the current operation.","code":["operation.getAllAlgorithms().every {"," toStringStandard.matcher(it.toString()).matches()","}"]}, + + {"kind":"where","text":"The following operations are being used..","code":{"operation":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Backend related library objects adhere to the same toString formatting convention! [39]", + "result":"PASS", + "duration":"0.057 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"The provided object matches the following pattern defining a common standard!","code":["toStringStandard.matcher(operation.toString()).matches()"]}, + + {"kind":"and","text":"The same criteria should also be met for every algorithm within the current operation.","code":["operation.getAllAlgorithms().every {"," toStringStandard.matcher(it.toString()).matches()","}"]}, + + {"kind":"where","text":"The following operations are being used..","code":{"operation":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Backend related library objects adhere to the same toString formatting convention! 
[40]", + "result":"PASS", + "duration":"0.055 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"The provided object matches the following pattern defining a common standard!","code":["toStringStandard.matcher(operation.toString()).matches()"]}, + + {"kind":"and","text":"The same criteria should also be met for every algorithm within the current operation.","code":["operation.getAllAlgorithms().every {"," toStringStandard.matcher(it.toString()).matches()","}"]}, + + {"kind":"where","text":"The following operations are being used..","code":{"operation":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Backend related library objects adhere to the same toString formatting convention! [41]", + "result":"PASS", + "duration":"0.275 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"The provided object matches the following pattern defining a common standard!","code":["toStringStandard.matcher(operation.toString()).matches()"]}, + + {"kind":"and","text":"The same criteria should also be met for every algorithm within the current operation.","code":["operation.getAllAlgorithms().every {"," toStringStandard.matcher(it.toString()).matches()","}"]}, + + {"kind":"where","text":"The following operations are being used..","code":{"operation":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Backend related library objects adhere to the same toString formatting convention! [42]", + "result":"PASS", + "duration":"0.061 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"The provided object matches the following pattern defining a common standard!","code":["toStringStandard.matcher(operation.toString()).matches()"]}, + + {"kind":"and","text":"The same criteria should also be met for every algorithm within the current operation.","code":["operation.getAllAlgorithms().every {"," toStringStandard.matcher(it.toString()).matches()","}"]}, + + {"kind":"where","text":"The following operations are being used..","code":{"operation":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Backend related library objects adhere to the same toString formatting convention! [43]", + "result":"PASS", + "duration":"0.052 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"The provided object matches the following pattern defining a common standard!","code":["toStringStandard.matcher(operation.toString()).matches()"]}, + + {"kind":"and","text":"The same criteria should also be met for every algorithm within the current operation.","code":["operation.getAllAlgorithms().every {"," toStringStandard.matcher(it.toString()).matches()","}"]}, + + {"kind":"where","text":"The following operations are being used..","code":{"operation":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Backend related library objects adhere to the same toString formatting convention! 
[44]", + "result":"PASS", + "duration":"0.051 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"The provided object matches the following pattern defining a common standard!","code":["toStringStandard.matcher(operation.toString()).matches()"]}, + + {"kind":"and","text":"The same criteria should also be met for every algorithm within the current operation.","code":["operation.getAllAlgorithms().every {"," toStringStandard.matcher(it.toString()).matches()","}"]}, + + {"kind":"where","text":"The following operations are being used..","code":{"operation":[]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Backend related library objects adhere to the same toString formatting convention! [45]", + "result":"PASS", + "duration":"0.051 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"The provided object matches the following pattern defining a common standard!","code":["toStringStandard.matcher(operation.toString()).matches()"]}, + + {"kind":"and","text":"The same criteria should also be met for every algorithm within the current operation.","code":["operation.getAllAlgorithms().every {"," toStringStandard.matcher(it.toString()).matches()","}"]}, + + {"kind":"where","text":"The following operations are being used..","code":{"operation":[]}} ], "problems":{"dataValues":[], "errors":[]} }, { - "id":"Backend related library objects adhere to the same toString formatting convention!", + "id":"Backend related library objects adhere to the same toString formatting convention! [46]", "result":"PASS", - "duration":"1.571 seconds", + "duration":"0.065 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, diff --git a/docs/spock/reports/ut.optimization.ADAM_Spec.json b/docs/spock/reports/ut.optimization.ADAM_Spec.json index 6ce7d8637..1663346ef 100644 --- a/docs/spock/reports/ut.optimization.ADAM_Spec.json +++ b/docs/spock/reports/ut.optimization.ADAM_Spec.json @@ -1,22 +1,22 @@ { "className":"ut.optimization.ADAM_Spec", "title":"", - "narrative":"ADAM is a more powerful alternative to the classical stochastic gradient descent.\n It combines the best properties of the AdaGrad and the RMSProp algorithms, which makes\n it especially well suited for sparse gradients and noisy data.\n Adam is the most popular among the adaptive optimizers\n because its adaptive learning rate working so well with sparse datasets.", + "narrative":"ADAM is a more powerful alternative to the classical stochastic gradient descent. 
\n It combines the best properties of the AdaGrad and the RMSProp algorithms, which makes \n it especially well suited for sparse gradients and noisy data.\n Adam is the most popular among the adaptive optimizers\n because its adaptive learning rate working so well with sparse datasets.", "subjects":["neureka.optimization.Optimizer"], "statistics":{ - "runs":"3", + "runs":"19", "successRate":"100.0%", "failures":"0", "errors":"0", "skipped":"0", - "duration":"0.050 seconds" + "duration":"0.092 seconds" }, "headers":["\n The code below assumes that for we\n have the following 2 variables setup\n throughout every data table iteration:\n ```\n Tensor w = Tensor.of(0d)\n Optimizer o = Optimizer.ADAM.create(w) \n w.set(o) \n ```\n "],"tags":{},"see":[], "features":[ { - "id":"ADAM optimizes according to expected inputs", + "id":"ADAM optimizes according to expected inputs [0]", "result":"PASS", - "duration":"0.026 seconds", + "duration":"0.010 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -33,9 +33,231 @@ }, { - "id":"Equations used by ADAM return expected result.", + "id":"ADAM optimizes according to expected inputs [1]", "result":"PASS", - "duration":"0.011 seconds", + "duration":"0.005 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A new scalar gradient tensor is being created.","code":["Tensor g = Tensor.of(expectedWeight)"]}, + + {"kind":"and","text":"The following input is being applied to the tensor (and internal optimizer)...","code":["w.set( Tensor.of( (double)gradient ) )","w.applyGradient()"]}, + + {"kind":"expect","text":"The following state emerges:","code":["w.toString().contains(g.toString())","w.shape.hashCode()==g.shape.hashCode()","w.strides().hashCode()==g.strides().hashCode()","w.indicesMap().hashCode()==g.indicesMap().hashCode()","w.spread().hashCode()==g.spread().hashCode()","w.offset().hashCode()==g.offset().hashCode()"]}, + + {"kind":"where","text":"","code":{"gradient":["-3","-3","2","-3","2","2","-4","-3","-3","2"],"expectedWeight":["0.009999999666666677","0.01999","0.02426","0.03034","0.03332","0.03409","0.03738","0.04194","0.04744","0.05112"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"ADAM optimizes according to expected inputs [2]", + "result":"PASS", + "duration":"0.004 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A new scalar gradient tensor is being created.","code":["Tensor g = Tensor.of(expectedWeight)"]}, + + {"kind":"and","text":"The following input is being applied to the tensor (and internal optimizer)...","code":["w.set( Tensor.of( (double)gradient ) )","w.applyGradient()"]}, + + {"kind":"expect","text":"The following state emerges:","code":["w.toString().contains(g.toString())","w.shape.hashCode()==g.shape.hashCode()","w.strides().hashCode()==g.strides().hashCode()","w.indicesMap().hashCode()==g.indicesMap().hashCode()","w.spread().hashCode()==g.spread().hashCode()","w.offset().hashCode()==g.offset().hashCode()"]}, + + {"kind":"where","text":"","code":{"gradient":["-3","-3","2","-3","2","2","-4","-3","-3","2"],"expectedWeight":["0.009999999666666677","0.01999","0.02426","0.03034","0.03332","0.03409","0.03738","0.04194","0.04744","0.05112"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"ADAM optimizes according to expected inputs [3]", + "result":"PASS", + "duration":"0.004 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + 
{"kind":"given","text":"A new scalar gradient tensor is being created.","code":["Tensor g = Tensor.of(expectedWeight)"]}, + + {"kind":"and","text":"The following input is being applied to the tensor (and internal optimizer)...","code":["w.set( Tensor.of( (double)gradient ) )","w.applyGradient()"]}, + + {"kind":"expect","text":"The following state emerges:","code":["w.toString().contains(g.toString())","w.shape.hashCode()==g.shape.hashCode()","w.strides().hashCode()==g.strides().hashCode()","w.indicesMap().hashCode()==g.indicesMap().hashCode()","w.spread().hashCode()==g.spread().hashCode()","w.offset().hashCode()==g.offset().hashCode()"]}, + + {"kind":"where","text":"","code":{"gradient":["-3","-3","2","-3","2","2","-4","-3","-3","2"],"expectedWeight":["0.009999999666666677","0.01999","0.02426","0.03034","0.03332","0.03409","0.03738","0.04194","0.04744","0.05112"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"ADAM optimizes according to expected inputs [4]", + "result":"PASS", + "duration":"0.004 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A new scalar gradient tensor is being created.","code":["Tensor g = Tensor.of(expectedWeight)"]}, + + {"kind":"and","text":"The following input is being applied to the tensor (and internal optimizer)...","code":["w.set( Tensor.of( (double)gradient ) )","w.applyGradient()"]}, + + {"kind":"expect","text":"The following state emerges:","code":["w.toString().contains(g.toString())","w.shape.hashCode()==g.shape.hashCode()","w.strides().hashCode()==g.strides().hashCode()","w.indicesMap().hashCode()==g.indicesMap().hashCode()","w.spread().hashCode()==g.spread().hashCode()","w.offset().hashCode()==g.offset().hashCode()"]}, + + {"kind":"where","text":"","code":{"gradient":["-3","-3","2","-3","2","2","-4","-3","-3","2"],"expectedWeight":["0.009999999666666677","0.01999","0.02426","0.03034","0.03332","0.03409","0.03738","0.04194","0.04744","0.05112"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"ADAM optimizes according to expected inputs [5]", + "result":"PASS", + "duration":"0.004 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A new scalar gradient tensor is being created.","code":["Tensor g = Tensor.of(expectedWeight)"]}, + + {"kind":"and","text":"The following input is being applied to the tensor (and internal optimizer)...","code":["w.set( Tensor.of( (double)gradient ) )","w.applyGradient()"]}, + + {"kind":"expect","text":"The following state emerges:","code":["w.toString().contains(g.toString())","w.shape.hashCode()==g.shape.hashCode()","w.strides().hashCode()==g.strides().hashCode()","w.indicesMap().hashCode()==g.indicesMap().hashCode()","w.spread().hashCode()==g.spread().hashCode()","w.offset().hashCode()==g.offset().hashCode()"]}, + + {"kind":"where","text":"","code":{"gradient":["-3","-3","2","-3","2","2","-4","-3","-3","2"],"expectedWeight":["0.009999999666666677","0.01999","0.02426","0.03034","0.03332","0.03409","0.03738","0.04194","0.04744","0.05112"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"ADAM optimizes according to expected inputs [6]", + "result":"PASS", + "duration":"0.004 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A new scalar gradient tensor is being created.","code":["Tensor g = Tensor.of(expectedWeight)"]}, + + {"kind":"and","text":"The following input is being applied to the tensor 
(and internal optimizer)...","code":["w.set( Tensor.of( (double)gradient ) )","w.applyGradient()"]}, + + {"kind":"expect","text":"The following state emerges:","code":["w.toString().contains(g.toString())","w.shape.hashCode()==g.shape.hashCode()","w.strides().hashCode()==g.strides().hashCode()","w.indicesMap().hashCode()==g.indicesMap().hashCode()","w.spread().hashCode()==g.spread().hashCode()","w.offset().hashCode()==g.offset().hashCode()"]}, + + {"kind":"where","text":"","code":{"gradient":["-3","-3","2","-3","2","2","-4","-3","-3","2"],"expectedWeight":["0.009999999666666677","0.01999","0.02426","0.03034","0.03332","0.03409","0.03738","0.04194","0.04744","0.05112"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"ADAM optimizes according to expected inputs [7]", + "result":"PASS", + "duration":"0.005 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A new scalar gradient tensor is being created.","code":["Tensor g = Tensor.of(expectedWeight)"]}, + + {"kind":"and","text":"The following input is being applied to the tensor (and internal optimizer)...","code":["w.set( Tensor.of( (double)gradient ) )","w.applyGradient()"]}, + + {"kind":"expect","text":"The following state emerges:","code":["w.toString().contains(g.toString())","w.shape.hashCode()==g.shape.hashCode()","w.strides().hashCode()==g.strides().hashCode()","w.indicesMap().hashCode()==g.indicesMap().hashCode()","w.spread().hashCode()==g.spread().hashCode()","w.offset().hashCode()==g.offset().hashCode()"]}, + + {"kind":"where","text":"","code":{"gradient":["-3","-3","2","-3","2","2","-4","-3","-3","2"],"expectedWeight":["0.009999999666666677","0.01999","0.02426","0.03034","0.03332","0.03409","0.03738","0.04194","0.04744","0.05112"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"ADAM optimizes according to expected inputs [8]", + "result":"PASS", + "duration":"0.004 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A new scalar gradient tensor is being created.","code":["Tensor g = Tensor.of(expectedWeight)"]}, + + {"kind":"and","text":"The following input is being applied to the tensor (and internal optimizer)...","code":["w.set( Tensor.of( (double)gradient ) )","w.applyGradient()"]}, + + {"kind":"expect","text":"The following state emerges:","code":["w.toString().contains(g.toString())","w.shape.hashCode()==g.shape.hashCode()","w.strides().hashCode()==g.strides().hashCode()","w.indicesMap().hashCode()==g.indicesMap().hashCode()","w.spread().hashCode()==g.spread().hashCode()","w.offset().hashCode()==g.offset().hashCode()"]}, + + {"kind":"where","text":"","code":{"gradient":["-3","-3","2","-3","2","2","-4","-3","-3","2"],"expectedWeight":["0.009999999666666677","0.01999","0.02426","0.03034","0.03332","0.03409","0.03738","0.04194","0.04744","0.05112"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"ADAM optimizes according to expected inputs [9]", + "result":"PASS", + "duration":"0.003 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A new scalar gradient tensor is being created.","code":["Tensor g = Tensor.of(expectedWeight)"]}, + + {"kind":"and","text":"The following input is being applied to the tensor (and internal optimizer)...","code":["w.set( Tensor.of( (double)gradient ) )","w.applyGradient()"]}, + + {"kind":"expect","text":"The following state 
emerges:","code":["w.toString().contains(g.toString())","w.shape.hashCode()==g.shape.hashCode()","w.strides().hashCode()==g.strides().hashCode()","w.indicesMap().hashCode()==g.indicesMap().hashCode()","w.spread().hashCode()==g.spread().hashCode()","w.offset().hashCode()==g.offset().hashCode()"]}, + + {"kind":"where","text":"","code":{"gradient":["-3","-3","2","-3","2","2","-4","-3","-3","2"],"expectedWeight":["0.009999999666666677","0.01999","0.02426","0.03034","0.03332","0.03409","0.03738","0.04194","0.04744","0.05112"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Equations used by ADAM return expected result. [0]", + "result":"PASS", + "duration":"0.010 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create tensors given an equation and array or list of input tensors...","code":["var t1 = Tensor.of( expression, inputs )","var t2 = Tensor.of( expression, inputs as Float[] )","var t3 = Tensor.of( expression, true, inputs.collect(it -> Tensor.of(it) ) )","var t4 = Tensor.of( expression, false, inputs.collect(it -> Tensor.of(it) ) )","var t5 = Tensor.of( expression, false, inputs.collect(it -> Tensor.of(it) ) as Tensor[] )"]}, + + {"kind":"expect","text":"...this produces the expected result String.","code":["t1.toString().contains( expected )","t2.toString().contains( expected.replace(\".29999\", \".30000\") )","t3.toString().contains( expected )","t4.toString().contains( expected )","t5.toString().contains( expected )"]}, + + {"kind":"where","text":"The following expressions, inputs and expected String results are being used :","code":{"expression":["\"( 1 - I[0]) * I[1]\"","\"I[0] * I[1] + (1 - I[2]) * I[3]\"","\"I[0] / ( 1 - I[1] )\"","\"I[0] ** 0.5 + I[1]\"","\"I[0] - I[1] * I[2] /( I[3] ** 0.5 + I[4] )\""],"inputs":["[0.9d, -3d]","[0.9d, 0d, 0.9d, -3d]","[-0.3d, 0.9d]","[9d, 1e-7d]","[0d, 0.01d, -3d, 9d, 1e-7d ]"],"expected":["\"(1):[-0.29999]\"","\"(1):[-0.29999]\"","\"(1):[-3.0]\"","\"(1):[3.0]\"","\"(1):[0.00999]\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Equations used by ADAM return expected result. 
[1]", + "result":"PASS", + "duration":"0.004 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create tensors given an equation and array or list of input tensors...","code":["var t1 = Tensor.of( expression, inputs )","var t2 = Tensor.of( expression, inputs as Float[] )","var t3 = Tensor.of( expression, true, inputs.collect(it -> Tensor.of(it) ) )","var t4 = Tensor.of( expression, false, inputs.collect(it -> Tensor.of(it) ) )","var t5 = Tensor.of( expression, false, inputs.collect(it -> Tensor.of(it) ) as Tensor[] )"]}, + + {"kind":"expect","text":"...this produces the expected result String.","code":["t1.toString().contains( expected )","t2.toString().contains( expected.replace(\".29999\", \".30000\") )","t3.toString().contains( expected )","t4.toString().contains( expected )","t5.toString().contains( expected )"]}, + + {"kind":"where","text":"The following expressions, inputs and expected String results are being used :","code":{"expression":["\"( 1 - I[0]) * I[1]\"","\"I[0] * I[1] + (1 - I[2]) * I[3]\"","\"I[0] / ( 1 - I[1] )\"","\"I[0] ** 0.5 + I[1]\"","\"I[0] - I[1] * I[2] /( I[3] ** 0.5 + I[4] )\""],"inputs":["[0.9d, -3d]","[0.9d, 0d, 0.9d, -3d]","[-0.3d, 0.9d]","[9d, 1e-7d]","[0d, 0.01d, -3d, 9d, 1e-7d ]"],"expected":["\"(1):[-0.29999]\"","\"(1):[-0.29999]\"","\"(1):[-3.0]\"","\"(1):[3.0]\"","\"(1):[0.00999]\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Equations used by ADAM return expected result. [2]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create tensors given an equation and array or list of input tensors...","code":["var t1 = Tensor.of( expression, inputs )","var t2 = Tensor.of( expression, inputs as Float[] )","var t3 = Tensor.of( expression, true, inputs.collect(it -> Tensor.of(it) ) )","var t4 = Tensor.of( expression, false, inputs.collect(it -> Tensor.of(it) ) )","var t5 = Tensor.of( expression, false, inputs.collect(it -> Tensor.of(it) ) as Tensor[] )"]}, + + {"kind":"expect","text":"...this produces the expected result String.","code":["t1.toString().contains( expected )","t2.toString().contains( expected.replace(\".29999\", \".30000\") )","t3.toString().contains( expected )","t4.toString().contains( expected )","t5.toString().contains( expected )"]}, + + {"kind":"where","text":"The following expressions, inputs and expected String results are being used :","code":{"expression":["\"( 1 - I[0]) * I[1]\"","\"I[0] * I[1] + (1 - I[2]) * I[3]\"","\"I[0] / ( 1 - I[1] )\"","\"I[0] ** 0.5 + I[1]\"","\"I[0] - I[1] * I[2] /( I[3] ** 0.5 + I[4] )\""],"inputs":["[0.9d, -3d]","[0.9d, 0d, 0.9d, -3d]","[-0.3d, 0.9d]","[9d, 1e-7d]","[0d, 0.01d, -3d, 9d, 1e-7d ]"],"expected":["\"(1):[-0.29999]\"","\"(1):[-0.29999]\"","\"(1):[-3.0]\"","\"(1):[3.0]\"","\"(1):[0.00999]\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Equations used by ADAM return expected result. [3]", + "result":"PASS", + "duration":"0.001 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -50,7 +272,75 @@ }, { - "id":"Equations \"I[0]*I[1]+(1-I[2])*I[3]\" and \"(1-I[0])*I[1]\" used within ADAM return expected results.", + "id":"Equations used by ADAM return expected result. 
[4]", + "result":"PASS", + "duration":"0.008 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create tensors given an equation and array or list of input tensors...","code":["var t1 = Tensor.of( expression, inputs )","var t2 = Tensor.of( expression, inputs as Float[] )","var t3 = Tensor.of( expression, true, inputs.collect(it -> Tensor.of(it) ) )","var t4 = Tensor.of( expression, false, inputs.collect(it -> Tensor.of(it) ) )","var t5 = Tensor.of( expression, false, inputs.collect(it -> Tensor.of(it) ) as Tensor[] )"]}, + + {"kind":"expect","text":"...this produces the expected result String.","code":["t1.toString().contains( expected )","t2.toString().contains( expected.replace(\".29999\", \".30000\") )","t3.toString().contains( expected )","t4.toString().contains( expected )","t5.toString().contains( expected )"]}, + + {"kind":"where","text":"The following expressions, inputs and expected String results are being used :","code":{"expression":["\"( 1 - I[0]) * I[1]\"","\"I[0] * I[1] + (1 - I[2]) * I[3]\"","\"I[0] / ( 1 - I[1] )\"","\"I[0] ** 0.5 + I[1]\"","\"I[0] - I[1] * I[2] /( I[3] ** 0.5 + I[4] )\""],"inputs":["[0.9d, -3d]","[0.9d, 0d, 0.9d, -3d]","[-0.3d, 0.9d]","[9d, 1e-7d]","[0d, 0.01d, -3d, 9d, 1e-7d ]"],"expected":["\"(1):[-0.29999]\"","\"(1):[-0.29999]\"","\"(1):[-3.0]\"","\"(1):[3.0]\"","\"(1):[0.00999]\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Equations \"I[0]*I[1]+(1-I[2])*I[3]\" and \"(1-I[0])*I[1]\" used within ADAM return expected results. [0]", + "result":"PASS", + "duration":"0.002 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["given : Function f = Function.of(expression)"]}, + + {"kind":"expect","text":"","code":["expect : output == f(input)"]}, + + {"kind":"where","text":"The following expressions, inputs and expected String results are being used :","code":{"expression":["\"I[0]*I[1]+(1-I[2])*I[3]\"","\"I[0]*I[1]+(1-I[2])*I[3]\"","\"(1-I[0])*I[1]\"","\"(1-I[0])*I[1]\""],"input":["new double[]{0.9, 0.0, 0.9, -3.0}","new double[]{-0.9, 2.0, 0.2, 1.0}","new double[]{0.9, -3.0}","new double[]{-0.9, 2.0}"],"output":["-0.29999999999999993","-1.0","-0.29999999999999993","3.8"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Equations \"I[0]*I[1]+(1-I[2])*I[3]\" and \"(1-I[0])*I[1]\" used within ADAM return expected results. [1]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["given : Function f = Function.of(expression)"]}, + + {"kind":"expect","text":"","code":["expect : output == f(input)"]}, + + {"kind":"where","text":"The following expressions, inputs and expected String results are being used :","code":{"expression":["\"I[0]*I[1]+(1-I[2])*I[3]\"","\"I[0]*I[1]+(1-I[2])*I[3]\"","\"(1-I[0])*I[1]\"","\"(1-I[0])*I[1]\""],"input":["new double[]{0.9, 0.0, 0.9, -3.0}","new double[]{-0.9, 2.0, 0.2, 1.0}","new double[]{0.9, -3.0}","new double[]{-0.9, 2.0}"],"output":["-0.29999999999999993","-1.0","-0.29999999999999993","3.8"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Equations \"I[0]*I[1]+(1-I[2])*I[3]\" and \"(1-I[0])*I[1]\" used within ADAM return expected results. 
[2]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["given : Function f = Function.of(expression)"]}, + + {"kind":"expect","text":"","code":["expect : output == f(input)"]}, + + {"kind":"where","text":"The following expressions, inputs and expected String results are being used :","code":{"expression":["\"I[0]*I[1]+(1-I[2])*I[3]\"","\"I[0]*I[1]+(1-I[2])*I[3]\"","\"(1-I[0])*I[1]\"","\"(1-I[0])*I[1]\""],"input":["new double[]{0.9, 0.0, 0.9, -3.0}","new double[]{-0.9, 2.0, 0.2, 1.0}","new double[]{0.9, -3.0}","new double[]{-0.9, 2.0}"],"output":["-0.29999999999999993","-1.0","-0.29999999999999993","3.8"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Equations \"I[0]*I[1]+(1-I[2])*I[3]\" and \"(1-I[0])*I[1]\" used within ADAM return expected results. [3]", "result":"PASS", "duration":"0", "iterations":{ diff --git a/docs/spock/reports/ut.optimization.AdaGrad_Spec.json b/docs/spock/reports/ut.optimization.AdaGrad_Spec.json index 6e35a9693..bf34d4d89 100644 --- a/docs/spock/reports/ut.optimization.AdaGrad_Spec.json +++ b/docs/spock/reports/ut.optimization.AdaGrad_Spec.json @@ -4,19 +4,190 @@ "narrative":"", "subjects":["neureka.optimization.Optimizer"], "statistics":{ - "runs":"1", + "runs":"10", "successRate":"100.0%", "failures":"0", "errors":"0", "skipped":"0", - "duration":"0.013 seconds" + "duration":"0.028 seconds" }, "headers":["\n The code below assumes that for we\n have the following 2 variables setup\n throughout every data table iteration:\n ```\n Tensor w = Tensor.of(0d)\n Optimizer o = Optimizer.AdaGrad.create(w) \n w.set(o) \n ```\n "],"tags":{},"see":[], "features":[ { - "id":"AdaGrad optimizes according to expected inputs", + "id":"AdaGrad optimizes according to expected inputs [0]", "result":"PASS", - "duration":"0.010 seconds", + "duration":"0.005 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A new scalar gradient tensor is being created.","code":["Tensor g = Tensor.of(expectedWeight)"]}, + + {"kind":"and","text":"The following input is being applied to the tensor (and internal optimizer)...","code":["w.set( Tensor.of( (double)gradient ) )","w.applyGradient()"]}, + + {"kind":"expect","text":"The following state emerges:","code":["w.toString().contains(g.toString())","w.shape.hashCode()==g.shape.hashCode()","w.strides().hashCode()==g.strides().hashCode()","w.indicesMap().hashCode()==g.indicesMap().hashCode()","w.spread().hashCode()==g.spread().hashCode()","w.offset().hashCode()==g.offset().hashCode()"]}, + + {"kind":"where","text":"","code":{"gradient":["-3","-3","2","-3","2","2","-4","-3","-3","2"],"expectedWeight":["0.00999","0.01707","0.01280001","0.01819","0.01481","0.01161","0.0170001","0.02075","0.02426","0.02198"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"AdaGrad optimizes according to expected inputs [1]", + "result":"PASS", + "duration":"0.002 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A new scalar gradient tensor is being created.","code":["Tensor g = Tensor.of(expectedWeight)"]}, + + {"kind":"and","text":"The following input is being applied to the tensor (and internal optimizer)...","code":["w.set( Tensor.of( (double)gradient ) )","w.applyGradient()"]}, + + {"kind":"expect","text":"The following state 
emerges:","code":["w.toString().contains(g.toString())","w.shape.hashCode()==g.shape.hashCode()","w.strides().hashCode()==g.strides().hashCode()","w.indicesMap().hashCode()==g.indicesMap().hashCode()","w.spread().hashCode()==g.spread().hashCode()","w.offset().hashCode()==g.offset().hashCode()"]}, + + {"kind":"where","text":"","code":{"gradient":["-3","-3","2","-3","2","2","-4","-3","-3","2"],"expectedWeight":["0.00999","0.01707","0.01280001","0.01819","0.01481","0.01161","0.0170001","0.02075","0.02426","0.02198"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"AdaGrad optimizes according to expected inputs [2]", + "result":"PASS", + "duration":"0.002 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A new scalar gradient tensor is being created.","code":["Tensor g = Tensor.of(expectedWeight)"]}, + + {"kind":"and","text":"The following input is being applied to the tensor (and internal optimizer)...","code":["w.set( Tensor.of( (double)gradient ) )","w.applyGradient()"]}, + + {"kind":"expect","text":"The following state emerges:","code":["w.toString().contains(g.toString())","w.shape.hashCode()==g.shape.hashCode()","w.strides().hashCode()==g.strides().hashCode()","w.indicesMap().hashCode()==g.indicesMap().hashCode()","w.spread().hashCode()==g.spread().hashCode()","w.offset().hashCode()==g.offset().hashCode()"]}, + + {"kind":"where","text":"","code":{"gradient":["-3","-3","2","-3","2","2","-4","-3","-3","2"],"expectedWeight":["0.00999","0.01707","0.01280001","0.01819","0.01481","0.01161","0.0170001","0.02075","0.02426","0.02198"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"AdaGrad optimizes according to expected inputs [3]", + "result":"PASS", + "duration":"0.002 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A new scalar gradient tensor is being created.","code":["Tensor g = Tensor.of(expectedWeight)"]}, + + {"kind":"and","text":"The following input is being applied to the tensor (and internal optimizer)...","code":["w.set( Tensor.of( (double)gradient ) )","w.applyGradient()"]}, + + {"kind":"expect","text":"The following state emerges:","code":["w.toString().contains(g.toString())","w.shape.hashCode()==g.shape.hashCode()","w.strides().hashCode()==g.strides().hashCode()","w.indicesMap().hashCode()==g.indicesMap().hashCode()","w.spread().hashCode()==g.spread().hashCode()","w.offset().hashCode()==g.offset().hashCode()"]}, + + {"kind":"where","text":"","code":{"gradient":["-3","-3","2","-3","2","2","-4","-3","-3","2"],"expectedWeight":["0.00999","0.01707","0.01280001","0.01819","0.01481","0.01161","0.0170001","0.02075","0.02426","0.02198"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"AdaGrad optimizes according to expected inputs [4]", + "result":"PASS", + "duration":"0.002 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A new scalar gradient tensor is being created.","code":["Tensor g = Tensor.of(expectedWeight)"]}, + + {"kind":"and","text":"The following input is being applied to the tensor (and internal optimizer)...","code":["w.set( Tensor.of( (double)gradient ) )","w.applyGradient()"]}, + + {"kind":"expect","text":"The following state 
emerges:","code":["w.toString().contains(g.toString())","w.shape.hashCode()==g.shape.hashCode()","w.strides().hashCode()==g.strides().hashCode()","w.indicesMap().hashCode()==g.indicesMap().hashCode()","w.spread().hashCode()==g.spread().hashCode()","w.offset().hashCode()==g.offset().hashCode()"]}, + + {"kind":"where","text":"","code":{"gradient":["-3","-3","2","-3","2","2","-4","-3","-3","2"],"expectedWeight":["0.00999","0.01707","0.01280001","0.01819","0.01481","0.01161","0.0170001","0.02075","0.02426","0.02198"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"AdaGrad optimizes according to expected inputs [5]", + "result":"PASS", + "duration":"0.002 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A new scalar gradient tensor is being created.","code":["Tensor g = Tensor.of(expectedWeight)"]}, + + {"kind":"and","text":"The following input is being applied to the tensor (and internal optimizer)...","code":["w.set( Tensor.of( (double)gradient ) )","w.applyGradient()"]}, + + {"kind":"expect","text":"The following state emerges:","code":["w.toString().contains(g.toString())","w.shape.hashCode()==g.shape.hashCode()","w.strides().hashCode()==g.strides().hashCode()","w.indicesMap().hashCode()==g.indicesMap().hashCode()","w.spread().hashCode()==g.spread().hashCode()","w.offset().hashCode()==g.offset().hashCode()"]}, + + {"kind":"where","text":"","code":{"gradient":["-3","-3","2","-3","2","2","-4","-3","-3","2"],"expectedWeight":["0.00999","0.01707","0.01280001","0.01819","0.01481","0.01161","0.0170001","0.02075","0.02426","0.02198"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"AdaGrad optimizes according to expected inputs [6]", + "result":"PASS", + "duration":"0.002 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A new scalar gradient tensor is being created.","code":["Tensor g = Tensor.of(expectedWeight)"]}, + + {"kind":"and","text":"The following input is being applied to the tensor (and internal optimizer)...","code":["w.set( Tensor.of( (double)gradient ) )","w.applyGradient()"]}, + + {"kind":"expect","text":"The following state emerges:","code":["w.toString().contains(g.toString())","w.shape.hashCode()==g.shape.hashCode()","w.strides().hashCode()==g.strides().hashCode()","w.indicesMap().hashCode()==g.indicesMap().hashCode()","w.spread().hashCode()==g.spread().hashCode()","w.offset().hashCode()==g.offset().hashCode()"]}, + + {"kind":"where","text":"","code":{"gradient":["-3","-3","2","-3","2","2","-4","-3","-3","2"],"expectedWeight":["0.00999","0.01707","0.01280001","0.01819","0.01481","0.01161","0.0170001","0.02075","0.02426","0.02198"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"AdaGrad optimizes according to expected inputs [7]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A new scalar gradient tensor is being created.","code":["Tensor g = Tensor.of(expectedWeight)"]}, + + {"kind":"and","text":"The following input is being applied to the tensor (and internal optimizer)...","code":["w.set( Tensor.of( (double)gradient ) )","w.applyGradient()"]}, + + {"kind":"expect","text":"The following state 
emerges:","code":["w.toString().contains(g.toString())","w.shape.hashCode()==g.shape.hashCode()","w.strides().hashCode()==g.strides().hashCode()","w.indicesMap().hashCode()==g.indicesMap().hashCode()","w.spread().hashCode()==g.spread().hashCode()","w.offset().hashCode()==g.offset().hashCode()"]}, + + {"kind":"where","text":"","code":{"gradient":["-3","-3","2","-3","2","2","-4","-3","-3","2"],"expectedWeight":["0.00999","0.01707","0.01280001","0.01819","0.01481","0.01161","0.0170001","0.02075","0.02426","0.02198"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"AdaGrad optimizes according to expected inputs [8]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A new scalar gradient tensor is being created.","code":["Tensor g = Tensor.of(expectedWeight)"]}, + + {"kind":"and","text":"The following input is being applied to the tensor (and internal optimizer)...","code":["w.set( Tensor.of( (double)gradient ) )","w.applyGradient()"]}, + + {"kind":"expect","text":"The following state emerges:","code":["w.toString().contains(g.toString())","w.shape.hashCode()==g.shape.hashCode()","w.strides().hashCode()==g.strides().hashCode()","w.indicesMap().hashCode()==g.indicesMap().hashCode()","w.spread().hashCode()==g.spread().hashCode()","w.offset().hashCode()==g.offset().hashCode()"]}, + + {"kind":"where","text":"","code":{"gradient":["-3","-3","2","-3","2","2","-4","-3","-3","2"],"expectedWeight":["0.00999","0.01707","0.01280001","0.01819","0.01481","0.01161","0.0170001","0.02075","0.02426","0.02198"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"AdaGrad optimizes according to expected inputs [9]", + "result":"PASS", + "duration":"0.001 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, diff --git a/docs/spock/reports/ut.optimization.Momentum_Spec.json b/docs/spock/reports/ut.optimization.Momentum_Spec.json index d24a66def..3c9684da7 100644 --- a/docs/spock/reports/ut.optimization.Momentum_Spec.json +++ b/docs/spock/reports/ut.optimization.Momentum_Spec.json @@ -1,20 +1,191 @@ { "className":"ut.optimization.Momentum_Spec", "title":"", - "narrative":"Momentum is an extension to the gradient descent optimization\n algorithm that allows the search to build inertia in a direction\n in the search space and overcome the oscillations of noisy\n gradients and coast across flat spots of the search space.", + "narrative":"Momentum is an extension to the gradient descent optimization \n algorithm that allows the search to build inertia in a direction \n in the search space and overcome the oscillations of noisy \n gradients and coast across flat spots of the search space.", "subjects":["neureka.optimization.Optimizer"], "statistics":{ - "runs":"1", + "runs":"10", "successRate":"100.0%", "failures":"0", "errors":"0", "skipped":"0", - "duration":"0.007 seconds" + "duration":"0.018 seconds" }, "headers":["\n The code below assumes that for we\n have the following 2 variables setup\n throughout every data table iteration:\n ```\n Tensor w = Tensor.of(0d)\n Optimizer o = Optimizer.Momentum.create(w) \n w.set(o) \n ```\n "],"tags":{},"see":[], "features":[ { - "id":"Momentum optimizes according to expected inputs", + "id":"Momentum optimizes according to expected inputs [0]", + "result":"PASS", + "duration":"0.005 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A new scalar gradient tensor is being 
created.","code":["Tensor g = Tensor.of(expectedWeight)"]}, + + {"kind":"and","text":"The following input is being applied to the tensor (and internal optimizer)...","code":["w.set( Tensor.of( (double) gradient ) )","w.applyGradient()"]}, + + {"kind":"expect","text":"The following state emerges:","code":["w.toString().contains(g.toString())","w.shape.hashCode()==g.shape.hashCode()","w.strides().hashCode()==g.strides().hashCode()","w.indicesMap().hashCode()==g.indicesMap().hashCode()","w.spread().hashCode()==g.spread().hashCode()","w.offset().hashCode()==g.offset().hashCode()"]}, + + {"kind":"where","text":"","code":{"gradient":["-3","-3","2","-3","2","2","-4","-3","-3","2"],"expectedWeight":["0.00299","0.00869","0.01182","0.01764","0.02088","0.02179","0.02661","0.03395","0.04355","0.050200001"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Momentum optimizes according to expected inputs [1]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A new scalar gradient tensor is being created.","code":["Tensor g = Tensor.of(expectedWeight)"]}, + + {"kind":"and","text":"The following input is being applied to the tensor (and internal optimizer)...","code":["w.set( Tensor.of( (double) gradient ) )","w.applyGradient()"]}, + + {"kind":"expect","text":"The following state emerges:","code":["w.toString().contains(g.toString())","w.shape.hashCode()==g.shape.hashCode()","w.strides().hashCode()==g.strides().hashCode()","w.indicesMap().hashCode()==g.indicesMap().hashCode()","w.spread().hashCode()==g.spread().hashCode()","w.offset().hashCode()==g.offset().hashCode()"]}, + + {"kind":"where","text":"","code":{"gradient":["-3","-3","2","-3","2","2","-4","-3","-3","2"],"expectedWeight":["0.00299","0.00869","0.01182","0.01764","0.02088","0.02179","0.02661","0.03395","0.04355","0.050200001"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Momentum optimizes according to expected inputs [2]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A new scalar gradient tensor is being created.","code":["Tensor g = Tensor.of(expectedWeight)"]}, + + {"kind":"and","text":"The following input is being applied to the tensor (and internal optimizer)...","code":["w.set( Tensor.of( (double) gradient ) )","w.applyGradient()"]}, + + {"kind":"expect","text":"The following state emerges:","code":["w.toString().contains(g.toString())","w.shape.hashCode()==g.shape.hashCode()","w.strides().hashCode()==g.strides().hashCode()","w.indicesMap().hashCode()==g.indicesMap().hashCode()","w.spread().hashCode()==g.spread().hashCode()","w.offset().hashCode()==g.offset().hashCode()"]}, + + {"kind":"where","text":"","code":{"gradient":["-3","-3","2","-3","2","2","-4","-3","-3","2"],"expectedWeight":["0.00299","0.00869","0.01182","0.01764","0.02088","0.02179","0.02661","0.03395","0.04355","0.050200001"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Momentum optimizes according to expected inputs [3]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A new scalar gradient tensor is being created.","code":["Tensor g = Tensor.of(expectedWeight)"]}, + + {"kind":"and","text":"The following input is being applied to the tensor (and internal optimizer)...","code":["w.set( Tensor.of( (double) gradient ) 
)","w.applyGradient()"]}, + + {"kind":"expect","text":"The following state emerges:","code":["w.toString().contains(g.toString())","w.shape.hashCode()==g.shape.hashCode()","w.strides().hashCode()==g.strides().hashCode()","w.indicesMap().hashCode()==g.indicesMap().hashCode()","w.spread().hashCode()==g.spread().hashCode()","w.offset().hashCode()==g.offset().hashCode()"]}, + + {"kind":"where","text":"","code":{"gradient":["-3","-3","2","-3","2","2","-4","-3","-3","2"],"expectedWeight":["0.00299","0.00869","0.01182","0.01764","0.02088","0.02179","0.02661","0.03395","0.04355","0.050200001"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Momentum optimizes according to expected inputs [4]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A new scalar gradient tensor is being created.","code":["Tensor g = Tensor.of(expectedWeight)"]}, + + {"kind":"and","text":"The following input is being applied to the tensor (and internal optimizer)...","code":["w.set( Tensor.of( (double) gradient ) )","w.applyGradient()"]}, + + {"kind":"expect","text":"The following state emerges:","code":["w.toString().contains(g.toString())","w.shape.hashCode()==g.shape.hashCode()","w.strides().hashCode()==g.strides().hashCode()","w.indicesMap().hashCode()==g.indicesMap().hashCode()","w.spread().hashCode()==g.spread().hashCode()","w.offset().hashCode()==g.offset().hashCode()"]}, + + {"kind":"where","text":"","code":{"gradient":["-3","-3","2","-3","2","2","-4","-3","-3","2"],"expectedWeight":["0.00299","0.00869","0.01182","0.01764","0.02088","0.02179","0.02661","0.03395","0.04355","0.050200001"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Momentum optimizes according to expected inputs [5]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A new scalar gradient tensor is being created.","code":["Tensor g = Tensor.of(expectedWeight)"]}, + + {"kind":"and","text":"The following input is being applied to the tensor (and internal optimizer)...","code":["w.set( Tensor.of( (double) gradient ) )","w.applyGradient()"]}, + + {"kind":"expect","text":"The following state emerges:","code":["w.toString().contains(g.toString())","w.shape.hashCode()==g.shape.hashCode()","w.strides().hashCode()==g.strides().hashCode()","w.indicesMap().hashCode()==g.indicesMap().hashCode()","w.spread().hashCode()==g.spread().hashCode()","w.offset().hashCode()==g.offset().hashCode()"]}, + + {"kind":"where","text":"","code":{"gradient":["-3","-3","2","-3","2","2","-4","-3","-3","2"],"expectedWeight":["0.00299","0.00869","0.01182","0.01764","0.02088","0.02179","0.02661","0.03395","0.04355","0.050200001"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Momentum optimizes according to expected inputs [6]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A new scalar gradient tensor is being created.","code":["Tensor g = Tensor.of(expectedWeight)"]}, + + {"kind":"and","text":"The following input is being applied to the tensor (and internal optimizer)...","code":["w.set( Tensor.of( (double) gradient ) )","w.applyGradient()"]}, + + {"kind":"expect","text":"The following state 
emerges:","code":["w.toString().contains(g.toString())","w.shape.hashCode()==g.shape.hashCode()","w.strides().hashCode()==g.strides().hashCode()","w.indicesMap().hashCode()==g.indicesMap().hashCode()","w.spread().hashCode()==g.spread().hashCode()","w.offset().hashCode()==g.offset().hashCode()"]}, + + {"kind":"where","text":"","code":{"gradient":["-3","-3","2","-3","2","2","-4","-3","-3","2"],"expectedWeight":["0.00299","0.00869","0.01182","0.01764","0.02088","0.02179","0.02661","0.03395","0.04355","0.050200001"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Momentum optimizes according to expected inputs [7]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A new scalar gradient tensor is being created.","code":["Tensor g = Tensor.of(expectedWeight)"]}, + + {"kind":"and","text":"The following input is being applied to the tensor (and internal optimizer)...","code":["w.set( Tensor.of( (double) gradient ) )","w.applyGradient()"]}, + + {"kind":"expect","text":"The following state emerges:","code":["w.toString().contains(g.toString())","w.shape.hashCode()==g.shape.hashCode()","w.strides().hashCode()==g.strides().hashCode()","w.indicesMap().hashCode()==g.indicesMap().hashCode()","w.spread().hashCode()==g.spread().hashCode()","w.offset().hashCode()==g.offset().hashCode()"]}, + + {"kind":"where","text":"","code":{"gradient":["-3","-3","2","-3","2","2","-4","-3","-3","2"],"expectedWeight":["0.00299","0.00869","0.01182","0.01764","0.02088","0.02179","0.02661","0.03395","0.04355","0.050200001"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Momentum optimizes according to expected inputs [8]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A new scalar gradient tensor is being created.","code":["Tensor g = Tensor.of(expectedWeight)"]}, + + {"kind":"and","text":"The following input is being applied to the tensor (and internal optimizer)...","code":["w.set( Tensor.of( (double) gradient ) )","w.applyGradient()"]}, + + {"kind":"expect","text":"The following state emerges:","code":["w.toString().contains(g.toString())","w.shape.hashCode()==g.shape.hashCode()","w.strides().hashCode()==g.strides().hashCode()","w.indicesMap().hashCode()==g.indicesMap().hashCode()","w.spread().hashCode()==g.spread().hashCode()","w.offset().hashCode()==g.offset().hashCode()"]}, + + {"kind":"where","text":"","code":{"gradient":["-3","-3","2","-3","2","2","-4","-3","-3","2"],"expectedWeight":["0.00299","0.00869","0.01182","0.01764","0.02088","0.02179","0.02661","0.03395","0.04355","0.050200001"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Momentum optimizes according to expected inputs [9]", "result":"PASS", "duration":"0.001 seconds", "iterations":{ diff --git a/docs/spock/reports/ut.optimization.RMSProp_Spec.json b/docs/spock/reports/ut.optimization.RMSProp_Spec.json index fdb7afcc4..0529efe66 100644 --- a/docs/spock/reports/ut.optimization.RMSProp_Spec.json +++ b/docs/spock/reports/ut.optimization.RMSProp_Spec.json @@ -1,22 +1,193 @@ { "className":"ut.optimization.RMSProp_Spec", "title":"", - "narrative":"**Root Mean Squared Propagation**, or RMSProp, is an extension of gradient\n descent and the AdaGrad version of gradient descent that uses a\n decaying average of partial gradients in the adaptation of the\n step size for each parameter.", + 
"narrative":"**Root Mean Squared Propagation**, or RMSProp, is an extension of gradient \n descent and the AdaGrad version of gradient descent that uses a \n decaying average of partial gradients in the adaptation of the \n step size for each parameter.", "subjects":["neureka.optimization.Optimizer"], "statistics":{ - "runs":"1", + "runs":"10", "successRate":"100.0%", "failures":"0", "errors":"0", "skipped":"0", - "duration":"0.014 seconds" + "duration":"0.028 seconds" }, "headers":["\n The code below assumes that for we\n have the following 2 variables setup\n throughout every data table iteration:\n ```\n Tensor w = Tensor.of(0d)\n Optimizer o = Optimizer.RMSProp.create(w) \n w.set(o) \n ```\n "],"tags":{},"see":[], "features":[ { - "id":"RMSprop optimizes according to expected inputs", + "id":"RMSprop optimizes according to expected inputs [0]", "result":"PASS", - "duration":"0.010 seconds", + "duration":"0.006 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A new scalar gradient tensor is being created.","code":["Tensor g = Tensor.of(expectedWeight)"]}, + + {"kind":"and","text":"The following input is being applied to the tensor (and internal optimizer)...","code":["w.set( Tensor.of( (double) gradient ) )","w.applyGradient()"]}, + + {"kind":"expect","text":"The following state emerges:","code":["w.toString().contains(g.toString())","w.shape.hashCode()==g.shape.hashCode()","w.strides().hashCode()==g.strides().hashCode()","w.indicesMap().hashCode()==g.indicesMap().hashCode()","w.spread().hashCode()==g.spread().hashCode()","w.offset().hashCode()==g.offset().hashCode()"]}, + + {"kind":"where","text":"","code":{"gradient":["-3","-3","2","-3","2","2","-4","-3","-3","2"],"expectedWeight":["0.00316","0.00545","0.00402","0.00586","0.00466","0.00349","0.00544","0.00682","0.00815","0.00725"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"RMSprop optimizes according to expected inputs [1]", + "result":"PASS", + "duration":"0.002 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A new scalar gradient tensor is being created.","code":["Tensor g = Tensor.of(expectedWeight)"]}, + + {"kind":"and","text":"The following input is being applied to the tensor (and internal optimizer)...","code":["w.set( Tensor.of( (double) gradient ) )","w.applyGradient()"]}, + + {"kind":"expect","text":"The following state emerges:","code":["w.toString().contains(g.toString())","w.shape.hashCode()==g.shape.hashCode()","w.strides().hashCode()==g.strides().hashCode()","w.indicesMap().hashCode()==g.indicesMap().hashCode()","w.spread().hashCode()==g.spread().hashCode()","w.offset().hashCode()==g.offset().hashCode()"]}, + + {"kind":"where","text":"","code":{"gradient":["-3","-3","2","-3","2","2","-4","-3","-3","2"],"expectedWeight":["0.00316","0.00545","0.00402","0.00586","0.00466","0.00349","0.00544","0.00682","0.00815","0.00725"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"RMSprop optimizes according to expected inputs [2]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A new scalar gradient tensor is being created.","code":["Tensor g = Tensor.of(expectedWeight)"]}, + + {"kind":"and","text":"The following input is being applied to the tensor (and internal optimizer)...","code":["w.set( Tensor.of( (double) gradient ) )","w.applyGradient()"]}, + + 
{"kind":"expect","text":"The following state emerges:","code":["w.toString().contains(g.toString())","w.shape.hashCode()==g.shape.hashCode()","w.strides().hashCode()==g.strides().hashCode()","w.indicesMap().hashCode()==g.indicesMap().hashCode()","w.spread().hashCode()==g.spread().hashCode()","w.offset().hashCode()==g.offset().hashCode()"]}, + + {"kind":"where","text":"","code":{"gradient":["-3","-3","2","-3","2","2","-4","-3","-3","2"],"expectedWeight":["0.00316","0.00545","0.00402","0.00586","0.00466","0.00349","0.00544","0.00682","0.00815","0.00725"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"RMSprop optimizes according to expected inputs [3]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A new scalar gradient tensor is being created.","code":["Tensor g = Tensor.of(expectedWeight)"]}, + + {"kind":"and","text":"The following input is being applied to the tensor (and internal optimizer)...","code":["w.set( Tensor.of( (double) gradient ) )","w.applyGradient()"]}, + + {"kind":"expect","text":"The following state emerges:","code":["w.toString().contains(g.toString())","w.shape.hashCode()==g.shape.hashCode()","w.strides().hashCode()==g.strides().hashCode()","w.indicesMap().hashCode()==g.indicesMap().hashCode()","w.spread().hashCode()==g.spread().hashCode()","w.offset().hashCode()==g.offset().hashCode()"]}, + + {"kind":"where","text":"","code":{"gradient":["-3","-3","2","-3","2","2","-4","-3","-3","2"],"expectedWeight":["0.00316","0.00545","0.00402","0.00586","0.00466","0.00349","0.00544","0.00682","0.00815","0.00725"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"RMSprop optimizes according to expected inputs [4]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A new scalar gradient tensor is being created.","code":["Tensor g = Tensor.of(expectedWeight)"]}, + + {"kind":"and","text":"The following input is being applied to the tensor (and internal optimizer)...","code":["w.set( Tensor.of( (double) gradient ) )","w.applyGradient()"]}, + + {"kind":"expect","text":"The following state emerges:","code":["w.toString().contains(g.toString())","w.shape.hashCode()==g.shape.hashCode()","w.strides().hashCode()==g.strides().hashCode()","w.indicesMap().hashCode()==g.indicesMap().hashCode()","w.spread().hashCode()==g.spread().hashCode()","w.offset().hashCode()==g.offset().hashCode()"]}, + + {"kind":"where","text":"","code":{"gradient":["-3","-3","2","-3","2","2","-4","-3","-3","2"],"expectedWeight":["0.00316","0.00545","0.00402","0.00586","0.00466","0.00349","0.00544","0.00682","0.00815","0.00725"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"RMSprop optimizes according to expected inputs [5]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A new scalar gradient tensor is being created.","code":["Tensor g = Tensor.of(expectedWeight)"]}, + + {"kind":"and","text":"The following input is being applied to the tensor (and internal optimizer)...","code":["w.set( Tensor.of( (double) gradient ) )","w.applyGradient()"]}, + + {"kind":"expect","text":"The following state 
emerges:","code":["w.toString().contains(g.toString())","w.shape.hashCode()==g.shape.hashCode()","w.strides().hashCode()==g.strides().hashCode()","w.indicesMap().hashCode()==g.indicesMap().hashCode()","w.spread().hashCode()==g.spread().hashCode()","w.offset().hashCode()==g.offset().hashCode()"]}, + + {"kind":"where","text":"","code":{"gradient":["-3","-3","2","-3","2","2","-4","-3","-3","2"],"expectedWeight":["0.00316","0.00545","0.00402","0.00586","0.00466","0.00349","0.00544","0.00682","0.00815","0.00725"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"RMSprop optimizes according to expected inputs [6]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A new scalar gradient tensor is being created.","code":["Tensor g = Tensor.of(expectedWeight)"]}, + + {"kind":"and","text":"The following input is being applied to the tensor (and internal optimizer)...","code":["w.set( Tensor.of( (double) gradient ) )","w.applyGradient()"]}, + + {"kind":"expect","text":"The following state emerges:","code":["w.toString().contains(g.toString())","w.shape.hashCode()==g.shape.hashCode()","w.strides().hashCode()==g.strides().hashCode()","w.indicesMap().hashCode()==g.indicesMap().hashCode()","w.spread().hashCode()==g.spread().hashCode()","w.offset().hashCode()==g.offset().hashCode()"]}, + + {"kind":"where","text":"","code":{"gradient":["-3","-3","2","-3","2","2","-4","-3","-3","2"],"expectedWeight":["0.00316","0.00545","0.00402","0.00586","0.00466","0.00349","0.00544","0.00682","0.00815","0.00725"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"RMSprop optimizes according to expected inputs [7]", + "result":"PASS", + "duration":"0.002 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A new scalar gradient tensor is being created.","code":["Tensor g = Tensor.of(expectedWeight)"]}, + + {"kind":"and","text":"The following input is being applied to the tensor (and internal optimizer)...","code":["w.set( Tensor.of( (double) gradient ) )","w.applyGradient()"]}, + + {"kind":"expect","text":"The following state emerges:","code":["w.toString().contains(g.toString())","w.shape.hashCode()==g.shape.hashCode()","w.strides().hashCode()==g.strides().hashCode()","w.indicesMap().hashCode()==g.indicesMap().hashCode()","w.spread().hashCode()==g.spread().hashCode()","w.offset().hashCode()==g.offset().hashCode()"]}, + + {"kind":"where","text":"","code":{"gradient":["-3","-3","2","-3","2","2","-4","-3","-3","2"],"expectedWeight":["0.00316","0.00545","0.00402","0.00586","0.00466","0.00349","0.00544","0.00682","0.00815","0.00725"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"RMSprop optimizes according to expected inputs [8]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A new scalar gradient tensor is being created.","code":["Tensor g = Tensor.of(expectedWeight)"]}, + + {"kind":"and","text":"The following input is being applied to the tensor (and internal optimizer)...","code":["w.set( Tensor.of( (double) gradient ) )","w.applyGradient()"]}, + + {"kind":"expect","text":"The following state 
emerges:","code":["w.toString().contains(g.toString())","w.shape.hashCode()==g.shape.hashCode()","w.strides().hashCode()==g.strides().hashCode()","w.indicesMap().hashCode()==g.indicesMap().hashCode()","w.spread().hashCode()==g.spread().hashCode()","w.offset().hashCode()==g.offset().hashCode()"]}, + + {"kind":"where","text":"","code":{"gradient":["-3","-3","2","-3","2","2","-4","-3","-3","2"],"expectedWeight":["0.00316","0.00545","0.00402","0.00586","0.00466","0.00349","0.00544","0.00682","0.00815","0.00725"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"RMSprop optimizes according to expected inputs [9]", + "result":"PASS", + "duration":"0.001 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, diff --git a/docs/spock/reports/ut.tensors.Copy_Spec.json b/docs/spock/reports/ut.tensors.Copy_Spec.json index 939738270..6b3123535 100644 --- a/docs/spock/reports/ut.tensors.Copy_Spec.json +++ b/docs/spock/reports/ut.tensors.Copy_Spec.json @@ -4,19 +4,19 @@ "narrative":"In this specification we cover the behaviour of tensors with respect to their copy methods.\n There are to main ways to copy a tensor:
        \n 1. .shallowCopy()
        \n 2. .deepCopy()
        \n
        \n The first method creates a new tensor with the same underlying data array as the original tensor.
        \n The second method on the other hand creates a new tensor with a new data array.
        \n
        \n The first method is more efficient, but it is not as safe as the second.
        \n The second method is safer, but it is not as efficient.
        \n
        \n Besides these 2 main requirements, there are als some corner cases with respect to\n the components of a tensor (like for example its computation graph) which\n will be covered in this specification as well.", "subjects":["neureka.Tensor"], "statistics":{ - "runs":"5", + "runs":"17", "successRate":"100.0%", "failures":"0", "errors":"0", "skipped":"0", - "duration":"0.013 seconds" + "duration":"0.038 seconds" }, "headers":[],"tags":{},"see":[], "features":[ { "id":"A deep copy of a tensor is also a deep copy of the underlying data array.", "result":"PASS", - "duration":"0", + "duration":"0.004 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -37,7 +37,36 @@ }, { - "id":"A shallow copy of a tensor will be flagged as such.", + "id":"A shallow copy of a tensor will be flagged as such. [0]", + "result":"PASS", + "duration":"0.003 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"expect","text":"The tensor we will use for copying is not flagged as a shallow copy.","code":["!t.isShallowCopy()","t.toString()"]}, + + {"kind":"when","text":"We create a shallow copy of the tensor.","code":["var shallow = t.shallowCopy()"]}, + + {"kind":"then","text":"The copy is not the same instance as the original tensor.","code":["shallow !== t // It's not the same instance!"]}, + + {"kind":"and","text":"The shape and underlying data array are equal to the original tensor but the data is not identical.","code":["shallow.shape == t.shape","shallow.mut.data.get() == t.mut.data.get() // The tensors share the same values...","shallow.mut.data.get() === t.mut.data.get() // ...as well as the same array!","shallow.mut.data === t.mut.data // In fact, their data container is the same instance."]}, + + {"kind":"and","text":"","code":["(0.. shallow.at(i) == t.at(i) }) // The values are the same!"]}, + + {"kind":"and","text":"The shallow copy is flagged as such.","code":["shallow.isShallowCopy()"]}, + + {"kind":"and","text":"Because shallow copies are merely \"fully slices\" we expect this flag to be set as well.","code":["shallow.isFullSlice()"]}, + + {"kind":"and","text":"The inverse property is false:","code":["!shallow.isPartialSlice()"]}, + + {"kind":"where","text":"","code":{"t":["Tensor.ofInts().withShape(2, 3).andFill(1, 2, -9, 8, 3, -2)","Tensor.ofBytes().withShape(5).andFill(8, 2, -7, 3, 0)","Tensor.of(1d, 2d, 3d, 4d, 5d, 6d, 7d)2..4]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"A shallow copy of a tensor will be flagged as such. [1]", "result":"PASS", "duration":"0", "iterations":{ @@ -66,12 +95,41 @@ }, { - "id":"A deep copy of a slice tensor is also a deep copy of the underlying data array.", + "id":"A shallow copy of a tensor will be flagged as such. 
[2]", "result":"PASS", "duration":"0", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, + "blocks":[ + {"kind":"expect","text":"The tensor we will use for copying is not flagged as a shallow copy.","code":["!t.isShallowCopy()","t.toString()"]}, + + {"kind":"when","text":"We create a shallow copy of the tensor.","code":["var shallow = t.shallowCopy()"]}, + + {"kind":"then","text":"The copy is not the same instance as the original tensor.","code":["shallow !== t // It's not the same instance!"]}, + + {"kind":"and","text":"The shape and underlying data array are equal to the original tensor but the data is not identical.","code":["shallow.shape == t.shape","shallow.mut.data.get() == t.mut.data.get() // The tensors share the same values...","shallow.mut.data.get() === t.mut.data.get() // ...as well as the same array!","shallow.mut.data === t.mut.data // In fact, their data container is the same instance."]}, + + {"kind":"and","text":"","code":["(0.. shallow.at(i) == t.at(i) }) // The values are the same!"]}, + + {"kind":"and","text":"The shallow copy is flagged as such.","code":["shallow.isShallowCopy()"]}, + + {"kind":"and","text":"Because shallow copies are merely \"fully slices\" we expect this flag to be set as well.","code":["shallow.isFullSlice()"]}, + + {"kind":"and","text":"The inverse property is false:","code":["!shallow.isPartialSlice()"]}, + + {"kind":"where","text":"","code":{"t":["Tensor.ofInts().withShape(2, 3).andFill(1, 2, -9, 8, 3, -2)","Tensor.ofBytes().withShape(5).andFill(8, 2, -7, 3, 0)","Tensor.of(1d, 2d, 3d, 4d, 5d, 6d, 7d)2..4]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"A deep copy of a slice tensor is also a deep copy of the underlying data array.", + "result":"PASS", + "duration":"0.004 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, "blocks":[ {"kind":"given","text":"A slice of ints with shape (2, 2) sliced in-place from a tensor of shape (3, 3).","code":["var s = Tensor.ofInts().withShape(3, 3).andFill(1, 2, -9, 8, 3, -2)[0..1, 1..2]"]}, @@ -91,7 +149,30 @@ }, { - "id":"A shallow copy will share the same underlying data as its original tensor.", + "id":"A shallow copy will share the same underlying data as its original tensor. [0]", + "result":"PASS", + "duration":"0.002 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A tensor of ints with shape (2, 3).","code":["var t = Tensor.ofInts().withShape(2, 3).andFill(1, 2, -9, 8, 3, -2)"]}, + + {"kind":"expect","text":"The underlying data array is as expected.","code":["t.mut.data.get() == [1, 2, -9, 8, 3, -2] // It's unsafe because it exposes mutable parts of the tensor!"]}, + + {"kind":"when","text":"We create a shallow copy of the tensor.","code":["var shallow = cloner(t)"]}, + + {"kind":"then","text":"The copy is not the same instance as the original tensor.","code":["shallow !== t // It's not the same instance!","shallow.shape == t.shape","shallow.mut.data.get() == t.mut.data.get() // The tensors share the same values!","shallow.mut.data.get() === t.mut.data.get() // The tensors share the exact same data array!"]}, + + {"kind":"and","text":"We verify that they share the same ints through the \"every\" method.","code":["(0.. 
shallow.at(i) == t.at(i) }) // The values are the same!"]}, + + {"kind":"where","text":"","code":{"cloner":["{ Tensor x -> x.shallowCopy()}","{ Tensor x -> x.shallowClone()}"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"A shallow copy will share the same underlying data as its original tensor. [1]", "result":"PASS", "duration":"0", "iterations":{ @@ -114,7 +195,7 @@ }, { - "id":"We can deep copy various types of tensors.", + "id":"We can deep copy various types of tensors. [0]", "result":"PASS", "duration":"0.002 seconds", "iterations":{ @@ -136,6 +217,231 @@ {"kind":"where","text":"We can use the following types and values for the above code.","code":{"type":["Integer","Byte","Short","Long","Float","Double","Boolean","Character","String","Object"],"expected":["[6, 2, 0, -387, 22, 53, -92] as int[]","[-1, 4, 2, -49, 2, -72, 235, 0, 3] as byte[]","[65, -20, -7, -8, -3, -4, -5, -6, -9] as short[]","[0, 5462, -976, -3, -42, -35, -3436, -7, -89] as long[]","[0.5076, -1.0, -2.4, -3.0, -4.0, -5.0, -6.0] as float[]","[4.26434, -4.0, 5.3, -6.6, -7.0, 9.67] as double[]","[true, false, true, false, true, false, true, false, true] as boolean[]","['t', 'e', 's', 't', 'd', 'a', 't', 'a', '!'] as char[]","[\"test\", \"data\", \"!\"] as String[]","[\"What\", 4, 'm' as char, 1] as Object[]"]}} ], "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can deep copy various types of tensors. [1]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A simple vector tensor which we are going to copy.","code":["var t = Tensor.of(type).withShape(expected.length).andFill(expected)"]}, + + {"kind":"and","text":"A slice of the tensor, which we should also be able to deep copy.","code":["var s = t[1..<(expected.length - 1)]"]}, + + {"kind":"when","text":"","code":["var deep = t.deepCopy()","var deepSlice = s.deepCopy()"]}, + + {"kind":"then","text":"The copy is not the same instance as the original tensor.","code":["deep !== t // It's not the same instance!","deepSlice !== s // It's not the same instance!"]}, + + {"kind":"and","text":"The shape and underlying data array are equal to the original tensor but the data is not identical.","code":["deep.shape == t.shape","deep.mut.data.get() == t.mut.data.get() // The tensors share the same values!","deep.mut.data.get() !== t.mut.data.get() // ...but they are not the same array!"]}, + + {"kind":"and","text":"Both the copied tensor and its slice have the expected values.","code":["deep.items == expected","deepSlice.items == expected[1..<(expected.length - 1)]"]}, + + {"kind":"where","text":"We can use the following types and values for the above code.","code":{"type":["Integer","Byte","Short","Long","Float","Double","Boolean","Character","String","Object"],"expected":["[6, 2, 0, -387, 22, 53, -92] as int[]","[-1, 4, 2, -49, 2, -72, 235, 0, 3] as byte[]","[65, -20, -7, -8, -3, -4, -5, -6, -9] as short[]","[0, 5462, -976, -3, -42, -35, -3436, -7, -89] as long[]","[0.5076, -1.0, -2.4, -3.0, -4.0, -5.0, -6.0] as float[]","[4.26434, -4.0, 5.3, -6.6, -7.0, 9.67] as double[]","[true, false, true, false, true, false, true, false, true] as boolean[]","['t', 'e', 's', 't', 'd', 'a', 't', 'a', '!'] as char[]","[\"test\", \"data\", \"!\"] as String[]","[\"What\", 4, 'm' as char, 1] as Object[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can deep copy various types of tensors. 
[2]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A simple vector tensor which we are going to copy.","code":["var t = Tensor.of(type).withShape(expected.length).andFill(expected)"]}, + + {"kind":"and","text":"A slice of the tensor, which we should also be able to deep copy.","code":["var s = t[1..<(expected.length - 1)]"]}, + + {"kind":"when","text":"","code":["var deep = t.deepCopy()","var deepSlice = s.deepCopy()"]}, + + {"kind":"then","text":"The copy is not the same instance as the original tensor.","code":["deep !== t // It's not the same instance!","deepSlice !== s // It's not the same instance!"]}, + + {"kind":"and","text":"The shape and underlying data array are equal to the original tensor but the data is not identical.","code":["deep.shape == t.shape","deep.mut.data.get() == t.mut.data.get() // The tensors share the same values!","deep.mut.data.get() !== t.mut.data.get() // ...but they are not the same array!"]}, + + {"kind":"and","text":"Both the copied tensor and its slice have the expected values.","code":["deep.items == expected","deepSlice.items == expected[1..<(expected.length - 1)]"]}, + + {"kind":"where","text":"We can use the following types and values for the above code.","code":{"type":["Integer","Byte","Short","Long","Float","Double","Boolean","Character","String","Object"],"expected":["[6, 2, 0, -387, 22, 53, -92] as int[]","[-1, 4, 2, -49, 2, -72, 235, 0, 3] as byte[]","[65, -20, -7, -8, -3, -4, -5, -6, -9] as short[]","[0, 5462, -976, -3, -42, -35, -3436, -7, -89] as long[]","[0.5076, -1.0, -2.4, -3.0, -4.0, -5.0, -6.0] as float[]","[4.26434, -4.0, 5.3, -6.6, -7.0, 9.67] as double[]","[true, false, true, false, true, false, true, false, true] as boolean[]","['t', 'e', 's', 't', 'd', 'a', 't', 'a', '!'] as char[]","[\"test\", \"data\", \"!\"] as String[]","[\"What\", 4, 'm' as char, 1] as Object[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can deep copy various types of tensors. 
[3]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A simple vector tensor which we are going to copy.","code":["var t = Tensor.of(type).withShape(expected.length).andFill(expected)"]}, + + {"kind":"and","text":"A slice of the tensor, which we should also be able to deep copy.","code":["var s = t[1..<(expected.length - 1)]"]}, + + {"kind":"when","text":"","code":["var deep = t.deepCopy()","var deepSlice = s.deepCopy()"]}, + + {"kind":"then","text":"The copy is not the same instance as the original tensor.","code":["deep !== t // It's not the same instance!","deepSlice !== s // It's not the same instance!"]}, + + {"kind":"and","text":"The shape and underlying data array are equal to the original tensor but the data is not identical.","code":["deep.shape == t.shape","deep.mut.data.get() == t.mut.data.get() // The tensors share the same values!","deep.mut.data.get() !== t.mut.data.get() // ...but they are not the same array!"]}, + + {"kind":"and","text":"Both the copied tensor and its slice have the expected values.","code":["deep.items == expected","deepSlice.items == expected[1..<(expected.length - 1)]"]}, + + {"kind":"where","text":"We can use the following types and values for the above code.","code":{"type":["Integer","Byte","Short","Long","Float","Double","Boolean","Character","String","Object"],"expected":["[6, 2, 0, -387, 22, 53, -92] as int[]","[-1, 4, 2, -49, 2, -72, 235, 0, 3] as byte[]","[65, -20, -7, -8, -3, -4, -5, -6, -9] as short[]","[0, 5462, -976, -3, -42, -35, -3436, -7, -89] as long[]","[0.5076, -1.0, -2.4, -3.0, -4.0, -5.0, -6.0] as float[]","[4.26434, -4.0, 5.3, -6.6, -7.0, 9.67] as double[]","[true, false, true, false, true, false, true, false, true] as boolean[]","['t', 'e', 's', 't', 'd', 'a', 't', 'a', '!'] as char[]","[\"test\", \"data\", \"!\"] as String[]","[\"What\", 4, 'm' as char, 1] as Object[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can deep copy various types of tensors. 
[4]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A simple vector tensor which we are going to copy.","code":["var t = Tensor.of(type).withShape(expected.length).andFill(expected)"]}, + + {"kind":"and","text":"A slice of the tensor, which we should also be able to deep copy.","code":["var s = t[1..<(expected.length - 1)]"]}, + + {"kind":"when","text":"","code":["var deep = t.deepCopy()","var deepSlice = s.deepCopy()"]}, + + {"kind":"then","text":"The copy is not the same instance as the original tensor.","code":["deep !== t // It's not the same instance!","deepSlice !== s // It's not the same instance!"]}, + + {"kind":"and","text":"The shape and underlying data array are equal to the original tensor but the data is not identical.","code":["deep.shape == t.shape","deep.mut.data.get() == t.mut.data.get() // The tensors share the same values!","deep.mut.data.get() !== t.mut.data.get() // ...but they are not the same array!"]}, + + {"kind":"and","text":"Both the copied tensor and its slice have the expected values.","code":["deep.items == expected","deepSlice.items == expected[1..<(expected.length - 1)]"]}, + + {"kind":"where","text":"We can use the following types and values for the above code.","code":{"type":["Integer","Byte","Short","Long","Float","Double","Boolean","Character","String","Object"],"expected":["[6, 2, 0, -387, 22, 53, -92] as int[]","[-1, 4, 2, -49, 2, -72, 235, 0, 3] as byte[]","[65, -20, -7, -8, -3, -4, -5, -6, -9] as short[]","[0, 5462, -976, -3, -42, -35, -3436, -7, -89] as long[]","[0.5076, -1.0, -2.4, -3.0, -4.0, -5.0, -6.0] as float[]","[4.26434, -4.0, 5.3, -6.6, -7.0, 9.67] as double[]","[true, false, true, false, true, false, true, false, true] as boolean[]","['t', 'e', 's', 't', 'd', 'a', 't', 'a', '!'] as char[]","[\"test\", \"data\", \"!\"] as String[]","[\"What\", 4, 'm' as char, 1] as Object[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can deep copy various types of tensors. 
[5]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A simple vector tensor which we are going to copy.","code":["var t = Tensor.of(type).withShape(expected.length).andFill(expected)"]}, + + {"kind":"and","text":"A slice of the tensor, which we should also be able to deep copy.","code":["var s = t[1..<(expected.length - 1)]"]}, + + {"kind":"when","text":"","code":["var deep = t.deepCopy()","var deepSlice = s.deepCopy()"]}, + + {"kind":"then","text":"The copy is not the same instance as the original tensor.","code":["deep !== t // It's not the same instance!","deepSlice !== s // It's not the same instance!"]}, + + {"kind":"and","text":"The shape and underlying data array are equal to the original tensor but the data is not identical.","code":["deep.shape == t.shape","deep.mut.data.get() == t.mut.data.get() // The tensors share the same values!","deep.mut.data.get() !== t.mut.data.get() // ...but they are not the same array!"]}, + + {"kind":"and","text":"Both the copied tensor and its slice have the expected values.","code":["deep.items == expected","deepSlice.items == expected[1..<(expected.length - 1)]"]}, + + {"kind":"where","text":"We can use the following types and values for the above code.","code":{"type":["Integer","Byte","Short","Long","Float","Double","Boolean","Character","String","Object"],"expected":["[6, 2, 0, -387, 22, 53, -92] as int[]","[-1, 4, 2, -49, 2, -72, 235, 0, 3] as byte[]","[65, -20, -7, -8, -3, -4, -5, -6, -9] as short[]","[0, 5462, -976, -3, -42, -35, -3436, -7, -89] as long[]","[0.5076, -1.0, -2.4, -3.0, -4.0, -5.0, -6.0] as float[]","[4.26434, -4.0, 5.3, -6.6, -7.0, 9.67] as double[]","[true, false, true, false, true, false, true, false, true] as boolean[]","['t', 'e', 's', 't', 'd', 'a', 't', 'a', '!'] as char[]","[\"test\", \"data\", \"!\"] as String[]","[\"What\", 4, 'm' as char, 1] as Object[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can deep copy various types of tensors. 
[6]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A simple vector tensor which we are going to copy.","code":["var t = Tensor.of(type).withShape(expected.length).andFill(expected)"]}, + + {"kind":"and","text":"A slice of the tensor, which we should also be able to deep copy.","code":["var s = t[1..<(expected.length - 1)]"]}, + + {"kind":"when","text":"","code":["var deep = t.deepCopy()","var deepSlice = s.deepCopy()"]}, + + {"kind":"then","text":"The copy is not the same instance as the original tensor.","code":["deep !== t // It's not the same instance!","deepSlice !== s // It's not the same instance!"]}, + + {"kind":"and","text":"The shape and underlying data array are equal to the original tensor but the data is not identical.","code":["deep.shape == t.shape","deep.mut.data.get() == t.mut.data.get() // The tensors share the same values!","deep.mut.data.get() !== t.mut.data.get() // ...but they are not the same array!"]}, + + {"kind":"and","text":"Both the copied tensor and its slice have the expected values.","code":["deep.items == expected","deepSlice.items == expected[1..<(expected.length - 1)]"]}, + + {"kind":"where","text":"We can use the following types and values for the above code.","code":{"type":["Integer","Byte","Short","Long","Float","Double","Boolean","Character","String","Object"],"expected":["[6, 2, 0, -387, 22, 53, -92] as int[]","[-1, 4, 2, -49, 2, -72, 235, 0, 3] as byte[]","[65, -20, -7, -8, -3, -4, -5, -6, -9] as short[]","[0, 5462, -976, -3, -42, -35, -3436, -7, -89] as long[]","[0.5076, -1.0, -2.4, -3.0, -4.0, -5.0, -6.0] as float[]","[4.26434, -4.0, 5.3, -6.6, -7.0, 9.67] as double[]","[true, false, true, false, true, false, true, false, true] as boolean[]","['t', 'e', 's', 't', 'd', 'a', 't', 'a', '!'] as char[]","[\"test\", \"data\", \"!\"] as String[]","[\"What\", 4, 'm' as char, 1] as Object[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can deep copy various types of tensors. 
[7]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A simple vector tensor which we are going to copy.","code":["var t = Tensor.of(type).withShape(expected.length).andFill(expected)"]}, + + {"kind":"and","text":"A slice of the tensor, which we should also be able to deep copy.","code":["var s = t[1..<(expected.length - 1)]"]}, + + {"kind":"when","text":"","code":["var deep = t.deepCopy()","var deepSlice = s.deepCopy()"]}, + + {"kind":"then","text":"The copy is not the same instance as the original tensor.","code":["deep !== t // It's not the same instance!","deepSlice !== s // It's not the same instance!"]}, + + {"kind":"and","text":"The shape and underlying data array are equal to the original tensor but the data is not identical.","code":["deep.shape == t.shape","deep.mut.data.get() == t.mut.data.get() // The tensors share the same values!","deep.mut.data.get() !== t.mut.data.get() // ...but they are not the same array!"]}, + + {"kind":"and","text":"Both the copied tensor and its slice have the expected values.","code":["deep.items == expected","deepSlice.items == expected[1..<(expected.length - 1)]"]}, + + {"kind":"where","text":"We can use the following types and values for the above code.","code":{"type":["Integer","Byte","Short","Long","Float","Double","Boolean","Character","String","Object"],"expected":["[6, 2, 0, -387, 22, 53, -92] as int[]","[-1, 4, 2, -49, 2, -72, 235, 0, 3] as byte[]","[65, -20, -7, -8, -3, -4, -5, -6, -9] as short[]","[0, 5462, -976, -3, -42, -35, -3436, -7, -89] as long[]","[0.5076, -1.0, -2.4, -3.0, -4.0, -5.0, -6.0] as float[]","[4.26434, -4.0, 5.3, -6.6, -7.0, 9.67] as double[]","[true, false, true, false, true, false, true, false, true] as boolean[]","['t', 'e', 's', 't', 'd', 'a', 't', 'a', '!'] as char[]","[\"test\", \"data\", \"!\"] as String[]","[\"What\", 4, 'm' as char, 1] as Object[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can deep copy various types of tensors. 
[8]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A simple vector tensor which we are going to copy.","code":["var t = Tensor.of(type).withShape(expected.length).andFill(expected)"]}, + + {"kind":"and","text":"A slice of the tensor, which we should also be able to deep copy.","code":["var s = t[1..<(expected.length - 1)]"]}, + + {"kind":"when","text":"","code":["var deep = t.deepCopy()","var deepSlice = s.deepCopy()"]}, + + {"kind":"then","text":"The copy is not the same instance as the original tensor.","code":["deep !== t // It's not the same instance!","deepSlice !== s // It's not the same instance!"]}, + + {"kind":"and","text":"The shape and underlying data array are equal to the original tensor but the data is not identical.","code":["deep.shape == t.shape","deep.mut.data.get() == t.mut.data.get() // The tensors share the same values!","deep.mut.data.get() !== t.mut.data.get() // ...but they are not the same array!"]}, + + {"kind":"and","text":"Both the copied tensor and its slice have the expected values.","code":["deep.items == expected","deepSlice.items == expected[1..<(expected.length - 1)]"]}, + + {"kind":"where","text":"We can use the following types and values for the above code.","code":{"type":["Integer","Byte","Short","Long","Float","Double","Boolean","Character","String","Object"],"expected":["[6, 2, 0, -387, 22, 53, -92] as int[]","[-1, 4, 2, -49, 2, -72, 235, 0, 3] as byte[]","[65, -20, -7, -8, -3, -4, -5, -6, -9] as short[]","[0, 5462, -976, -3, -42, -35, -3436, -7, -89] as long[]","[0.5076, -1.0, -2.4, -3.0, -4.0, -5.0, -6.0] as float[]","[4.26434, -4.0, 5.3, -6.6, -7.0, 9.67] as double[]","[true, false, true, false, true, false, true, false, true] as boolean[]","['t', 'e', 's', 't', 'd', 'a', 't', 'a', '!'] as char[]","[\"test\", \"data\", \"!\"] as String[]","[\"What\", 4, 'm' as char, 1] as Object[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can deep copy various types of tensors. 
[9]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A simple vector tensor which we are going to copy.","code":["var t = Tensor.of(type).withShape(expected.length).andFill(expected)"]}, + + {"kind":"and","text":"A slice of the tensor, which we should also be able to deep copy.","code":["var s = t[1..<(expected.length - 1)]"]}, + + {"kind":"when","text":"","code":["var deep = t.deepCopy()","var deepSlice = s.deepCopy()"]}, + + {"kind":"then","text":"The copy is not the same instance as the original tensor.","code":["deep !== t // It's not the same instance!","deepSlice !== s // It's not the same instance!"]}, + + {"kind":"and","text":"The shape and underlying data array are equal to the original tensor but the data is not identical.","code":["deep.shape == t.shape","deep.mut.data.get() == t.mut.data.get() // The tensors share the same values!","deep.mut.data.get() !== t.mut.data.get() // ...but they are not the same array!"]}, + + {"kind":"and","text":"Both the copied tensor and its slice have the expected values.","code":["deep.items == expected","deepSlice.items == expected[1..<(expected.length - 1)]"]}, + + {"kind":"where","text":"We can use the following types and values for the above code.","code":{"type":["Integer","Byte","Short","Long","Float","Double","Boolean","Character","String","Object"],"expected":["[6, 2, 0, -387, 22, 53, -92] as int[]","[-1, 4, 2, -49, 2, -72, 235, 0, 3] as byte[]","[65, -20, -7, -8, -3, -4, -5, -6, -9] as short[]","[0, 5462, -976, -3, -42, -35, -3436, -7, -89] as long[]","[0.5076, -1.0, -2.4, -3.0, -4.0, -5.0, -6.0] as float[]","[4.26434, -4.0, 5.3, -6.6, -7.0, 9.67] as double[]","[true, false, true, false, true, false, true, false, true] as boolean[]","['t', 'e', 's', 't', 'd', 'a', 't', 'a', '!'] as char[]","[\"test\", \"data\", \"!\"] as String[]","[\"What\", 4, 'm' as char, 1] as Object[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} } ], diff --git a/docs/spock/reports/ut.tensors.DimTrim_Spec.json b/docs/spock/reports/ut.tensors.DimTrim_Spec.json index beeec0944..832d7b048 100644 --- a/docs/spock/reports/ut.tensors.DimTrim_Spec.json +++ b/docs/spock/reports/ut.tensors.DimTrim_Spec.json @@ -9,14 +9,14 @@ "failures":"0", "errors":"0", "skipped":"0", - "duration":"0.002 seconds" + "duration":"0.005 seconds" }, "headers":[],"tags":{},"see":[], "features":[ { "id":"The \"dimTrim\" operation works on slices too!", "result":"PASS", - "duration":"0.001 seconds", + "duration":"0.003 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, diff --git a/docs/spock/reports/ut.tensors.Expression_Based_Tensor_Instantiation_Spec.json b/docs/spock/reports/ut.tensors.Expression_Based_Tensor_Instantiation_Spec.json index a03a5bafe..9ff53b1de 100644 --- a/docs/spock/reports/ut.tensors.Expression_Based_Tensor_Instantiation_Spec.json +++ b/docs/spock/reports/ut.tensors.Expression_Based_Tensor_Instantiation_Spec.json @@ -9,14 +9,14 @@ "failures":"0", "errors":"0", "skipped":"0", - "duration":"0.003 seconds" + "duration":"0.014 seconds" }, "headers":[],"tags":{},"see":[], "features":[ { "id":"A tensor can be created from a function as expression.", "result":"PASS", - "duration":"0", + "duration":"0.003 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":["\n The `Tensor.of` method can be used to instantiate a tensor\n using a string expression which defines a function \n followed by an arbitrary number of tensor arguments\n which are used as input for the function.\n "] }, 
@@ -35,7 +35,7 @@ { "id":"We can instantiate tensors from various simple string expressions.", "result":"PASS", - "duration":"0.001 seconds", + "duration":"0.009 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, diff --git a/docs/spock/reports/ut.tensors.Fluent_Tensor_Creation_Spec.json b/docs/spock/reports/ut.tensors.Fluent_Tensor_Creation_Spec.json index 0d2d0e8d9..a48788eaf 100644 --- a/docs/spock/reports/ut.tensors.Fluent_Tensor_Creation_Spec.json +++ b/docs/spock/reports/ut.tensors.Fluent_Tensor_Creation_Spec.json @@ -4,53 +4,819 @@ "narrative":"", "subjects":["neureka.Tensor"], "statistics":{ - "runs":"7", + "runs":"46", "successRate":"100.0%", "failures":"0", "errors":"0", "skipped":"0", - "duration":"0.018 seconds" + "duration":"0.046 seconds" }, "headers":[],"tags":{},"see":[], "features":[ { - "id":"Tensors can be created fluently.", + "id":"Tensors can be created fluently. [0]", + "result":"PASS", + "duration":"0.002 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":["This feature is based on a fluent builder API!"] + }, + "blocks":[ + {"kind":"given","text":"We create a new homogeneously filled Tensor instance using the fluent builder API.","code":["Tensor t = Tensor.of( type )"," .withShape( 3, 2 )"," .all( value )"]}, + + {"kind":"expect","text":"This new instance will have the expected data type...","code":["t.dataType == DataType.of(type)"]}, + + {"kind":"and","text":"...also it will contain the expected data.","code":["t.mut.data.get() == data","t.rawData.length == 1","t.items.size() == 6"]}, + + {"kind":"and","text":"The tensor will have the shape we passed to the builder.","code":["t.shape() == [3, 2]"]}, + + {"kind":"and","text":"The size of the tensor will be the product of all shape entries!","code":["t.size() == 6"]}, + + {"kind":"and","text":"\n Based on the fact that the tensor is homogeneously filled it will be a \"virtual tensor\"\n This means that the tensor will not have allocated the memory proportional to the size\n of the tensor!\n ","code":["t.isVirtual()"]}, + + {"kind":"where","text":"The following data is being used to populate the builder API:","code":{"type":["Integer","Double","Float","Long","Boolean","Character"],"value":["4 as int","4.0 as double","4f as float","42L as Long","false","'°' as char"],"data":["new int[] { 4 }","new double[]{ 4.0 }","new float[] { 4f }","new long[] { 42L }","new boolean[] { false }","new char[] { '°' as char }"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Tensors can be created fluently. 
[1]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":["This feature is based on a fluent builder API!"] + }, + "blocks":[ + {"kind":"given","text":"We create a new homogeneously filled Tensor instance using the fluent builder API.","code":["Tensor t = Tensor.of( type )"," .withShape( 3, 2 )"," .all( value )"]}, + + {"kind":"expect","text":"This new instance will have the expected data type...","code":["t.dataType == DataType.of(type)"]}, + + {"kind":"and","text":"...also it will contain the expected data.","code":["t.mut.data.get() == data","t.rawData.length == 1","t.items.size() == 6"]}, + + {"kind":"and","text":"The tensor will have the shape we passed to the builder.","code":["t.shape() == [3, 2]"]}, + + {"kind":"and","text":"The size of the tensor will be the product of all shape entries!","code":["t.size() == 6"]}, + + {"kind":"and","text":"\n Based on the fact that the tensor is homogeneously filled it will be a \"virtual tensor\"\n This means that the tensor will not have allocated the memory proportional to the size\n of the tensor!\n ","code":["t.isVirtual()"]}, + + {"kind":"where","text":"The following data is being used to populate the builder API:","code":{"type":["Integer","Double","Float","Long","Boolean","Character"],"value":["4 as int","4.0 as double","4f as float","42L as Long","false","'°' as char"],"data":["new int[] { 4 }","new double[]{ 4.0 }","new float[] { 4f }","new long[] { 42L }","new boolean[] { false }","new char[] { '°' as char }"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Tensors can be created fluently. [2]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":["This feature is based on a fluent builder API!"] + }, + "blocks":[ + {"kind":"given","text":"We create a new homogeneously filled Tensor instance using the fluent builder API.","code":["Tensor t = Tensor.of( type )"," .withShape( 3, 2 )"," .all( value )"]}, + + {"kind":"expect","text":"This new instance will have the expected data type...","code":["t.dataType == DataType.of(type)"]}, + + {"kind":"and","text":"...also it will contain the expected data.","code":["t.mut.data.get() == data","t.rawData.length == 1","t.items.size() == 6"]}, + + {"kind":"and","text":"The tensor will have the shape we passed to the builder.","code":["t.shape() == [3, 2]"]}, + + {"kind":"and","text":"The size of the tensor will be the product of all shape entries!","code":["t.size() == 6"]}, + + {"kind":"and","text":"\n Based on the fact that the tensor is homogeneously filled it will be a \"virtual tensor\"\n This means that the tensor will not have allocated the memory proportional to the size\n of the tensor!\n ","code":["t.isVirtual()"]}, + + {"kind":"where","text":"The following data is being used to populate the builder API:","code":{"type":["Integer","Double","Float","Long","Boolean","Character"],"value":["4 as int","4.0 as double","4f as float","42L as Long","false","'°' as char"],"data":["new int[] { 4 }","new double[]{ 4.0 }","new float[] { 4f }","new long[] { 42L }","new boolean[] { false }","new char[] { '°' as char }"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Tensors can be created fluently. 
[3]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":["This feature is based on a fluent builder API!"] + }, + "blocks":[ + {"kind":"given","text":"We create a new homogeneously filled Tensor instance using the fluent builder API.","code":["Tensor t = Tensor.of( type )"," .withShape( 3, 2 )"," .all( value )"]}, + + {"kind":"expect","text":"This new instance will have the expected data type...","code":["t.dataType == DataType.of(type)"]}, + + {"kind":"and","text":"...also it will contain the expected data.","code":["t.mut.data.get() == data","t.rawData.length == 1","t.items.size() == 6"]}, + + {"kind":"and","text":"The tensor will have the shape we passed to the builder.","code":["t.shape() == [3, 2]"]}, + + {"kind":"and","text":"The size of the tensor will be the product of all shape entries!","code":["t.size() == 6"]}, + + {"kind":"and","text":"\n Based on the fact that the tensor is homogeneously filled it will be a \"virtual tensor\"\n This means that the tensor will not have allocated the memory proportional to the size\n of the tensor!\n ","code":["t.isVirtual()"]}, + + {"kind":"where","text":"The following data is being used to populate the builder API:","code":{"type":["Integer","Double","Float","Long","Boolean","Character"],"value":["4 as int","4.0 as double","4f as float","42L as Long","false","'°' as char"],"data":["new int[] { 4 }","new double[]{ 4.0 }","new float[] { 4f }","new long[] { 42L }","new boolean[] { false }","new char[] { '°' as char }"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Tensors can be created fluently. [4]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":["This feature is based on a fluent builder API!"] + }, + "blocks":[ + {"kind":"given","text":"We create a new homogeneously filled Tensor instance using the fluent builder API.","code":["Tensor t = Tensor.of( type )"," .withShape( 3, 2 )"," .all( value )"]}, + + {"kind":"expect","text":"This new instance will have the expected data type...","code":["t.dataType == DataType.of(type)"]}, + + {"kind":"and","text":"...also it will contain the expected data.","code":["t.mut.data.get() == data","t.rawData.length == 1","t.items.size() == 6"]}, + + {"kind":"and","text":"The tensor will have the shape we passed to the builder.","code":["t.shape() == [3, 2]"]}, + + {"kind":"and","text":"The size of the tensor will be the product of all shape entries!","code":["t.size() == 6"]}, + + {"kind":"and","text":"\n Based on the fact that the tensor is homogeneously filled it will be a \"virtual tensor\"\n This means that the tensor will not have allocated the memory proportional to the size\n of the tensor!\n ","code":["t.isVirtual()"]}, + + {"kind":"where","text":"The following data is being used to populate the builder API:","code":{"type":["Integer","Double","Float","Long","Boolean","Character"],"value":["4 as int","4.0 as double","4f as float","42L as Long","false","'°' as char"],"data":["new int[] { 4 }","new double[]{ 4.0 }","new float[] { 4f }","new long[] { 42L }","new boolean[] { false }","new char[] { '°' as char }"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Tensors can be created fluently. 
[5]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":["This feature is based on a fluent builder API!"] + }, + "blocks":[ + {"kind":"given","text":"We create a new homogeneously filled Tensor instance using the fluent builder API.","code":["Tensor t = Tensor.of( type )"," .withShape( 3, 2 )"," .all( value )"]}, + + {"kind":"expect","text":"This new instance will have the expected data type...","code":["t.dataType == DataType.of(type)"]}, + + {"kind":"and","text":"...also it will contain the expected data.","code":["t.mut.data.get() == data","t.rawData.length == 1","t.items.size() == 6"]}, + + {"kind":"and","text":"The tensor will have the shape we passed to the builder.","code":["t.shape() == [3, 2]"]}, + + {"kind":"and","text":"The size of the tensor will be the product of all shape entries!","code":["t.size() == 6"]}, + + {"kind":"and","text":"\n Based on the fact that the tensor is homogeneously filled it will be a \"virtual tensor\"\n This means that the tensor will not have allocated the memory proportional to the size\n of the tensor!\n ","code":["t.isVirtual()"]}, + + {"kind":"where","text":"The following data is being used to populate the builder API:","code":{"type":["Integer","Double","Float","Long","Boolean","Character"],"value":["4 as int","4.0 as double","4f as float","42L as Long","false","'°' as char"],"data":["new int[] { 4 }","new double[]{ 4.0 }","new float[] { 4f }","new long[] { 42L }","new boolean[] { false }","new char[] { '°' as char }"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Range based tensors can be created fluently. [0]", + "result":"PASS", + "duration":"0.002 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a range based Tensor instance using the fluent builder API.","code":["Tensor t = Tensor.of( type )"," .withShape( 3, 2 )"," .andFillFrom( from ).to( to ).step( step )"]}, + + {"kind":"expect","text":"This new instance will have the expected data type...","code":["t.dataType == DataType.of(type)"]}, + + {"kind":"and","text":"...also it will contain the expected data.","code":["t.mut.data.get() == data","t.rawData == data"]}, + + {"kind":"and","text":"The tensor will have the shape we passed to the builder.","code":["t.shape() == [3, 2]"]}, + + {"kind":"and","text":"The size of the tensor will be the product of all shape entries!","code":["t.size() == 6"]}, + + {"kind":"and","text":"\n Based on the fact that the tensor is not homogeneously filled it will be an \"actual tensor\".\n The opposite of that, a \"virtual tensor\", would mean that a tensor does not have allocated \n memory proportional to the size of the tensor! 
\n In this case however the tensor should be actual which means that it is not virtual.\n ","code":["!t.isVirtual()"]}, + + {"kind":"where","text":"The following data is being used to populate the builder API:","code":{"type":["Integer.class","Integer.class","Double.class","Double.class","Float.class","Float.class","Byte.class","Long.class"],"from":["-9 as int","-2 as int","2.7 as double","-3 as double","6.4f as float","0f as float","-5 as byte","-65 as long"],"to":["18 as int","4 as int","45.0 as double","3 as double","78.3f as float","1f as float","6 as byte","45 as long"],"step":["2","2","3","0.5","4","0.2f","2","5"],"data":["[-9, -7, -5, -3, -1, 1] as int[]","[-2, 0, 2, 4, -2, 0] as int[]","[2.7, 5.7, 8.7, 11.7, 14.7, 17.7] as double[]","[-3.0, -2.5, -2.0, -1.5, -1.0, -0.5] as double[]","[6.4, 10.4, 14.4, 18.4, 22.4, 26.4] as float[]","[0.0, 0.2, 0.4, 0.6, 0.8, 1.0] as float[]","[-5, -3, -1, 1, 3, 5] as byte[]","[-65, -60, -55, -50, -45, -40] as long[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Range based tensors can be created fluently. [1]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a range based Tensor instance using the fluent builder API.","code":["Tensor t = Tensor.of( type )"," .withShape( 3, 2 )"," .andFillFrom( from ).to( to ).step( step )"]}, + + {"kind":"expect","text":"This new instance will have the expected data type...","code":["t.dataType == DataType.of(type)"]}, + + {"kind":"and","text":"...also it will contain the expected data.","code":["t.mut.data.get() == data","t.rawData == data"]}, + + {"kind":"and","text":"The tensor will have the shape we passed to the builder.","code":["t.shape() == [3, 2]"]}, + + {"kind":"and","text":"The size of the tensor will be the product of all shape entries!","code":["t.size() == 6"]}, + + {"kind":"and","text":"\n Based on the fact that the tensor is not homogeneously filled it will be an \"actual tensor\".\n The opposite of that, a \"virtual tensor\", would mean that a tensor does not have allocated \n memory proportional to the size of the tensor! \n In this case however the tensor should be actual which means that it is not virtual.\n ","code":["!t.isVirtual()"]}, + + {"kind":"where","text":"The following data is being used to populate the builder API:","code":{"type":["Integer.class","Integer.class","Double.class","Double.class","Float.class","Float.class","Byte.class","Long.class"],"from":["-9 as int","-2 as int","2.7 as double","-3 as double","6.4f as float","0f as float","-5 as byte","-65 as long"],"to":["18 as int","4 as int","45.0 as double","3 as double","78.3f as float","1f as float","6 as byte","45 as long"],"step":["2","2","3","0.5","4","0.2f","2","5"],"data":["[-9, -7, -5, -3, -1, 1] as int[]","[-2, 0, 2, 4, -2, 0] as int[]","[2.7, 5.7, 8.7, 11.7, 14.7, 17.7] as double[]","[-3.0, -2.5, -2.0, -1.5, -1.0, -0.5] as double[]","[6.4, 10.4, 14.4, 18.4, 22.4, 26.4] as float[]","[0.0, 0.2, 0.4, 0.6, 0.8, 1.0] as float[]","[-5, -3, -1, 1, 3, 5] as byte[]","[-65, -60, -55, -50, -45, -40] as long[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Range based tensors can be created fluently. 
[2]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a range based Tensor instance using the fluent builder API.","code":["Tensor t = Tensor.of( type )"," .withShape( 3, 2 )"," .andFillFrom( from ).to( to ).step( step )"]}, + + {"kind":"expect","text":"This new instance will have the expected data type...","code":["t.dataType == DataType.of(type)"]}, + + {"kind":"and","text":"...also it will contain the expected data.","code":["t.mut.data.get() == data","t.rawData == data"]}, + + {"kind":"and","text":"The tensor will have the shape we passed to the builder.","code":["t.shape() == [3, 2]"]}, + + {"kind":"and","text":"The size of the tensor will be the product of all shape entries!","code":["t.size() == 6"]}, + + {"kind":"and","text":"\n Based on the fact that the tensor is not homogeneously filled it will be an \"actual tensor\".\n The opposite of that, a \"virtual tensor\", would mean that a tensor does not have allocated \n memory proportional to the size of the tensor! \n In this case however the tensor should be actual which means that it is not virtual.\n ","code":["!t.isVirtual()"]}, + + {"kind":"where","text":"The following data is being used to populate the builder API:","code":{"type":["Integer.class","Integer.class","Double.class","Double.class","Float.class","Float.class","Byte.class","Long.class"],"from":["-9 as int","-2 as int","2.7 as double","-3 as double","6.4f as float","0f as float","-5 as byte","-65 as long"],"to":["18 as int","4 as int","45.0 as double","3 as double","78.3f as float","1f as float","6 as byte","45 as long"],"step":["2","2","3","0.5","4","0.2f","2","5"],"data":["[-9, -7, -5, -3, -1, 1] as int[]","[-2, 0, 2, 4, -2, 0] as int[]","[2.7, 5.7, 8.7, 11.7, 14.7, 17.7] as double[]","[-3.0, -2.5, -2.0, -1.5, -1.0, -0.5] as double[]","[6.4, 10.4, 14.4, 18.4, 22.4, 26.4] as float[]","[0.0, 0.2, 0.4, 0.6, 0.8, 1.0] as float[]","[-5, -3, -1, 1, 3, 5] as byte[]","[-65, -60, -55, -50, -45, -40] as long[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Range based tensors can be created fluently. [3]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a range based Tensor instance using the fluent builder API.","code":["Tensor t = Tensor.of( type )"," .withShape( 3, 2 )"," .andFillFrom( from ).to( to ).step( step )"]}, + + {"kind":"expect","text":"This new instance will have the expected data type...","code":["t.dataType == DataType.of(type)"]}, + + {"kind":"and","text":"...also it will contain the expected data.","code":["t.mut.data.get() == data","t.rawData == data"]}, + + {"kind":"and","text":"The tensor will have the shape we passed to the builder.","code":["t.shape() == [3, 2]"]}, + + {"kind":"and","text":"The size of the tensor will be the product of all shape entries!","code":["t.size() == 6"]}, + + {"kind":"and","text":"\n Based on the fact that the tensor is not homogeneously filled it will be an \"actual tensor\".\n The opposite of that, a \"virtual tensor\", would mean that a tensor does not have allocated \n memory proportional to the size of the tensor! 
\n In this case however the tensor should be actual which means that it is not virtual.\n ","code":["!t.isVirtual()"]}, + + {"kind":"where","text":"The following data is being used to populate the builder API:","code":{"type":["Integer.class","Integer.class","Double.class","Double.class","Float.class","Float.class","Byte.class","Long.class"],"from":["-9 as int","-2 as int","2.7 as double","-3 as double","6.4f as float","0f as float","-5 as byte","-65 as long"],"to":["18 as int","4 as int","45.0 as double","3 as double","78.3f as float","1f as float","6 as byte","45 as long"],"step":["2","2","3","0.5","4","0.2f","2","5"],"data":["[-9, -7, -5, -3, -1, 1] as int[]","[-2, 0, 2, 4, -2, 0] as int[]","[2.7, 5.7, 8.7, 11.7, 14.7, 17.7] as double[]","[-3.0, -2.5, -2.0, -1.5, -1.0, -0.5] as double[]","[6.4, 10.4, 14.4, 18.4, 22.4, 26.4] as float[]","[0.0, 0.2, 0.4, 0.6, 0.8, 1.0] as float[]","[-5, -3, -1, 1, 3, 5] as byte[]","[-65, -60, -55, -50, -45, -40] as long[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Range based tensors can be created fluently. [4]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a range based Tensor instance using the fluent builder API.","code":["Tensor t = Tensor.of( type )"," .withShape( 3, 2 )"," .andFillFrom( from ).to( to ).step( step )"]}, + + {"kind":"expect","text":"This new instance will have the expected data type...","code":["t.dataType == DataType.of(type)"]}, + + {"kind":"and","text":"...also it will contain the expected data.","code":["t.mut.data.get() == data","t.rawData == data"]}, + + {"kind":"and","text":"The tensor will have the shape we passed to the builder.","code":["t.shape() == [3, 2]"]}, + + {"kind":"and","text":"The size of the tensor will be the product of all shape entries!","code":["t.size() == 6"]}, + + {"kind":"and","text":"\n Based on the fact that the tensor is not homogeneously filled it will be an \"actual tensor\".\n The opposite of that, a \"virtual tensor\", would mean that a tensor does not have allocated \n memory proportional to the size of the tensor! \n In this case however the tensor should be actual which means that it is not virtual.\n ","code":["!t.isVirtual()"]}, + + {"kind":"where","text":"The following data is being used to populate the builder API:","code":{"type":["Integer.class","Integer.class","Double.class","Double.class","Float.class","Float.class","Byte.class","Long.class"],"from":["-9 as int","-2 as int","2.7 as double","-3 as double","6.4f as float","0f as float","-5 as byte","-65 as long"],"to":["18 as int","4 as int","45.0 as double","3 as double","78.3f as float","1f as float","6 as byte","45 as long"],"step":["2","2","3","0.5","4","0.2f","2","5"],"data":["[-9, -7, -5, -3, -1, 1] as int[]","[-2, 0, 2, 4, -2, 0] as int[]","[2.7, 5.7, 8.7, 11.7, 14.7, 17.7] as double[]","[-3.0, -2.5, -2.0, -1.5, -1.0, -0.5] as double[]","[6.4, 10.4, 14.4, 18.4, 22.4, 26.4] as float[]","[0.0, 0.2, 0.4, 0.6, 0.8, 1.0] as float[]","[-5, -3, -1, 1, 3, 5] as byte[]","[-65, -60, -55, -50, -45, -40] as long[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Range based tensors can be created fluently. 
[5]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a range based Tensor instance using the fluent builder API.","code":["Tensor t = Tensor.of( type )"," .withShape( 3, 2 )"," .andFillFrom( from ).to( to ).step( step )"]}, + + {"kind":"expect","text":"This new instance will have the expected data type...","code":["t.dataType == DataType.of(type)"]}, + + {"kind":"and","text":"...also it will contain the expected data.","code":["t.mut.data.get() == data","t.rawData == data"]}, + + {"kind":"and","text":"The tensor will have the shape we passed to the builder.","code":["t.shape() == [3, 2]"]}, + + {"kind":"and","text":"The size of the tensor will be the product of all shape entries!","code":["t.size() == 6"]}, + + {"kind":"and","text":"\n Based on the fact that the tensor is not homogeneously filled it will be an \"actual tensor\".\n The opposite of that, a \"virtual tensor\", would mean that a tensor does not have allocated \n memory proportional to the size of the tensor! \n In this case however the tensor should be actual which means that it is not virtual.\n ","code":["!t.isVirtual()"]}, + + {"kind":"where","text":"The following data is being used to populate the builder API:","code":{"type":["Integer.class","Integer.class","Double.class","Double.class","Float.class","Float.class","Byte.class","Long.class"],"from":["-9 as int","-2 as int","2.7 as double","-3 as double","6.4f as float","0f as float","-5 as byte","-65 as long"],"to":["18 as int","4 as int","45.0 as double","3 as double","78.3f as float","1f as float","6 as byte","45 as long"],"step":["2","2","3","0.5","4","0.2f","2","5"],"data":["[-9, -7, -5, -3, -1, 1] as int[]","[-2, 0, 2, 4, -2, 0] as int[]","[2.7, 5.7, 8.7, 11.7, 14.7, 17.7] as double[]","[-3.0, -2.5, -2.0, -1.5, -1.0, -0.5] as double[]","[6.4, 10.4, 14.4, 18.4, 22.4, 26.4] as float[]","[0.0, 0.2, 0.4, 0.6, 0.8, 1.0] as float[]","[-5, -3, -1, 1, 3, 5] as byte[]","[-65, -60, -55, -50, -45, -40] as long[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Range based tensors can be created fluently. [6]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a range based Tensor instance using the fluent builder API.","code":["Tensor t = Tensor.of( type )"," .withShape( 3, 2 )"," .andFillFrom( from ).to( to ).step( step )"]}, + + {"kind":"expect","text":"This new instance will have the expected data type...","code":["t.dataType == DataType.of(type)"]}, + + {"kind":"and","text":"...also it will contain the expected data.","code":["t.mut.data.get() == data","t.rawData == data"]}, + + {"kind":"and","text":"The tensor will have the shape we passed to the builder.","code":["t.shape() == [3, 2]"]}, + + {"kind":"and","text":"The size of the tensor will be the product of all shape entries!","code":["t.size() == 6"]}, + + {"kind":"and","text":"\n Based on the fact that the tensor is not homogeneously filled it will be an \"actual tensor\".\n The opposite of that, a \"virtual tensor\", would mean that a tensor does not have allocated \n memory proportional to the size of the tensor! 
\n In this case however the tensor should be actual which means that it is not virtual.\n ","code":["!t.isVirtual()"]}, + + {"kind":"where","text":"The following data is being used to populate the builder API:","code":{"type":["Integer.class","Integer.class","Double.class","Double.class","Float.class","Float.class","Byte.class","Long.class"],"from":["-9 as int","-2 as int","2.7 as double","-3 as double","6.4f as float","0f as float","-5 as byte","-65 as long"],"to":["18 as int","4 as int","45.0 as double","3 as double","78.3f as float","1f as float","6 as byte","45 as long"],"step":["2","2","3","0.5","4","0.2f","2","5"],"data":["[-9, -7, -5, -3, -1, 1] as int[]","[-2, 0, 2, 4, -2, 0] as int[]","[2.7, 5.7, 8.7, 11.7, 14.7, 17.7] as double[]","[-3.0, -2.5, -2.0, -1.5, -1.0, -0.5] as double[]","[6.4, 10.4, 14.4, 18.4, 22.4, 26.4] as float[]","[0.0, 0.2, 0.4, 0.6, 0.8, 1.0] as float[]","[-5, -3, -1, 1, 3, 5] as byte[]","[-65, -60, -55, -50, -45, -40] as long[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Range based tensors can be created fluently. [7]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a range based Tensor instance using the fluent builder API.","code":["Tensor t = Tensor.of( type )"," .withShape( 3, 2 )"," .andFillFrom( from ).to( to ).step( step )"]}, + + {"kind":"expect","text":"This new instance will have the expected data type...","code":["t.dataType == DataType.of(type)"]}, + + {"kind":"and","text":"...also it will contain the expected data.","code":["t.mut.data.get() == data","t.rawData == data"]}, + + {"kind":"and","text":"The tensor will have the shape we passed to the builder.","code":["t.shape() == [3, 2]"]}, + + {"kind":"and","text":"The size of the tensor will be the product of all shape entries!","code":["t.size() == 6"]}, + + {"kind":"and","text":"\n Based on the fact that the tensor is not homogeneously filled it will be an \"actual tensor\".\n The opposite of that, a \"virtual tensor\", would mean that a tensor does not have allocated \n memory proportional to the size of the tensor! \n In this case however the tensor should be actual which means that it is not virtual.\n ","code":["!t.isVirtual()"]}, + + {"kind":"where","text":"The following data is being used to populate the builder API:","code":{"type":["Integer.class","Integer.class","Double.class","Double.class","Float.class","Float.class","Byte.class","Long.class"],"from":["-9 as int","-2 as int","2.7 as double","-3 as double","6.4f as float","0f as float","-5 as byte","-65 as long"],"to":["18 as int","4 as int","45.0 as double","3 as double","78.3f as float","1f as float","6 as byte","45 as long"],"step":["2","2","3","0.5","4","0.2f","2","5"],"data":["[-9, -7, -5, -3, -1, 1] as int[]","[-2, 0, 2, 4, -2, 0] as int[]","[2.7, 5.7, 8.7, 11.7, 14.7, 17.7] as double[]","[-3.0, -2.5, -2.0, -1.5, -1.0, -0.5] as double[]","[6.4, 10.4, 14.4, 18.4, 22.4, 26.4] as float[]","[0.0, 0.2, 0.4, 0.6, 0.8, 1.0] as float[]","[-5, -3, -1, 1, 3, 5] as byte[]","[-65, -60, -55, -50, -45, -40] as long[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Value based tensors can be created fluently. 
[0]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a Tensor instance by passing an array of arguments which ought to iteratively fill the instance.","code":["Tensor t = Tensor.of( type )"," .withShape( 3, 2 )"," .andFill( data )"]}, + + {"kind":"expect","text":"This new instance will have the expected data type...","code":["t.dataType == DataType.of(type)"]}, + + {"kind":"and","text":"...also it will contain the expected data.","code":["t.mut.data.get() == expected","t.rawData == expected"]}, + + {"kind":"and","text":"The tensor will have the shape we passed to the builder.","code":["t.shape() == [3, 2]"]}, + + {"kind":"and","text":"The size of the tensor will be the product of all shape entries!","code":["t.size() == 6"]}, + + {"kind":"and","text":"\n Based on the fact that the tensor is not homogeneously filled it will be an \"actual tensor\".\n The opposite of that, a \"virtual tensor\", would mean that a tensor does not have allocated \n memory proportional to the size of the tensor! \n In this case however the tensor should be actual which means that it is not virtual.\n ","code":["!t.isVirtual()"]}, + + {"kind":"where","text":"The following data is being used to populate the builder API:","code":{"type":["Integer.class","Double.class","Short.class","Float.class","Byte.class","Long.class","Boolean.class","Character.class"],"data":["[2, 3, 4] as Integer[]","[-5, 6.5, 7] as Double[]","[6, -1, -2] as Short[]","[22.4, 26.4] as Float[]","[-20, 3, 4, -3] as Byte[]","[23, 199] as Long[]","[true, false] as Boolean[]","['x', 'y'] as Character[]"],"expected":["[2, 3, 4, 2, 3, 4] as int[]","[-5, 6.5, 7, -5, 6.5, 7] as double[]","[6, -1, -2, 6, -1, -2] as short[]","[22.4, 26.4, 22.4, 26.4, 22.4, 26.4] as float[]","[-20, 3, 4, -3, -20, 3] as byte[]","[23, 199, 23, 199, 23, 199] as long[]","[true, false, true, false, true, false] as boolean[]","['x', 'y', 'x', 'y', 'x', 'y'] as char[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Value based tensors can be created fluently. [1]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a Tensor instance by passing an array of arguments which ought to iteratively fill the instance.","code":["Tensor t = Tensor.of( type )"," .withShape( 3, 2 )"," .andFill( data )"]}, + + {"kind":"expect","text":"This new instance will have the expected data type...","code":["t.dataType == DataType.of(type)"]}, + + {"kind":"and","text":"...also it will contain the expected data.","code":["t.mut.data.get() == expected","t.rawData == expected"]}, + + {"kind":"and","text":"The tensor will have the shape we passed to the builder.","code":["t.shape() == [3, 2]"]}, + + {"kind":"and","text":"The size of the tensor will be the product of all shape entries!","code":["t.size() == 6"]}, + + {"kind":"and","text":"\n Based on the fact that the tensor is not homogeneously filled it will be an \"actual tensor\".\n The opposite of that, a \"virtual tensor\", would mean that a tensor does not have allocated \n memory proportional to the size of the tensor! 
\n In this case however the tensor should be actual which means that it is not virtual.\n ","code":["!t.isVirtual()"]}, + + {"kind":"where","text":"The following data is being used to populate the builder API:","code":{"type":["Integer.class","Double.class","Short.class","Float.class","Byte.class","Long.class","Boolean.class","Character.class"],"data":["[2, 3, 4] as Integer[]","[-5, 6.5, 7] as Double[]","[6, -1, -2] as Short[]","[22.4, 26.4] as Float[]","[-20, 3, 4, -3] as Byte[]","[23, 199] as Long[]","[true, false] as Boolean[]","['x', 'y'] as Character[]"],"expected":["[2, 3, 4, 2, 3, 4] as int[]","[-5, 6.5, 7, -5, 6.5, 7] as double[]","[6, -1, -2, 6, -1, -2] as short[]","[22.4, 26.4, 22.4, 26.4, 22.4, 26.4] as float[]","[-20, 3, 4, -3, -20, 3] as byte[]","[23, 199, 23, 199, 23, 199] as long[]","[true, false, true, false, true, false] as boolean[]","['x', 'y', 'x', 'y', 'x', 'y'] as char[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Value based tensors can be created fluently. [2]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a Tensor instance by passing an array of arguments which ought to iteratively fill the instance.","code":["Tensor t = Tensor.of( type )"," .withShape( 3, 2 )"," .andFill( data )"]}, + + {"kind":"expect","text":"This new instance will have the expected data type...","code":["t.dataType == DataType.of(type)"]}, + + {"kind":"and","text":"...also it will contain the expected data.","code":["t.mut.data.get() == expected","t.rawData == expected"]}, + + {"kind":"and","text":"The tensor will have the shape we passed to the builder.","code":["t.shape() == [3, 2]"]}, + + {"kind":"and","text":"The size of the tensor will be the product of all shape entries!","code":["t.size() == 6"]}, + + {"kind":"and","text":"\n Based on the fact that the tensor is not homogeneously filled it will be an \"actual tensor\".\n The opposite of that, a \"virtual tensor\", would mean that a tensor does not have allocated \n memory proportional to the size of the tensor! \n In this case however the tensor should be actual which means that it is not virtual.\n ","code":["!t.isVirtual()"]}, + + {"kind":"where","text":"The following data is being used to populate the builder API:","code":{"type":["Integer.class","Double.class","Short.class","Float.class","Byte.class","Long.class","Boolean.class","Character.class"],"data":["[2, 3, 4] as Integer[]","[-5, 6.5, 7] as Double[]","[6, -1, -2] as Short[]","[22.4, 26.4] as Float[]","[-20, 3, 4, -3] as Byte[]","[23, 199] as Long[]","[true, false] as Boolean[]","['x', 'y'] as Character[]"],"expected":["[2, 3, 4, 2, 3, 4] as int[]","[-5, 6.5, 7, -5, 6.5, 7] as double[]","[6, -1, -2, 6, -1, -2] as short[]","[22.4, 26.4, 22.4, 26.4, 22.4, 26.4] as float[]","[-20, 3, 4, -3, -20, 3] as byte[]","[23, 199, 23, 199, 23, 199] as long[]","[true, false, true, false, true, false] as boolean[]","['x', 'y', 'x', 'y', 'x', 'y'] as char[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Value based tensors can be created fluently. 
[3]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a Tensor instance by passing an array of arguments which ought to iteratively fill the instance.","code":["Tensor t = Tensor.of( type )"," .withShape( 3, 2 )"," .andFill( data )"]}, + + {"kind":"expect","text":"This new instance will have the expected data type...","code":["t.dataType == DataType.of(type)"]}, + + {"kind":"and","text":"...also it will contain the expected data.","code":["t.mut.data.get() == expected","t.rawData == expected"]}, + + {"kind":"and","text":"The tensor will have the shape we passed to the builder.","code":["t.shape() == [3, 2]"]}, + + {"kind":"and","text":"The size of the tensor will be the product of all shape entries!","code":["t.size() == 6"]}, + + {"kind":"and","text":"\n Based on the fact that the tensor is not homogeneously filled it will be an \"actual tensor\".\n The opposite of that, a \"virtual tensor\", would mean that a tensor does not have allocated \n memory proportional to the size of the tensor! \n In this case however the tensor should be actual which means that it is not virtual.\n ","code":["!t.isVirtual()"]}, + + {"kind":"where","text":"The following data is being used to populate the builder API:","code":{"type":["Integer.class","Double.class","Short.class","Float.class","Byte.class","Long.class","Boolean.class","Character.class"],"data":["[2, 3, 4] as Integer[]","[-5, 6.5, 7] as Double[]","[6, -1, -2] as Short[]","[22.4, 26.4] as Float[]","[-20, 3, 4, -3] as Byte[]","[23, 199] as Long[]","[true, false] as Boolean[]","['x', 'y'] as Character[]"],"expected":["[2, 3, 4, 2, 3, 4] as int[]","[-5, 6.5, 7, -5, 6.5, 7] as double[]","[6, -1, -2, 6, -1, -2] as short[]","[22.4, 26.4, 22.4, 26.4, 22.4, 26.4] as float[]","[-20, 3, 4, -3, -20, 3] as byte[]","[23, 199, 23, 199, 23, 199] as long[]","[true, false, true, false, true, false] as boolean[]","['x', 'y', 'x', 'y', 'x', 'y'] as char[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Value based tensors can be created fluently. [4]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a Tensor instance by passing an array of arguments which ought to iteratively fill the instance.","code":["Tensor t = Tensor.of( type )"," .withShape( 3, 2 )"," .andFill( data )"]}, + + {"kind":"expect","text":"This new instance will have the expected data type...","code":["t.dataType == DataType.of(type)"]}, + + {"kind":"and","text":"...also it will contain the expected data.","code":["t.mut.data.get() == expected","t.rawData == expected"]}, + + {"kind":"and","text":"The tensor will have the shape we passed to the builder.","code":["t.shape() == [3, 2]"]}, + + {"kind":"and","text":"The size of the tensor will be the product of all shape entries!","code":["t.size() == 6"]}, + + {"kind":"and","text":"\n Based on the fact that the tensor is not homogeneously filled it will be an \"actual tensor\".\n The opposite of that, a \"virtual tensor\", would mean that a tensor does not have allocated \n memory proportional to the size of the tensor! 
\n In this case however the tensor should be actual which means that it is not virtual.\n ","code":["!t.isVirtual()"]}, + + {"kind":"where","text":"The following data is being used to populate the builder API:","code":{"type":["Integer.class","Double.class","Short.class","Float.class","Byte.class","Long.class","Boolean.class","Character.class"],"data":["[2, 3, 4] as Integer[]","[-5, 6.5, 7] as Double[]","[6, -1, -2] as Short[]","[22.4, 26.4] as Float[]","[-20, 3, 4, -3] as Byte[]","[23, 199] as Long[]","[true, false] as Boolean[]","['x', 'y'] as Character[]"],"expected":["[2, 3, 4, 2, 3, 4] as int[]","[-5, 6.5, 7, -5, 6.5, 7] as double[]","[6, -1, -2, 6, -1, -2] as short[]","[22.4, 26.4, 22.4, 26.4, 22.4, 26.4] as float[]","[-20, 3, 4, -3, -20, 3] as byte[]","[23, 199, 23, 199, 23, 199] as long[]","[true, false, true, false, true, false] as boolean[]","['x', 'y', 'x', 'y', 'x', 'y'] as char[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Value based tensors can be created fluently. [5]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a Tensor instance by passing an array of arguments which ought to iteratively fill the instance.","code":["Tensor t = Tensor.of( type )"," .withShape( 3, 2 )"," .andFill( data )"]}, + + {"kind":"expect","text":"This new instance will have the expected data type...","code":["t.dataType == DataType.of(type)"]}, + + {"kind":"and","text":"...also it will contain the expected data.","code":["t.mut.data.get() == expected","t.rawData == expected"]}, + + {"kind":"and","text":"The tensor will have the shape we passed to the builder.","code":["t.shape() == [3, 2]"]}, + + {"kind":"and","text":"The size of the tensor will be the product of all shape entries!","code":["t.size() == 6"]}, + + {"kind":"and","text":"\n Based on the fact that the tensor is not homogeneously filled it will be an \"actual tensor\".\n The opposite of that, a \"virtual tensor\", would mean that a tensor does not have allocated \n memory proportional to the size of the tensor! \n In this case however the tensor should be actual which means that it is not virtual.\n ","code":["!t.isVirtual()"]}, + + {"kind":"where","text":"The following data is being used to populate the builder API:","code":{"type":["Integer.class","Double.class","Short.class","Float.class","Byte.class","Long.class","Boolean.class","Character.class"],"data":["[2, 3, 4] as Integer[]","[-5, 6.5, 7] as Double[]","[6, -1, -2] as Short[]","[22.4, 26.4] as Float[]","[-20, 3, 4, -3] as Byte[]","[23, 199] as Long[]","[true, false] as Boolean[]","['x', 'y'] as Character[]"],"expected":["[2, 3, 4, 2, 3, 4] as int[]","[-5, 6.5, 7, -5, 6.5, 7] as double[]","[6, -1, -2, 6, -1, -2] as short[]","[22.4, 26.4, 22.4, 26.4, 22.4, 26.4] as float[]","[-20, 3, 4, -3, -20, 3] as byte[]","[23, 199, 23, 199, 23, 199] as long[]","[true, false, true, false, true, false] as boolean[]","['x', 'y', 'x', 'y', 'x', 'y'] as char[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Value based tensors can be created fluently. 
[6]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a Tensor instance by passing an array of arguments which ought to iteratively fill the instance.","code":["Tensor t = Tensor.of( type )"," .withShape( 3, 2 )"," .andFill( data )"]}, + + {"kind":"expect","text":"This new instance will have the expected data type...","code":["t.dataType == DataType.of(type)"]}, + + {"kind":"and","text":"...also it will contain the expected data.","code":["t.mut.data.get() == expected","t.rawData == expected"]}, + + {"kind":"and","text":"The tensor will have the shape we passed to the builder.","code":["t.shape() == [3, 2]"]}, + + {"kind":"and","text":"The size of the tensor will be the product of all shape entries!","code":["t.size() == 6"]}, + + {"kind":"and","text":"\n Based on the fact that the tensor is not homogeneously filled it will be an \"actual tensor\".\n The opposite of that, a \"virtual tensor\", would mean that a tensor does not have allocated \n memory proportional to the size of the tensor! \n In this case however the tensor should be actual which means that it is not virtual.\n ","code":["!t.isVirtual()"]}, + + {"kind":"where","text":"The following data is being used to populate the builder API:","code":{"type":["Integer.class","Double.class","Short.class","Float.class","Byte.class","Long.class","Boolean.class","Character.class"],"data":["[2, 3, 4] as Integer[]","[-5, 6.5, 7] as Double[]","[6, -1, -2] as Short[]","[22.4, 26.4] as Float[]","[-20, 3, 4, -3] as Byte[]","[23, 199] as Long[]","[true, false] as Boolean[]","['x', 'y'] as Character[]"],"expected":["[2, 3, 4, 2, 3, 4] as int[]","[-5, 6.5, 7, -5, 6.5, 7] as double[]","[6, -1, -2, 6, -1, -2] as short[]","[22.4, 26.4, 22.4, 26.4, 22.4, 26.4] as float[]","[-20, 3, 4, -3, -20, 3] as byte[]","[23, 199, 23, 199, 23, 199] as long[]","[true, false, true, false, true, false] as boolean[]","['x', 'y', 'x', 'y', 'x', 'y'] as char[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Value based tensors can be created fluently. [7]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a Tensor instance by passing an array of arguments which ought to iteratively fill the instance.","code":["Tensor t = Tensor.of( type )"," .withShape( 3, 2 )"," .andFill( data )"]}, + + {"kind":"expect","text":"This new instance will have the expected data type...","code":["t.dataType == DataType.of(type)"]}, + + {"kind":"and","text":"...also it will contain the expected data.","code":["t.mut.data.get() == expected","t.rawData == expected"]}, + + {"kind":"and","text":"The tensor will have the shape we passed to the builder.","code":["t.shape() == [3, 2]"]}, + + {"kind":"and","text":"The size of the tensor will be the product of all shape entries!","code":["t.size() == 6"]}, + + {"kind":"and","text":"\n Based on the fact that the tensor is not homogeneously filled it will be an \"actual tensor\".\n The opposite of that, a \"virtual tensor\", would mean that a tensor does not have allocated \n memory proportional to the size of the tensor! 
\n In this case however the tensor should be actual which means that it is not virtual.\n ","code":["!t.isVirtual()"]}, + + {"kind":"where","text":"The following data is being used to populate the builder API:","code":{"type":["Integer.class","Double.class","Short.class","Float.class","Byte.class","Long.class","Boolean.class","Character.class"],"data":["[2, 3, 4] as Integer[]","[-5, 6.5, 7] as Double[]","[6, -1, -2] as Short[]","[22.4, 26.4] as Float[]","[-20, 3, 4, -3] as Byte[]","[23, 199] as Long[]","[true, false] as Boolean[]","['x', 'y'] as Character[]"],"expected":["[2, 3, 4, 2, 3, 4] as int[]","[-5, 6.5, 7, -5, 6.5, 7] as double[]","[6, -1, -2, 6, -1, -2] as short[]","[22.4, 26.4, 22.4, 26.4, 22.4, 26.4] as float[]","[-20, 3, 4, -3, -20, 3] as byte[]","[23, 199, 23, 199, 23, 199] as long[]","[true, false, true, false, true, false] as boolean[]","['x', 'y', 'x', 'y', 'x', 'y'] as char[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Seed based tensors can be created fluently. [0]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a Tensor instance by passing an array of arguments which ought to be populated based on a seed.","code":["Tensor t = Tensor.of( type )"," .withShape( 3, 2 )"," .andSeed( seed )"]}, + + {"kind":"expect","text":"This new instance will have the expected data type...","code":["t.dataType == DataType.of(type)"]}, + + {"kind":"and","text":"...also it will contain the expected data.","code":["t.mut.data.get() == expected","t.rawData == expected"]}, + + {"kind":"and","text":"The tensor will have the shape we passed to the builder.","code":["t.shape() == [3, 2]"]}, + + {"kind":"and","text":"The size of the tensor will be the product of all shape entries!","code":["t.size() == 6"]}, + + {"kind":"and","text":"","code":["t.toString().startsWith(\"(3x2):\")"]}, + + {"kind":"and","text":"\n Based on the fact that the tensor is not homogeneously filled it will be an \"actual tensor\".\n The opposite of that, a \"virtual tensor\", would mean that a tensor does not have allocated \n memory proportional to the size of the tensor! \n In this case however the tensor should be actual which means that it is not virtual.\n ","code":["!t.isVirtual()"]}, + + {"kind":"where","text":"The following data is being used to populate the builder API:","code":{"type":["Integer","Double","Short","Float","Byte","Long","Boolean","Character"],"seed":["\"a\"","\"b\"","\"c\"","\"d\"","\"e\"","\"f\"","\"g\"","\"h\""],"expected":["[1431614970, 345625747, -1944974668, -1560046587, -840164727, 1545421892] as int[]","[0.5099337204650233, -0.1940291796851406, 1.4457326764876133, 1.1037197321548482, 0.5318191965243577, 0.19202511115716991] as double[]","[-14216, -20070, 24851, -22296, -9925, 31593] as short[]","[1.4457327, 1.1037197, 0.5318192, 0.19202511, 0.450341, -0.18904476] as float[]","[57, 26, -121, -59, -23, 90] as byte[]","[7366351542344062765, -5751258536495446167, -794811082727408195, 283695574932216990, -455308978758056709, -8426760264878742120] as long[]","[true, false, false, false, false, true] as boolean[]","[-1747130645, -868747698, 1684960924, -1581710323, -1526159736, -842114084] as char[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Seed based tensors can be created fluently. 
[1]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a Tensor instance by passing an array of arguments which ought to be populated based on a seed.","code":["Tensor t = Tensor.of( type )"," .withShape( 3, 2 )"," .andSeed( seed )"]}, + + {"kind":"expect","text":"This new instance will have the expected data type...","code":["t.dataType == DataType.of(type)"]}, + + {"kind":"and","text":"...also it will contain the expected data.","code":["t.mut.data.get() == expected","t.rawData == expected"]}, + + {"kind":"and","text":"The tensor will have the shape we passed to the builder.","code":["t.shape() == [3, 2]"]}, + + {"kind":"and","text":"The size of the tensor will be the product of all shape entries!","code":["t.size() == 6"]}, + + {"kind":"and","text":"","code":["t.toString().startsWith(\"(3x2):\")"]}, + + {"kind":"and","text":"\n Based on the fact that the tensor is not homogeneously filled it will be an \"actual tensor\".\n The opposite of that, a \"virtual tensor\", would mean that a tensor does not have allocated \n memory proportional to the size of the tensor! \n In this case however the tensor should be actual which means that it is not virtual.\n ","code":["!t.isVirtual()"]}, + + {"kind":"where","text":"The following data is being used to populate the builder API:","code":{"type":["Integer","Double","Short","Float","Byte","Long","Boolean","Character"],"seed":["\"a\"","\"b\"","\"c\"","\"d\"","\"e\"","\"f\"","\"g\"","\"h\""],"expected":["[1431614970, 345625747, -1944974668, -1560046587, -840164727, 1545421892] as int[]","[0.5099337204650233, -0.1940291796851406, 1.4457326764876133, 1.1037197321548482, 0.5318191965243577, 0.19202511115716991] as double[]","[-14216, -20070, 24851, -22296, -9925, 31593] as short[]","[1.4457327, 1.1037197, 0.5318192, 0.19202511, 0.450341, -0.18904476] as float[]","[57, 26, -121, -59, -23, 90] as byte[]","[7366351542344062765, -5751258536495446167, -794811082727408195, 283695574932216990, -455308978758056709, -8426760264878742120] as long[]","[true, false, false, false, false, true] as boolean[]","[-1747130645, -868747698, 1684960924, -1581710323, -1526159736, -842114084] as char[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Seed based tensors can be created fluently. [2]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a Tensor instance by passing an array of arguments which ought to be populated based on a seed.","code":["Tensor t = Tensor.of( type )"," .withShape( 3, 2 )"," .andSeed( seed )"]}, + + {"kind":"expect","text":"This new instance will have the expected data type...","code":["t.dataType == DataType.of(type)"]}, + + {"kind":"and","text":"...also it will contain the expected data.","code":["t.mut.data.get() == expected","t.rawData == expected"]}, + + {"kind":"and","text":"The tensor will have the shape we passed to the builder.","code":["t.shape() == [3, 2]"]}, + + {"kind":"and","text":"The size of the tensor will be the product of all shape entries!","code":["t.size() == 6"]}, + + {"kind":"and","text":"","code":["t.toString().startsWith(\"(3x2):\")"]}, + + {"kind":"and","text":"\n Based on the fact that the tensor is not homogeneously filled it will be an \"actual tensor\".\n The opposite of that, a \"virtual tensor\", would mean that a tensor does not have allocated \n memory proportional to the size of the tensor! 
\n In this case however the tensor should be actual which means that it is not virtual.\n ","code":["!t.isVirtual()"]}, + + {"kind":"where","text":"The following data is being used to populate the builder API:","code":{"type":["Integer","Double","Short","Float","Byte","Long","Boolean","Character"],"seed":["\"a\"","\"b\"","\"c\"","\"d\"","\"e\"","\"f\"","\"g\"","\"h\""],"expected":["[1431614970, 345625747, -1944974668, -1560046587, -840164727, 1545421892] as int[]","[0.5099337204650233, -0.1940291796851406, 1.4457326764876133, 1.1037197321548482, 0.5318191965243577, 0.19202511115716991] as double[]","[-14216, -20070, 24851, -22296, -9925, 31593] as short[]","[1.4457327, 1.1037197, 0.5318192, 0.19202511, 0.450341, -0.18904476] as float[]","[57, 26, -121, -59, -23, 90] as byte[]","[7366351542344062765, -5751258536495446167, -794811082727408195, 283695574932216990, -455308978758056709, -8426760264878742120] as long[]","[true, false, false, false, false, true] as boolean[]","[-1747130645, -868747698, 1684960924, -1581710323, -1526159736, -842114084] as char[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Seed based tensors can be created fluently. [3]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a Tensor instance by passing an array of arguments which ought to be populated based on a seed.","code":["Tensor t = Tensor.of( type )"," .withShape( 3, 2 )"," .andSeed( seed )"]}, + + {"kind":"expect","text":"This new instance will have the expected data type...","code":["t.dataType == DataType.of(type)"]}, + + {"kind":"and","text":"...also it will contain the expected data.","code":["t.mut.data.get() == expected","t.rawData == expected"]}, + + {"kind":"and","text":"The tensor will have the shape we passed to the builder.","code":["t.shape() == [3, 2]"]}, + + {"kind":"and","text":"The size of the tensor will be the product of all shape entries!","code":["t.size() == 6"]}, + + {"kind":"and","text":"","code":["t.toString().startsWith(\"(3x2):\")"]}, + + {"kind":"and","text":"\n Based on the fact that the tensor is not homogeneously filled it will be an \"actual tensor\".\n The opposite of that, a \"virtual tensor\", would mean that a tensor does not have allocated \n memory proportional to the size of the tensor! \n In this case however the tensor should be actual which means that it is not virtual.\n ","code":["!t.isVirtual()"]}, + + {"kind":"where","text":"The following data is being used to populate the builder API:","code":{"type":["Integer","Double","Short","Float","Byte","Long","Boolean","Character"],"seed":["\"a\"","\"b\"","\"c\"","\"d\"","\"e\"","\"f\"","\"g\"","\"h\""],"expected":["[1431614970, 345625747, -1944974668, -1560046587, -840164727, 1545421892] as int[]","[0.5099337204650233, -0.1940291796851406, 1.4457326764876133, 1.1037197321548482, 0.5318191965243577, 0.19202511115716991] as double[]","[-14216, -20070, 24851, -22296, -9925, 31593] as short[]","[1.4457327, 1.1037197, 0.5318192, 0.19202511, 0.450341, -0.18904476] as float[]","[57, 26, -121, -59, -23, 90] as byte[]","[7366351542344062765, -5751258536495446167, -794811082727408195, 283695574932216990, -455308978758056709, -8426760264878742120] as long[]","[true, false, false, false, false, true] as boolean[]","[-1747130645, -868747698, 1684960924, -1581710323, -1526159736, -842114084] as char[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Seed based tensors can be created fluently. 
[4]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a Tensor instance by passing an array of arguments which ought to be populated based on a seed.","code":["Tensor t = Tensor.of( type )"," .withShape( 3, 2 )"," .andSeed( seed )"]}, + + {"kind":"expect","text":"This new instance will have the expected data type...","code":["t.dataType == DataType.of(type)"]}, + + {"kind":"and","text":"...also it will contain the expected data.","code":["t.mut.data.get() == expected","t.rawData == expected"]}, + + {"kind":"and","text":"The tensor will have the shape we passed to the builder.","code":["t.shape() == [3, 2]"]}, + + {"kind":"and","text":"The size of the tensor will be the product of all shape entries!","code":["t.size() == 6"]}, + + {"kind":"and","text":"","code":["t.toString().startsWith(\"(3x2):\")"]}, + + {"kind":"and","text":"\n Based on the fact that the tensor is not homogeneously filled it will be an \"actual tensor\".\n The opposite of that, a \"virtual tensor\", would mean that a tensor does not have allocated \n memory proportional to the size of the tensor! \n In this case however the tensor should be actual which means that it is not virtual.\n ","code":["!t.isVirtual()"]}, + + {"kind":"where","text":"The following data is being used to populate the builder API:","code":{"type":["Integer","Double","Short","Float","Byte","Long","Boolean","Character"],"seed":["\"a\"","\"b\"","\"c\"","\"d\"","\"e\"","\"f\"","\"g\"","\"h\""],"expected":["[1431614970, 345625747, -1944974668, -1560046587, -840164727, 1545421892] as int[]","[0.5099337204650233, -0.1940291796851406, 1.4457326764876133, 1.1037197321548482, 0.5318191965243577, 0.19202511115716991] as double[]","[-14216, -20070, 24851, -22296, -9925, 31593] as short[]","[1.4457327, 1.1037197, 0.5318192, 0.19202511, 0.450341, -0.18904476] as float[]","[57, 26, -121, -59, -23, 90] as byte[]","[7366351542344062765, -5751258536495446167, -794811082727408195, 283695574932216990, -455308978758056709, -8426760264878742120] as long[]","[true, false, false, false, false, true] as boolean[]","[-1747130645, -868747698, 1684960924, -1581710323, -1526159736, -842114084] as char[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Seed based tensors can be created fluently. [5]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a Tensor instance by passing an array of arguments which ought to be populated based on a seed.","code":["Tensor t = Tensor.of( type )"," .withShape( 3, 2 )"," .andSeed( seed )"]}, + + {"kind":"expect","text":"This new instance will have the expected data type...","code":["t.dataType == DataType.of(type)"]}, + + {"kind":"and","text":"...also it will contain the expected data.","code":["t.mut.data.get() == expected","t.rawData == expected"]}, + + {"kind":"and","text":"The tensor will have the shape we passed to the builder.","code":["t.shape() == [3, 2]"]}, + + {"kind":"and","text":"The size of the tensor will be the product of all shape entries!","code":["t.size() == 6"]}, + + {"kind":"and","text":"","code":["t.toString().startsWith(\"(3x2):\")"]}, + + {"kind":"and","text":"\n Based on the fact that the tensor is not homogeneously filled it will be an \"actual tensor\".\n The opposite of that, a \"virtual tensor\", would mean that a tensor does not have allocated \n memory proportional to the size of the tensor! 
\n In this case however the tensor should be actual which means that it is not virtual.\n ","code":["!t.isVirtual()"]}, + + {"kind":"where","text":"The following data is being used to populate the builder API:","code":{"type":["Integer","Double","Short","Float","Byte","Long","Boolean","Character"],"seed":["\"a\"","\"b\"","\"c\"","\"d\"","\"e\"","\"f\"","\"g\"","\"h\""],"expected":["[1431614970, 345625747, -1944974668, -1560046587, -840164727, 1545421892] as int[]","[0.5099337204650233, -0.1940291796851406, 1.4457326764876133, 1.1037197321548482, 0.5318191965243577, 0.19202511115716991] as double[]","[-14216, -20070, 24851, -22296, -9925, 31593] as short[]","[1.4457327, 1.1037197, 0.5318192, 0.19202511, 0.450341, -0.18904476] as float[]","[57, 26, -121, -59, -23, 90] as byte[]","[7366351542344062765, -5751258536495446167, -794811082727408195, 283695574932216990, -455308978758056709, -8426760264878742120] as long[]","[true, false, false, false, false, true] as boolean[]","[-1747130645, -868747698, 1684960924, -1581710323, -1526159736, -842114084] as char[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Seed based tensors can be created fluently. [6]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a Tensor instance by passing an array of arguments which ought to be populated based on a seed.","code":["Tensor t = Tensor.of( type )"," .withShape( 3, 2 )"," .andSeed( seed )"]}, + + {"kind":"expect","text":"This new instance will have the expected data type...","code":["t.dataType == DataType.of(type)"]}, + + {"kind":"and","text":"...also it will contain the expected data.","code":["t.mut.data.get() == expected","t.rawData == expected"]}, + + {"kind":"and","text":"The tensor will have the shape we passed to the builder.","code":["t.shape() == [3, 2]"]}, + + {"kind":"and","text":"The size of the tensor will be the product of all shape entries!","code":["t.size() == 6"]}, + + {"kind":"and","text":"","code":["t.toString().startsWith(\"(3x2):\")"]}, + + {"kind":"and","text":"\n Based on the fact that the tensor is not homogeneously filled it will be an \"actual tensor\".\n The opposite of that, a \"virtual tensor\", would mean that a tensor does not have allocated \n memory proportional to the size of the tensor! \n In this case however the tensor should be actual which means that it is not virtual.\n ","code":["!t.isVirtual()"]}, + + {"kind":"where","text":"The following data is being used to populate the builder API:","code":{"type":["Integer","Double","Short","Float","Byte","Long","Boolean","Character"],"seed":["\"a\"","\"b\"","\"c\"","\"d\"","\"e\"","\"f\"","\"g\"","\"h\""],"expected":["[1431614970, 345625747, -1944974668, -1560046587, -840164727, 1545421892] as int[]","[0.5099337204650233, -0.1940291796851406, 1.4457326764876133, 1.1037197321548482, 0.5318191965243577, 0.19202511115716991] as double[]","[-14216, -20070, 24851, -22296, -9925, 31593] as short[]","[1.4457327, 1.1037197, 0.5318192, 0.19202511, 0.450341, -0.18904476] as float[]","[57, 26, -121, -59, -23, 90] as byte[]","[7366351542344062765, -5751258536495446167, -794811082727408195, 283695574932216990, -455308978758056709, -8426760264878742120] as long[]","[true, false, false, false, false, true] as boolean[]","[-1747130645, -868747698, 1684960924, -1581710323, -1526159736, -842114084] as char[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Seed based tensors can be created fluently. 
[7]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a Tensor instance by passing an array of arguments which ought to be populated based on a seed.","code":["Tensor t = Tensor.of( type )"," .withShape( 3, 2 )"," .andSeed( seed )"]}, + + {"kind":"expect","text":"This new instance will have the expected data type...","code":["t.dataType == DataType.of(type)"]}, + + {"kind":"and","text":"...also it will contain the expected data.","code":["t.mut.data.get() == expected","t.rawData == expected"]}, + + {"kind":"and","text":"The tensor will have the shape we passed to the builder.","code":["t.shape() == [3, 2]"]}, + + {"kind":"and","text":"The size of the tensor will be the product of all shape entries!","code":["t.size() == 6"]}, + + {"kind":"and","text":"","code":["t.toString().startsWith(\"(3x2):\")"]}, + + {"kind":"and","text":"\n Based on the fact that the tensor is not homogeneously filled it will be an \"actual tensor\".\n The opposite of that, a \"virtual tensor\", would mean that a tensor does not have allocated \n memory proportional to the size of the tensor! \n In this case however the tensor should be actual which means that it is not virtual.\n ","code":["!t.isVirtual()"]}, + + {"kind":"where","text":"The following data is being used to populate the builder API:","code":{"type":["Integer","Double","Short","Float","Byte","Long","Boolean","Character"],"seed":["\"a\"","\"b\"","\"c\"","\"d\"","\"e\"","\"f\"","\"g\"","\"h\""],"expected":["[1431614970, 345625747, -1944974668, -1560046587, -840164727, 1545421892] as int[]","[0.5099337204650233, -0.1940291796851406, 1.4457326764876133, 1.1037197321548482, 0.5318191965243577, 0.19202511115716991] as double[]","[-14216, -20070, 24851, -22296, -9925, 31593] as short[]","[1.4457327, 1.1037197, 0.5318192, 0.19202511, 0.450341, -0.18904476] as float[]","[57, 26, -121, -59, -23, 90] as byte[]","[7366351542344062765, -5751258536495446167, -794811082727408195, 283695574932216990, -455308978758056709, -8426760264878742120] as long[]","[true, false, false, false, false, true] as boolean[]","[-1747130645, -868747698, 1684960924, -1581710323, -1526159736, -842114084] as char[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Initialization lambda based tensors can be created fluently. 
[0]", "result":"PASS", "duration":"0.001 seconds", "iterations":{ - "tags":{},"see":[],"extraInfo":["This feature is based on a fluent builder API!","This feature is based on a fluent builder API!","This feature is based on a fluent builder API!","This feature is based on a fluent builder API!","This feature is based on a fluent builder API!","This feature is based on a fluent builder API!"] + "tags":{},"see":[],"extraInfo":[] }, "blocks":[ - {"kind":"given","text":"We create a new homogeneously filled Tensor instance using the fluent builder API.","code":["Tensor t = Tensor.of( type )"," .withShape( 3, 2 )"," .all( value )"]}, + {"kind":"given","text":"We create a Tensor instance by passing an initialization lambda which ought to iteratively fill the instance.","code":["Tensor t = Tensor.of( type )"," .withShape( 3, 2 )"," .andWhere( initializer )"]}, {"kind":"expect","text":"This new instance will have the expected data type...","code":["t.dataType == DataType.of(type)"]}, - {"kind":"and","text":"...also it will contain the expected data.","code":["t.mut.data.get() == data","t.rawData.length == 1","t.items.size() == 6"]}, + {"kind":"and","text":"...also it will contain the expected data.","code":["t.mut.data.get() == expected","t.rawData == expected"]}, {"kind":"and","text":"The tensor will have the shape we passed to the builder.","code":["t.shape() == [3, 2]"]}, {"kind":"and","text":"The size of the tensor will be the product of all shape entries!","code":["t.size() == 6"]}, - {"kind":"and","text":"\n Based on the fact that the tensor is homogeneously filled it will be a \"virtual tensor\"\n This means that the tensor will not have allocated the memory proportional to the size\n of the tensor!\n ","code":["t.isVirtual()"]}, + {"kind":"and","text":"\n Based on the fact that the tensor is not homogeneously filled it will be an \"actual tensor\".\n The opposite of that, a \"virtual tensor\", would mean that a tensor does not have allocated \n memory proportional to the size of the tensor! 
\n In this case however the tensor should be actual which means that it is not virtual.\n ","code":["!t.isVirtual()"]}, - {"kind":"where","text":"The following data is being used to populate the builder API:","code":{"type":["Integer","Double","Float","Long","Boolean","Character"],"value":["4 as int","4.0 as double","4f as float","42L as Long","false","'°' as char"],"data":["new int[] { 4 }","new double[]{ 4.0 }","new float[] { 4f }","new long[] { 42L }","new boolean[] { false }","new char[] { '°' as char }"]}} + {"kind":"where","text":"The following data is being used to populate the builder API:","code":{"type":["Integer.class","Double.class","Short.class","Float.class","Byte.class","Long.class","Character.class","Boolean.class"],"initializer":["{ i, indices -> (i + indices.sum()) }","{ i, indices -> (double) (i + indices.sum()) }","{ i, indices -> (short) (i + indices.sum()) }","{ i, indices -> (float) (i + indices.sum()) }","{ i, indices -> (byte) (i + indices.sum()) }","{ i, indices -> (long) (i + indices.sum()) }","{ i, indices -> (char) (i + indices.sum()) }","{ i, indices -> (boolean)(i % 2 == 0) }"],"expected":["[0, 2, 3, 5, 6, 8] as int[]","[0, 2, 3, 5, 6, 8] as double[]","[0, 2, 3, 5, 6, 8] as short[]","[0, 2, 3, 5, 6, 8] as float[]","[0, 2, 3, 5, 6, 8] as byte[]","[0, 2, 3, 5, 6, 8] as long[]","[0, 2, 3, 5, 6, 8] as char[]","[true, false, true, false, true, false] as boolean[]"]}} ], "problems":{"dataValues":[], "errors":[]} }, { - "id":"Range based tensors can be created fluently.", + "id":"Initialization lambda based tensors can be created fluently. [1]", "result":"PASS", - "duration":"0.001 seconds", + "duration":"0", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, "blocks":[ - {"kind":"given","text":"We create a range based Tensor instance using the fluent builder API.","code":["Tensor t = Tensor.of( type )"," .withShape( 3, 2 )"," .andFillFrom( from ).to( to ).step( step )"]}, + {"kind":"given","text":"We create a Tensor instance by passing an initialization lambda which ought to iteratively fill the instance.","code":["Tensor t = Tensor.of( type )"," .withShape( 3, 2 )"," .andWhere( initializer )"]}, {"kind":"expect","text":"This new instance will have the expected data type...","code":["t.dataType == DataType.of(type)"]}, - {"kind":"and","text":"...also it will contain the expected data.","code":["t.mut.data.get() == data","t.rawData == data"]}, + {"kind":"and","text":"...also it will contain the expected data.","code":["t.mut.data.get() == expected","t.rawData == expected"]}, {"kind":"and","text":"The tensor will have the shape we passed to the builder.","code":["t.shape() == [3, 2]"]}, @@ -58,20 +824,20 @@ {"kind":"and","text":"\n Based on the fact that the tensor is not homogeneously filled it will be an \"actual tensor\".\n The opposite of that, a \"virtual tensor\", would mean that a tensor does not have allocated \n memory proportional to the size of the tensor! 
\n In this case however the tensor should be actual which means that it is not virtual.\n ","code":["!t.isVirtual()"]}, - {"kind":"where","text":"The following data is being used to populate the builder API:","code":{"type":["Integer.class","Integer.class","Double.class","Double.class","Float.class","Float.class","Byte.class","Long.class"],"from":["-9 as int","-2 as int","2.7 as double","-3 as double","6.4f as float","0f as float","-5 as byte","-65 as long"],"to":["18 as int","4 as int","45.0 as double","3 as double","78.3f as float","1f as float","6 as byte","45 as long"],"step":["2","2","3","0.5","4","0.2f","2","5"],"data":["[-9, -7, -5, -3, -1, 1] as int[]","[-2, 0, 2, 4, -2, 0] as int[]","[2.7, 5.7, 8.7, 11.7, 14.7, 17.7] as double[]","[-3.0, -2.5, -2.0, -1.5, -1.0, -0.5] as double[]","[6.4, 10.4, 14.4, 18.4, 22.4, 26.4] as float[]","[0.0, 0.2, 0.4, 0.6, 0.8, 1.0] as float[]","[-5, -3, -1, 1, 3, 5] as byte[]","[-65, -60, -55, -50, -45, -40] as long[]"]}} + {"kind":"where","text":"The following data is being used to populate the builder API:","code":{"type":["Integer.class","Double.class","Short.class","Float.class","Byte.class","Long.class","Character.class","Boolean.class"],"initializer":["{ i, indices -> (i + indices.sum()) }","{ i, indices -> (double) (i + indices.sum()) }","{ i, indices -> (short) (i + indices.sum()) }","{ i, indices -> (float) (i + indices.sum()) }","{ i, indices -> (byte) (i + indices.sum()) }","{ i, indices -> (long) (i + indices.sum()) }","{ i, indices -> (char) (i + indices.sum()) }","{ i, indices -> (boolean)(i % 2 == 0) }"],"expected":["[0, 2, 3, 5, 6, 8] as int[]","[0, 2, 3, 5, 6, 8] as double[]","[0, 2, 3, 5, 6, 8] as short[]","[0, 2, 3, 5, 6, 8] as float[]","[0, 2, 3, 5, 6, 8] as byte[]","[0, 2, 3, 5, 6, 8] as long[]","[0, 2, 3, 5, 6, 8] as char[]","[true, false, true, false, true, false] as boolean[]"]}} ], "problems":{"dataValues":[], "errors":[]} }, { - "id":"Value based tensors can be created fluently.", + "id":"Initialization lambda based tensors can be created fluently. [2]", "result":"PASS", "duration":"0", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, "blocks":[ - {"kind":"given","text":"We create a Tensor instance by passing an array of arguments which ought to iteratively fill the instance.","code":["Tensor t = Tensor.of( type )"," .withShape( 3, 2 )"," .andFill( data )"]}, + {"kind":"given","text":"We create a Tensor instance by passing an initialization lambda which ought to iteratively fill the instance.","code":["Tensor t = Tensor.of( type )"," .withShape( 3, 2 )"," .andWhere( initializer )"]}, {"kind":"expect","text":"This new instance will have the expected data type...","code":["t.dataType == DataType.of(type)"]}, @@ -83,20 +849,20 @@ {"kind":"and","text":"\n Based on the fact that the tensor is not homogeneously filled it will be an \"actual tensor\".\n The opposite of that, a \"virtual tensor\", would mean that a tensor does not have allocated \n memory proportional to the size of the tensor! 
\n In this case however the tensor should be actual which means that it is not virtual.\n ","code":["!t.isVirtual()"]}, - {"kind":"where","text":"The following data is being used to populate the builder API:","code":{"type":["Integer.class","Double.class","Short.class","Float.class","Byte.class","Long.class","Boolean.class","Character.class"],"data":["[2, 3, 4] as Integer[]","[-5, 6.5, 7] as Double[]","[6, -1, -2] as Short[]","[22.4, 26.4] as Float[]","[-20, 3, 4, -3] as Byte[]","[23, 199] as Long[]","[true, false] as Boolean[]","['x', 'y'] as Character[]"],"expected":["[2, 3, 4, 2, 3, 4] as int[]","[-5, 6.5, 7, -5, 6.5, 7] as double[]","[6, -1, -2, 6, -1, -2] as short[]","[22.4, 26.4, 22.4, 26.4, 22.4, 26.4] as float[]","[-20, 3, 4, -3, -20, 3] as byte[]","[23, 199, 23, 199, 23, 199] as long[]","[true, false, true, false, true, false] as boolean[]","['x', 'y', 'x', 'y', 'x', 'y'] as char[]"]}} + {"kind":"where","text":"The following data is being used to populate the builder API:","code":{"type":["Integer.class","Double.class","Short.class","Float.class","Byte.class","Long.class","Character.class","Boolean.class"],"initializer":["{ i, indices -> (i + indices.sum()) }","{ i, indices -> (double) (i + indices.sum()) }","{ i, indices -> (short) (i + indices.sum()) }","{ i, indices -> (float) (i + indices.sum()) }","{ i, indices -> (byte) (i + indices.sum()) }","{ i, indices -> (long) (i + indices.sum()) }","{ i, indices -> (char) (i + indices.sum()) }","{ i, indices -> (boolean)(i % 2 == 0) }"],"expected":["[0, 2, 3, 5, 6, 8] as int[]","[0, 2, 3, 5, 6, 8] as double[]","[0, 2, 3, 5, 6, 8] as short[]","[0, 2, 3, 5, 6, 8] as float[]","[0, 2, 3, 5, 6, 8] as byte[]","[0, 2, 3, 5, 6, 8] as long[]","[0, 2, 3, 5, 6, 8] as char[]","[true, false, true, false, true, false] as boolean[]"]}} ], "problems":{"dataValues":[], "errors":[]} }, { - "id":"Seed based tensors can be created fluently.", + "id":"Initialization lambda based tensors can be created fluently. [3]", "result":"PASS", "duration":"0", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, "blocks":[ - {"kind":"given","text":"We create a Tensor instance by passing an array of arguments which ought to be populated based on a seed.","code":["Tensor t = Tensor.of( type )"," .withShape( 3, 2 )"," .andSeed( seed )"]}, + {"kind":"given","text":"We create a Tensor instance by passing an initialization lambda which ought to iteratively fill the instance.","code":["Tensor t = Tensor.of( type )"," .withShape( 3, 2 )"," .andWhere( initializer )"]}, {"kind":"expect","text":"This new instance will have the expected data type...","code":["t.dataType == DataType.of(type)"]}, @@ -106,17 +872,90 @@ {"kind":"and","text":"The size of the tensor will be the product of all shape entries!","code":["t.size() == 6"]}, - {"kind":"and","text":"","code":["t.toString().startsWith(\"(3x2):\")"]}, + {"kind":"and","text":"\n Based on the fact that the tensor is not homogeneously filled it will be an \"actual tensor\".\n The opposite of that, a \"virtual tensor\", would mean that a tensor does not have allocated \n memory proportional to the size of the tensor! 
\n In this case however the tensor should be actual which means that it is not virtual.\n ","code":["!t.isVirtual()"]}, + + {"kind":"where","text":"The following data is being used to populate the builder API:","code":{"type":["Integer.class","Double.class","Short.class","Float.class","Byte.class","Long.class","Character.class","Boolean.class"],"initializer":["{ i, indices -> (i + indices.sum()) }","{ i, indices -> (double) (i + indices.sum()) }","{ i, indices -> (short) (i + indices.sum()) }","{ i, indices -> (float) (i + indices.sum()) }","{ i, indices -> (byte) (i + indices.sum()) }","{ i, indices -> (long) (i + indices.sum()) }","{ i, indices -> (char) (i + indices.sum()) }","{ i, indices -> (boolean)(i % 2 == 0) }"],"expected":["[0, 2, 3, 5, 6, 8] as int[]","[0, 2, 3, 5, 6, 8] as double[]","[0, 2, 3, 5, 6, 8] as short[]","[0, 2, 3, 5, 6, 8] as float[]","[0, 2, 3, 5, 6, 8] as byte[]","[0, 2, 3, 5, 6, 8] as long[]","[0, 2, 3, 5, 6, 8] as char[]","[true, false, true, false, true, false] as boolean[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Initialization lambda based tensors can be created fluently. [4]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a Tensor instance by passing an initialization lambda which ought to iteratively fill the instance.","code":["Tensor t = Tensor.of( type )"," .withShape( 3, 2 )"," .andWhere( initializer )"]}, + + {"kind":"expect","text":"This new instance will have the expected data type...","code":["t.dataType == DataType.of(type)"]}, + + {"kind":"and","text":"...also it will contain the expected data.","code":["t.mut.data.get() == expected","t.rawData == expected"]}, + + {"kind":"and","text":"The tensor will have the shape we passed to the builder.","code":["t.shape() == [3, 2]"]}, + + {"kind":"and","text":"The size of the tensor will be the product of all shape entries!","code":["t.size() == 6"]}, {"kind":"and","text":"\n Based on the fact that the tensor is not homogeneously filled it will be an \"actual tensor\".\n The opposite of that, a \"virtual tensor\", would mean that a tensor does not have allocated \n memory proportional to the size of the tensor! 
\n In this case however the tensor should be actual which means that it is not virtual.\n ","code":["!t.isVirtual()"]}, - {"kind":"where","text":"The following data is being used to populate the builder API:","code":{"type":["Integer","Double","Short","Float","Byte","Long","Boolean","Character"],"seed":["\"a\"","\"b\"","\"c\"","\"d\"","\"e\"","\"f\"","\"g\"","\"h\""],"expected":["[1431614970, 345625747, -1944974668, -1560046587, -840164727, 1545421892] as int[]","[0.5099337204650233, -0.1940291796851406, 1.4457326764876133, 1.1037197321548482, 0.5318191965243577, 0.19202511115716991] as double[]","[-14216, -20070, 24851, -22296, -9925, 31593] as short[]","[1.4457327, 1.1037197, 0.5318192, 0.19202511, 0.450341, -0.18904476] as float[]","[57, 26, -121, -59, -23, 90] as byte[]","[7366351542344062765, -5751258536495446167, -794811082727408195, 283695574932216990, -455308978758056709, -8426760264878742120] as long[]","[true, false, false, false, false, true] as boolean[]","[-1747130645, -868747698, 1684960924, -1581710323, -1526159736, -842114084] as char[]"]}} + {"kind":"where","text":"The following data is being used to populate the builder API:","code":{"type":["Integer.class","Double.class","Short.class","Float.class","Byte.class","Long.class","Character.class","Boolean.class"],"initializer":["{ i, indices -> (i + indices.sum()) }","{ i, indices -> (double) (i + indices.sum()) }","{ i, indices -> (short) (i + indices.sum()) }","{ i, indices -> (float) (i + indices.sum()) }","{ i, indices -> (byte) (i + indices.sum()) }","{ i, indices -> (long) (i + indices.sum()) }","{ i, indices -> (char) (i + indices.sum()) }","{ i, indices -> (boolean)(i % 2 == 0) }"],"expected":["[0, 2, 3, 5, 6, 8] as int[]","[0, 2, 3, 5, 6, 8] as double[]","[0, 2, 3, 5, 6, 8] as short[]","[0, 2, 3, 5, 6, 8] as float[]","[0, 2, 3, 5, 6, 8] as byte[]","[0, 2, 3, 5, 6, 8] as long[]","[0, 2, 3, 5, 6, 8] as char[]","[true, false, true, false, true, false] as boolean[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Initialization lambda based tensors can be created fluently. [5]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a Tensor instance by passing an initialization lambda which ought to iteratively fill the instance.","code":["Tensor t = Tensor.of( type )"," .withShape( 3, 2 )"," .andWhere( initializer )"]}, + + {"kind":"expect","text":"This new instance will have the expected data type...","code":["t.dataType == DataType.of(type)"]}, + + {"kind":"and","text":"...also it will contain the expected data.","code":["t.mut.data.get() == expected","t.rawData == expected"]}, + + {"kind":"and","text":"The tensor will have the shape we passed to the builder.","code":["t.shape() == [3, 2]"]}, + + {"kind":"and","text":"The size of the tensor will be the product of all shape entries!","code":["t.size() == 6"]}, + + {"kind":"and","text":"\n Based on the fact that the tensor is not homogeneously filled it will be an \"actual tensor\".\n The opposite of that, a \"virtual tensor\", would mean that a tensor does not have allocated \n memory proportional to the size of the tensor! 
\n In this case however the tensor should be actual which means that it is not virtual.\n ","code":["!t.isVirtual()"]}, + + {"kind":"where","text":"The following data is being used to populate the builder API:","code":{"type":["Integer.class","Double.class","Short.class","Float.class","Byte.class","Long.class","Character.class","Boolean.class"],"initializer":["{ i, indices -> (i + indices.sum()) }","{ i, indices -> (double) (i + indices.sum()) }","{ i, indices -> (short) (i + indices.sum()) }","{ i, indices -> (float) (i + indices.sum()) }","{ i, indices -> (byte) (i + indices.sum()) }","{ i, indices -> (long) (i + indices.sum()) }","{ i, indices -> (char) (i + indices.sum()) }","{ i, indices -> (boolean)(i % 2 == 0) }"],"expected":["[0, 2, 3, 5, 6, 8] as int[]","[0, 2, 3, 5, 6, 8] as double[]","[0, 2, 3, 5, 6, 8] as short[]","[0, 2, 3, 5, 6, 8] as float[]","[0, 2, 3, 5, 6, 8] as byte[]","[0, 2, 3, 5, 6, 8] as long[]","[0, 2, 3, 5, 6, 8] as char[]","[true, false, true, false, true, false] as boolean[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Initialization lambda based tensors can be created fluently. [6]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a Tensor instance by passing an initialization lambda which ought to iteratively fill the instance.","code":["Tensor t = Tensor.of( type )"," .withShape( 3, 2 )"," .andWhere( initializer )"]}, + + {"kind":"expect","text":"This new instance will have the expected data type...","code":["t.dataType == DataType.of(type)"]}, + + {"kind":"and","text":"...also it will contain the expected data.","code":["t.mut.data.get() == expected","t.rawData == expected"]}, + + {"kind":"and","text":"The tensor will have the shape we passed to the builder.","code":["t.shape() == [3, 2]"]}, + + {"kind":"and","text":"The size of the tensor will be the product of all shape entries!","code":["t.size() == 6"]}, + + {"kind":"and","text":"\n Based on the fact that the tensor is not homogeneously filled it will be an \"actual tensor\".\n The opposite of that, a \"virtual tensor\", would mean that a tensor does not have allocated \n memory proportional to the size of the tensor! \n In this case however the tensor should be actual which means that it is not virtual.\n ","code":["!t.isVirtual()"]}, + + {"kind":"where","text":"The following data is being used to populate the builder API:","code":{"type":["Integer.class","Double.class","Short.class","Float.class","Byte.class","Long.class","Character.class","Boolean.class"],"initializer":["{ i, indices -> (i + indices.sum()) }","{ i, indices -> (double) (i + indices.sum()) }","{ i, indices -> (short) (i + indices.sum()) }","{ i, indices -> (float) (i + indices.sum()) }","{ i, indices -> (byte) (i + indices.sum()) }","{ i, indices -> (long) (i + indices.sum()) }","{ i, indices -> (char) (i + indices.sum()) }","{ i, indices -> (boolean)(i % 2 == 0) }"],"expected":["[0, 2, 3, 5, 6, 8] as int[]","[0, 2, 3, 5, 6, 8] as double[]","[0, 2, 3, 5, 6, 8] as short[]","[0, 2, 3, 5, 6, 8] as float[]","[0, 2, 3, 5, 6, 8] as byte[]","[0, 2, 3, 5, 6, 8] as long[]","[0, 2, 3, 5, 6, 8] as char[]","[true, false, true, false, true, false] as boolean[]"]}} ], "problems":{"dataValues":[], "errors":[]} }, { - "id":"Initialization lambda based tensors can be created fluently.", + "id":"Initialization lambda based tensors can be created fluently. 
[7]", "result":"PASS", "duration":"0", "iterations":{ @@ -141,7 +980,82 @@ }, { - "id":"Vectors can be created fluently.", + "id":"Vectors can be created fluently. [0]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a new `Tensor` instance using the \"vector\" method in the fluent builder API.","code":["Tensor t = Tensor.of( type ).vector( values )"]}, + + {"kind":"expect","text":"This new instance will have the expected data type...","code":["t.dataType == DataType.of(type)"]}, + + {"kind":"and","text":"...also it will contain the expected data.","code":["t.mut.data.get() == data","t.rawData == data"]}, + + {"kind":"and","text":"The tensor will have a one dimensional shape of the same length as the provided data array.","code":["t.shape() == [values.length]"]}, + + {"kind":"and","text":"The size of the tensor will also be as long as the data array!","code":["t.size() == values.length"]}, + + {"kind":"and","text":"\n Based on the fact that the tensor is not homogeneously filled it will be an \"actual tensor\".\n The opposite of that, a \"virtual tensor\", would mean that a tensor does not have allocated \n memory proportional to the size of the tensor! \n In this case however the tensor should be actual which means that it is not virtual.\n ","code":["!t.isVirtual()"]}, + + {"kind":"where","text":"The following data is being used to populate the builder API:","code":{"type":["Integer.class","Double.class","Float.class","Long.class"],"values":["[4, 5, -2] as Integer[]","[-1, 7.5] as Double[]","[0.6, -32.7] as Float[]","[1, 3, 2, 4] as Long[]"],"data":["new int[] { 4, 5, -2 }","new double[]{ -1, 7.5 }","new float[] { 0.6, -32.7 }","new long[] { 1, 3, 2, 4 }"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Vectors can be created fluently. [1]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a new `Tensor` instance using the \"vector\" method in the fluent builder API.","code":["Tensor t = Tensor.of( type ).vector( values )"]}, + + {"kind":"expect","text":"This new instance will have the expected data type...","code":["t.dataType == DataType.of(type)"]}, + + {"kind":"and","text":"...also it will contain the expected data.","code":["t.mut.data.get() == data","t.rawData == data"]}, + + {"kind":"and","text":"The tensor will have a one dimensional shape of the same length as the provided data array.","code":["t.shape() == [values.length]"]}, + + {"kind":"and","text":"The size of the tensor will also be as long as the data array!","code":["t.size() == values.length"]}, + + {"kind":"and","text":"\n Based on the fact that the tensor is not homogeneously filled it will be an \"actual tensor\".\n The opposite of that, a \"virtual tensor\", would mean that a tensor does not have allocated \n memory proportional to the size of the tensor! 
\n In this case however the tensor should be actual which means that it is not virtual.\n ","code":["!t.isVirtual()"]}, + + {"kind":"where","text":"The following data is being used to populate the builder API:","code":{"type":["Integer.class","Double.class","Float.class","Long.class"],"values":["[4, 5, -2] as Integer[]","[-1, 7.5] as Double[]","[0.6, -32.7] as Float[]","[1, 3, 2, 4] as Long[]"],"data":["new int[] { 4, 5, -2 }","new double[]{ -1, 7.5 }","new float[] { 0.6, -32.7 }","new long[] { 1, 3, 2, 4 }"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Vectors can be created fluently. [2]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a new `Tensor` instance using the \"vector\" method in the fluent builder API.","code":["Tensor t = Tensor.of( type ).vector( values )"]}, + + {"kind":"expect","text":"This new instance will have the expected data type...","code":["t.dataType == DataType.of(type)"]}, + + {"kind":"and","text":"...also it will contain the expected data.","code":["t.mut.data.get() == data","t.rawData == data"]}, + + {"kind":"and","text":"The tensor will have a one dimensional shape of the same length as the provided data array.","code":["t.shape() == [values.length]"]}, + + {"kind":"and","text":"The size of the tensor will also be as long as the data array!","code":["t.size() == values.length"]}, + + {"kind":"and","text":"\n Based on the fact that the tensor is not homogeneously filled it will be an \"actual tensor\".\n The opposite of that, a \"virtual tensor\", would mean that a tensor does not have allocated \n memory proportional to the size of the tensor! \n In this case however the tensor should be actual which means that it is not virtual.\n ","code":["!t.isVirtual()"]}, + + {"kind":"where","text":"The following data is being used to populate the builder API:","code":{"type":["Integer.class","Double.class","Float.class","Long.class"],"values":["[4, 5, -2] as Integer[]","[-1, 7.5] as Double[]","[0.6, -32.7] as Float[]","[1, 3, 2, 4] as Long[]"],"data":["new int[] { 4, 5, -2 }","new double[]{ -1, 7.5 }","new float[] { 0.6, -32.7 }","new long[] { 1, 3, 2, 4 }"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Vectors can be created fluently. [3]", "result":"PASS", "duration":"0", "iterations":{ @@ -166,7 +1080,76 @@ }, { - "id":"Scalars can be created fluently.", + "id":"Scalars can be created fluently. 
[0]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a new `Tensor` instance using the \"scalar\" method in the fluent builder API.","code":["Tensor t = Tensor.of( type ).scalar( value )"]}, + + {"kind":"expect","text":"This new instance will have the expected data type...","code":["t.dataType == DataType.of(type)"]}, + + {"kind":"and","text":"...also it will contain the expected data.","code":["t.mut.data.get() == data","t.rawData == data"]}, + + {"kind":"and","text":"The tensor will have a one dimensional shape of 1.","code":["t.shape() == [1]"]}, + + {"kind":"and","text":"The size of the tensor will also 1!","code":["t.size() == 1"]}, + + {"kind":"where","text":"The following data is being used to populate the builder API:","code":{"type":["Integer.class","Double.class","Float.class","Long.class"],"value":["3 as Integer","5.7 as Double","9.4f as Float","42L as Long"],"data":["new int[] { 3 }","new double[]{ 5.7 }","new float[] { 9.4f }","new long[] { 42L }"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Scalars can be created fluently. [1]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a new `Tensor` instance using the \"scalar\" method in the fluent builder API.","code":["Tensor t = Tensor.of( type ).scalar( value )"]}, + + {"kind":"expect","text":"This new instance will have the expected data type...","code":["t.dataType == DataType.of(type)"]}, + + {"kind":"and","text":"...also it will contain the expected data.","code":["t.mut.data.get() == data","t.rawData == data"]}, + + {"kind":"and","text":"The tensor will have a one dimensional shape of 1.","code":["t.shape() == [1]"]}, + + {"kind":"and","text":"The size of the tensor will also 1!","code":["t.size() == 1"]}, + + {"kind":"where","text":"The following data is being used to populate the builder API:","code":{"type":["Integer.class","Double.class","Float.class","Long.class"],"value":["3 as Integer","5.7 as Double","9.4f as Float","42L as Long"],"data":["new int[] { 3 }","new double[]{ 5.7 }","new float[] { 9.4f }","new long[] { 42L }"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Scalars can be created fluently. [2]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a new `Tensor` instance using the \"scalar\" method in the fluent builder API.","code":["Tensor t = Tensor.of( type ).scalar( value )"]}, + + {"kind":"expect","text":"This new instance will have the expected data type...","code":["t.dataType == DataType.of(type)"]}, + + {"kind":"and","text":"...also it will contain the expected data.","code":["t.mut.data.get() == data","t.rawData == data"]}, + + {"kind":"and","text":"The tensor will have a one dimensional shape of 1.","code":["t.shape() == [1]"]}, + + {"kind":"and","text":"The size of the tensor will also 1!","code":["t.size() == 1"]}, + + {"kind":"where","text":"The following data is being used to populate the builder API:","code":{"type":["Integer.class","Double.class","Float.class","Long.class"],"value":["3 as Integer","5.7 as Double","9.4f as Float","42L as Long"],"data":["new int[] { 3 }","new double[]{ 5.7 }","new float[] { 9.4f }","new long[] { 42L }"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Scalars can be created fluently. 
[3]", "result":"PASS", "duration":"0", "iterations":{ diff --git a/docs/spock/reports/ut.tensors.Functional_Nda_Spec.json b/docs/spock/reports/ut.tensors.Functional_Nda_Spec.json index 83c03cc29..215286cae 100644 --- a/docs/spock/reports/ut.tensors.Functional_Nda_Spec.json +++ b/docs/spock/reports/ut.tensors.Functional_Nda_Spec.json @@ -9,14 +9,14 @@ "failures":"0", "errors":"0", "skipped":"0", - "duration":"0.272 seconds" + "duration":"0.558 seconds" }, "headers":[],"tags":{},"see":[], "features":[ { "id":"We can initialize an ND-Array using a filler lambda mapping indices to items.", "result":"PASS", - "duration":"0.043 seconds", + "duration":"0.108 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -33,7 +33,7 @@ { "id":"We can find both min and max items in an ND-array by providing a comparator.", "result":"PASS", - "duration":"0.029 seconds", + "duration":"0.055 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -54,7 +54,7 @@ { "id":"We can analyse the values of a nd-array using various predicate receiving methods", "result":"PASS", - "duration":"0.031 seconds", + "duration":"0.062 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -75,7 +75,7 @@ { "id":"We can use the \"filter\" method as a shortcut for \"stream().filter(..)\".", "result":"PASS", - "duration":"0.027 seconds", + "duration":"0.053 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -90,7 +90,7 @@ { "id":"We can use the \"flatMap\" method as a shortcut for \"stream().flatMap(..)\".", "result":"PASS", - "duration":"0.028 seconds", + "duration":"0.053 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -105,7 +105,7 @@ { "id":"ND-Array mapping lambdas produce expected nd-arrays.", "result":"PASS", - "duration":"0.027 seconds", + "duration":"0.053 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -126,7 +126,7 @@ { "id":"The \"map\" method is a shorter convenience method for mapping to the same type.", "result":"PASS", - "duration":"0.027 seconds", + "duration":"0.053 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -145,7 +145,7 @@ { "id":"We can find both min and max items in a tensor by providing a comparator.", "result":"PASS", - "duration":"0.027 seconds", + "duration":"0.055 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -166,7 +166,7 @@ { "id":"We can collect a stream into a nd-array.", "result":"PASS", - "duration":"0.027 seconds", + "duration":"0.059 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, diff --git a/docs/spock/reports/ut.tensors.Functional_Tensor_Spec.json b/docs/spock/reports/ut.tensors.Functional_Tensor_Spec.json index 44d7a4eab..311147487 100644 --- a/docs/spock/reports/ut.tensors.Functional_Tensor_Spec.json +++ b/docs/spock/reports/ut.tensors.Functional_Tensor_Spec.json @@ -4,19 +4,19 @@ "narrative":"Tensors expose a powerful API for performing operations on them\n in a functional style.", "subjects":[], "statistics":{ - "runs":"7", + "runs":"4", "successRate":"100.0%", "failures":"0", "errors":"0", - "skipped":"0", - "duration":"0.301 seconds" + "skipped":"3", + "duration":"0.230 seconds" }, "headers":[],"tags":{},"see":[], "features":[ { "id":"We can initialize a tensor using a filler lambda mapping indices to items.", "result":"PASS", - "duration":"0.029 seconds", + "duration":"0.056 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -30,13 +30,13 @@ { "id":"We can analyse the values of a tensor using various predicate receiving methods", - 
"result":"PASS", - "duration":"0.059 seconds", + "result":"IGNORED", + "duration":"0", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, "blocks":[ - {"kind":"given","text":"We create 2 tensors, where one is a slice of the other.","code":["var a = Tensor.ofInts().withShape(3, 2).andFill(2, 0, 1, 1, 8, 3)","var b = a[1, 0..1]"]}, + {"kind":"given","text":"We create 2 tensors, where one is a slice of the other.","code":["var a = Tensor.ofInts().on(Device.get(device)).withShape(3, 2).andFill(2, 0, 1, 1, 8, 3)","var b = a[1, 0..1]"]}, {"kind":"expect","text":"","code":["!a.every((Predicate){it == 1})","a.any((Predicate){it == 1})","a.any((Predicate){it == 8})","!a.any((Predicate){it == 42})"]}, @@ -54,7 +54,7 @@ { "id":"We can use the \"filter\" method as a shortcut for \"stream().filter(..)\".", "result":"PASS", - "duration":"0.027 seconds", + "duration":"0.056 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -69,7 +69,7 @@ { "id":"We can use the \"flatMap\" method as a shortcut for \"stream().flatMap(..)\".", "result":"PASS", - "duration":"0.030 seconds", + "duration":"0.055 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -83,8 +83,8 @@ { "id":"Tensor mapping lambdas produce expected tensors.", - "result":"PASS", - "duration":"0.059 seconds", + "result":"IGNORED", + "duration":"0", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -106,8 +106,8 @@ { "id":"The \"map\" method is a shorter convenience method for mapping to the same type.", - "result":"PASS", - "duration":"0.057 seconds", + "result":"IGNORED", + "duration":"0", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -128,7 +128,7 @@ { "id":"We can find both min and max items in a tensor by providing a comparator.", "result":"PASS", - "duration":"0.030 seconds", + "duration":"0.058 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, diff --git a/docs/spock/reports/ut.tensors.Reshape_Spec.json b/docs/spock/reports/ut.tensors.Reshape_Spec.json index e1b0991e4..7faabf24f 100644 --- a/docs/spock/reports/ut.tensors.Reshape_Spec.json +++ b/docs/spock/reports/ut.tensors.Reshape_Spec.json @@ -9,14 +9,14 @@ "failures":"0", "errors":"0", "skipped":"0", - "duration":"0.002 seconds" + "duration":"0.013 seconds" }, "headers":[],"tags":{},"see":[], "features":[ { "id":"We can create a new tensor with a different shape.", "result":"PASS", - "duration":"0", + "duration":"0.003 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -35,7 +35,7 @@ { "id":"We can use `-1` in the desired shape if we want the axis size to be determined automatically.", "result":"PASS", - "duration":"0", + "duration":"0.001 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -54,7 +54,7 @@ { "id":"The reshape operation supports autograd!", "result":"PASS", - "duration":"0", + "duration":"0.002 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":["\n Changing the shape of a tensor is a very common operation in machine learning.\n This is why the reshape operation also supports autograd.\n So for example when you have a tensor `a` with shape `s1` and you reshape it to \n a new tensor `b` with shape `s2` then during backpropagation the error `e_b` of `b`\n with the shape `s2` will be propagated to a new error `e_a` of `a` with the shape `s1`.\n It is basically the reshape operation applied in reverse.\n "] }, diff --git a/docs/spock/reports/ut.tensors.Tensor_As_Container_Spec.json b/docs/spock/reports/ut.tensors.Tensor_As_Container_Spec.json index e19cab974..c5dbb25fe 100644 --- 
a/docs/spock/reports/ut.tensors.Tensor_As_Container_Spec.json +++ b/docs/spock/reports/ut.tensors.Tensor_As_Container_Spec.json @@ -9,14 +9,14 @@ "failures":"0", "errors":"0", "skipped":"0", - "duration":"0.009 seconds" + "duration":"0.055 seconds" }, "headers":["\n Although you can create a tensor of almost anything, you will\n not be able to execute operations on every kind of tensor... \n "],"tags":{},"see":[], "features":[ { "id":"Plus operator on String tensors works element-wise.", "result":"PASS", - "duration":"0.001 seconds", + "duration":"0.005 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -37,7 +37,7 @@ { "id":"Tensor operations translate to custom data type \"ComplexNumber\".", "result":"PASS", - "duration":"0.002 seconds", + "duration":"0.020 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -54,7 +54,7 @@ { "id":"More tensor operations translate to custom data type \"ComplexNumber\".", "result":"PASS", - "duration":"0.001 seconds", + "duration":"0.009 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -69,7 +69,7 @@ { "id":"We can apply predicates on the values of a tensor.", "result":"PASS", - "duration":"0.002 seconds", + "duration":"0.014 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, diff --git a/docs/spock/reports/ut.tensors.Tensor_Assign_Spec.json b/docs/spock/reports/ut.tensors.Tensor_Assign_Spec.json index 89cf2e773..516e04c19 100644 --- a/docs/spock/reports/ut.tensors.Tensor_Assign_Spec.json +++ b/docs/spock/reports/ut.tensors.Tensor_Assign_Spec.json @@ -9,14 +9,14 @@ "failures":"0", "errors":"0", "skipped":"0", - "duration":"0.003 seconds" + "duration":"0.011 seconds" }, "headers":[],"tags":{},"see":[], "features":[ { "id":"We can use the \"mut\" API to assign the contents of one tensor into another one.", "result":"PASS", - "duration":"0", + "duration":"0.004 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -33,7 +33,7 @@ { "id":"Assignment can be easily achieved through subscription operators.", "result":"PASS", - "duration":"0", + "duration":"0.002 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -52,7 +52,7 @@ { "id":"We can assign one slice into another one.", "result":"PASS", - "duration":"0", + "duration":"0.002 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":["\n Note that using the 'assign' operation on slices should be handled with care,\n since the operation has side effects on the underlying data array\n which is shared by both the slice and its parent.\n Use the 'copy' operation on slices if you want to avoid this.\n "] }, diff --git a/docs/spock/reports/ut.tensors.Tensor_Conversion_Spec.json b/docs/spock/reports/ut.tensors.Tensor_Conversion_Spec.json index 9e18f9498..007d11dcb 100644 --- a/docs/spock/reports/ut.tensors.Tensor_Conversion_Spec.json +++ b/docs/spock/reports/ut.tensors.Tensor_Conversion_Spec.json @@ -4,19 +4,19 @@ "narrative":"Here we specify how a tensor can be converted to other data types\n like for example another tensor of a different data type.", "subjects":["neureka.Tensor"], "statistics":{ - "runs":"3", + "runs":"38", "successRate":"100.0%", "failures":"0", "errors":"0", "skipped":"0", - "duration":"0.011 seconds" + "duration":"0.024 seconds" }, "headers":[],"tags":{},"see":[], "features":[ { "id":"We turn a tensor into a scalar value or string through the \"as\" operator!", "result":"PASS", - "duration":"0", + "duration":"0.003 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -33,7 +33,7 @@ { "id":"Tensors value 
type can be changed by calling \"toType(...)\".", "result":"PASS", - "duration":"0", + "duration":"0.002 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":["\n Warning! The `toType` method mutates the tensor!\n This is especially problematic with respect to generics, \n because if the tensor is still used as a tensor of the old type, \n then the compiler will not be able to detect that the tensor has changed its type.\n This is why we have to use the `unsafe` API exists.\n Only use this method if there are urgent performance requirements and\n you know exactly what you are doing!\n "] }, @@ -52,11 +52,816 @@ }, { - "id":"We can change the data type of all kinds of tensors.", + "id":"We can change the data type of all kinds of tensors. [0]", + "result":"PASS", + "duration":"0.002 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":["\n Warning! The `toType` method mutates the tensor!\n This is especially problematic with respect to generics, \n because if the tensor is still used as a tensor of the old type, \n then the compiler will not be able to detect that the tensor has changed its type.\n This is why we have to use the `unsafe` API exists.\n Only use this method if there are urgent performance requirements and\n you know exactly what you are doing!\n "] + }, + "blocks":[ + {"kind":"given","text":"A simple tensor with a few initial values.","code":["var data = [-3, -12, 42, -42, 12, 3]","var a = Tensor.of(sourceType).withShape(data.size()).andFill(data)"]}, + + {"kind":"when","text":"We change the data type of the tensor using the unsafe \"toType\" method.","code":["var b = a.mut.toType(targetType)"]}, + + {"kind":"then","text":"The returned tensor has the expected data type.","code":["b.itemType == targetType"]}, + + {"kind":"and","text":"The returned tensor has the expected values.","code":["b.rawItems == data.collect({ it.asType(targetType) })"]}, + + {"kind":"and","text":"The returned tensor is in fact the original instance.","code":["a === b"]}, + + {"kind":"where","text":"We use the following data and matrix dimensions!","code":{"sourceType":["Double","Double","Double","Double","Double","Double","Float","Float","Float","Float","Float","Float","Long","Long","Long","Long","Long","Long","Integer","Integer","Integer","Integer","Integer","Integer","Short","Short","Short","Short","Short","Short","Byte","Byte","Byte","Byte","Byte","Byte"],"targetType":["Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can change the data type of all kinds of tensors. [1]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":["\n Warning! 
The `toType` method mutates the tensor!\n This is especially problematic with respect to generics, \n because if the tensor is still used as a tensor of the old type, \n then the compiler will not be able to detect that the tensor has changed its type.\n This is why we have to use the `unsafe` API exists.\n Only use this method if there are urgent performance requirements and\n you know exactly what you are doing!\n "] + }, + "blocks":[ + {"kind":"given","text":"A simple tensor with a few initial values.","code":["var data = [-3, -12, 42, -42, 12, 3]","var a = Tensor.of(sourceType).withShape(data.size()).andFill(data)"]}, + + {"kind":"when","text":"We change the data type of the tensor using the unsafe \"toType\" method.","code":["var b = a.mut.toType(targetType)"]}, + + {"kind":"then","text":"The returned tensor has the expected data type.","code":["b.itemType == targetType"]}, + + {"kind":"and","text":"The returned tensor has the expected values.","code":["b.rawItems == data.collect({ it.asType(targetType) })"]}, + + {"kind":"and","text":"The returned tensor is in fact the original instance.","code":["a === b"]}, + + {"kind":"where","text":"We use the following data and matrix dimensions!","code":{"sourceType":["Double","Double","Double","Double","Double","Double","Float","Float","Float","Float","Float","Float","Long","Long","Long","Long","Long","Long","Integer","Integer","Integer","Integer","Integer","Integer","Short","Short","Short","Short","Short","Short","Byte","Byte","Byte","Byte","Byte","Byte"],"targetType":["Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can change the data type of all kinds of tensors. [2]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":["\n Warning! 
The `toType` method mutates the tensor!\n This is especially problematic with respect to generics, \n because if the tensor is still used as a tensor of the old type, \n then the compiler will not be able to detect that the tensor has changed its type.\n This is why we have to use the `unsafe` API exists.\n Only use this method if there are urgent performance requirements and\n you know exactly what you are doing!\n "] + }, + "blocks":[ + {"kind":"given","text":"A simple tensor with a few initial values.","code":["var data = [-3, -12, 42, -42, 12, 3]","var a = Tensor.of(sourceType).withShape(data.size()).andFill(data)"]}, + + {"kind":"when","text":"We change the data type of the tensor using the unsafe \"toType\" method.","code":["var b = a.mut.toType(targetType)"]}, + + {"kind":"then","text":"The returned tensor has the expected data type.","code":["b.itemType == targetType"]}, + + {"kind":"and","text":"The returned tensor has the expected values.","code":["b.rawItems == data.collect({ it.asType(targetType) })"]}, + + {"kind":"and","text":"The returned tensor is in fact the original instance.","code":["a === b"]}, + + {"kind":"where","text":"We use the following data and matrix dimensions!","code":{"sourceType":["Double","Double","Double","Double","Double","Double","Float","Float","Float","Float","Float","Float","Long","Long","Long","Long","Long","Long","Integer","Integer","Integer","Integer","Integer","Integer","Short","Short","Short","Short","Short","Short","Byte","Byte","Byte","Byte","Byte","Byte"],"targetType":["Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can change the data type of all kinds of tensors. [3]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":["\n Warning! 
The `toType` method mutates the tensor!\n This is especially problematic with respect to generics, \n because if the tensor is still used as a tensor of the old type, \n then the compiler will not be able to detect that the tensor has changed its type.\n This is why we have to use the `unsafe` API exists.\n Only use this method if there are urgent performance requirements and\n you know exactly what you are doing!\n "] + }, + "blocks":[ + {"kind":"given","text":"A simple tensor with a few initial values.","code":["var data = [-3, -12, 42, -42, 12, 3]","var a = Tensor.of(sourceType).withShape(data.size()).andFill(data)"]}, + + {"kind":"when","text":"We change the data type of the tensor using the unsafe \"toType\" method.","code":["var b = a.mut.toType(targetType)"]}, + + {"kind":"then","text":"The returned tensor has the expected data type.","code":["b.itemType == targetType"]}, + + {"kind":"and","text":"The returned tensor has the expected values.","code":["b.rawItems == data.collect({ it.asType(targetType) })"]}, + + {"kind":"and","text":"The returned tensor is in fact the original instance.","code":["a === b"]}, + + {"kind":"where","text":"We use the following data and matrix dimensions!","code":{"sourceType":["Double","Double","Double","Double","Double","Double","Float","Float","Float","Float","Float","Float","Long","Long","Long","Long","Long","Long","Integer","Integer","Integer","Integer","Integer","Integer","Short","Short","Short","Short","Short","Short","Byte","Byte","Byte","Byte","Byte","Byte"],"targetType":["Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can change the data type of all kinds of tensors. [4]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":["\n Warning! 
The `toType` method mutates the tensor!\n This is especially problematic with respect to generics, \n because if the tensor is still used as a tensor of the old type, \n then the compiler will not be able to detect that the tensor has changed its type.\n This is why we have to use the `unsafe` API exists.\n Only use this method if there are urgent performance requirements and\n you know exactly what you are doing!\n "] + }, + "blocks":[ + {"kind":"given","text":"A simple tensor with a few initial values.","code":["var data = [-3, -12, 42, -42, 12, 3]","var a = Tensor.of(sourceType).withShape(data.size()).andFill(data)"]}, + + {"kind":"when","text":"We change the data type of the tensor using the unsafe \"toType\" method.","code":["var b = a.mut.toType(targetType)"]}, + + {"kind":"then","text":"The returned tensor has the expected data type.","code":["b.itemType == targetType"]}, + + {"kind":"and","text":"The returned tensor has the expected values.","code":["b.rawItems == data.collect({ it.asType(targetType) })"]}, + + {"kind":"and","text":"The returned tensor is in fact the original instance.","code":["a === b"]}, + + {"kind":"where","text":"We use the following data and matrix dimensions!","code":{"sourceType":["Double","Double","Double","Double","Double","Double","Float","Float","Float","Float","Float","Float","Long","Long","Long","Long","Long","Long","Integer","Integer","Integer","Integer","Integer","Integer","Short","Short","Short","Short","Short","Short","Byte","Byte","Byte","Byte","Byte","Byte"],"targetType":["Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can change the data type of all kinds of tensors. [5]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":["\n Warning! 
The `toType` method mutates the tensor!\n This is especially problematic with respect to generics, \n because if the tensor is still used as a tensor of the old type, \n then the compiler will not be able to detect that the tensor has changed its type.\n This is why we have to use the `unsafe` API exists.\n Only use this method if there are urgent performance requirements and\n you know exactly what you are doing!\n "] + }, + "blocks":[ + {"kind":"given","text":"A simple tensor with a few initial values.","code":["var data = [-3, -12, 42, -42, 12, 3]","var a = Tensor.of(sourceType).withShape(data.size()).andFill(data)"]}, + + {"kind":"when","text":"We change the data type of the tensor using the unsafe \"toType\" method.","code":["var b = a.mut.toType(targetType)"]}, + + {"kind":"then","text":"The returned tensor has the expected data type.","code":["b.itemType == targetType"]}, + + {"kind":"and","text":"The returned tensor has the expected values.","code":["b.rawItems == data.collect({ it.asType(targetType) })"]}, + + {"kind":"and","text":"The returned tensor is in fact the original instance.","code":["a === b"]}, + + {"kind":"where","text":"We use the following data and matrix dimensions!","code":{"sourceType":["Double","Double","Double","Double","Double","Double","Float","Float","Float","Float","Float","Float","Long","Long","Long","Long","Long","Long","Integer","Integer","Integer","Integer","Integer","Integer","Short","Short","Short","Short","Short","Short","Byte","Byte","Byte","Byte","Byte","Byte"],"targetType":["Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can change the data type of all kinds of tensors. [6]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":["\n Warning! 
The `toType` method mutates the tensor!\n This is especially problematic with respect to generics, \n because if the tensor is still used as a tensor of the old type, \n then the compiler will not be able to detect that the tensor has changed its type.\n This is why we have to use the `unsafe` API exists.\n Only use this method if there are urgent performance requirements and\n you know exactly what you are doing!\n "] + }, + "blocks":[ + {"kind":"given","text":"A simple tensor with a few initial values.","code":["var data = [-3, -12, 42, -42, 12, 3]","var a = Tensor.of(sourceType).withShape(data.size()).andFill(data)"]}, + + {"kind":"when","text":"We change the data type of the tensor using the unsafe \"toType\" method.","code":["var b = a.mut.toType(targetType)"]}, + + {"kind":"then","text":"The returned tensor has the expected data type.","code":["b.itemType == targetType"]}, + + {"kind":"and","text":"The returned tensor has the expected values.","code":["b.rawItems == data.collect({ it.asType(targetType) })"]}, + + {"kind":"and","text":"The returned tensor is in fact the original instance.","code":["a === b"]}, + + {"kind":"where","text":"We use the following data and matrix dimensions!","code":{"sourceType":["Double","Double","Double","Double","Double","Double","Float","Float","Float","Float","Float","Float","Long","Long","Long","Long","Long","Long","Integer","Integer","Integer","Integer","Integer","Integer","Short","Short","Short","Short","Short","Short","Byte","Byte","Byte","Byte","Byte","Byte"],"targetType":["Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can change the data type of all kinds of tensors. [7]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":["\n Warning! 
The `toType` method mutates the tensor!\n This is especially problematic with respect to generics, \n because if the tensor is still used as a tensor of the old type, \n then the compiler will not be able to detect that the tensor has changed its type.\n This is why we have to use the `unsafe` API exists.\n Only use this method if there are urgent performance requirements and\n you know exactly what you are doing!\n "] + }, + "blocks":[ + {"kind":"given","text":"A simple tensor with a few initial values.","code":["var data = [-3, -12, 42, -42, 12, 3]","var a = Tensor.of(sourceType).withShape(data.size()).andFill(data)"]}, + + {"kind":"when","text":"We change the data type of the tensor using the unsafe \"toType\" method.","code":["var b = a.mut.toType(targetType)"]}, + + {"kind":"then","text":"The returned tensor has the expected data type.","code":["b.itemType == targetType"]}, + + {"kind":"and","text":"The returned tensor has the expected values.","code":["b.rawItems == data.collect({ it.asType(targetType) })"]}, + + {"kind":"and","text":"The returned tensor is in fact the original instance.","code":["a === b"]}, + + {"kind":"where","text":"We use the following data and matrix dimensions!","code":{"sourceType":["Double","Double","Double","Double","Double","Double","Float","Float","Float","Float","Float","Float","Long","Long","Long","Long","Long","Long","Integer","Integer","Integer","Integer","Integer","Integer","Short","Short","Short","Short","Short","Short","Byte","Byte","Byte","Byte","Byte","Byte"],"targetType":["Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can change the data type of all kinds of tensors. [8]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":["\n Warning! 
The `toType` method mutates the tensor!\n This is especially problematic with respect to generics, \n because if the tensor is still used as a tensor of the old type, \n then the compiler will not be able to detect that the tensor has changed its type.\n This is why we have to use the `unsafe` API exists.\n Only use this method if there are urgent performance requirements and\n you know exactly what you are doing!\n "] + }, + "blocks":[ + {"kind":"given","text":"A simple tensor with a few initial values.","code":["var data = [-3, -12, 42, -42, 12, 3]","var a = Tensor.of(sourceType).withShape(data.size()).andFill(data)"]}, + + {"kind":"when","text":"We change the data type of the tensor using the unsafe \"toType\" method.","code":["var b = a.mut.toType(targetType)"]}, + + {"kind":"then","text":"The returned tensor has the expected data type.","code":["b.itemType == targetType"]}, + + {"kind":"and","text":"The returned tensor has the expected values.","code":["b.rawItems == data.collect({ it.asType(targetType) })"]}, + + {"kind":"and","text":"The returned tensor is in fact the original instance.","code":["a === b"]}, + + {"kind":"where","text":"We use the following data and matrix dimensions!","code":{"sourceType":["Double","Double","Double","Double","Double","Double","Float","Float","Float","Float","Float","Float","Long","Long","Long","Long","Long","Long","Integer","Integer","Integer","Integer","Integer","Integer","Short","Short","Short","Short","Short","Short","Byte","Byte","Byte","Byte","Byte","Byte"],"targetType":["Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can change the data type of all kinds of tensors. [9]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":["\n Warning! 
The `toType` method mutates the tensor!\n This is especially problematic with respect to generics, \n because if the tensor is still used as a tensor of the old type, \n then the compiler will not be able to detect that the tensor has changed its type.\n This is why we have to use the `unsafe` API exists.\n Only use this method if there are urgent performance requirements and\n you know exactly what you are doing!\n "] + }, + "blocks":[ + {"kind":"given","text":"A simple tensor with a few initial values.","code":["var data = [-3, -12, 42, -42, 12, 3]","var a = Tensor.of(sourceType).withShape(data.size()).andFill(data)"]}, + + {"kind":"when","text":"We change the data type of the tensor using the unsafe \"toType\" method.","code":["var b = a.mut.toType(targetType)"]}, + + {"kind":"then","text":"The returned tensor has the expected data type.","code":["b.itemType == targetType"]}, + + {"kind":"and","text":"The returned tensor has the expected values.","code":["b.rawItems == data.collect({ it.asType(targetType) })"]}, + + {"kind":"and","text":"The returned tensor is in fact the original instance.","code":["a === b"]}, + + {"kind":"where","text":"We use the following data and matrix dimensions!","code":{"sourceType":["Double","Double","Double","Double","Double","Double","Float","Float","Float","Float","Float","Float","Long","Long","Long","Long","Long","Long","Integer","Integer","Integer","Integer","Integer","Integer","Short","Short","Short","Short","Short","Short","Byte","Byte","Byte","Byte","Byte","Byte"],"targetType":["Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can change the data type of all kinds of tensors. [10]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":["\n Warning! 
The `toType` method mutates the tensor!\n This is especially problematic with respect to generics, \n because if the tensor is still used as a tensor of the old type, \n then the compiler will not be able to detect that the tensor has changed its type.\n This is why we have to use the `unsafe` API exists.\n Only use this method if there are urgent performance requirements and\n you know exactly what you are doing!\n "] + }, + "blocks":[ + {"kind":"given","text":"A simple tensor with a few initial values.","code":["var data = [-3, -12, 42, -42, 12, 3]","var a = Tensor.of(sourceType).withShape(data.size()).andFill(data)"]}, + + {"kind":"when","text":"We change the data type of the tensor using the unsafe \"toType\" method.","code":["var b = a.mut.toType(targetType)"]}, + + {"kind":"then","text":"The returned tensor has the expected data type.","code":["b.itemType == targetType"]}, + + {"kind":"and","text":"The returned tensor has the expected values.","code":["b.rawItems == data.collect({ it.asType(targetType) })"]}, + + {"kind":"and","text":"The returned tensor is in fact the original instance.","code":["a === b"]}, + + {"kind":"where","text":"We use the following data and matrix dimensions!","code":{"sourceType":["Double","Double","Double","Double","Double","Double","Float","Float","Float","Float","Float","Float","Long","Long","Long","Long","Long","Long","Integer","Integer","Integer","Integer","Integer","Integer","Short","Short","Short","Short","Short","Short","Byte","Byte","Byte","Byte","Byte","Byte"],"targetType":["Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can change the data type of all kinds of tensors. [11]", "result":"PASS", "duration":"0", "iterations":{ - "tags":{},"see":[],"extraInfo":["\n Warning! The `toType` method mutates the tensor!\n This is especially problematic with respect to generics, \n because if the tensor is still used as a tensor of the old type, \n then the compiler will not be able to detect that the tensor has changed its type.\n This is why we have to use the `unsafe` API exists.\n Only use this method if there are urgent performance requirements and\n you know exactly what you are doing!\n ","\n Warning! The `toType` method mutates the tensor!\n This is especially problematic with respect to generics, \n because if the tensor is still used as a tensor of the old type, \n then the compiler will not be able to detect that the tensor has changed its type.\n This is why we have to use the `unsafe` API exists.\n Only use this method if there are urgent performance requirements and\n you know exactly what you are doing!\n ","\n Warning! The `toType` method mutates the tensor!\n This is especially problematic with respect to generics, \n because if the tensor is still used as a tensor of the old type, \n then the compiler will not be able to detect that the tensor has changed its type.\n This is why we have to use the `unsafe` API exists.\n Only use this method if there are urgent performance requirements and\n you know exactly what you are doing!\n ","\n Warning! 
The `toType` method mutates the tensor!\n This is especially problematic with respect to generics, \n because if the tensor is still used as a tensor of the old type, \n then the compiler will not be able to detect that the tensor has changed its type.\n This is why we have to use the `unsafe` API exists.\n Only use this method if there are urgent performance requirements and\n you know exactly what you are doing!\n ","\n Warning! The `toType` method mutates the tensor!\n This is especially problematic with respect to generics, \n because if the tensor is still used as a tensor of the old type, \n then the compiler will not be able to detect that the tensor has changed its type.\n This is why we have to use the `unsafe` API exists.\n Only use this method if there are urgent performance requirements and\n you know exactly what you are doing!\n ","\n Warning! The `toType` method mutates the tensor!\n This is especially problematic with respect to generics, \n because if the tensor is still used as a tensor of the old type, \n then the compiler will not be able to detect that the tensor has changed its type.\n This is why we have to use the `unsafe` API exists.\n Only use this method if there are urgent performance requirements and\n you know exactly what you are doing!\n ","\n Warning! The `toType` method mutates the tensor!\n This is especially problematic with respect to generics, \n because if the tensor is still used as a tensor of the old type, \n then the compiler will not be able to detect that the tensor has changed its type.\n This is why we have to use the `unsafe` API exists.\n Only use this method if there are urgent performance requirements and\n you know exactly what you are doing!\n ","\n Warning! The `toType` method mutates the tensor!\n This is especially problematic with respect to generics, \n because if the tensor is still used as a tensor of the old type, \n then the compiler will not be able to detect that the tensor has changed its type.\n This is why we have to use the `unsafe` API exists.\n Only use this method if there are urgent performance requirements and\n you know exactly what you are doing!\n ","\n Warning! The `toType` method mutates the tensor!\n This is especially problematic with respect to generics, \n because if the tensor is still used as a tensor of the old type, \n then the compiler will not be able to detect that the tensor has changed its type.\n This is why we have to use the `unsafe` API exists.\n Only use this method if there are urgent performance requirements and\n you know exactly what you are doing!\n ","\n Warning! The `toType` method mutates the tensor!\n This is especially problematic with respect to generics, \n because if the tensor is still used as a tensor of the old type, \n then the compiler will not be able to detect that the tensor has changed its type.\n This is why we have to use the `unsafe` API exists.\n Only use this method if there are urgent performance requirements and\n you know exactly what you are doing!\n ","\n Warning! The `toType` method mutates the tensor!\n This is especially problematic with respect to generics, \n because if the tensor is still used as a tensor of the old type, \n then the compiler will not be able to detect that the tensor has changed its type.\n This is why we have to use the `unsafe` API exists.\n Only use this method if there are urgent performance requirements and\n you know exactly what you are doing!\n ","\n Warning! 
The `toType` method mutates the tensor!\n This is especially problematic with respect to generics, \n because if the tensor is still used as a tensor of the old type, \n then the compiler will not be able to detect that the tensor has changed its type.\n This is why we have to use the `unsafe` API exists.\n Only use this method if there are urgent performance requirements and\n you know exactly what you are doing!\n ","\n Warning! The `toType` method mutates the tensor!\n This is especially problematic with respect to generics, \n because if the tensor is still used as a tensor of the old type, \n then the compiler will not be able to detect that the tensor has changed its type.\n This is why we have to use the `unsafe` API exists.\n Only use this method if there are urgent performance requirements and\n you know exactly what you are doing!\n ","\n Warning! The `toType` method mutates the tensor!\n This is especially problematic with respect to generics, \n because if the tensor is still used as a tensor of the old type, \n then the compiler will not be able to detect that the tensor has changed its type.\n This is why we have to use the `unsafe` API exists.\n Only use this method if there are urgent performance requirements and\n you know exactly what you are doing!\n ","\n Warning! The `toType` method mutates the tensor!\n This is especially problematic with respect to generics, \n because if the tensor is still used as a tensor of the old type, \n then the compiler will not be able to detect that the tensor has changed its type.\n This is why we have to use the `unsafe` API exists.\n Only use this method if there are urgent performance requirements and\n you know exactly what you are doing!\n ","\n Warning! The `toType` method mutates the tensor!\n This is especially problematic with respect to generics, \n because if the tensor is still used as a tensor of the old type, \n then the compiler will not be able to detect that the tensor has changed its type.\n This is why we have to use the `unsafe` API exists.\n Only use this method if there are urgent performance requirements and\n you know exactly what you are doing!\n ","\n Warning! The `toType` method mutates the tensor!\n This is especially problematic with respect to generics, \n because if the tensor is still used as a tensor of the old type, \n then the compiler will not be able to detect that the tensor has changed its type.\n This is why we have to use the `unsafe` API exists.\n Only use this method if there are urgent performance requirements and\n you know exactly what you are doing!\n ","\n Warning! The `toType` method mutates the tensor!\n This is especially problematic with respect to generics, \n because if the tensor is still used as a tensor of the old type, \n then the compiler will not be able to detect that the tensor has changed its type.\n This is why we have to use the `unsafe` API exists.\n Only use this method if there are urgent performance requirements and\n you know exactly what you are doing!\n ","\n Warning! The `toType` method mutates the tensor!\n This is especially problematic with respect to generics, \n because if the tensor is still used as a tensor of the old type, \n then the compiler will not be able to detect that the tensor has changed its type.\n This is why we have to use the `unsafe` API exists.\n Only use this method if there are urgent performance requirements and\n you know exactly what you are doing!\n ","\n Warning! 
The `toType` method mutates the tensor!\n This is especially problematic with respect to generics, \n because if the tensor is still used as a tensor of the old type, \n then the compiler will not be able to detect that the tensor has changed its type.\n This is why we have to use the `unsafe` API exists.\n Only use this method if there are urgent performance requirements and\n you know exactly what you are doing!\n ","\n Warning! The `toType` method mutates the tensor!\n This is especially problematic with respect to generics, \n because if the tensor is still used as a tensor of the old type, \n then the compiler will not be able to detect that the tensor has changed its type.\n This is why we have to use the `unsafe` API exists.\n Only use this method if there are urgent performance requirements and\n you know exactly what you are doing!\n ","\n Warning! The `toType` method mutates the tensor!\n This is especially problematic with respect to generics, \n because if the tensor is still used as a tensor of the old type, \n then the compiler will not be able to detect that the tensor has changed its type.\n This is why we have to use the `unsafe` API exists.\n Only use this method if there are urgent performance requirements and\n you know exactly what you are doing!\n ","\n Warning! The `toType` method mutates the tensor!\n This is especially problematic with respect to generics, \n because if the tensor is still used as a tensor of the old type, \n then the compiler will not be able to detect that the tensor has changed its type.\n This is why we have to use the `unsafe` API exists.\n Only use this method if there are urgent performance requirements and\n you know exactly what you are doing!\n ","\n Warning! The `toType` method mutates the tensor!\n This is especially problematic with respect to generics, \n because if the tensor is still used as a tensor of the old type, \n then the compiler will not be able to detect that the tensor has changed its type.\n This is why we have to use the `unsafe` API exists.\n Only use this method if there are urgent performance requirements and\n you know exactly what you are doing!\n ","\n Warning! The `toType` method mutates the tensor!\n This is especially problematic with respect to generics, \n because if the tensor is still used as a tensor of the old type, \n then the compiler will not be able to detect that the tensor has changed its type.\n This is why we have to use the `unsafe` API exists.\n Only use this method if there are urgent performance requirements and\n you know exactly what you are doing!\n ","\n Warning! The `toType` method mutates the tensor!\n This is especially problematic with respect to generics, \n because if the tensor is still used as a tensor of the old type, \n then the compiler will not be able to detect that the tensor has changed its type.\n This is why we have to use the `unsafe` API exists.\n Only use this method if there are urgent performance requirements and\n you know exactly what you are doing!\n ","\n Warning! The `toType` method mutates the tensor!\n This is especially problematic with respect to generics, \n because if the tensor is still used as a tensor of the old type, \n then the compiler will not be able to detect that the tensor has changed its type.\n This is why we have to use the `unsafe` API exists.\n Only use this method if there are urgent performance requirements and\n you know exactly what you are doing!\n ","\n Warning! 
The `toType` method mutates the tensor!\n This is especially problematic with respect to generics, \n because if the tensor is still used as a tensor of the old type, \n then the compiler will not be able to detect that the tensor has changed its type.\n This is why we have to use the `unsafe` API exists.\n Only use this method if there are urgent performance requirements and\n you know exactly what you are doing!\n ","\n Warning! The `toType` method mutates the tensor!\n This is especially problematic with respect to generics, \n because if the tensor is still used as a tensor of the old type, \n then the compiler will not be able to detect that the tensor has changed its type.\n This is why we have to use the `unsafe` API exists.\n Only use this method if there are urgent performance requirements and\n you know exactly what you are doing!\n ","\n Warning! The `toType` method mutates the tensor!\n This is especially problematic with respect to generics, \n because if the tensor is still used as a tensor of the old type, \n then the compiler will not be able to detect that the tensor has changed its type.\n This is why we have to use the `unsafe` API exists.\n Only use this method if there are urgent performance requirements and\n you know exactly what you are doing!\n ","\n Warning! The `toType` method mutates the tensor!\n This is especially problematic with respect to generics, \n because if the tensor is still used as a tensor of the old type, \n then the compiler will not be able to detect that the tensor has changed its type.\n This is why we have to use the `unsafe` API exists.\n Only use this method if there are urgent performance requirements and\n you know exactly what you are doing!\n ","\n Warning! The `toType` method mutates the tensor!\n This is especially problematic with respect to generics, \n because if the tensor is still used as a tensor of the old type, \n then the compiler will not be able to detect that the tensor has changed its type.\n This is why we have to use the `unsafe` API exists.\n Only use this method if there are urgent performance requirements and\n you know exactly what you are doing!\n ","\n Warning! The `toType` method mutates the tensor!\n This is especially problematic with respect to generics, \n because if the tensor is still used as a tensor of the old type, \n then the compiler will not be able to detect that the tensor has changed its type.\n This is why we have to use the `unsafe` API exists.\n Only use this method if there are urgent performance requirements and\n you know exactly what you are doing!\n ","\n Warning! The `toType` method mutates the tensor!\n This is especially problematic with respect to generics, \n because if the tensor is still used as a tensor of the old type, \n then the compiler will not be able to detect that the tensor has changed its type.\n This is why we have to use the `unsafe` API exists.\n Only use this method if there are urgent performance requirements and\n you know exactly what you are doing!\n ","\n Warning! The `toType` method mutates the tensor!\n This is especially problematic with respect to generics, \n because if the tensor is still used as a tensor of the old type, \n then the compiler will not be able to detect that the tensor has changed its type.\n This is why we have to use the `unsafe` API exists.\n Only use this method if there are urgent performance requirements and\n you know exactly what you are doing!\n ","\n Warning! 
The `toType` method mutates the tensor!\n This is especially problematic with respect to generics, \n because if the tensor is still used as a tensor of the old type, \n then the compiler will not be able to detect that the tensor has changed its type.\n This is why we have to use the `unsafe` API exists.\n Only use this method if there are urgent performance requirements and\n you know exactly what you are doing!\n "] + "tags":{},"see":[],"extraInfo":["\n Warning! The `toType` method mutates the tensor!\n This is especially problematic with respect to generics, \n because if the tensor is still used as a tensor of the old type, \n then the compiler will not be able to detect that the tensor has changed its type.\n This is why we have to use the `unsafe` API exists.\n Only use this method if there are urgent performance requirements and\n you know exactly what you are doing!\n "] + }, + "blocks":[ + {"kind":"given","text":"A simple tensor with a few initial values.","code":["var data = [-3, -12, 42, -42, 12, 3]","var a = Tensor.of(sourceType).withShape(data.size()).andFill(data)"]}, + + {"kind":"when","text":"We change the data type of the tensor using the unsafe \"toType\" method.","code":["var b = a.mut.toType(targetType)"]}, + + {"kind":"then","text":"The returned tensor has the expected data type.","code":["b.itemType == targetType"]}, + + {"kind":"and","text":"The returned tensor has the expected values.","code":["b.rawItems == data.collect({ it.asType(targetType) })"]}, + + {"kind":"and","text":"The returned tensor is in fact the original instance.","code":["a === b"]}, + + {"kind":"where","text":"We use the following data and matrix dimensions!","code":{"sourceType":["Double","Double","Double","Double","Double","Double","Float","Float","Float","Float","Float","Float","Long","Long","Long","Long","Long","Long","Integer","Integer","Integer","Integer","Integer","Integer","Short","Short","Short","Short","Short","Short","Byte","Byte","Byte","Byte","Byte","Byte"],"targetType":["Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can change the data type of all kinds of tensors. [12]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":["\n Warning! 
The `toType` method mutates the tensor!\n This is especially problematic with respect to generics, \n because if the tensor is still used as a tensor of the old type, \n then the compiler will not be able to detect that the tensor has changed its type.\n This is why we have to use the `unsafe` API exists.\n Only use this method if there are urgent performance requirements and\n you know exactly what you are doing!\n "] + }, + "blocks":[ + {"kind":"given","text":"A simple tensor with a few initial values.","code":["var data = [-3, -12, 42, -42, 12, 3]","var a = Tensor.of(sourceType).withShape(data.size()).andFill(data)"]}, + + {"kind":"when","text":"We change the data type of the tensor using the unsafe \"toType\" method.","code":["var b = a.mut.toType(targetType)"]}, + + {"kind":"then","text":"The returned tensor has the expected data type.","code":["b.itemType == targetType"]}, + + {"kind":"and","text":"The returned tensor has the expected values.","code":["b.rawItems == data.collect({ it.asType(targetType) })"]}, + + {"kind":"and","text":"The returned tensor is in fact the original instance.","code":["a === b"]}, + + {"kind":"where","text":"We use the following data and matrix dimensions!","code":{"sourceType":["Double","Double","Double","Double","Double","Double","Float","Float","Float","Float","Float","Float","Long","Long","Long","Long","Long","Long","Integer","Integer","Integer","Integer","Integer","Integer","Short","Short","Short","Short","Short","Short","Byte","Byte","Byte","Byte","Byte","Byte"],"targetType":["Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can change the data type of all kinds of tensors. [13]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":["\n Warning! 
The `toType` method mutates the tensor!\n This is especially problematic with respect to generics, \n because if the tensor is still used as a tensor of the old type, \n then the compiler will not be able to detect that the tensor has changed its type.\n This is why we have to use the `unsafe` API exists.\n Only use this method if there are urgent performance requirements and\n you know exactly what you are doing!\n "] + }, + "blocks":[ + {"kind":"given","text":"A simple tensor with a few initial values.","code":["var data = [-3, -12, 42, -42, 12, 3]","var a = Tensor.of(sourceType).withShape(data.size()).andFill(data)"]}, + + {"kind":"when","text":"We change the data type of the tensor using the unsafe \"toType\" method.","code":["var b = a.mut.toType(targetType)"]}, + + {"kind":"then","text":"The returned tensor has the expected data type.","code":["b.itemType == targetType"]}, + + {"kind":"and","text":"The returned tensor has the expected values.","code":["b.rawItems == data.collect({ it.asType(targetType) })"]}, + + {"kind":"and","text":"The returned tensor is in fact the original instance.","code":["a === b"]}, + + {"kind":"where","text":"We use the following data and matrix dimensions!","code":{"sourceType":["Double","Double","Double","Double","Double","Double","Float","Float","Float","Float","Float","Float","Long","Long","Long","Long","Long","Long","Integer","Integer","Integer","Integer","Integer","Integer","Short","Short","Short","Short","Short","Short","Byte","Byte","Byte","Byte","Byte","Byte"],"targetType":["Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can change the data type of all kinds of tensors. [14]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":["\n Warning! 
The `toType` method mutates the tensor!\n This is especially problematic with respect to generics, \n because if the tensor is still used as a tensor of the old type, \n then the compiler will not be able to detect that the tensor has changed its type.\n This is why we have to use the `unsafe` API exists.\n Only use this method if there are urgent performance requirements and\n you know exactly what you are doing!\n "] + }, + "blocks":[ + {"kind":"given","text":"A simple tensor with a few initial values.","code":["var data = [-3, -12, 42, -42, 12, 3]","var a = Tensor.of(sourceType).withShape(data.size()).andFill(data)"]}, + + {"kind":"when","text":"We change the data type of the tensor using the unsafe \"toType\" method.","code":["var b = a.mut.toType(targetType)"]}, + + {"kind":"then","text":"The returned tensor has the expected data type.","code":["b.itemType == targetType"]}, + + {"kind":"and","text":"The returned tensor has the expected values.","code":["b.rawItems == data.collect({ it.asType(targetType) })"]}, + + {"kind":"and","text":"The returned tensor is in fact the original instance.","code":["a === b"]}, + + {"kind":"where","text":"We use the following data and matrix dimensions!","code":{"sourceType":["Double","Double","Double","Double","Double","Double","Float","Float","Float","Float","Float","Float","Long","Long","Long","Long","Long","Long","Integer","Integer","Integer","Integer","Integer","Integer","Short","Short","Short","Short","Short","Short","Byte","Byte","Byte","Byte","Byte","Byte"],"targetType":["Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can change the data type of all kinds of tensors. [15]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":["\n Warning! 
The `toType` method mutates the tensor!\n This is especially problematic with respect to generics, \n because if the tensor is still used as a tensor of the old type, \n then the compiler will not be able to detect that the tensor has changed its type.\n This is why we have to use the `unsafe` API exists.\n Only use this method if there are urgent performance requirements and\n you know exactly what you are doing!\n "] + }, + "blocks":[ + {"kind":"given","text":"A simple tensor with a few initial values.","code":["var data = [-3, -12, 42, -42, 12, 3]","var a = Tensor.of(sourceType).withShape(data.size()).andFill(data)"]}, + + {"kind":"when","text":"We change the data type of the tensor using the unsafe \"toType\" method.","code":["var b = a.mut.toType(targetType)"]}, + + {"kind":"then","text":"The returned tensor has the expected data type.","code":["b.itemType == targetType"]}, + + {"kind":"and","text":"The returned tensor has the expected values.","code":["b.rawItems == data.collect({ it.asType(targetType) })"]}, + + {"kind":"and","text":"The returned tensor is in fact the original instance.","code":["a === b"]}, + + {"kind":"where","text":"We use the following data and matrix dimensions!","code":{"sourceType":["Double","Double","Double","Double","Double","Double","Float","Float","Float","Float","Float","Float","Long","Long","Long","Long","Long","Long","Integer","Integer","Integer","Integer","Integer","Integer","Short","Short","Short","Short","Short","Short","Byte","Byte","Byte","Byte","Byte","Byte"],"targetType":["Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can change the data type of all kinds of tensors. [16]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":["\n Warning! 
The `toType` method mutates the tensor!\n This is especially problematic with respect to generics, \n because if the tensor is still used as a tensor of the old type, \n then the compiler will not be able to detect that the tensor has changed its type.\n This is why we have to use the `unsafe` API exists.\n Only use this method if there are urgent performance requirements and\n you know exactly what you are doing!\n "] + }, + "blocks":[ + {"kind":"given","text":"A simple tensor with a few initial values.","code":["var data = [-3, -12, 42, -42, 12, 3]","var a = Tensor.of(sourceType).withShape(data.size()).andFill(data)"]}, + + {"kind":"when","text":"We change the data type of the tensor using the unsafe \"toType\" method.","code":["var b = a.mut.toType(targetType)"]}, + + {"kind":"then","text":"The returned tensor has the expected data type.","code":["b.itemType == targetType"]}, + + {"kind":"and","text":"The returned tensor has the expected values.","code":["b.rawItems == data.collect({ it.asType(targetType) })"]}, + + {"kind":"and","text":"The returned tensor is in fact the original instance.","code":["a === b"]}, + + {"kind":"where","text":"We use the following data and matrix dimensions!","code":{"sourceType":["Double","Double","Double","Double","Double","Double","Float","Float","Float","Float","Float","Float","Long","Long","Long","Long","Long","Long","Integer","Integer","Integer","Integer","Integer","Integer","Short","Short","Short","Short","Short","Short","Byte","Byte","Byte","Byte","Byte","Byte"],"targetType":["Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can change the data type of all kinds of tensors. [17]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":["\n Warning! 
The `toType` method mutates the tensor!\n This is especially problematic with respect to generics, \n because if the tensor is still used as a tensor of the old type, \n then the compiler will not be able to detect that the tensor has changed its type.\n This is why we have to use the `unsafe` API exists.\n Only use this method if there are urgent performance requirements and\n you know exactly what you are doing!\n "] + }, + "blocks":[ + {"kind":"given","text":"A simple tensor with a few initial values.","code":["var data = [-3, -12, 42, -42, 12, 3]","var a = Tensor.of(sourceType).withShape(data.size()).andFill(data)"]}, + + {"kind":"when","text":"We change the data type of the tensor using the unsafe \"toType\" method.","code":["var b = a.mut.toType(targetType)"]}, + + {"kind":"then","text":"The returned tensor has the expected data type.","code":["b.itemType == targetType"]}, + + {"kind":"and","text":"The returned tensor has the expected values.","code":["b.rawItems == data.collect({ it.asType(targetType) })"]}, + + {"kind":"and","text":"The returned tensor is in fact the original instance.","code":["a === b"]}, + + {"kind":"where","text":"We use the following data and matrix dimensions!","code":{"sourceType":["Double","Double","Double","Double","Double","Double","Float","Float","Float","Float","Float","Float","Long","Long","Long","Long","Long","Long","Integer","Integer","Integer","Integer","Integer","Integer","Short","Short","Short","Short","Short","Short","Byte","Byte","Byte","Byte","Byte","Byte"],"targetType":["Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can change the data type of all kinds of tensors. [18]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":["\n Warning! 
The `toType` method mutates the tensor!\n This is especially problematic with respect to generics, \n because if the tensor is still used as a tensor of the old type, \n then the compiler will not be able to detect that the tensor has changed its type.\n This is why we have to use the `unsafe` API exists.\n Only use this method if there are urgent performance requirements and\n you know exactly what you are doing!\n "] + }, + "blocks":[ + {"kind":"given","text":"A simple tensor with a few initial values.","code":["var data = [-3, -12, 42, -42, 12, 3]","var a = Tensor.of(sourceType).withShape(data.size()).andFill(data)"]}, + + {"kind":"when","text":"We change the data type of the tensor using the unsafe \"toType\" method.","code":["var b = a.mut.toType(targetType)"]}, + + {"kind":"then","text":"The returned tensor has the expected data type.","code":["b.itemType == targetType"]}, + + {"kind":"and","text":"The returned tensor has the expected values.","code":["b.rawItems == data.collect({ it.asType(targetType) })"]}, + + {"kind":"and","text":"The returned tensor is in fact the original instance.","code":["a === b"]}, + + {"kind":"where","text":"We use the following data and matrix dimensions!","code":{"sourceType":["Double","Double","Double","Double","Double","Double","Float","Float","Float","Float","Float","Float","Long","Long","Long","Long","Long","Long","Integer","Integer","Integer","Integer","Integer","Integer","Short","Short","Short","Short","Short","Short","Byte","Byte","Byte","Byte","Byte","Byte"],"targetType":["Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can change the data type of all kinds of tensors. [19]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":["\n Warning! 
The `toType` method mutates the tensor!\n This is especially problematic with respect to generics, \n because if the tensor is still used as a tensor of the old type, \n then the compiler will not be able to detect that the tensor has changed its type.\n This is why we have to use the `unsafe` API exists.\n Only use this method if there are urgent performance requirements and\n you know exactly what you are doing!\n "] + }, + "blocks":[ + {"kind":"given","text":"A simple tensor with a few initial values.","code":["var data = [-3, -12, 42, -42, 12, 3]","var a = Tensor.of(sourceType).withShape(data.size()).andFill(data)"]}, + + {"kind":"when","text":"We change the data type of the tensor using the unsafe \"toType\" method.","code":["var b = a.mut.toType(targetType)"]}, + + {"kind":"then","text":"The returned tensor has the expected data type.","code":["b.itemType == targetType"]}, + + {"kind":"and","text":"The returned tensor has the expected values.","code":["b.rawItems == data.collect({ it.asType(targetType) })"]}, + + {"kind":"and","text":"The returned tensor is in fact the original instance.","code":["a === b"]}, + + {"kind":"where","text":"We use the following data and matrix dimensions!","code":{"sourceType":["Double","Double","Double","Double","Double","Double","Float","Float","Float","Float","Float","Float","Long","Long","Long","Long","Long","Long","Integer","Integer","Integer","Integer","Integer","Integer","Short","Short","Short","Short","Short","Short","Byte","Byte","Byte","Byte","Byte","Byte"],"targetType":["Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can change the data type of all kinds of tensors. [20]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":["\n Warning! 
The `toType` method mutates the tensor!\n This is especially problematic with respect to generics, \n because if the tensor is still used as a tensor of the old type, \n then the compiler will not be able to detect that the tensor has changed its type.\n This is why we have to use the `unsafe` API exists.\n Only use this method if there are urgent performance requirements and\n you know exactly what you are doing!\n "] + }, + "blocks":[ + {"kind":"given","text":"A simple tensor with a few initial values.","code":["var data = [-3, -12, 42, -42, 12, 3]","var a = Tensor.of(sourceType).withShape(data.size()).andFill(data)"]}, + + {"kind":"when","text":"We change the data type of the tensor using the unsafe \"toType\" method.","code":["var b = a.mut.toType(targetType)"]}, + + {"kind":"then","text":"The returned tensor has the expected data type.","code":["b.itemType == targetType"]}, + + {"kind":"and","text":"The returned tensor has the expected values.","code":["b.rawItems == data.collect({ it.asType(targetType) })"]}, + + {"kind":"and","text":"The returned tensor is in fact the original instance.","code":["a === b"]}, + + {"kind":"where","text":"We use the following data and matrix dimensions!","code":{"sourceType":["Double","Double","Double","Double","Double","Double","Float","Float","Float","Float","Float","Float","Long","Long","Long","Long","Long","Long","Integer","Integer","Integer","Integer","Integer","Integer","Short","Short","Short","Short","Short","Short","Byte","Byte","Byte","Byte","Byte","Byte"],"targetType":["Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can change the data type of all kinds of tensors. [21]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":["\n Warning! 
The `toType` method mutates the tensor!\n This is especially problematic with respect to generics, \n because if the tensor is still used as a tensor of the old type, \n then the compiler will not be able to detect that the tensor has changed its type.\n This is why we have to use the `unsafe` API exists.\n Only use this method if there are urgent performance requirements and\n you know exactly what you are doing!\n "] + }, + "blocks":[ + {"kind":"given","text":"A simple tensor with a few initial values.","code":["var data = [-3, -12, 42, -42, 12, 3]","var a = Tensor.of(sourceType).withShape(data.size()).andFill(data)"]}, + + {"kind":"when","text":"We change the data type of the tensor using the unsafe \"toType\" method.","code":["var b = a.mut.toType(targetType)"]}, + + {"kind":"then","text":"The returned tensor has the expected data type.","code":["b.itemType == targetType"]}, + + {"kind":"and","text":"The returned tensor has the expected values.","code":["b.rawItems == data.collect({ it.asType(targetType) })"]}, + + {"kind":"and","text":"The returned tensor is in fact the original instance.","code":["a === b"]}, + + {"kind":"where","text":"We use the following data and matrix dimensions!","code":{"sourceType":["Double","Double","Double","Double","Double","Double","Float","Float","Float","Float","Float","Float","Long","Long","Long","Long","Long","Long","Integer","Integer","Integer","Integer","Integer","Integer","Short","Short","Short","Short","Short","Short","Byte","Byte","Byte","Byte","Byte","Byte"],"targetType":["Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can change the data type of all kinds of tensors. [22]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":["\n Warning! 
The `toType` method mutates the tensor!\n This is especially problematic with respect to generics, \n because if the tensor is still used as a tensor of the old type, \n then the compiler will not be able to detect that the tensor has changed its type.\n This is why we have to use the `unsafe` API exists.\n Only use this method if there are urgent performance requirements and\n you know exactly what you are doing!\n "] + }, + "blocks":[ + {"kind":"given","text":"A simple tensor with a few initial values.","code":["var data = [-3, -12, 42, -42, 12, 3]","var a = Tensor.of(sourceType).withShape(data.size()).andFill(data)"]}, + + {"kind":"when","text":"We change the data type of the tensor using the unsafe \"toType\" method.","code":["var b = a.mut.toType(targetType)"]}, + + {"kind":"then","text":"The returned tensor has the expected data type.","code":["b.itemType == targetType"]}, + + {"kind":"and","text":"The returned tensor has the expected values.","code":["b.rawItems == data.collect({ it.asType(targetType) })"]}, + + {"kind":"and","text":"The returned tensor is in fact the original instance.","code":["a === b"]}, + + {"kind":"where","text":"We use the following data and matrix dimensions!","code":{"sourceType":["Double","Double","Double","Double","Double","Double","Float","Float","Float","Float","Float","Float","Long","Long","Long","Long","Long","Long","Integer","Integer","Integer","Integer","Integer","Integer","Short","Short","Short","Short","Short","Short","Byte","Byte","Byte","Byte","Byte","Byte"],"targetType":["Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can change the data type of all kinds of tensors. [23]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":["\n Warning! 
The `toType` method mutates the tensor!\n This is especially problematic with respect to generics, \n because if the tensor is still used as a tensor of the old type, \n then the compiler will not be able to detect that the tensor has changed its type.\n This is why we have to use the `unsafe` API exists.\n Only use this method if there are urgent performance requirements and\n you know exactly what you are doing!\n "] + }, + "blocks":[ + {"kind":"given","text":"A simple tensor with a few initial values.","code":["var data = [-3, -12, 42, -42, 12, 3]","var a = Tensor.of(sourceType).withShape(data.size()).andFill(data)"]}, + + {"kind":"when","text":"We change the data type of the tensor using the unsafe \"toType\" method.","code":["var b = a.mut.toType(targetType)"]}, + + {"kind":"then","text":"The returned tensor has the expected data type.","code":["b.itemType == targetType"]}, + + {"kind":"and","text":"The returned tensor has the expected values.","code":["b.rawItems == data.collect({ it.asType(targetType) })"]}, + + {"kind":"and","text":"The returned tensor is in fact the original instance.","code":["a === b"]}, + + {"kind":"where","text":"We use the following data and matrix dimensions!","code":{"sourceType":["Double","Double","Double","Double","Double","Double","Float","Float","Float","Float","Float","Float","Long","Long","Long","Long","Long","Long","Integer","Integer","Integer","Integer","Integer","Integer","Short","Short","Short","Short","Short","Short","Byte","Byte","Byte","Byte","Byte","Byte"],"targetType":["Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can change the data type of all kinds of tensors. [24]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":["\n Warning! 
The `toType` method mutates the tensor!\n This is especially problematic with respect to generics, \n because if the tensor is still used as a tensor of the old type, \n then the compiler will not be able to detect that the tensor has changed its type.\n This is why we have to use the `unsafe` API exists.\n Only use this method if there are urgent performance requirements and\n you know exactly what you are doing!\n "] + }, + "blocks":[ + {"kind":"given","text":"A simple tensor with a few initial values.","code":["var data = [-3, -12, 42, -42, 12, 3]","var a = Tensor.of(sourceType).withShape(data.size()).andFill(data)"]}, + + {"kind":"when","text":"We change the data type of the tensor using the unsafe \"toType\" method.","code":["var b = a.mut.toType(targetType)"]}, + + {"kind":"then","text":"The returned tensor has the expected data type.","code":["b.itemType == targetType"]}, + + {"kind":"and","text":"The returned tensor has the expected values.","code":["b.rawItems == data.collect({ it.asType(targetType) })"]}, + + {"kind":"and","text":"The returned tensor is in fact the original instance.","code":["a === b"]}, + + {"kind":"where","text":"We use the following data and matrix dimensions!","code":{"sourceType":["Double","Double","Double","Double","Double","Double","Float","Float","Float","Float","Float","Float","Long","Long","Long","Long","Long","Long","Integer","Integer","Integer","Integer","Integer","Integer","Short","Short","Short","Short","Short","Short","Byte","Byte","Byte","Byte","Byte","Byte"],"targetType":["Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can change the data type of all kinds of tensors. [25]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":["\n Warning! 
The `toType` method mutates the tensor!\n This is especially problematic with respect to generics, \n because if the tensor is still used as a tensor of the old type, \n then the compiler will not be able to detect that the tensor has changed its type.\n This is why we have to use the `unsafe` API exists.\n Only use this method if there are urgent performance requirements and\n you know exactly what you are doing!\n "] + }, + "blocks":[ + {"kind":"given","text":"A simple tensor with a few initial values.","code":["var data = [-3, -12, 42, -42, 12, 3]","var a = Tensor.of(sourceType).withShape(data.size()).andFill(data)"]}, + + {"kind":"when","text":"We change the data type of the tensor using the unsafe \"toType\" method.","code":["var b = a.mut.toType(targetType)"]}, + + {"kind":"then","text":"The returned tensor has the expected data type.","code":["b.itemType == targetType"]}, + + {"kind":"and","text":"The returned tensor has the expected values.","code":["b.rawItems == data.collect({ it.asType(targetType) })"]}, + + {"kind":"and","text":"The returned tensor is in fact the original instance.","code":["a === b"]}, + + {"kind":"where","text":"We use the following data and matrix dimensions!","code":{"sourceType":["Double","Double","Double","Double","Double","Double","Float","Float","Float","Float","Float","Float","Long","Long","Long","Long","Long","Long","Integer","Integer","Integer","Integer","Integer","Integer","Short","Short","Short","Short","Short","Short","Byte","Byte","Byte","Byte","Byte","Byte"],"targetType":["Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can change the data type of all kinds of tensors. [26]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":["\n Warning! 
The `toType` method mutates the tensor!\n This is especially problematic with respect to generics, \n because if the tensor is still used as a tensor of the old type, \n then the compiler will not be able to detect that the tensor has changed its type.\n This is why we have to use the `unsafe` API exists.\n Only use this method if there are urgent performance requirements and\n you know exactly what you are doing!\n "] + }, + "blocks":[ + {"kind":"given","text":"A simple tensor with a few initial values.","code":["var data = [-3, -12, 42, -42, 12, 3]","var a = Tensor.of(sourceType).withShape(data.size()).andFill(data)"]}, + + {"kind":"when","text":"We change the data type of the tensor using the unsafe \"toType\" method.","code":["var b = a.mut.toType(targetType)"]}, + + {"kind":"then","text":"The returned tensor has the expected data type.","code":["b.itemType == targetType"]}, + + {"kind":"and","text":"The returned tensor has the expected values.","code":["b.rawItems == data.collect({ it.asType(targetType) })"]}, + + {"kind":"and","text":"The returned tensor is in fact the original instance.","code":["a === b"]}, + + {"kind":"where","text":"We use the following data and matrix dimensions!","code":{"sourceType":["Double","Double","Double","Double","Double","Double","Float","Float","Float","Float","Float","Float","Long","Long","Long","Long","Long","Long","Integer","Integer","Integer","Integer","Integer","Integer","Short","Short","Short","Short","Short","Short","Byte","Byte","Byte","Byte","Byte","Byte"],"targetType":["Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can change the data type of all kinds of tensors. [27]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":["\n Warning! 
The `toType` method mutates the tensor!\n This is especially problematic with respect to generics, \n because if the tensor is still used as a tensor of the old type, \n then the compiler will not be able to detect that the tensor has changed its type.\n This is why we have to use the `unsafe` API exists.\n Only use this method if there are urgent performance requirements and\n you know exactly what you are doing!\n "] + }, + "blocks":[ + {"kind":"given","text":"A simple tensor with a few initial values.","code":["var data = [-3, -12, 42, -42, 12, 3]","var a = Tensor.of(sourceType).withShape(data.size()).andFill(data)"]}, + + {"kind":"when","text":"We change the data type of the tensor using the unsafe \"toType\" method.","code":["var b = a.mut.toType(targetType)"]}, + + {"kind":"then","text":"The returned tensor has the expected data type.","code":["b.itemType == targetType"]}, + + {"kind":"and","text":"The returned tensor has the expected values.","code":["b.rawItems == data.collect({ it.asType(targetType) })"]}, + + {"kind":"and","text":"The returned tensor is in fact the original instance.","code":["a === b"]}, + + {"kind":"where","text":"We use the following data and matrix dimensions!","code":{"sourceType":["Double","Double","Double","Double","Double","Double","Float","Float","Float","Float","Float","Float","Long","Long","Long","Long","Long","Long","Integer","Integer","Integer","Integer","Integer","Integer","Short","Short","Short","Short","Short","Short","Byte","Byte","Byte","Byte","Byte","Byte"],"targetType":["Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can change the data type of all kinds of tensors. [28]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":["\n Warning! 
The `toType` method mutates the tensor!\n This is especially problematic with respect to generics, \n because if the tensor is still used as a tensor of the old type, \n then the compiler will not be able to detect that the tensor has changed its type.\n This is why we have to use the `unsafe` API exists.\n Only use this method if there are urgent performance requirements and\n you know exactly what you are doing!\n "] + }, + "blocks":[ + {"kind":"given","text":"A simple tensor with a few initial values.","code":["var data = [-3, -12, 42, -42, 12, 3]","var a = Tensor.of(sourceType).withShape(data.size()).andFill(data)"]}, + + {"kind":"when","text":"We change the data type of the tensor using the unsafe \"toType\" method.","code":["var b = a.mut.toType(targetType)"]}, + + {"kind":"then","text":"The returned tensor has the expected data type.","code":["b.itemType == targetType"]}, + + {"kind":"and","text":"The returned tensor has the expected values.","code":["b.rawItems == data.collect({ it.asType(targetType) })"]}, + + {"kind":"and","text":"The returned tensor is in fact the original instance.","code":["a === b"]}, + + {"kind":"where","text":"We use the following data and matrix dimensions!","code":{"sourceType":["Double","Double","Double","Double","Double","Double","Float","Float","Float","Float","Float","Float","Long","Long","Long","Long","Long","Long","Integer","Integer","Integer","Integer","Integer","Integer","Short","Short","Short","Short","Short","Short","Byte","Byte","Byte","Byte","Byte","Byte"],"targetType":["Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can change the data type of all kinds of tensors. [29]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":["\n Warning! 
The `toType` method mutates the tensor!\n This is especially problematic with respect to generics, \n because if the tensor is still used as a tensor of the old type, \n then the compiler will not be able to detect that the tensor has changed its type.\n This is why we have to use the `unsafe` API exists.\n Only use this method if there are urgent performance requirements and\n you know exactly what you are doing!\n "] + }, + "blocks":[ + {"kind":"given","text":"A simple tensor with a few initial values.","code":["var data = [-3, -12, 42, -42, 12, 3]","var a = Tensor.of(sourceType).withShape(data.size()).andFill(data)"]}, + + {"kind":"when","text":"We change the data type of the tensor using the unsafe \"toType\" method.","code":["var b = a.mut.toType(targetType)"]}, + + {"kind":"then","text":"The returned tensor has the expected data type.","code":["b.itemType == targetType"]}, + + {"kind":"and","text":"The returned tensor has the expected values.","code":["b.rawItems == data.collect({ it.asType(targetType) })"]}, + + {"kind":"and","text":"The returned tensor is in fact the original instance.","code":["a === b"]}, + + {"kind":"where","text":"We use the following data and matrix dimensions!","code":{"sourceType":["Double","Double","Double","Double","Double","Double","Float","Float","Float","Float","Float","Float","Long","Long","Long","Long","Long","Long","Integer","Integer","Integer","Integer","Integer","Integer","Short","Short","Short","Short","Short","Short","Byte","Byte","Byte","Byte","Byte","Byte"],"targetType":["Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can change the data type of all kinds of tensors. [30]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":["\n Warning! 
The `toType` method mutates the tensor!\n This is especially problematic with respect to generics, \n because if the tensor is still used as a tensor of the old type, \n then the compiler will not be able to detect that the tensor has changed its type.\n This is why we have to use the `unsafe` API exists.\n Only use this method if there are urgent performance requirements and\n you know exactly what you are doing!\n "] + }, + "blocks":[ + {"kind":"given","text":"A simple tensor with a few initial values.","code":["var data = [-3, -12, 42, -42, 12, 3]","var a = Tensor.of(sourceType).withShape(data.size()).andFill(data)"]}, + + {"kind":"when","text":"We change the data type of the tensor using the unsafe \"toType\" method.","code":["var b = a.mut.toType(targetType)"]}, + + {"kind":"then","text":"The returned tensor has the expected data type.","code":["b.itemType == targetType"]}, + + {"kind":"and","text":"The returned tensor has the expected values.","code":["b.rawItems == data.collect({ it.asType(targetType) })"]}, + + {"kind":"and","text":"The returned tensor is in fact the original instance.","code":["a === b"]}, + + {"kind":"where","text":"We use the following data and matrix dimensions!","code":{"sourceType":["Double","Double","Double","Double","Double","Double","Float","Float","Float","Float","Float","Float","Long","Long","Long","Long","Long","Long","Integer","Integer","Integer","Integer","Integer","Integer","Short","Short","Short","Short","Short","Short","Byte","Byte","Byte","Byte","Byte","Byte"],"targetType":["Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can change the data type of all kinds of tensors. [31]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":["\n Warning! 
The `toType` method mutates the tensor!\n This is especially problematic with respect to generics, \n because if the tensor is still used as a tensor of the old type, \n then the compiler will not be able to detect that the tensor has changed its type.\n This is why we have to use the `unsafe` API exists.\n Only use this method if there are urgent performance requirements and\n you know exactly what you are doing!\n "] + }, + "blocks":[ + {"kind":"given","text":"A simple tensor with a few initial values.","code":["var data = [-3, -12, 42, -42, 12, 3]","var a = Tensor.of(sourceType).withShape(data.size()).andFill(data)"]}, + + {"kind":"when","text":"We change the data type of the tensor using the unsafe \"toType\" method.","code":["var b = a.mut.toType(targetType)"]}, + + {"kind":"then","text":"The returned tensor has the expected data type.","code":["b.itemType == targetType"]}, + + {"kind":"and","text":"The returned tensor has the expected values.","code":["b.rawItems == data.collect({ it.asType(targetType) })"]}, + + {"kind":"and","text":"The returned tensor is in fact the original instance.","code":["a === b"]}, + + {"kind":"where","text":"We use the following data and matrix dimensions!","code":{"sourceType":["Double","Double","Double","Double","Double","Double","Float","Float","Float","Float","Float","Float","Long","Long","Long","Long","Long","Long","Integer","Integer","Integer","Integer","Integer","Integer","Short","Short","Short","Short","Short","Short","Byte","Byte","Byte","Byte","Byte","Byte"],"targetType":["Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can change the data type of all kinds of tensors. [32]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":["\n Warning! 
The `toType` method mutates the tensor!\n This is especially problematic with respect to generics, \n because if the tensor is still used as a tensor of the old type, \n then the compiler will not be able to detect that the tensor has changed its type.\n This is why we have to use the `unsafe` API exists.\n Only use this method if there are urgent performance requirements and\n you know exactly what you are doing!\n "] + }, + "blocks":[ + {"kind":"given","text":"A simple tensor with a few initial values.","code":["var data = [-3, -12, 42, -42, 12, 3]","var a = Tensor.of(sourceType).withShape(data.size()).andFill(data)"]}, + + {"kind":"when","text":"We change the data type of the tensor using the unsafe \"toType\" method.","code":["var b = a.mut.toType(targetType)"]}, + + {"kind":"then","text":"The returned tensor has the expected data type.","code":["b.itemType == targetType"]}, + + {"kind":"and","text":"The returned tensor has the expected values.","code":["b.rawItems == data.collect({ it.asType(targetType) })"]}, + + {"kind":"and","text":"The returned tensor is in fact the original instance.","code":["a === b"]}, + + {"kind":"where","text":"We use the following data and matrix dimensions!","code":{"sourceType":["Double","Double","Double","Double","Double","Double","Float","Float","Float","Float","Float","Float","Long","Long","Long","Long","Long","Long","Integer","Integer","Integer","Integer","Integer","Integer","Short","Short","Short","Short","Short","Short","Byte","Byte","Byte","Byte","Byte","Byte"],"targetType":["Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can change the data type of all kinds of tensors. [33]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":["\n Warning! 
The `toType` method mutates the tensor!\n This is especially problematic with respect to generics, \n because if the tensor is still used as a tensor of the old type, \n then the compiler will not be able to detect that the tensor has changed its type.\n This is why we have to use the `unsafe` API exists.\n Only use this method if there are urgent performance requirements and\n you know exactly what you are doing!\n "] + }, + "blocks":[ + {"kind":"given","text":"A simple tensor with a few initial values.","code":["var data = [-3, -12, 42, -42, 12, 3]","var a = Tensor.of(sourceType).withShape(data.size()).andFill(data)"]}, + + {"kind":"when","text":"We change the data type of the tensor using the unsafe \"toType\" method.","code":["var b = a.mut.toType(targetType)"]}, + + {"kind":"then","text":"The returned tensor has the expected data type.","code":["b.itemType == targetType"]}, + + {"kind":"and","text":"The returned tensor has the expected values.","code":["b.rawItems == data.collect({ it.asType(targetType) })"]}, + + {"kind":"and","text":"The returned tensor is in fact the original instance.","code":["a === b"]}, + + {"kind":"where","text":"We use the following data and matrix dimensions!","code":{"sourceType":["Double","Double","Double","Double","Double","Double","Float","Float","Float","Float","Float","Float","Long","Long","Long","Long","Long","Long","Integer","Integer","Integer","Integer","Integer","Integer","Short","Short","Short","Short","Short","Short","Byte","Byte","Byte","Byte","Byte","Byte"],"targetType":["Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can change the data type of all kinds of tensors. [34]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":["\n Warning! 
The `toType` method mutates the tensor!\n This is especially problematic with respect to generics, \n because if the tensor is still used as a tensor of the old type, \n then the compiler will not be able to detect that the tensor has changed its type.\n This is why we have to use the `unsafe` API exists.\n Only use this method if there are urgent performance requirements and\n you know exactly what you are doing!\n "] + }, + "blocks":[ + {"kind":"given","text":"A simple tensor with a few initial values.","code":["var data = [-3, -12, 42, -42, 12, 3]","var a = Tensor.of(sourceType).withShape(data.size()).andFill(data)"]}, + + {"kind":"when","text":"We change the data type of the tensor using the unsafe \"toType\" method.","code":["var b = a.mut.toType(targetType)"]}, + + {"kind":"then","text":"The returned tensor has the expected data type.","code":["b.itemType == targetType"]}, + + {"kind":"and","text":"The returned tensor has the expected values.","code":["b.rawItems == data.collect({ it.asType(targetType) })"]}, + + {"kind":"and","text":"The returned tensor is in fact the original instance.","code":["a === b"]}, + + {"kind":"where","text":"We use the following data and matrix dimensions!","code":{"sourceType":["Double","Double","Double","Double","Double","Double","Float","Float","Float","Float","Float","Float","Long","Long","Long","Long","Long","Long","Integer","Integer","Integer","Integer","Integer","Integer","Short","Short","Short","Short","Short","Short","Byte","Byte","Byte","Byte","Byte","Byte"],"targetType":["Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte","Double","Float","Long","Integer","Short","Byte"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can change the data type of all kinds of tensors. [35]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":["\n Warning! 
The `toType` method mutates the tensor!\n This is especially problematic with respect to generics, \n because if the tensor is still used as a tensor of the old type, \n then the compiler will not be able to detect that the tensor has changed its type.\n This is why we have to use the `unsafe` API exists.\n Only use this method if there are urgent performance requirements and\n you know exactly what you are doing!\n "] }, "blocks":[ {"kind":"given","text":"A simple tensor with a few initial values.","code":["var data = [-3, -12, 42, -42, 12, 3]","var a = Tensor.of(sourceType).withShape(data.size()).andFill(data)"]}, diff --git a/docs/spock/reports/ut.tensors.Tensor_Convolution_Spec.json b/docs/spock/reports/ut.tensors.Tensor_Convolution_Spec.json index 8b8292bc0..5eb636770 100644 --- a/docs/spock/reports/ut.tensors.Tensor_Convolution_Spec.json +++ b/docs/spock/reports/ut.tensors.Tensor_Convolution_Spec.json @@ -4,19 +4,19 @@ "narrative":"This specification shows how convolution can be performed on tensors.\n\n Convolution is a linear operation which is not only important for image processing but also\n a central player in the field of machine learning (especially for computer vision).\n It is used to extract features from images and other typically ~2 dimensional data.\n Other than that it is extremely important in the field of signal processing.", "subjects":[], "statistics":{ - "runs":"10", + "runs":"20", "successRate":"100.0%", "failures":"0", "errors":"0", "skipped":"0", - "duration":"0.639 seconds" + "duration":"1.132 seconds" }, "headers":[],"tags":{},"see":[], "features":[ { - "id":"We can perform a convolution operation on a 2D tensor.", + "id":"We can perform a convolution operation on a 2D tensor. [0]", "result":"PASS", - "duration":"0.114 seconds", + "duration":"0.056 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -37,9 +37,76 @@ }, { - "id":"Convolution with tensors of the same shape is equivalent to a dot product.", + "id":"We can perform a convolution operation on a 2D tensor. [1]", "result":"PASS", - "duration":"0.081 seconds", + "duration":"0.049 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A 2D tensor with shape [3, 3] and values [1, 2, 3, ..., 9].","code":["var x ="," Tensor.of(type, [3, 3], ["," 1, 2, 3,"," 4, 5, 6,"," 7, 8, 9"," ])"]}, + + {"kind":"and","text":"A 2D kernel with shape [2, 2] and values [1, 2, 0, -1].","code":["var k ="," Tensor.of(type, [2, 2], ["," 1, 2,"," 0, -1"," ])"]}, + + {"kind":"and","text":"We move both tensors to a device on which we want to execute.","code":["x.to(device)","k.to(device)"]}, + + {"kind":"when","text":"We perform a convolution operation on the tensor with the kernel `k`.","code":["var y = x.conv(k)"]}, + + {"kind":"then","text":"The resulting tensor should have shape [2, 2] and value [0.0, 2.0, 6.0, 8.0].","code":["y.shape == [2, 2]","y.items == [0.0, 2.0, 6.0, 8.0]"]}, + + {"kind":"where","text":"","code":{"type":["Float","Double","Float"],"device":["'CPU'","'CPU'","'GPU'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can perform a convolution operation on a 2D tensor. 
[2]", + "result":"PASS", + "duration":"0.052 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A 2D tensor with shape [3, 3] and values [1, 2, 3, ..., 9].","code":["var x ="," Tensor.of(type, [3, 3], ["," 1, 2, 3,"," 4, 5, 6,"," 7, 8, 9"," ])"]}, + + {"kind":"and","text":"A 2D kernel with shape [2, 2] and values [1, 2, 0, -1].","code":["var k ="," Tensor.of(type, [2, 2], ["," 1, 2,"," 0, -1"," ])"]}, + + {"kind":"and","text":"We move both tensors to a device on which we want to execute.","code":["x.to(device)","k.to(device)"]}, + + {"kind":"when","text":"We perform a convolution operation on the tensor with the kernel `k`.","code":["var y = x.conv(k)"]}, + + {"kind":"then","text":"The resulting tensor should have shape [2, 2] and value [0.0, 2.0, 6.0, 8.0].","code":["y.shape == [2, 2]","y.items == [0.0, 2.0, 6.0, 8.0]"]}, + + {"kind":"where","text":"","code":{"type":["Float","Double","Float"],"device":["'CPU'","'CPU'","'GPU'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Convolution with tensors of the same shape is equivalent to a dot product. [0]", + "result":"PASS", + "duration":"0.051 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A 2D tensor with shape [2, 2] and values [1, 3, 6, -8].","code":["var x ="," Tensor.of(Float, [2, 2], ["," 1, 3,"," 6, -8,"," ])"]}, + + {"kind":"and","text":"A 2D kernel with shape [2, 2] and values [-2, 1, 4, 5].","code":["var k ="," Tensor.of(Float, [2, 2], ["," -2, 1,"," 4, 5"," ])"]}, + + {"kind":"when","text":"We perform a convolution operation on the tensor with the kernel `k`.","code":["var y = x.conv(k)"]}, + + {"kind":"then","text":"The resulting tensor should have shape [1, 1] and value [-15.0].","code":["y.shape == [1, 1]","y.items == [-15.0]"]}, + + {"kind":"where","text":"","code":{"type":["Float","Double","Float"],"device":["'CPU'","'CPU'","'GPU'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Convolution with tensors of the same shape is equivalent to a dot product. [1]", + "result":"PASS", + "duration":"0.050 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -58,9 +125,30 @@ }, { - "id":"Convolution can be performed using non-quadratic matrix tensors.", + "id":"Convolution with tensors of the same shape is equivalent to a dot product. [2]", "result":"PASS", - "duration":"0.078 seconds", + "duration":"0.049 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A 2D tensor with shape [2, 2] and values [1, 3, 6, -8].","code":["var x ="," Tensor.of(Float, [2, 2], ["," 1, 3,"," 6, -8,"," ])"]}, + + {"kind":"and","text":"A 2D kernel with shape [2, 2] and values [-2, 1, 4, 5].","code":["var k ="," Tensor.of(Float, [2, 2], ["," -2, 1,"," 4, 5"," ])"]}, + + {"kind":"when","text":"We perform a convolution operation on the tensor with the kernel `k`.","code":["var y = x.conv(k)"]}, + + {"kind":"then","text":"The resulting tensor should have shape [1, 1] and value [-15.0].","code":["y.shape == [1, 1]","y.items == [-15.0]"]}, + + {"kind":"where","text":"","code":{"type":["Float","Double","Float"],"device":["'CPU'","'CPU'","'GPU'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Convolution can be performed using non-quadratic matrix tensors. 
[0]", + "result":"PASS", + "duration":"0.050 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -79,9 +167,51 @@ }, { - "id":"Convolution can be performed using tensors with an additional dimension as batch size.", + "id":"Convolution can be performed using non-quadratic matrix tensors. [1]", "result":"PASS", - "duration":"0.080 seconds", + "duration":"0.052 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A 2D tensor with shape [2, 3] and values [1, 3, 6, -8, 2, 4].","code":["var x ="," Tensor.of(Float, [2, 3], ["," 1, 3, 6,"," -8, 2, 4,"," ])"]}, + + {"kind":"and","text":"A 2D kernel with shape [1, 2] and values [-2, 1].","code":["var k ="," Tensor.of(Float, [1, 2], ["," -2, 1"," ])"]}, + + {"kind":"when","text":"We perform a convolution operation on the tensor with the kernel `k`.","code":["var y = x.conv(k)"]}, + + {"kind":"then","text":"The resulting tensor should have shape [2, 2] and value [1.0, 0.0, 18.0, 0.0].","code":["y.shape == [2, 2]","y.items == [1.0, 0.0, 18.0, 0.0]"]}, + + {"kind":"where","text":"","code":{"type":["Float","Double","Float"],"device":["'CPU'","'CPU'","'GPU'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Convolution can be performed using non-quadratic matrix tensors. [2]", + "result":"PASS", + "duration":"0.051 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A 2D tensor with shape [2, 3] and values [1, 3, 6, -8, 2, 4].","code":["var x ="," Tensor.of(Float, [2, 3], ["," 1, 3, 6,"," -8, 2, 4,"," ])"]}, + + {"kind":"and","text":"A 2D kernel with shape [1, 2] and values [-2, 1].","code":["var k ="," Tensor.of(Float, [1, 2], ["," -2, 1"," ])"]}, + + {"kind":"when","text":"We perform a convolution operation on the tensor with the kernel `k`.","code":["var y = x.conv(k)"]}, + + {"kind":"then","text":"The resulting tensor should have shape [2, 2] and value [1.0, 0.0, 18.0, 0.0].","code":["y.shape == [2, 2]","y.items == [1.0, 0.0, 18.0, 0.0]"]}, + + {"kind":"where","text":"","code":{"type":["Float","Double","Float"],"device":["'CPU'","'CPU'","'GPU'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Convolution can be performed using tensors with an additional dimension as batch size. [0]", + "result":"PASS", + "duration":"0.052 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -100,11 +230,80 @@ }, { - "id":"The \"x\" (convolution) operator produces expected results (On the CPU).", + "id":"Convolution can be performed using tensors with an additional dimension as batch size. [1]", "result":"PASS", - "duration":"0.054 seconds", + "duration":"0.052 seconds", "iterations":{ - "tags":{},"see":[],"extraInfo":["\n The 'x' operator performs convolution on the provided operands.\n The meaning of the operands is not defined, so one the kernel tensor\n can be the first and second operand. \n ","\n The 'x' operator performs convolution on the provided operands.\n The meaning of the operands is not defined, so one the kernel tensor\n can be the first and second operand. 
\n "] + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A 3D tensor with shape [2, 2, 2] and values [1, 3, 6, -8, 2, 4, 5, 7].","code":["var x ="," Tensor.of(Float, [2, 2, 2], ["," 1, 3,"," 6, -8,"," 2, 4,"," 5, 7"," ])"]}, + + {"kind":"and","text":"A 2D kernel with shape [2, 2] and values [-2, 1, 4, 5].","code":["var k ="," Tensor.of(Float, [1, 2, 2], ["," -2, 1,"," 4, 5"," ])"]}, + + {"kind":"when","text":"We perform a convolution operation on the tensor with the kernel `k`.","code":["var y = x.conv(k)"]}, + + {"kind":"then","text":"The resulting tensor should have shape [2, 1, 1] and value [-15.0, 55.0].","code":["y.shape == [2, 1, 1]","y.items == [-15.0, 55.0]"]}, + + {"kind":"where","text":"","code":{"type":["Float","Double","Float"],"device":["'CPU'","'CPU'","'GPU'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Convolution can be performed using tensors with an additional dimension as batch size. [2]", + "result":"PASS", + "duration":"0.051 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A 3D tensor with shape [2, 2, 2] and values [1, 3, 6, -8, 2, 4, 5, 7].","code":["var x ="," Tensor.of(Float, [2, 2, 2], ["," 1, 3,"," 6, -8,"," 2, 4,"," 5, 7"," ])"]}, + + {"kind":"and","text":"A 2D kernel with shape [2, 2] and values [-2, 1, 4, 5].","code":["var k ="," Tensor.of(Float, [1, 2, 2], ["," -2, 1,"," 4, 5"," ])"]}, + + {"kind":"when","text":"We perform a convolution operation on the tensor with the kernel `k`.","code":["var y = x.conv(k)"]}, + + {"kind":"then","text":"The resulting tensor should have shape [2, 1, 1] and value [-15.0, 55.0].","code":["y.shape == [2, 1, 1]","y.items == [-15.0, 55.0]"]}, + + {"kind":"where","text":"","code":{"type":["Float","Double","Float"],"device":["'CPU'","'CPU'","'GPU'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The \"x\" (convolution) operator produces expected results (On the CPU). [0]", + "result":"PASS", + "duration":"0.053 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":["\n The 'x' operator performs convolution on the provided operands.\n The meaning of the operands is not defined, so one the kernel tensor\n can be the first and second operand. 
\n "] + }, + "blocks":[ + {"kind":"given","text":"Gradient auto apply for tensors in ue is set to false.","code":["Neureka.get().settings().autograd().setIsApplyingGradientWhenTensorIsUsed(false)"]}, + + {"kind":"and","text":"Tensor legacy view is set to true.","code":["Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(true)"]}, + + {"kind":"and","text":"Two new 3D tensor instances with the shapes: [2x3x1] & [1x3x2].","code":["var x = Tensor.of(Shape.of(2, 3, 1),"," new double[]{"," 3, 2, -1,"," -2, 2, 4"," }"," )"," .mut.toType(type)","var y = Tensor.of(Shape.of(1, 3, 2),"," new double[]{"," 4, -1,"," 3, 2,"," 3, -1"," }"," )"," .mut.toType(type)"]}, + + {"kind":"when","text":"The x-mul result is being instantiated by passing a simple equation to the tensor constructor.","code":["var z = Tensor.of(\"I0xi1\", x, y)"]}, + + {"kind":"then","text":"The result contains the expected String.","code":["z.toString().contains(expected)"]}, + + {"kind":"when","text":"The x-mul result is being instantiated by passing a object array containing equation parameters and syntax.","code":["z = Tensor.of(new Object[]{x, \"x\", y})"]}, + + {"kind":"then","text":"The result contains the expected String.","code":["z.toString().contains(expected)"]}, + + {"kind":"where","text":"","code":{"type":["Double","Float"],"expected":["\"[2x1x2]:(15.0, 2.0, 10.0, 2.0)\"","\"[2x1x2]:(15.0, 2.0, 10.0, 2.0)\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The \"x\" (convolution) operator produces expected results (On the CPU). [1]", + "result":"PASS", + "duration":"0.088 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":["\n The 'x' operator performs convolution on the provided operands.\n The meaning of the operands is not defined, so one the kernel tensor\n can be the first and second operand. \n "] }, "blocks":[ {"kind":"given","text":"Gradient auto apply for tensors in ue is set to false.","code":["Neureka.get().settings().autograd().setIsApplyingGradientWhenTensorIsUsed(false)"]}, @@ -129,7 +328,7 @@ { "id":"Manual convolution produces expected result.", "result":"PASS", - "duration":"0.051 seconds", + "duration":"0.063 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -144,9 +343,30 @@ }, { - "id":"Very simple manual convolution produces expected result.", + "id":"Very simple manual convolution produces expected result. 
[0]", + "result":"PASS", + "duration":"0.056 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We set the experimental \"autoConvertToFloat\" flag to true.","code":["Neureka.get().backend().find(CLBackend).ifPresent({ it.settings.autoConvertToFloat=true })"]}, + + {"kind":"and","text":"","code":["Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(false)","Tensor a = Tensor.of([4, 4], 0d..16d).to( device )","Tensor x = a[1..-2,0..-1]","Tensor y = a[0..-3,0..-1]","Tensor z = a[2..-1,0..-1]"]}, + + {"kind":"when","text":"","code":["Tensor rowconvol = x + y + z","Tensor k = rowconvol[0..-1,1..-2]","Tensor v = rowconvol[0..-1,0..-3]","Tensor j = rowconvol[0..-1,2..-1]","Tensor u = a[1..-2,1..-2]","Tensor colconvol = k + v + j - 9 * u","String xAsStr = x.toString()","String yAsStr = y.toString()","String zAsStr = z.toString()","String rcAsStr = rowconvol.toString()","String kAsStr = k.toString()","String vAsStr = v.toString()","String jAsStr = j.toString()","String uAsStr = u.toString()"]}, + + {"kind":"then","text":"","code":["xAsStr.contains(\"(2x4):[4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0]\")","yAsStr.contains(\"(2x4):[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0]\")","zAsStr.contains(\"(2x4):[8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0]\")","rcAsStr.contains(\"(2x4):[12.0, 15.0, 18.0, 21.0, 24.0, 27.0, 30.0, 33.0]\")","kAsStr.contains(\"(2x2):[15.0, 18.0, 27.0, 30.0]\")","vAsStr.contains(\"(2x2):[12.0, 15.0, 24.0, 27.0]\")","jAsStr.contains(\"(2x2):[18.0, 21.0, 30.0, 33.0]\")","uAsStr.contains(\"(2x2):[5.0, 6.0, 9.0, 10.0]\")","colconvol.toString().contains(\"(2x2):[0.0, 0.0, 0.0, 0.0]\")"]}, + + {"kind":"where","text":"The following data is being used for tensor instantiation :","code":{"device":["CPU.get()","Device.get(\"openCL\")"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Very simple manual convolution produces expected result. 
[1]", "result":"PASS", - "duration":"0.066 seconds", + "duration":"0.052 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -167,7 +387,7 @@ { "id":"Autograd works with simple 2D convolution.", "result":"PASS", - "duration":"0.033 seconds", + "duration":"0.061 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -192,7 +412,7 @@ { "id":"Sime convolution works as expected eith autograd.", "result":"PASS", - "duration":"0.034 seconds", + "duration":"0.063 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -217,7 +437,7 @@ { "id":"Tensors have the correct layout after convolution.", "result":"PASS", - "duration":"0.033 seconds", + "duration":"0.057 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, diff --git a/docs/spock/reports/ut.tensors.Tensor_Device_Spec.json b/docs/spock/reports/ut.tensors.Tensor_Device_Spec.json index 79f7484fc..c2ffd6cb4 100644 --- a/docs/spock/reports/ut.tensors.Tensor_Device_Spec.json +++ b/docs/spock/reports/ut.tensors.Tensor_Device_Spec.json @@ -1,21 +1,21 @@ { "className":"ut.tensors.Tensor_Device_Spec", "title":"Tensors on Devices", - "narrative":"This unit test specification covers\n the expected behavior of tensors when interacting\n with instances of implementations of the Device interface.", + "narrative":"This unit test specification covers \n the expected behavior of tensors when interacting\n with instances of implementations of the Device interface.", "subjects":[], "statistics":{ - "runs":"4", + "runs":"3", "successRate":"100.0%", "failures":"0", "errors":"0", - "skipped":"0", - "duration":"0.004 seconds" + "skipped":"1", + "duration":"0.015 seconds" }, "headers":["\n Here you will find out how to store tensors on devices,\n how to move tensors between devices and how to use\n the device specific methods of the tensor class.\n "],"tags":{},"see":[], "features":[ { "id":"Adding OpenCL device to tensor makes tensor be \"outsourced\" and contain the Device instance as component.", - "result":"PASS", + "result":"IGNORED", "duration":"0", "iterations":{ "tags":{},"see":[],"extraInfo":[] @@ -37,7 +37,7 @@ { "id":"Tensors try to migrate themselves to a device that is being added to them as component.", "result":"PASS", - "duration":"0.001 seconds", + "duration":"0.005 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -58,7 +58,7 @@ { "id":"The device of a tensor can be accessed via the \"device()\" method.", "result":"PASS", - "duration":"0", + "duration":"0.003 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -83,7 +83,7 @@ { "id":"When creating slices of tensors then this should trigger a \"parent - child\" relation noticeable to the device!", "result":"PASS", - "duration":"0", + "duration":"0.003 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, diff --git a/docs/spock/reports/ut.tensors.Tensor_Dot_Product_Spec.json b/docs/spock/reports/ut.tensors.Tensor_Dot_Product_Spec.json index bbf25e2d0..f5fc05cc6 100644 --- a/docs/spock/reports/ut.tensors.Tensor_Dot_Product_Spec.json +++ b/docs/spock/reports/ut.tensors.Tensor_Dot_Product_Spec.json @@ -4,19 +4,19 @@ "narrative":"A tensor can also be a simple vector, which is a tensor of rank 1.\n This specification demonstrates how to perform dot products on tensors of rank 1.", "subjects":["neureka.Tensor"], "statistics":{ - "runs":"7", + "runs":"21", "successRate":"100.0%", "failures":"0", "errors":"0", "skipped":"0", - "duration":"0.137 seconds" + "duration":"0.037 seconds" }, "headers":[],"tags":{},"see":[], "features":[ { 
"id":"The \"dot\" method calculates the dot product between vectors.", "result":"PASS", - "duration":"0.001 seconds", + "duration":"0.003 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -33,7 +33,7 @@ { "id":"You can slice a Matrix into vectors and then used them for dot products.", "result":"PASS", - "duration":"0", + "duration":"0.002 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -52,7 +52,7 @@ { "id":"The \"dot\" operation supports autograd.", "result":"PASS", - "duration":"0", + "duration":"0.003 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":["\n The \"dot\" operation supports autograd.\n This means that you can use it to calculate the gradient of a weight tensor.\n This is useful for when you want to build a neural network or some other machine learning model.\n "] }, @@ -71,11 +71,11 @@ }, { - "id":"The dot product operation runs on any device.", + "id":"The dot product operation runs on any device. [0]", "result":"PASS", - "duration":"0.079 seconds", + "duration":"0.002 seconds", "iterations":{ - "tags":{},"see":[],"extraInfo":["\n The dot product operation runs on any device that \n supports OpenCL (meaning that it has OpenCL drivers installed).\n ","\n The dot product operation runs on any device that \n supports OpenCL (meaning that it has OpenCL drivers installed).\n "] + "tags":{},"see":[],"extraInfo":["\n The dot product operation runs on any device that \n supports OpenCL (meaning that it has OpenCL drivers installed).\n "] }, "blocks":[ {"kind":"given","text":"A pair of vector tensors which we move to the device!","code":["var a = Tensor.of(-1f, -3f, 0f, 4f, 2f).to( device )","var b = Tensor.of( 1f, 2f, 7f, -1f, 3f).to( device )"]}, @@ -90,9 +90,49 @@ }, { - "id":"The dot operation works for virtual tensors as well.", + "id":"The dot product operation runs on any device. [1]", "result":"PASS", - "duration":"0.002 seconds", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A pair of vector tensors which we move to the device!","code":["var a = Tensor.of(-1f, -3f, 0f, 4f, 2f).to( device )","var b = Tensor.of( 1f, 2f, 7f, -1f, 3f).to( device )"]}, + + {"kind":"when","text":"we calculate the dot product of a and b.","code":["var result = a.dot(b)"]}, + + {"kind":"then","text":"the result is a scalar.","code":["result.shape == Shape.of(1)","result.items == [ -1f * 1f + -3f * 2f + 0f * 7f + 4f * -1f + 2f * 3f ]"]}, + + {"kind":"where","text":"","code":{"device":["'CPU'","'GPU'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The dot operation works for virtual tensors as well. [0]", + "result":"PASS", + "duration":"0.003 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A pair of vector tensors which we move to the device!","code":["var a = Tensor.of(Shape.of(8), 3f).to(device)","var b = Tensor.of(Shape.of(8), 3f).to(device)"]}, + + {"kind":"expect","text":"the tensors are virtual.","code":["a.isVirtual() // They are scalars in disguise!","b.isVirtual()"]}, + + {"kind":"when","text":"we calculate the dot product of a and b.","code":["var result = a.dot(b)"]}, + + {"kind":"then","text":"the result is a scalar.","code":["result.shape == Shape.of(1)","result.items == [ 3f * 3f * 8f ]"]}, + + {"kind":"where","text":"","code":{"device":["'CPU'","'GPU'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The dot operation works for virtual tensors as well. 
[1]", + "result":"PASS", + "duration":"0", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -111,9 +151,9 @@ }, { - "id":"The dot operation work even when one tensor is virtual.", + "id":"The dot operation work even when one tensor is virtual. [0]", "result":"PASS", - "duration":"0.001 seconds", + "duration":"0.002 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -132,9 +172,239 @@ }, { - "id":"The dot product works across different types and devices.", + "id":"The dot operation work even when one tensor is virtual. [1]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A pair of vector tensors which we move to the device!","code":["var a = Tensor.of(Shape.of(8), 3f).to(device)","var b = Tensor.of(Shape.of(8), Data.of(3f, 4f, -1f)).to(device)"]}, + + {"kind":"expect","text":"the tensors are virtual.","code":["a.isVirtual() // They are scalars in disguise!","!b.isVirtual()"]}, + + {"kind":"when","text":"we calculate the dot product of a and b.","code":["var result = a.dot(b)"]}, + + {"kind":"then","text":"the result is a scalar.","code":["result.shape == Shape.of(1)","result.items == [ 57f ]"]}, + + {"kind":"where","text":"","code":{"device":["'CPU'","'GPU'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The dot product works across different types and devices. [0]", + "result":"PASS", + "duration":"0.002 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A pair of vector tensors which we move to the device!","code":["var a = Tensor.of(data1).to(device)","var b = Tensor.of(data2).to(device)"]}, + + {"kind":"when","text":"we calculate the dot product of a and b.","code":["var result = a.dot(b)"]}, + + {"kind":"then","text":"the result is a scalar.","code":["result.shape == Shape.of(1)","result.items == expected"]}, + + {"kind":"where","text":"","code":{"device":["'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'GPU'","'GPU'"],"data1":["[ 8f, -4f, -1f ] as float[]","[ 42f ] as float[]","[ 8d, -4d, -1d ] as double[]","[ 2d, 3d, 4d ] as double[]","[ 1d, -4d ] as double[]","[ 8, -4, -1 ] as int[]","[ 42 ] as int[]","[ 2, 3, 4 ] as long[]","[ 1, -4 ] as long[]","[ 42 ] as long[]","[ 8f, -4f, -1f ] as float[]","[ 42f ] as float[]"],"data2":["[ 1f, 2f, 4f ] as float[]","[ 56f ] as float[]","[ 1d, 2d, 4d ] as double[]","[ 0d, 2d, 3d ] as double[]","[ 4d, 2d ] as double[]","[ 1, 2, 4 ] as int[]","[ 56 ] as int[]","[ 0, 2, 3 ] as long[]","[ 4, 2 ] as long[]","[ 56 ] as long[]","[ 1f, 2f, 4f ] as float[]","[ 56f ] as float[]"],"expected":["[ 8f * 1f + -4f * 2f + -1f * 4f ]","[ 42f * 56f ]","[ 8d * 1d + -4d * 2d + -1d * 4d ]","[ 2d * 0d + 3d * 2d + 4d * 3d ]","[ 1d * 4d + -4d * 2d ]","[ 8 * 1 + -4 * 2 + -1 * 4 ]","[ 42 * 56 ]","[ 2 * 0 + 3 * 2 + 4 * 3 ]","[ 1 * 4 + -4 * 2 ]","[ 42 * 56 ]","[ 8f * 1f + -4f * 2f + -1f * 4f ]","[ 42f * 56f ]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The dot product works across different types and devices. 
[1]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A pair of vector tensors which we move to the device!","code":["var a = Tensor.of(data1).to(device)","var b = Tensor.of(data2).to(device)"]}, + + {"kind":"when","text":"we calculate the dot product of a and b.","code":["var result = a.dot(b)"]}, + + {"kind":"then","text":"the result is a scalar.","code":["result.shape == Shape.of(1)","result.items == expected"]}, + + {"kind":"where","text":"","code":{"device":["'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'GPU'","'GPU'"],"data1":["[ 8f, -4f, -1f ] as float[]","[ 42f ] as float[]","[ 8d, -4d, -1d ] as double[]","[ 2d, 3d, 4d ] as double[]","[ 1d, -4d ] as double[]","[ 8, -4, -1 ] as int[]","[ 42 ] as int[]","[ 2, 3, 4 ] as long[]","[ 1, -4 ] as long[]","[ 42 ] as long[]","[ 8f, -4f, -1f ] as float[]","[ 42f ] as float[]"],"data2":["[ 1f, 2f, 4f ] as float[]","[ 56f ] as float[]","[ 1d, 2d, 4d ] as double[]","[ 0d, 2d, 3d ] as double[]","[ 4d, 2d ] as double[]","[ 1, 2, 4 ] as int[]","[ 56 ] as int[]","[ 0, 2, 3 ] as long[]","[ 4, 2 ] as long[]","[ 56 ] as long[]","[ 1f, 2f, 4f ] as float[]","[ 56f ] as float[]"],"expected":["[ 8f * 1f + -4f * 2f + -1f * 4f ]","[ 42f * 56f ]","[ 8d * 1d + -4d * 2d + -1d * 4d ]","[ 2d * 0d + 3d * 2d + 4d * 3d ]","[ 1d * 4d + -4d * 2d ]","[ 8 * 1 + -4 * 2 + -1 * 4 ]","[ 42 * 56 ]","[ 2 * 0 + 3 * 2 + 4 * 3 ]","[ 1 * 4 + -4 * 2 ]","[ 42 * 56 ]","[ 8f * 1f + -4f * 2f + -1f * 4f ]","[ 42f * 56f ]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The dot product works across different types and devices. [2]", "result":"PASS", - "duration":"0.041 seconds", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A pair of vector tensors which we move to the device!","code":["var a = Tensor.of(data1).to(device)","var b = Tensor.of(data2).to(device)"]}, + + {"kind":"when","text":"we calculate the dot product of a and b.","code":["var result = a.dot(b)"]}, + + {"kind":"then","text":"the result is a scalar.","code":["result.shape == Shape.of(1)","result.items == expected"]}, + + {"kind":"where","text":"","code":{"device":["'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'GPU'","'GPU'"],"data1":["[ 8f, -4f, -1f ] as float[]","[ 42f ] as float[]","[ 8d, -4d, -1d ] as double[]","[ 2d, 3d, 4d ] as double[]","[ 1d, -4d ] as double[]","[ 8, -4, -1 ] as int[]","[ 42 ] as int[]","[ 2, 3, 4 ] as long[]","[ 1, -4 ] as long[]","[ 42 ] as long[]","[ 8f, -4f, -1f ] as float[]","[ 42f ] as float[]"],"data2":["[ 1f, 2f, 4f ] as float[]","[ 56f ] as float[]","[ 1d, 2d, 4d ] as double[]","[ 0d, 2d, 3d ] as double[]","[ 4d, 2d ] as double[]","[ 1, 2, 4 ] as int[]","[ 56 ] as int[]","[ 0, 2, 3 ] as long[]","[ 4, 2 ] as long[]","[ 56 ] as long[]","[ 1f, 2f, 4f ] as float[]","[ 56f ] as float[]"],"expected":["[ 8f * 1f + -4f * 2f + -1f * 4f ]","[ 42f * 56f ]","[ 8d * 1d + -4d * 2d + -1d * 4d ]","[ 2d * 0d + 3d * 2d + 4d * 3d ]","[ 1d * 4d + -4d * 2d ]","[ 8 * 1 + -4 * 2 + -1 * 4 ]","[ 42 * 56 ]","[ 2 * 0 + 3 * 2 + 4 * 3 ]","[ 1 * 4 + -4 * 2 ]","[ 42 * 56 ]","[ 8f * 1f + -4f * 2f + -1f * 4f ]","[ 42f * 56f ]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The dot product works across different types and devices. 
[3]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A pair of vector tensors which we move to the device!","code":["var a = Tensor.of(data1).to(device)","var b = Tensor.of(data2).to(device)"]}, + + {"kind":"when","text":"we calculate the dot product of a and b.","code":["var result = a.dot(b)"]}, + + {"kind":"then","text":"the result is a scalar.","code":["result.shape == Shape.of(1)","result.items == expected"]}, + + {"kind":"where","text":"","code":{"device":["'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'GPU'","'GPU'"],"data1":["[ 8f, -4f, -1f ] as float[]","[ 42f ] as float[]","[ 8d, -4d, -1d ] as double[]","[ 2d, 3d, 4d ] as double[]","[ 1d, -4d ] as double[]","[ 8, -4, -1 ] as int[]","[ 42 ] as int[]","[ 2, 3, 4 ] as long[]","[ 1, -4 ] as long[]","[ 42 ] as long[]","[ 8f, -4f, -1f ] as float[]","[ 42f ] as float[]"],"data2":["[ 1f, 2f, 4f ] as float[]","[ 56f ] as float[]","[ 1d, 2d, 4d ] as double[]","[ 0d, 2d, 3d ] as double[]","[ 4d, 2d ] as double[]","[ 1, 2, 4 ] as int[]","[ 56 ] as int[]","[ 0, 2, 3 ] as long[]","[ 4, 2 ] as long[]","[ 56 ] as long[]","[ 1f, 2f, 4f ] as float[]","[ 56f ] as float[]"],"expected":["[ 8f * 1f + -4f * 2f + -1f * 4f ]","[ 42f * 56f ]","[ 8d * 1d + -4d * 2d + -1d * 4d ]","[ 2d * 0d + 3d * 2d + 4d * 3d ]","[ 1d * 4d + -4d * 2d ]","[ 8 * 1 + -4 * 2 + -1 * 4 ]","[ 42 * 56 ]","[ 2 * 0 + 3 * 2 + 4 * 3 ]","[ 1 * 4 + -4 * 2 ]","[ 42 * 56 ]","[ 8f * 1f + -4f * 2f + -1f * 4f ]","[ 42f * 56f ]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The dot product works across different types and devices. [4]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A pair of vector tensors which we move to the device!","code":["var a = Tensor.of(data1).to(device)","var b = Tensor.of(data2).to(device)"]}, + + {"kind":"when","text":"we calculate the dot product of a and b.","code":["var result = a.dot(b)"]}, + + {"kind":"then","text":"the result is a scalar.","code":["result.shape == Shape.of(1)","result.items == expected"]}, + + {"kind":"where","text":"","code":{"device":["'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'GPU'","'GPU'"],"data1":["[ 8f, -4f, -1f ] as float[]","[ 42f ] as float[]","[ 8d, -4d, -1d ] as double[]","[ 2d, 3d, 4d ] as double[]","[ 1d, -4d ] as double[]","[ 8, -4, -1 ] as int[]","[ 42 ] as int[]","[ 2, 3, 4 ] as long[]","[ 1, -4 ] as long[]","[ 42 ] as long[]","[ 8f, -4f, -1f ] as float[]","[ 42f ] as float[]"],"data2":["[ 1f, 2f, 4f ] as float[]","[ 56f ] as float[]","[ 1d, 2d, 4d ] as double[]","[ 0d, 2d, 3d ] as double[]","[ 4d, 2d ] as double[]","[ 1, 2, 4 ] as int[]","[ 56 ] as int[]","[ 0, 2, 3 ] as long[]","[ 4, 2 ] as long[]","[ 56 ] as long[]","[ 1f, 2f, 4f ] as float[]","[ 56f ] as float[]"],"expected":["[ 8f * 1f + -4f * 2f + -1f * 4f ]","[ 42f * 56f ]","[ 8d * 1d + -4d * 2d + -1d * 4d ]","[ 2d * 0d + 3d * 2d + 4d * 3d ]","[ 1d * 4d + -4d * 2d ]","[ 8 * 1 + -4 * 2 + -1 * 4 ]","[ 42 * 56 ]","[ 2 * 0 + 3 * 2 + 4 * 3 ]","[ 1 * 4 + -4 * 2 ]","[ 42 * 56 ]","[ 8f * 1f + -4f * 2f + -1f * 4f ]","[ 42f * 56f ]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The dot product works across different types and devices. 
[5]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A pair of vector tensors which we move to the device!","code":["var a = Tensor.of(data1).to(device)","var b = Tensor.of(data2).to(device)"]}, + + {"kind":"when","text":"we calculate the dot product of a and b.","code":["var result = a.dot(b)"]}, + + {"kind":"then","text":"the result is a scalar.","code":["result.shape == Shape.of(1)","result.items == expected"]}, + + {"kind":"where","text":"","code":{"device":["'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'GPU'","'GPU'"],"data1":["[ 8f, -4f, -1f ] as float[]","[ 42f ] as float[]","[ 8d, -4d, -1d ] as double[]","[ 2d, 3d, 4d ] as double[]","[ 1d, -4d ] as double[]","[ 8, -4, -1 ] as int[]","[ 42 ] as int[]","[ 2, 3, 4 ] as long[]","[ 1, -4 ] as long[]","[ 42 ] as long[]","[ 8f, -4f, -1f ] as float[]","[ 42f ] as float[]"],"data2":["[ 1f, 2f, 4f ] as float[]","[ 56f ] as float[]","[ 1d, 2d, 4d ] as double[]","[ 0d, 2d, 3d ] as double[]","[ 4d, 2d ] as double[]","[ 1, 2, 4 ] as int[]","[ 56 ] as int[]","[ 0, 2, 3 ] as long[]","[ 4, 2 ] as long[]","[ 56 ] as long[]","[ 1f, 2f, 4f ] as float[]","[ 56f ] as float[]"],"expected":["[ 8f * 1f + -4f * 2f + -1f * 4f ]","[ 42f * 56f ]","[ 8d * 1d + -4d * 2d + -1d * 4d ]","[ 2d * 0d + 3d * 2d + 4d * 3d ]","[ 1d * 4d + -4d * 2d ]","[ 8 * 1 + -4 * 2 + -1 * 4 ]","[ 42 * 56 ]","[ 2 * 0 + 3 * 2 + 4 * 3 ]","[ 1 * 4 + -4 * 2 ]","[ 42 * 56 ]","[ 8f * 1f + -4f * 2f + -1f * 4f ]","[ 42f * 56f ]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The dot product works across different types and devices. [6]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A pair of vector tensors which we move to the device!","code":["var a = Tensor.of(data1).to(device)","var b = Tensor.of(data2).to(device)"]}, + + {"kind":"when","text":"we calculate the dot product of a and b.","code":["var result = a.dot(b)"]}, + + {"kind":"then","text":"the result is a scalar.","code":["result.shape == Shape.of(1)","result.items == expected"]}, + + {"kind":"where","text":"","code":{"device":["'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'GPU'","'GPU'"],"data1":["[ 8f, -4f, -1f ] as float[]","[ 42f ] as float[]","[ 8d, -4d, -1d ] as double[]","[ 2d, 3d, 4d ] as double[]","[ 1d, -4d ] as double[]","[ 8, -4, -1 ] as int[]","[ 42 ] as int[]","[ 2, 3, 4 ] as long[]","[ 1, -4 ] as long[]","[ 42 ] as long[]","[ 8f, -4f, -1f ] as float[]","[ 42f ] as float[]"],"data2":["[ 1f, 2f, 4f ] as float[]","[ 56f ] as float[]","[ 1d, 2d, 4d ] as double[]","[ 0d, 2d, 3d ] as double[]","[ 4d, 2d ] as double[]","[ 1, 2, 4 ] as int[]","[ 56 ] as int[]","[ 0, 2, 3 ] as long[]","[ 4, 2 ] as long[]","[ 56 ] as long[]","[ 1f, 2f, 4f ] as float[]","[ 56f ] as float[]"],"expected":["[ 8f * 1f + -4f * 2f + -1f * 4f ]","[ 42f * 56f ]","[ 8d * 1d + -4d * 2d + -1d * 4d ]","[ 2d * 0d + 3d * 2d + 4d * 3d ]","[ 1d * 4d + -4d * 2d ]","[ 8 * 1 + -4 * 2 + -1 * 4 ]","[ 42 * 56 ]","[ 2 * 0 + 3 * 2 + 4 * 3 ]","[ 1 * 4 + -4 * 2 ]","[ 42 * 56 ]","[ 8f * 1f + -4f * 2f + -1f * 4f ]","[ 42f * 56f ]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The dot product works across different types and devices. 
[7]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A pair of vector tensors which we move to the device!","code":["var a = Tensor.of(data1).to(device)","var b = Tensor.of(data2).to(device)"]}, + + {"kind":"when","text":"we calculate the dot product of a and b.","code":["var result = a.dot(b)"]}, + + {"kind":"then","text":"the result is a scalar.","code":["result.shape == Shape.of(1)","result.items == expected"]}, + + {"kind":"where","text":"","code":{"device":["'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'GPU'","'GPU'"],"data1":["[ 8f, -4f, -1f ] as float[]","[ 42f ] as float[]","[ 8d, -4d, -1d ] as double[]","[ 2d, 3d, 4d ] as double[]","[ 1d, -4d ] as double[]","[ 8, -4, -1 ] as int[]","[ 42 ] as int[]","[ 2, 3, 4 ] as long[]","[ 1, -4 ] as long[]","[ 42 ] as long[]","[ 8f, -4f, -1f ] as float[]","[ 42f ] as float[]"],"data2":["[ 1f, 2f, 4f ] as float[]","[ 56f ] as float[]","[ 1d, 2d, 4d ] as double[]","[ 0d, 2d, 3d ] as double[]","[ 4d, 2d ] as double[]","[ 1, 2, 4 ] as int[]","[ 56 ] as int[]","[ 0, 2, 3 ] as long[]","[ 4, 2 ] as long[]","[ 56 ] as long[]","[ 1f, 2f, 4f ] as float[]","[ 56f ] as float[]"],"expected":["[ 8f * 1f + -4f * 2f + -1f * 4f ]","[ 42f * 56f ]","[ 8d * 1d + -4d * 2d + -1d * 4d ]","[ 2d * 0d + 3d * 2d + 4d * 3d ]","[ 1d * 4d + -4d * 2d ]","[ 8 * 1 + -4 * 2 + -1 * 4 ]","[ 42 * 56 ]","[ 2 * 0 + 3 * 2 + 4 * 3 ]","[ 1 * 4 + -4 * 2 ]","[ 42 * 56 ]","[ 8f * 1f + -4f * 2f + -1f * 4f ]","[ 42f * 56f ]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The dot product works across different types and devices. [8]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A pair of vector tensors which we move to the device!","code":["var a = Tensor.of(data1).to(device)","var b = Tensor.of(data2).to(device)"]}, + + {"kind":"when","text":"we calculate the dot product of a and b.","code":["var result = a.dot(b)"]}, + + {"kind":"then","text":"the result is a scalar.","code":["result.shape == Shape.of(1)","result.items == expected"]}, + + {"kind":"where","text":"","code":{"device":["'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'GPU'","'GPU'"],"data1":["[ 8f, -4f, -1f ] as float[]","[ 42f ] as float[]","[ 8d, -4d, -1d ] as double[]","[ 2d, 3d, 4d ] as double[]","[ 1d, -4d ] as double[]","[ 8, -4, -1 ] as int[]","[ 42 ] as int[]","[ 2, 3, 4 ] as long[]","[ 1, -4 ] as long[]","[ 42 ] as long[]","[ 8f, -4f, -1f ] as float[]","[ 42f ] as float[]"],"data2":["[ 1f, 2f, 4f ] as float[]","[ 56f ] as float[]","[ 1d, 2d, 4d ] as double[]","[ 0d, 2d, 3d ] as double[]","[ 4d, 2d ] as double[]","[ 1, 2, 4 ] as int[]","[ 56 ] as int[]","[ 0, 2, 3 ] as long[]","[ 4, 2 ] as long[]","[ 56 ] as long[]","[ 1f, 2f, 4f ] as float[]","[ 56f ] as float[]"],"expected":["[ 8f * 1f + -4f * 2f + -1f * 4f ]","[ 42f * 56f ]","[ 8d * 1d + -4d * 2d + -1d * 4d ]","[ 2d * 0d + 3d * 2d + 4d * 3d ]","[ 1d * 4d + -4d * 2d ]","[ 8 * 1 + -4 * 2 + -1 * 4 ]","[ 42 * 56 ]","[ 2 * 0 + 3 * 2 + 4 * 3 ]","[ 1 * 4 + -4 * 2 ]","[ 42 * 56 ]","[ 8f * 1f + -4f * 2f + -1f * 4f ]","[ 42f * 56f ]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The dot product works across different types and devices. 
[9]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A pair of vector tensors which we move to the device!","code":["var a = Tensor.of(data1).to(device)","var b = Tensor.of(data2).to(device)"]}, + + {"kind":"when","text":"we calculate the dot product of a and b.","code":["var result = a.dot(b)"]}, + + {"kind":"then","text":"the result is a scalar.","code":["result.shape == Shape.of(1)","result.items == expected"]}, + + {"kind":"where","text":"","code":{"device":["'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'GPU'","'GPU'"],"data1":["[ 8f, -4f, -1f ] as float[]","[ 42f ] as float[]","[ 8d, -4d, -1d ] as double[]","[ 2d, 3d, 4d ] as double[]","[ 1d, -4d ] as double[]","[ 8, -4, -1 ] as int[]","[ 42 ] as int[]","[ 2, 3, 4 ] as long[]","[ 1, -4 ] as long[]","[ 42 ] as long[]","[ 8f, -4f, -1f ] as float[]","[ 42f ] as float[]"],"data2":["[ 1f, 2f, 4f ] as float[]","[ 56f ] as float[]","[ 1d, 2d, 4d ] as double[]","[ 0d, 2d, 3d ] as double[]","[ 4d, 2d ] as double[]","[ 1, 2, 4 ] as int[]","[ 56 ] as int[]","[ 0, 2, 3 ] as long[]","[ 4, 2 ] as long[]","[ 56 ] as long[]","[ 1f, 2f, 4f ] as float[]","[ 56f ] as float[]"],"expected":["[ 8f * 1f + -4f * 2f + -1f * 4f ]","[ 42f * 56f ]","[ 8d * 1d + -4d * 2d + -1d * 4d ]","[ 2d * 0d + 3d * 2d + 4d * 3d ]","[ 1d * 4d + -4d * 2d ]","[ 8 * 1 + -4 * 2 + -1 * 4 ]","[ 42 * 56 ]","[ 2 * 0 + 3 * 2 + 4 * 3 ]","[ 1 * 4 + -4 * 2 ]","[ 42 * 56 ]","[ 8f * 1f + -4f * 2f + -1f * 4f ]","[ 42f * 56f ]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The dot product works across different types and devices. [10]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A pair of vector tensors which we move to the device!","code":["var a = Tensor.of(data1).to(device)","var b = Tensor.of(data2).to(device)"]}, + + {"kind":"when","text":"we calculate the dot product of a and b.","code":["var result = a.dot(b)"]}, + + {"kind":"then","text":"the result is a scalar.","code":["result.shape == Shape.of(1)","result.items == expected"]}, + + {"kind":"where","text":"","code":{"device":["'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'GPU'","'GPU'"],"data1":["[ 8f, -4f, -1f ] as float[]","[ 42f ] as float[]","[ 8d, -4d, -1d ] as double[]","[ 2d, 3d, 4d ] as double[]","[ 1d, -4d ] as double[]","[ 8, -4, -1 ] as int[]","[ 42 ] as int[]","[ 2, 3, 4 ] as long[]","[ 1, -4 ] as long[]","[ 42 ] as long[]","[ 8f, -4f, -1f ] as float[]","[ 42f ] as float[]"],"data2":["[ 1f, 2f, 4f ] as float[]","[ 56f ] as float[]","[ 1d, 2d, 4d ] as double[]","[ 0d, 2d, 3d ] as double[]","[ 4d, 2d ] as double[]","[ 1, 2, 4 ] as int[]","[ 56 ] as int[]","[ 0, 2, 3 ] as long[]","[ 4, 2 ] as long[]","[ 56 ] as long[]","[ 1f, 2f, 4f ] as float[]","[ 56f ] as float[]"],"expected":["[ 8f * 1f + -4f * 2f + -1f * 4f ]","[ 42f * 56f ]","[ 8d * 1d + -4d * 2d + -1d * 4d ]","[ 2d * 0d + 3d * 2d + 4d * 3d ]","[ 1d * 4d + -4d * 2d ]","[ 8 * 1 + -4 * 2 + -1 * 4 ]","[ 42 * 56 ]","[ 2 * 0 + 3 * 2 + 4 * 3 ]","[ 1 * 4 + -4 * 2 ]","[ 42 * 56 ]","[ 8f * 1f + -4f * 2f + -1f * 4f ]","[ 42f * 56f ]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The dot product works across different types and devices. 
[11]", + "result":"PASS", + "duration":"0", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, diff --git a/docs/spock/reports/ut.tensors.Tensor_Generics_Spec.json b/docs/spock/reports/ut.tensors.Tensor_Generics_Spec.json index 86addb342..2c5f59f4f 100644 --- a/docs/spock/reports/ut.tensors.Tensor_Generics_Spec.json +++ b/docs/spock/reports/ut.tensors.Tensor_Generics_Spec.json @@ -4,19 +4,19 @@ "narrative":"Tensors do not just store numeric data.\n They can hold anything which can be stuffed into a \"Object[]\" array.\n You could even create a tensor of tensors!", "subjects":[], "statistics":{ - "runs":"3", + "runs":"7", "successRate":"100.0%", "failures":"0", "errors":"0", "skipped":"0", - "duration":"0.004 seconds" + "duration":"0.008 seconds" }, "headers":["\n Here you will find out how to create a tensor of any kind of data. \n "],"tags":{},"see":[], "features":[ { "id":"Anonymous tensor instance has the default datatype class as defined in Neureka settings.", "result":"PASS", - "duration":"0", + "duration":"0.002 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -33,7 +33,7 @@ { "id":"We can create a tensor of strings.", "result":"PASS", - "duration":"0", + "duration":"0.001 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -50,7 +50,91 @@ }, { - "id":"1D tensors can be created from primitive arrays.", + "id":"1D tensors can be created from primitive arrays. [0]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["def t = Tensor.of(data)"]}, + + {"kind":"expect","text":"","code":["t.rank() == 1"]}, + + {"kind":"and","text":"","code":["t.size() == size"]}, + + {"kind":"and","text":"","code":["t.getItemType() == expected"]}, + + {"kind":"where","text":"","code":{"size":["3","4","2","3","1"],"data":["new float[]{-1f, 3f, 6f}","new int[]{1, -2 , 9, 12}","new byte[]{42 , 73}","new long[]{-16 , 54, 12}","new short[]{26}"],"expected":["Float","Integer","Byte","Long","Short"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"1D tensors can be created from primitive arrays. [1]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["def t = Tensor.of(data)"]}, + + {"kind":"expect","text":"","code":["t.rank() == 1"]}, + + {"kind":"and","text":"","code":["t.size() == size"]}, + + {"kind":"and","text":"","code":["t.getItemType() == expected"]}, + + {"kind":"where","text":"","code":{"size":["3","4","2","3","1"],"data":["new float[]{-1f, 3f, 6f}","new int[]{1, -2 , 9, 12}","new byte[]{42 , 73}","new long[]{-16 , 54, 12}","new short[]{26}"],"expected":["Float","Integer","Byte","Long","Short"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"1D tensors can be created from primitive arrays. 
[2]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["def t = Tensor.of(data)"]}, + + {"kind":"expect","text":"","code":["t.rank() == 1"]}, + + {"kind":"and","text":"","code":["t.size() == size"]}, + + {"kind":"and","text":"","code":["t.getItemType() == expected"]}, + + {"kind":"where","text":"","code":{"size":["3","4","2","3","1"],"data":["new float[]{-1f, 3f, 6f}","new int[]{1, -2 , 9, 12}","new byte[]{42 , 73}","new long[]{-16 , 54, 12}","new short[]{26}"],"expected":["Float","Integer","Byte","Long","Short"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"1D tensors can be created from primitive arrays. [3]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["def t = Tensor.of(data)"]}, + + {"kind":"expect","text":"","code":["t.rank() == 1"]}, + + {"kind":"and","text":"","code":["t.size() == size"]}, + + {"kind":"and","text":"","code":["t.getItemType() == expected"]}, + + {"kind":"where","text":"","code":{"size":["3","4","2","3","1"],"data":["new float[]{-1f, 3f, 6f}","new int[]{1, -2 , 9, 12}","new byte[]{42 , 73}","new long[]{-16 , 54, 12}","new short[]{26}"],"expected":["Float","Integer","Byte","Long","Short"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"1D tensors can be created from primitive arrays. [4]", "result":"PASS", "duration":"0", "iterations":{ diff --git a/docs/spock/reports/ut.tensors.Tensor_Gradient_Spec.json b/docs/spock/reports/ut.tensors.Tensor_Gradient_Spec.json index 921d4c5b9..64ea29ae0 100644 --- a/docs/spock/reports/ut.tensors.Tensor_Gradient_Spec.json +++ b/docs/spock/reports/ut.tensors.Tensor_Gradient_Spec.json @@ -4,19 +4,19 @@ "narrative":"This specification defines the gradient API on tensors.\n So one ought to be able to check wetter or not a tensor has a gradient attached to it or not.\n In that case one should be able to get this gradient and then work with\n it independently of the original tensor to which it belongs to...", "subjects":[], "statistics":{ - "runs":"3", + "runs":"4", "successRate":"100.0%", "failures":"0", "errors":"0", "skipped":"0", - "duration":"0.003 seconds" + "duration":"0.008 seconds" }, "headers":["\n

        Tensor Gradient Unit Tests

        Why is there a difference between \"rqsGradient()\" and \"hasGradient()\"?

        The latter property simply tells whether a tensor has another tensor attached to it as a component.
        This, however, does not mean that it also requires gradients via the autograd system.
        That is what the former property is for.
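        To make the distinction concrete, here is a minimal Groovy sketch assembled from the test
        blocks in this report (illustrative only, not part of the generated spec output):

        // A tensor can carry a gradient component without requesting autograd itself.
        def t = Tensor.of(-3d)      // rqsGradient() is false by default
        def g = Tensor.of(9d)       // a second tensor viewed as the gradient
        t.set( g )                  // attach g to t as a gradient component
        assert t.hasGradient()      // a gradient component is now present...
        assert !t.rqsGradient()     // ...but t still does not require gradients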
        \n "],"tags":{},"see":[], "features":[ { "id":"Tensors can have gradients but not require them.", "result":"PASS", - "duration":"0", + "duration":"0.001 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -52,7 +52,30 @@ }, { - "id":"Gradient of tensor is being applies regardless of the tensor requiring gradient or not", + "id":"Gradient of tensor is being applies regardless of the tensor requiring gradient or not [0]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A new simple tensor.","code":["Tensor t = Tensor.of(-3d)"]}, + + {"kind":"and","text":"A second tensor viewed as gradient.","code":["Tensor g = Tensor.of(9d)"]}, + + {"kind":"and","text":"The gradient tensor is added to the prior tensor as component.","code":["t.set( g )"]}, + + {"kind":"when","text":"The request to apply the gradient is being made.","code":["t.applyGradient()"]}, + + {"kind":"then","text":"The tensor changed as expected.","code":["t.toString().contains(expected)"]}, + + {"kind":"where","text":"","code":{"requiresGradient":["true","false"],"expected":["\"(1):[6.0]\"","\"(1):[6.0]\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Gradient of tensor is being applies regardless of the tensor requiring gradient or not [1]", "result":"PASS", "duration":"0", "iterations":{ diff --git a/docs/spock/reports/ut.tensors.Tensor_IO_Spec.json b/docs/spock/reports/ut.tensors.Tensor_IO_Spec.json index f11039777..9e7812a4c 100644 --- a/docs/spock/reports/ut.tensors.Tensor_IO_Spec.json +++ b/docs/spock/reports/ut.tensors.Tensor_IO_Spec.json @@ -4,119 +4,829 @@ "narrative":"Tensors are complicated data structures with a wide range of different possible states.\n They can host elements of different types residing on many kinds of different devices.\n Here we want to read from and write to the state of a tensor.", "subjects":[], "statistics":{ - "runs":"8", + "runs":"43", "successRate":"100.0%", "failures":"0", "errors":"0", "skipped":"0", - "duration":"0.031 seconds" + "duration":"0.069 seconds" }, "headers":["\n This specification covers some basic behaviour related to\n reading and modifying the data inside a tensor.\n "],"tags":{},"see":[], "features":[ { "id":"Indexing after reshaping works as expected.", "result":"PASS", + "duration":"0.004 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We are using the legacy view for tensors where bracket types are swapped, just because...","code":["Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(true)"]}, + + {"kind":"and","text":"A new tensor instance with the shape (4x3).","code":["var t1 = Tensor.of([4, 3], 1d..12d)"]}, + + {"kind":"when","text":"Recording the index behavior before and after a permute operation...","code":["var t1_ioi_1 = t1.indexOfIndices(new int[]{2, 1})","var t1_ioi_2 = t1.indexOfIndices(new int[]{1, 2})","var t1_indices = t1.indicesOfIndex(5)","var t2 = Function.of(\" [ 1, 0 ]:( I[0] ) \")(t1)","var t2_ioi_1 = t2.indexOfIndices(new int[]{1, 2})","var t2_idx = t2.indicesOfIndex(7)","var t1_ioi_3 = t1.indexOfIndices(t1.indicesOfIndex(7)) // Element 7 '8.0' is at index 7!","var t2_ioi_2 = t2.indexOfIndices(t2.indicesOfIndex(7)) // Element 7 '11.0' is at index 10!"]}, + + {"kind":"then","text":"These recorded values are as one would expect.","code":["t1_ioi_1 == 7","t1_ioi_2 == 5","t1_indices[0] == 1","t1_indices[1] == 2","t2_ioi_1 == 7","t2_idx[0] == 
1","t2_idx[1] == 3","t1_ioi_3 == 7 // Element 7 '8.0' is at index 7!","t2_ioi_2 == 10 // Element 7 '11.0' is at index 10!","t1.toString().contains(\"[4x3]:(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0)\")","t2.toString().contains(\"[3x4]:(1.0, 4.0, 7.0, 10.0, 2.0, 5.0, 8.0, 11.0, 3.0, 6.0, 9.0, 12.0)\")"]} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Tensor value type can not be changed by passing float or double arrays to it.", + "result":"PASS", + "duration":"0.006 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We are using the legacy view for tensors where bracket types are swapped, just because...","code":["Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(true)"]}, + + {"kind":"and","text":"A new tensor instance.","code":["var x = Tensor.of(3d)"]}, + + {"kind":"when","text":"Setting the value of the tensor...","code":["float[] floats = new float[1]","floats[0] = 5","x.mut.setItems(floats)"]}, + + {"kind":"then","text":"...the tensor will change as expected.","code":["!(x.getItems() instanceof float[])","!(x.mut.data.get() instanceof float[])","!(x.rawData instanceof float[])","x.getItemsAs( float[].class )[ 0 ]==5.0f","x.getItemsAs( double[].class )[0]==5.0d"]}, + + {"kind":"when","text":"Doing the same with double array...","code":["double[] doubles = new double[1]","doubles[0] = 4.0","x.mut.setItems(doubles)"]}, + + {"kind":"then","text":"...once again the tensor changes as expected.","code":["x.rawItems instanceof double[]","x.mut.data.get() instanceof double[]","x.rawData instanceof double[]","x.getItemsAs( float[].class )[ 0 ]==4.0f","x.getItemsAs( double[].class )[0]==4.0d","x.isLeave()","!x.isBranch()","!x.isOutsourced()","!x.isVirtual()","!x.isSlice()","!x.isSliceParent()","!x.belongsToGraph()","x.getDevice() !=null","x.getDevice() instanceof CPU","x.rank()==1","!x.rqsGradient()","x.size()==1"]}, + + {"kind":"when","text":"","code":["when : x.mut.toType( Float.class )"]}, + + {"kind":"then","text":"","code":["then : x.rawItems instanceof float[]"]}, + + {"kind":"when","text":"","code":["doubles = new double[1]","doubles[0] = 7.0","x.mut.setItems(doubles)"]}, + + {"kind":"then","text":"","code":["!(x.rawItems instanceof double[])","!(x.mut.data.get() instanceof double[])","!(x.rawData instanceof double[])","x.getItemsAs( float[].class )[ 0 ]==7.0f","x.getItemsAs( double[].class )[0]==7.0d"]} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can re-populate a tensor of shorts from a single scalar value! 
[0]", + "result":"PASS", + "duration":"0.002 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A tensor of 3 numbers:","code":["var t = Tensor.of(type).vector(42, 666, 73)"]}, + + {"kind":"and","text":"We store the tensor on the given device, to ensure that it work there as well.","code":["t.to(device)"]}, + + {"kind":"when","text":"We call the \"setValue\" method with a scalar value passed to it...","code":["t.mut.setItems(5)"]}, + + {"kind":"then","text":"The value of the tensor will be an array of 3.","code":["t.items == [5, 5, 5]"]}, + + {"kind":"and","text":"We now expect the tensor to be virtual, because it stores only a single type of value.","code":["t.isVirtual()"]}, + + {"kind":"where","text":"","code":{"device":["'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'GPU'","'GPU'","'GPU'","'GPU'","'GPU'","'GPU'"],"type":["Byte","Integer","Long","Double","Short","Float","Byte","Integer","Long","Double","Short","Float"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can re-populate a tensor of shorts from a single scalar value! [1]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A tensor of 3 numbers:","code":["var t = Tensor.of(type).vector(42, 666, 73)"]}, + + {"kind":"and","text":"We store the tensor on the given device, to ensure that it work there as well.","code":["t.to(device)"]}, + + {"kind":"when","text":"We call the \"setValue\" method with a scalar value passed to it...","code":["t.mut.setItems(5)"]}, + + {"kind":"then","text":"The value of the tensor will be an array of 3.","code":["t.items == [5, 5, 5]"]}, + + {"kind":"and","text":"We now expect the tensor to be virtual, because it stores only a single type of value.","code":["t.isVirtual()"]}, + + {"kind":"where","text":"","code":{"device":["'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'GPU'","'GPU'","'GPU'","'GPU'","'GPU'","'GPU'"],"type":["Byte","Integer","Long","Double","Short","Float","Byte","Integer","Long","Double","Short","Float"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can re-populate a tensor of shorts from a single scalar value! [2]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A tensor of 3 numbers:","code":["var t = Tensor.of(type).vector(42, 666, 73)"]}, + + {"kind":"and","text":"We store the tensor on the given device, to ensure that it work there as well.","code":["t.to(device)"]}, + + {"kind":"when","text":"We call the \"setValue\" method with a scalar value passed to it...","code":["t.mut.setItems(5)"]}, + + {"kind":"then","text":"The value of the tensor will be an array of 3.","code":["t.items == [5, 5, 5]"]}, + + {"kind":"and","text":"We now expect the tensor to be virtual, because it stores only a single type of value.","code":["t.isVirtual()"]}, + + {"kind":"where","text":"","code":{"device":["'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'GPU'","'GPU'","'GPU'","'GPU'","'GPU'","'GPU'"],"type":["Byte","Integer","Long","Double","Short","Float","Byte","Integer","Long","Double","Short","Float"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can re-populate a tensor of shorts from a single scalar value! 
[3]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A tensor of 3 numbers:","code":["var t = Tensor.of(type).vector(42, 666, 73)"]}, + + {"kind":"and","text":"We store the tensor on the given device, to ensure that it work there as well.","code":["t.to(device)"]}, + + {"kind":"when","text":"We call the \"setValue\" method with a scalar value passed to it...","code":["t.mut.setItems(5)"]}, + + {"kind":"then","text":"The value of the tensor will be an array of 3.","code":["t.items == [5, 5, 5]"]}, + + {"kind":"and","text":"We now expect the tensor to be virtual, because it stores only a single type of value.","code":["t.isVirtual()"]}, + + {"kind":"where","text":"","code":{"device":["'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'GPU'","'GPU'","'GPU'","'GPU'","'GPU'","'GPU'"],"type":["Byte","Integer","Long","Double","Short","Float","Byte","Integer","Long","Double","Short","Float"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can re-populate a tensor of shorts from a single scalar value! [4]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A tensor of 3 numbers:","code":["var t = Tensor.of(type).vector(42, 666, 73)"]}, + + {"kind":"and","text":"We store the tensor on the given device, to ensure that it work there as well.","code":["t.to(device)"]}, + + {"kind":"when","text":"We call the \"setValue\" method with a scalar value passed to it...","code":["t.mut.setItems(5)"]}, + + {"kind":"then","text":"The value of the tensor will be an array of 3.","code":["t.items == [5, 5, 5]"]}, + + {"kind":"and","text":"We now expect the tensor to be virtual, because it stores only a single type of value.","code":["t.isVirtual()"]}, + + {"kind":"where","text":"","code":{"device":["'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'GPU'","'GPU'","'GPU'","'GPU'","'GPU'","'GPU'"],"type":["Byte","Integer","Long","Double","Short","Float","Byte","Integer","Long","Double","Short","Float"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can re-populate a tensor of shorts from a single scalar value! [5]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A tensor of 3 numbers:","code":["var t = Tensor.of(type).vector(42, 666, 73)"]}, + + {"kind":"and","text":"We store the tensor on the given device, to ensure that it work there as well.","code":["t.to(device)"]}, + + {"kind":"when","text":"We call the \"setValue\" method with a scalar value passed to it...","code":["t.mut.setItems(5)"]}, + + {"kind":"then","text":"The value of the tensor will be an array of 3.","code":["t.items == [5, 5, 5]"]}, + + {"kind":"and","text":"We now expect the tensor to be virtual, because it stores only a single type of value.","code":["t.isVirtual()"]}, + + {"kind":"where","text":"","code":{"device":["'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'GPU'","'GPU'","'GPU'","'GPU'","'GPU'","'GPU'"],"type":["Byte","Integer","Long","Double","Short","Float","Byte","Integer","Long","Double","Short","Float"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can re-populate a tensor of shorts from a single scalar value! 
[6]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A tensor of 3 numbers:","code":["var t = Tensor.of(type).vector(42, 666, 73)"]}, + + {"kind":"and","text":"We store the tensor on the given device, to ensure that it work there as well.","code":["t.to(device)"]}, + + {"kind":"when","text":"We call the \"setValue\" method with a scalar value passed to it...","code":["t.mut.setItems(5)"]}, + + {"kind":"then","text":"The value of the tensor will be an array of 3.","code":["t.items == [5, 5, 5]"]}, + + {"kind":"and","text":"We now expect the tensor to be virtual, because it stores only a single type of value.","code":["t.isVirtual()"]}, + + {"kind":"where","text":"","code":{"device":["'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'GPU'","'GPU'","'GPU'","'GPU'","'GPU'","'GPU'"],"type":["Byte","Integer","Long","Double","Short","Float","Byte","Integer","Long","Double","Short","Float"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can re-populate a tensor of shorts from a single scalar value! [7]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A tensor of 3 numbers:","code":["var t = Tensor.of(type).vector(42, 666, 73)"]}, + + {"kind":"and","text":"We store the tensor on the given device, to ensure that it work there as well.","code":["t.to(device)"]}, + + {"kind":"when","text":"We call the \"setValue\" method with a scalar value passed to it...","code":["t.mut.setItems(5)"]}, + + {"kind":"then","text":"The value of the tensor will be an array of 3.","code":["t.items == [5, 5, 5]"]}, + + {"kind":"and","text":"We now expect the tensor to be virtual, because it stores only a single type of value.","code":["t.isVirtual()"]}, + + {"kind":"where","text":"","code":{"device":["'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'GPU'","'GPU'","'GPU'","'GPU'","'GPU'","'GPU'"],"type":["Byte","Integer","Long","Double","Short","Float","Byte","Integer","Long","Double","Short","Float"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can re-populate a tensor of shorts from a single scalar value! [8]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A tensor of 3 numbers:","code":["var t = Tensor.of(type).vector(42, 666, 73)"]}, + + {"kind":"and","text":"We store the tensor on the given device, to ensure that it work there as well.","code":["t.to(device)"]}, + + {"kind":"when","text":"We call the \"setValue\" method with a scalar value passed to it...","code":["t.mut.setItems(5)"]}, + + {"kind":"then","text":"The value of the tensor will be an array of 3.","code":["t.items == [5, 5, 5]"]}, + + {"kind":"and","text":"We now expect the tensor to be virtual, because it stores only a single type of value.","code":["t.isVirtual()"]}, + + {"kind":"where","text":"","code":{"device":["'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'GPU'","'GPU'","'GPU'","'GPU'","'GPU'","'GPU'"],"type":["Byte","Integer","Long","Double","Short","Float","Byte","Integer","Long","Double","Short","Float"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can re-populate a tensor of shorts from a single scalar value! 
[9]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A tensor of 3 numbers:","code":["var t = Tensor.of(type).vector(42, 666, 73)"]}, + + {"kind":"and","text":"We store the tensor on the given device, to ensure that it work there as well.","code":["t.to(device)"]}, + + {"kind":"when","text":"We call the \"setValue\" method with a scalar value passed to it...","code":["t.mut.setItems(5)"]}, + + {"kind":"then","text":"The value of the tensor will be an array of 3.","code":["t.items == [5, 5, 5]"]}, + + {"kind":"and","text":"We now expect the tensor to be virtual, because it stores only a single type of value.","code":["t.isVirtual()"]}, + + {"kind":"where","text":"","code":{"device":["'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'GPU'","'GPU'","'GPU'","'GPU'","'GPU'","'GPU'"],"type":["Byte","Integer","Long","Double","Short","Float","Byte","Integer","Long","Double","Short","Float"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can re-populate a tensor of shorts from a single scalar value! [10]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A tensor of 3 numbers:","code":["var t = Tensor.of(type).vector(42, 666, 73)"]}, + + {"kind":"and","text":"We store the tensor on the given device, to ensure that it work there as well.","code":["t.to(device)"]}, + + {"kind":"when","text":"We call the \"setValue\" method with a scalar value passed to it...","code":["t.mut.setItems(5)"]}, + + {"kind":"then","text":"The value of the tensor will be an array of 3.","code":["t.items == [5, 5, 5]"]}, + + {"kind":"and","text":"We now expect the tensor to be virtual, because it stores only a single type of value.","code":["t.isVirtual()"]}, + + {"kind":"where","text":"","code":{"device":["'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'GPU'","'GPU'","'GPU'","'GPU'","'GPU'","'GPU'"],"type":["Byte","Integer","Long","Double","Short","Float","Byte","Integer","Long","Double","Short","Float"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can re-populate a tensor of shorts from a single scalar value! [11]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A tensor of 3 numbers:","code":["var t = Tensor.of(type).vector(42, 666, 73)"]}, + + {"kind":"and","text":"We store the tensor on the given device, to ensure that it work there as well.","code":["t.to(device)"]}, + + {"kind":"when","text":"We call the \"setValue\" method with a scalar value passed to it...","code":["t.mut.setItems(5)"]}, + + {"kind":"then","text":"The value of the tensor will be an array of 3.","code":["t.items == [5, 5, 5]"]}, + + {"kind":"and","text":"We now expect the tensor to be virtual, because it stores only a single type of value.","code":["t.isVirtual()"]}, + + {"kind":"where","text":"","code":{"device":["'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'GPU'","'GPU'","'GPU'","'GPU'","'GPU'","'GPU'"],"type":["Byte","Integer","Long","Double","Short","Float","Byte","Integer","Long","Double","Short","Float"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can manipulate the underlying data array of a tensor through the mut API. 
[0]", + "result":"PASS", + "duration":"0.003 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A tensor of 3 numbers:","code":["var t = Tensor.of(type).vector(42, 66, 73)"]}, + + {"kind":"and","text":"We store the tensor on the given device, to ensure that it work there as well.","code":["t.to(device)"]}, + + {"kind":"when","text":"We create a slice which should internally reference the same data as the slice parent.","code":["var s = t[1]"]}, + + {"kind":"then","text":"The slice has the expected state!","code":["s.isSlice()","s.items == [66]","s.rawData == [42, 66, 73]"]}, + + {"kind":"when","text":"We call the \"setData\" method with a scalar value passed to it...","code":["s.mut.setDataAt(1, -9)"]}, + + {"kind":"then","text":"The change will be reflected in the slice...","code":["s.items == [-9]"]}, + + {"kind":"and","text":"Also in the slice parent!","code":["t.items == [42, -9, 73]"]}, + + {"kind":"and","text":"Both tensors should have the same data array!","code":["s.rawData == [42, -9, 73]","t.rawData == [42, -9, 73]"]}, + + {"kind":"where","text":"","code":{"device":["'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'GPU'","'GPU'","'GPU'","'GPU'","'GPU'","'GPU'"],"type":["Double","Float","Byte","Short","Integer","Long","Double","Float","Byte","Short","Integer","Long"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can manipulate the underlying data array of a tensor through the mut API. [1]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A tensor of 3 numbers:","code":["var t = Tensor.of(type).vector(42, 66, 73)"]}, + + {"kind":"and","text":"We store the tensor on the given device, to ensure that it work there as well.","code":["t.to(device)"]}, + + {"kind":"when","text":"We create a slice which should internally reference the same data as the slice parent.","code":["var s = t[1]"]}, + + {"kind":"then","text":"The slice has the expected state!","code":["s.isSlice()","s.items == [66]","s.rawData == [42, 66, 73]"]}, + + {"kind":"when","text":"We call the \"setData\" method with a scalar value passed to it...","code":["s.mut.setDataAt(1, -9)"]}, + + {"kind":"then","text":"The change will be reflected in the slice...","code":["s.items == [-9]"]}, + + {"kind":"and","text":"Also in the slice parent!","code":["t.items == [42, -9, 73]"]}, + + {"kind":"and","text":"Both tensors should have the same data array!","code":["s.rawData == [42, -9, 73]","t.rawData == [42, -9, 73]"]}, + + {"kind":"where","text":"","code":{"device":["'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'GPU'","'GPU'","'GPU'","'GPU'","'GPU'","'GPU'"],"type":["Double","Float","Byte","Short","Integer","Long","Double","Float","Byte","Short","Integer","Long"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can manipulate the underlying data array of a tensor through the mut API. 
[2]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A tensor of 3 numbers:","code":["var t = Tensor.of(type).vector(42, 66, 73)"]}, + + {"kind":"and","text":"We store the tensor on the given device, to ensure that it work there as well.","code":["t.to(device)"]}, + + {"kind":"when","text":"We create a slice which should internally reference the same data as the slice parent.","code":["var s = t[1]"]}, + + {"kind":"then","text":"The slice has the expected state!","code":["s.isSlice()","s.items == [66]","s.rawData == [42, 66, 73]"]}, + + {"kind":"when","text":"We call the \"setData\" method with a scalar value passed to it...","code":["s.mut.setDataAt(1, -9)"]}, + + {"kind":"then","text":"The change will be reflected in the slice...","code":["s.items == [-9]"]}, + + {"kind":"and","text":"Also in the slice parent!","code":["t.items == [42, -9, 73]"]}, + + {"kind":"and","text":"Both tensors should have the same data array!","code":["s.rawData == [42, -9, 73]","t.rawData == [42, -9, 73]"]}, + + {"kind":"where","text":"","code":{"device":["'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'GPU'","'GPU'","'GPU'","'GPU'","'GPU'","'GPU'"],"type":["Double","Float","Byte","Short","Integer","Long","Double","Float","Byte","Short","Integer","Long"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can manipulate the underlying data array of a tensor through the mut API. [3]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A tensor of 3 numbers:","code":["var t = Tensor.of(type).vector(42, 66, 73)"]}, + + {"kind":"and","text":"We store the tensor on the given device, to ensure that it work there as well.","code":["t.to(device)"]}, + + {"kind":"when","text":"We create a slice which should internally reference the same data as the slice parent.","code":["var s = t[1]"]}, + + {"kind":"then","text":"The slice has the expected state!","code":["s.isSlice()","s.items == [66]","s.rawData == [42, 66, 73]"]}, + + {"kind":"when","text":"We call the \"setData\" method with a scalar value passed to it...","code":["s.mut.setDataAt(1, -9)"]}, + + {"kind":"then","text":"The change will be reflected in the slice...","code":["s.items == [-9]"]}, + + {"kind":"and","text":"Also in the slice parent!","code":["t.items == [42, -9, 73]"]}, + + {"kind":"and","text":"Both tensors should have the same data array!","code":["s.rawData == [42, -9, 73]","t.rawData == [42, -9, 73]"]}, + + {"kind":"where","text":"","code":{"device":["'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'GPU'","'GPU'","'GPU'","'GPU'","'GPU'","'GPU'"],"type":["Double","Float","Byte","Short","Integer","Long","Double","Float","Byte","Short","Integer","Long"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can manipulate the underlying data array of a tensor through the mut API. 
[4]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A tensor of 3 numbers:","code":["var t = Tensor.of(type).vector(42, 66, 73)"]}, + + {"kind":"and","text":"We store the tensor on the given device, to ensure that it work there as well.","code":["t.to(device)"]}, + + {"kind":"when","text":"We create a slice which should internally reference the same data as the slice parent.","code":["var s = t[1]"]}, + + {"kind":"then","text":"The slice has the expected state!","code":["s.isSlice()","s.items == [66]","s.rawData == [42, 66, 73]"]}, + + {"kind":"when","text":"We call the \"setData\" method with a scalar value passed to it...","code":["s.mut.setDataAt(1, -9)"]}, + + {"kind":"then","text":"The change will be reflected in the slice...","code":["s.items == [-9]"]}, + + {"kind":"and","text":"Also in the slice parent!","code":["t.items == [42, -9, 73]"]}, + + {"kind":"and","text":"Both tensors should have the same data array!","code":["s.rawData == [42, -9, 73]","t.rawData == [42, -9, 73]"]}, + + {"kind":"where","text":"","code":{"device":["'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'GPU'","'GPU'","'GPU'","'GPU'","'GPU'","'GPU'"],"type":["Double","Float","Byte","Short","Integer","Long","Double","Float","Byte","Short","Integer","Long"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can manipulate the underlying data array of a tensor through the mut API. [5]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A tensor of 3 numbers:","code":["var t = Tensor.of(type).vector(42, 66, 73)"]}, + + {"kind":"and","text":"We store the tensor on the given device, to ensure that it work there as well.","code":["t.to(device)"]}, + + {"kind":"when","text":"We create a slice which should internally reference the same data as the slice parent.","code":["var s = t[1]"]}, + + {"kind":"then","text":"The slice has the expected state!","code":["s.isSlice()","s.items == [66]","s.rawData == [42, 66, 73]"]}, + + {"kind":"when","text":"We call the \"setData\" method with a scalar value passed to it...","code":["s.mut.setDataAt(1, -9)"]}, + + {"kind":"then","text":"The change will be reflected in the slice...","code":["s.items == [-9]"]}, + + {"kind":"and","text":"Also in the slice parent!","code":["t.items == [42, -9, 73]"]}, + + {"kind":"and","text":"Both tensors should have the same data array!","code":["s.rawData == [42, -9, 73]","t.rawData == [42, -9, 73]"]}, + + {"kind":"where","text":"","code":{"device":["'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'GPU'","'GPU'","'GPU'","'GPU'","'GPU'","'GPU'"],"type":["Double","Float","Byte","Short","Integer","Long","Double","Float","Byte","Short","Integer","Long"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can manipulate the underlying data array of a tensor through the mut API. 
[6]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A tensor of 3 numbers:","code":["var t = Tensor.of(type).vector(42, 66, 73)"]}, + + {"kind":"and","text":"We store the tensor on the given device, to ensure that it work there as well.","code":["t.to(device)"]}, + + {"kind":"when","text":"We create a slice which should internally reference the same data as the slice parent.","code":["var s = t[1]"]}, + + {"kind":"then","text":"The slice has the expected state!","code":["s.isSlice()","s.items == [66]","s.rawData == [42, 66, 73]"]}, + + {"kind":"when","text":"We call the \"setData\" method with a scalar value passed to it...","code":["s.mut.setDataAt(1, -9)"]}, + + {"kind":"then","text":"The change will be reflected in the slice...","code":["s.items == [-9]"]}, + + {"kind":"and","text":"Also in the slice parent!","code":["t.items == [42, -9, 73]"]}, + + {"kind":"and","text":"Both tensors should have the same data array!","code":["s.rawData == [42, -9, 73]","t.rawData == [42, -9, 73]"]}, + + {"kind":"where","text":"","code":{"device":["'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'GPU'","'GPU'","'GPU'","'GPU'","'GPU'","'GPU'"],"type":["Double","Float","Byte","Short","Integer","Long","Double","Float","Byte","Short","Integer","Long"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can manipulate the underlying data array of a tensor through the mut API. [7]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A tensor of 3 numbers:","code":["var t = Tensor.of(type).vector(42, 66, 73)"]}, + + {"kind":"and","text":"We store the tensor on the given device, to ensure that it work there as well.","code":["t.to(device)"]}, + + {"kind":"when","text":"We create a slice which should internally reference the same data as the slice parent.","code":["var s = t[1]"]}, + + {"kind":"then","text":"The slice has the expected state!","code":["s.isSlice()","s.items == [66]","s.rawData == [42, 66, 73]"]}, + + {"kind":"when","text":"We call the \"setData\" method with a scalar value passed to it...","code":["s.mut.setDataAt(1, -9)"]}, + + {"kind":"then","text":"The change will be reflected in the slice...","code":["s.items == [-9]"]}, + + {"kind":"and","text":"Also in the slice parent!","code":["t.items == [42, -9, 73]"]}, + + {"kind":"and","text":"Both tensors should have the same data array!","code":["s.rawData == [42, -9, 73]","t.rawData == [42, -9, 73]"]}, + + {"kind":"where","text":"","code":{"device":["'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'GPU'","'GPU'","'GPU'","'GPU'","'GPU'","'GPU'"],"type":["Double","Float","Byte","Short","Integer","Long","Double","Float","Byte","Short","Integer","Long"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can manipulate the underlying data array of a tensor through the mut API. 
[8]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A tensor of 3 numbers:","code":["var t = Tensor.of(type).vector(42, 66, 73)"]}, + + {"kind":"and","text":"We store the tensor on the given device, to ensure that it work there as well.","code":["t.to(device)"]}, + + {"kind":"when","text":"We create a slice which should internally reference the same data as the slice parent.","code":["var s = t[1]"]}, + + {"kind":"then","text":"The slice has the expected state!","code":["s.isSlice()","s.items == [66]","s.rawData == [42, 66, 73]"]}, + + {"kind":"when","text":"We call the \"setData\" method with a scalar value passed to it...","code":["s.mut.setDataAt(1, -9)"]}, + + {"kind":"then","text":"The change will be reflected in the slice...","code":["s.items == [-9]"]}, + + {"kind":"and","text":"Also in the slice parent!","code":["t.items == [42, -9, 73]"]}, + + {"kind":"and","text":"Both tensors should have the same data array!","code":["s.rawData == [42, -9, 73]","t.rawData == [42, -9, 73]"]}, + + {"kind":"where","text":"","code":{"device":["'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'GPU'","'GPU'","'GPU'","'GPU'","'GPU'","'GPU'"],"type":["Double","Float","Byte","Short","Integer","Long","Double","Float","Byte","Short","Integer","Long"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can manipulate the underlying data array of a tensor through the mut API. [9]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A tensor of 3 numbers:","code":["var t = Tensor.of(type).vector(42, 66, 73)"]}, + + {"kind":"and","text":"We store the tensor on the given device, to ensure that it work there as well.","code":["t.to(device)"]}, + + {"kind":"when","text":"We create a slice which should internally reference the same data as the slice parent.","code":["var s = t[1]"]}, + + {"kind":"then","text":"The slice has the expected state!","code":["s.isSlice()","s.items == [66]","s.rawData == [42, 66, 73]"]}, + + {"kind":"when","text":"We call the \"setData\" method with a scalar value passed to it...","code":["s.mut.setDataAt(1, -9)"]}, + + {"kind":"then","text":"The change will be reflected in the slice...","code":["s.items == [-9]"]}, + + {"kind":"and","text":"Also in the slice parent!","code":["t.items == [42, -9, 73]"]}, + + {"kind":"and","text":"Both tensors should have the same data array!","code":["s.rawData == [42, -9, 73]","t.rawData == [42, -9, 73]"]}, + + {"kind":"where","text":"","code":{"device":["'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'GPU'","'GPU'","'GPU'","'GPU'","'GPU'","'GPU'"],"type":["Double","Float","Byte","Short","Integer","Long","Double","Float","Byte","Short","Integer","Long"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can manipulate the underlying data array of a tensor through the mut API. 
[10]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A tensor of 3 numbers:","code":["var t = Tensor.of(type).vector(42, 66, 73)"]}, + + {"kind":"and","text":"We store the tensor on the given device, to ensure that it work there as well.","code":["t.to(device)"]}, + + {"kind":"when","text":"We create a slice which should internally reference the same data as the slice parent.","code":["var s = t[1]"]}, + + {"kind":"then","text":"The slice has the expected state!","code":["s.isSlice()","s.items == [66]","s.rawData == [42, 66, 73]"]}, + + {"kind":"when","text":"We call the \"setData\" method with a scalar value passed to it...","code":["s.mut.setDataAt(1, -9)"]}, + + {"kind":"then","text":"The change will be reflected in the slice...","code":["s.items == [-9]"]}, + + {"kind":"and","text":"Also in the slice parent!","code":["t.items == [42, -9, 73]"]}, + + {"kind":"and","text":"Both tensors should have the same data array!","code":["s.rawData == [42, -9, 73]","t.rawData == [42, -9, 73]"]}, + + {"kind":"where","text":"","code":{"device":["'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'GPU'","'GPU'","'GPU'","'GPU'","'GPU'","'GPU'"],"type":["Double","Float","Byte","Short","Integer","Long","Double","Float","Byte","Short","Integer","Long"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can manipulate the underlying data array of a tensor through the mut API. [11]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A tensor of 3 numbers:","code":["var t = Tensor.of(type).vector(42, 66, 73)"]}, + + {"kind":"and","text":"We store the tensor on the given device, to ensure that it work there as well.","code":["t.to(device)"]}, + + {"kind":"when","text":"We create a slice which should internally reference the same data as the slice parent.","code":["var s = t[1]"]}, + + {"kind":"then","text":"The slice has the expected state!","code":["s.isSlice()","s.items == [66]","s.rawData == [42, 66, 73]"]}, + + {"kind":"when","text":"We call the \"setData\" method with a scalar value passed to it...","code":["s.mut.setDataAt(1, -9)"]}, + + {"kind":"then","text":"The change will be reflected in the slice...","code":["s.items == [-9]"]}, + + {"kind":"and","text":"Also in the slice parent!","code":["t.items == [42, -9, 73]"]}, + + {"kind":"and","text":"Both tensors should have the same data array!","code":["s.rawData == [42, -9, 73]","t.rawData == [42, -9, 73]"]}, + + {"kind":"where","text":"","code":{"device":["'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'GPU'","'GPU'","'GPU'","'GPU'","'GPU'","'GPU'"],"type":["Double","Float","Byte","Short","Integer","Long","Double","Float","Byte","Short","Integer","Long"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"When we try to manipulate the underlying data array of a virtual tensor then it will become actual. 
[0]", + "result":"PASS", + "duration":"0.003 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A tensor of 3 numbers:","code":["var t = Tensor.of(type).vector(1, 1, 1)"]}, + + {"kind":"and","text":"We store the tensor on the given device, to ensure that it work there as well.","code":["t.to(device)"]}, + + {"kind":"expect","text":"The tensor is virtual because it is filled homogeneously with the same value.","code":["t.isVirtual()","t.items == [1, 1, 1]","t.rawData == [1] // The data array is a single element array."]}, + + {"kind":"when","text":"We access the third item of the tensor and set the value 42.","code":["t.mut.at(2).set(42)"]}, + + {"kind":"then","text":"The tensor is no longer virtual because it now stores 2 different values.","code":["!t.isVirtual()","t.items == [1, 1, 42]","t.rawData == [1, 1, 42]"]}, + + {"kind":"where","text":"We ensure that this works on different devices and with different data types.","code":{"device":["'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'GPU'"],"type":["Double","Float","Byte","Short","Integer","Long","Float"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"When we try to manipulate the underlying data array of a virtual tensor then it will become actual. [1]", + "result":"PASS", "duration":"0.001 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, "blocks":[ - {"kind":"given","text":"We are using the legacy view for tensors where bracket types are swapped, just because...","code":["Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(true)"]}, + {"kind":"given","text":"A tensor of 3 numbers:","code":["var t = Tensor.of(type).vector(1, 1, 1)"]}, + + {"kind":"and","text":"We store the tensor on the given device, to ensure that it work there as well.","code":["t.to(device)"]}, + + {"kind":"expect","text":"The tensor is virtual because it is filled homogeneously with the same value.","code":["t.isVirtual()","t.items == [1, 1, 1]","t.rawData == [1] // The data array is a single element array."]}, + + {"kind":"when","text":"We access the third item of the tensor and set the value 42.","code":["t.mut.at(2).set(42)"]}, + + {"kind":"then","text":"The tensor is no longer virtual because it now stores 2 different values.","code":["!t.isVirtual()","t.items == [1, 1, 42]","t.rawData == [1, 1, 42]"]}, + + {"kind":"where","text":"We ensure that this works on different devices and with different data types.","code":{"device":["'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'GPU'"],"type":["Double","Float","Byte","Short","Integer","Long","Float"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"When we try to manipulate the underlying data array of a virtual tensor then it will become actual. 
[2]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A tensor of 3 numbers:","code":["var t = Tensor.of(type).vector(1, 1, 1)"]}, - {"kind":"and","text":"A new tensor instance with the shape (4x3).","code":["var t1 = Tensor.of([4, 3], 1d..12d)"]}, + {"kind":"and","text":"We store the tensor on the given device, to ensure that it work there as well.","code":["t.to(device)"]}, - {"kind":"when","text":"Recording the index behavior before and after a permute operation...","code":["var t1_ioi_1 = t1.indexOfIndices(new int[]{2, 1})","var t1_ioi_2 = t1.indexOfIndices(new int[]{1, 2})","var t1_indices = t1.indicesOfIndex(5)","var t2 = Function.of(\" [ 1, 0 ]:( I[0] ) \")(t1)","var t2_ioi_1 = t2.indexOfIndices(new int[]{1, 2})","var t2_idx = t2.indicesOfIndex(7)","var t1_ioi_3 = t1.indexOfIndices(t1.indicesOfIndex(7)) // Element 7 '8.0' is at index 7!","var t2_ioi_2 = t2.indexOfIndices(t2.indicesOfIndex(7)) // Element 7 '11.0' is at index 10!"]}, + {"kind":"expect","text":"The tensor is virtual because it is filled homogeneously with the same value.","code":["t.isVirtual()","t.items == [1, 1, 1]","t.rawData == [1] // The data array is a single element array."]}, - {"kind":"then","text":"These recorded values are as one would expect.","code":["t1_ioi_1 == 7","t1_ioi_2 == 5","t1_indices[0] == 1","t1_indices[1] == 2","t2_ioi_1 == 7","t2_idx[0] == 1","t2_idx[1] == 3","t1_ioi_3 == 7 // Element 7 '8.0' is at index 7!","t2_ioi_2 == 10 // Element 7 '11.0' is at index 10!","t1.toString().contains(\"[4x3]:(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0)\")","t2.toString().contains(\"[3x4]:(1.0, 4.0, 7.0, 10.0, 2.0, 5.0, 8.0, 11.0, 3.0, 6.0, 9.0, 12.0)\")"]} + {"kind":"when","text":"We access the third item of the tensor and set the value 42.","code":["t.mut.at(2).set(42)"]}, + + {"kind":"then","text":"The tensor is no longer virtual because it now stores 2 different values.","code":["!t.isVirtual()","t.items == [1, 1, 42]","t.rawData == [1, 1, 42]"]}, + + {"kind":"where","text":"We ensure that this works on different devices and with different data types.","code":{"device":["'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'GPU'"],"type":["Double","Float","Byte","Short","Integer","Long","Float"]}} ], "problems":{"dataValues":[], "errors":[]} }, { - "id":"Tensor value type can not be changed by passing float or double arrays to it.", + "id":"When we try to manipulate the underlying data array of a virtual tensor then it will become actual. 
[3]", "result":"PASS", "duration":"0", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, "blocks":[ - {"kind":"given","text":"We are using the legacy view for tensors where bracket types are swapped, just because...","code":["Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(true)"]}, - - {"kind":"and","text":"A new tensor instance.","code":["var x = Tensor.of(3d)"]}, - - {"kind":"when","text":"Setting the value of the tensor...","code":["float[] floats = new float[1]","floats[0] = 5","x.mut.setItems(floats)"]}, - - {"kind":"then","text":"...the tensor will change as expected.","code":["!(x.getItems() instanceof float[])","!(x.mut.data.get() instanceof float[])","!(x.rawData instanceof float[])","x.getItemsAs( float[].class )[ 0 ]==5.0f","x.getItemsAs( double[].class )[0]==5.0d"]}, - - {"kind":"when","text":"Doing the same with double array...","code":["double[] doubles = new double[1]","doubles[0] = 4.0","x.mut.setItems(doubles)"]}, + {"kind":"given","text":"A tensor of 3 numbers:","code":["var t = Tensor.of(type).vector(1, 1, 1)"]}, - {"kind":"then","text":"...once again the tensor changes as expected.","code":["x.rawItems instanceof double[]","x.mut.data.get() instanceof double[]","x.rawData instanceof double[]","x.getItemsAs( float[].class )[ 0 ]==4.0f","x.getItemsAs( double[].class )[0]==4.0d","x.isLeave()","!x.isBranch()","!x.isOutsourced()","!x.isVirtual()","!x.isSlice()","!x.isSliceParent()","!x.belongsToGraph()","x.getDevice() !=null","x.getDevice() instanceof CPU","x.rank()==1","!x.rqsGradient()","x.size()==1"]}, + {"kind":"and","text":"We store the tensor on the given device, to ensure that it work there as well.","code":["t.to(device)"]}, - {"kind":"when","text":"","code":["when : x.mut.toType( Float.class )"]}, + {"kind":"expect","text":"The tensor is virtual because it is filled homogeneously with the same value.","code":["t.isVirtual()","t.items == [1, 1, 1]","t.rawData == [1] // The data array is a single element array."]}, - {"kind":"then","text":"","code":["then : x.rawItems instanceof float[]"]}, + {"kind":"when","text":"We access the third item of the tensor and set the value 42.","code":["t.mut.at(2).set(42)"]}, - {"kind":"when","text":"","code":["doubles = new double[1]","doubles[0] = 7.0","x.mut.setItems(doubles)"]}, + {"kind":"then","text":"The tensor is no longer virtual because it now stores 2 different values.","code":["!t.isVirtual()","t.items == [1, 1, 42]","t.rawData == [1, 1, 42]"]}, - {"kind":"then","text":"","code":["!(x.rawItems instanceof double[])","!(x.mut.data.get() instanceof double[])","!(x.rawData instanceof double[])","x.getItemsAs( float[].class )[ 0 ]==7.0f","x.getItemsAs( double[].class )[0]==7.0d"]} + {"kind":"where","text":"We ensure that this works on different devices and with different data types.","code":{"device":["'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'GPU'"],"type":["Double","Float","Byte","Short","Integer","Long","Float"]}} ], "problems":{"dataValues":[], "errors":[]} }, { - "id":"We can re-populate a tensor of shorts from a single scalar value!", + "id":"When we try to manipulate the underlying data array of a virtual tensor then it will become actual. 
[4]", "result":"PASS", "duration":"0", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, "blocks":[ - {"kind":"given","text":"A tensor of 3 numbers:","code":["var t = Tensor.of(type).vector(42, 666, 73)"]}, + {"kind":"given","text":"A tensor of 3 numbers:","code":["var t = Tensor.of(type).vector(1, 1, 1)"]}, {"kind":"and","text":"We store the tensor on the given device, to ensure that it work there as well.","code":["t.to(device)"]}, - {"kind":"when","text":"We call the \"setValue\" method with a scalar value passed to it...","code":["t.mut.setItems(5)"]}, + {"kind":"expect","text":"The tensor is virtual because it is filled homogeneously with the same value.","code":["t.isVirtual()","t.items == [1, 1, 1]","t.rawData == [1] // The data array is a single element array."]}, - {"kind":"then","text":"The value of the tensor will be an array of 3.","code":["t.items == [5, 5, 5]"]}, + {"kind":"when","text":"We access the third item of the tensor and set the value 42.","code":["t.mut.at(2).set(42)"]}, - {"kind":"and","text":"We now expect the tensor to be virtual, because it stores only a single type of value.","code":["t.isVirtual()"]}, + {"kind":"then","text":"The tensor is no longer virtual because it now stores 2 different values.","code":["!t.isVirtual()","t.items == [1, 1, 42]","t.rawData == [1, 1, 42]"]}, - {"kind":"where","text":"","code":{"device":["'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'GPU'","'GPU'","'GPU'","'GPU'","'GPU'","'GPU'"],"type":["Byte","Integer","Long","Double","Short","Float","Byte","Integer","Long","Double","Short","Float"]}} + {"kind":"where","text":"We ensure that this works on different devices and with different data types.","code":{"device":["'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'GPU'"],"type":["Double","Float","Byte","Short","Integer","Long","Float"]}} ], "problems":{"dataValues":[], "errors":[]} }, { - "id":"We can manipulate the underlying data array of a tensor through the mut API.", + "id":"When we try to manipulate the underlying data array of a virtual tensor then it will become actual. 
[5]", "result":"PASS", "duration":"0", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, "blocks":[ - {"kind":"given","text":"A tensor of 3 numbers:","code":["var t = Tensor.of(type).vector(42, 66, 73)"]}, + {"kind":"given","text":"A tensor of 3 numbers:","code":["var t = Tensor.of(type).vector(1, 1, 1)"]}, {"kind":"and","text":"We store the tensor on the given device, to ensure that it work there as well.","code":["t.to(device)"]}, - {"kind":"when","text":"We create a slice which should internally reference the same data as the slice parent.","code":["var s = t[1]"]}, - - {"kind":"then","text":"The slice has the expected state!","code":["s.isSlice()","s.items == [66]","s.rawData == [42, 66, 73]"]}, - - {"kind":"when","text":"We call the \"setData\" method with a scalar value passed to it...","code":["s.mut.setDataAt(1, -9)"]}, - - {"kind":"then","text":"The change will be reflected in the slice...","code":["s.items == [-9]"]}, + {"kind":"expect","text":"The tensor is virtual because it is filled homogeneously with the same value.","code":["t.isVirtual()","t.items == [1, 1, 1]","t.rawData == [1] // The data array is a single element array."]}, - {"kind":"and","text":"Also in the slice parent!","code":["t.items == [42, -9, 73]"]}, + {"kind":"when","text":"We access the third item of the tensor and set the value 42.","code":["t.mut.at(2).set(42)"]}, - {"kind":"and","text":"Both tensors should have the same data array!","code":["s.rawData == [42, -9, 73]","t.rawData == [42, -9, 73]"]}, + {"kind":"then","text":"The tensor is no longer virtual because it now stores 2 different values.","code":["!t.isVirtual()","t.items == [1, 1, 42]","t.rawData == [1, 1, 42]"]}, - {"kind":"where","text":"","code":{"device":["'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'GPU'","'GPU'","'GPU'","'GPU'","'GPU'","'GPU'"],"type":["Double","Float","Byte","Short","Integer","Long","Double","Float","Byte","Short","Integer","Long"]}} + {"kind":"where","text":"We ensure that this works on different devices and with different data types.","code":{"device":["'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'GPU'"],"type":["Double","Float","Byte","Short","Integer","Long","Float"]}} ], "problems":{"dataValues":[], "errors":[]} }, { - "id":"When we try to manipulate the underlying data array of a virtual tensor then it will become actual.", + "id":"When we try to manipulate the underlying data array of a virtual tensor then it will become actual. [6]", "result":"PASS", "duration":"0", "iterations":{ @@ -141,7 +851,7 @@ { "id":"A tensor produced by the static \"Tensor.newRandom(shape)\" has expected \"random\" value.", "result":"PASS", - "duration":"0", + "duration":"0.003 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -168,7 +878,7 @@ { "id":"Tensor values can be manipulated", "result":"PASS", - "duration":"0.001 seconds", + "duration":"0.009 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -223,7 +933,210 @@ }, { - "id":"The tensor data array can be modified by targeting them with an index.", + "id":"The tensor data array can be modified by targeting them with an index. 
[0]", + "result":"PASS", + "duration":"0.002 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["var t = Tensor.of(type).withShape(shape).andFill(data)"]}, + + {"kind":"when","text":"","code":["t.mut.setDataAt( 1, element )"]}, + + {"kind":"then","text":"","code":["t.getDataAt( 1 ) == element"]}, + + {"kind":"and","text":"","code":["t.mut.data.get() == expected","t.rawData == expected"]}, + + {"kind":"when","text":"","code":["t = Tensor.of(type).withShape(shape).andFill(data)"]}, + + {"kind":"and","text":"","code":["t.mut.setItemAt( 1, element )"]}, + + {"kind":"then","text":"","code":["t.item( 1 ) == element"]}, + + {"kind":"and","text":"","code":["t.mut.data.get() == expected","t.rawData == expected"]}, + + {"kind":"where","text":"","code":{"type":["Float","Double","Byte","Short","Long","Integer","Boolean","Character"],"shape":["[2,2]","[2,2]","[2,2]","[2,2]","[2,2]","[2,2]","[2,1]","[2,1]"],"data":["[-42, 24, 9, 3, -34] as float[]","[-42, 24, 9, 3, -34] as double[]","[-42, 24, 9, 3, -34] as byte[]","[-42, 24, 9, 3, -34] as short[]","[-42, 24, 9, 3, -34] as long[]","[-42, 24, 9, 3, -34] as int[]","[false, true, false] as boolean[]","['a', 'b', 'c'] as char[]"],"element":["0.032 as float","0.032 as double","1 as byte","1 as short","1 as long","1 as int","false","'x' as char"],"expected":["[-42.0, 0.032, 9.0, 3.0] as float[]","[-42.0, 0.032, 9.0, 3.0] as double[]","[-42, 1, 9, 3] as byte[]","[-42, 1, 9, 3] as short[]","[-42, 1, 9, 3] as long[]","[-42, 1, 9, 3] as int[]","[false, false] as boolean[]","['a', 'x'] as char[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The tensor data array can be modified by targeting them with an index. [1]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["var t = Tensor.of(type).withShape(shape).andFill(data)"]}, + + {"kind":"when","text":"","code":["t.mut.setDataAt( 1, element )"]}, + + {"kind":"then","text":"","code":["t.getDataAt( 1 ) == element"]}, + + {"kind":"and","text":"","code":["t.mut.data.get() == expected","t.rawData == expected"]}, + + {"kind":"when","text":"","code":["t = Tensor.of(type).withShape(shape).andFill(data)"]}, + + {"kind":"and","text":"","code":["t.mut.setItemAt( 1, element )"]}, + + {"kind":"then","text":"","code":["t.item( 1 ) == element"]}, + + {"kind":"and","text":"","code":["t.mut.data.get() == expected","t.rawData == expected"]}, + + {"kind":"where","text":"","code":{"type":["Float","Double","Byte","Short","Long","Integer","Boolean","Character"],"shape":["[2,2]","[2,2]","[2,2]","[2,2]","[2,2]","[2,2]","[2,1]","[2,1]"],"data":["[-42, 24, 9, 3, -34] as float[]","[-42, 24, 9, 3, -34] as double[]","[-42, 24, 9, 3, -34] as byte[]","[-42, 24, 9, 3, -34] as short[]","[-42, 24, 9, 3, -34] as long[]","[-42, 24, 9, 3, -34] as int[]","[false, true, false] as boolean[]","['a', 'b', 'c'] as char[]"],"element":["0.032 as float","0.032 as double","1 as byte","1 as short","1 as long","1 as int","false","'x' as char"],"expected":["[-42.0, 0.032, 9.0, 3.0] as float[]","[-42.0, 0.032, 9.0, 3.0] as double[]","[-42, 1, 9, 3] as byte[]","[-42, 1, 9, 3] as short[]","[-42, 1, 9, 3] as long[]","[-42, 1, 9, 3] as int[]","[false, false] as boolean[]","['a', 'x'] as char[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The tensor data array can be modified by targeting them with an index. 
[2]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["var t = Tensor.of(type).withShape(shape).andFill(data)"]}, + + {"kind":"when","text":"","code":["t.mut.setDataAt( 1, element )"]}, + + {"kind":"then","text":"","code":["t.getDataAt( 1 ) == element"]}, + + {"kind":"and","text":"","code":["t.mut.data.get() == expected","t.rawData == expected"]}, + + {"kind":"when","text":"","code":["t = Tensor.of(type).withShape(shape).andFill(data)"]}, + + {"kind":"and","text":"","code":["t.mut.setItemAt( 1, element )"]}, + + {"kind":"then","text":"","code":["t.item( 1 ) == element"]}, + + {"kind":"and","text":"","code":["t.mut.data.get() == expected","t.rawData == expected"]}, + + {"kind":"where","text":"","code":{"type":["Float","Double","Byte","Short","Long","Integer","Boolean","Character"],"shape":["[2,2]","[2,2]","[2,2]","[2,2]","[2,2]","[2,2]","[2,1]","[2,1]"],"data":["[-42, 24, 9, 3, -34] as float[]","[-42, 24, 9, 3, -34] as double[]","[-42, 24, 9, 3, -34] as byte[]","[-42, 24, 9, 3, -34] as short[]","[-42, 24, 9, 3, -34] as long[]","[-42, 24, 9, 3, -34] as int[]","[false, true, false] as boolean[]","['a', 'b', 'c'] as char[]"],"element":["0.032 as float","0.032 as double","1 as byte","1 as short","1 as long","1 as int","false","'x' as char"],"expected":["[-42.0, 0.032, 9.0, 3.0] as float[]","[-42.0, 0.032, 9.0, 3.0] as double[]","[-42, 1, 9, 3] as byte[]","[-42, 1, 9, 3] as short[]","[-42, 1, 9, 3] as long[]","[-42, 1, 9, 3] as int[]","[false, false] as boolean[]","['a', 'x'] as char[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The tensor data array can be modified by targeting them with an index. [3]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["var t = Tensor.of(type).withShape(shape).andFill(data)"]}, + + {"kind":"when","text":"","code":["t.mut.setDataAt( 1, element )"]}, + + {"kind":"then","text":"","code":["t.getDataAt( 1 ) == element"]}, + + {"kind":"and","text":"","code":["t.mut.data.get() == expected","t.rawData == expected"]}, + + {"kind":"when","text":"","code":["t = Tensor.of(type).withShape(shape).andFill(data)"]}, + + {"kind":"and","text":"","code":["t.mut.setItemAt( 1, element )"]}, + + {"kind":"then","text":"","code":["t.item( 1 ) == element"]}, + + {"kind":"and","text":"","code":["t.mut.data.get() == expected","t.rawData == expected"]}, + + {"kind":"where","text":"","code":{"type":["Float","Double","Byte","Short","Long","Integer","Boolean","Character"],"shape":["[2,2]","[2,2]","[2,2]","[2,2]","[2,2]","[2,2]","[2,1]","[2,1]"],"data":["[-42, 24, 9, 3, -34] as float[]","[-42, 24, 9, 3, -34] as double[]","[-42, 24, 9, 3, -34] as byte[]","[-42, 24, 9, 3, -34] as short[]","[-42, 24, 9, 3, -34] as long[]","[-42, 24, 9, 3, -34] as int[]","[false, true, false] as boolean[]","['a', 'b', 'c'] as char[]"],"element":["0.032 as float","0.032 as double","1 as byte","1 as short","1 as long","1 as int","false","'x' as char"],"expected":["[-42.0, 0.032, 9.0, 3.0] as float[]","[-42.0, 0.032, 9.0, 3.0] as double[]","[-42, 1, 9, 3] as byte[]","[-42, 1, 9, 3] as short[]","[-42, 1, 9, 3] as long[]","[-42, 1, 9, 3] as int[]","[false, false] as boolean[]","['a', 'x'] as char[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The tensor data array can be modified by targeting them with an index. 
[4]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["var t = Tensor.of(type).withShape(shape).andFill(data)"]}, + + {"kind":"when","text":"","code":["t.mut.setDataAt( 1, element )"]}, + + {"kind":"then","text":"","code":["t.getDataAt( 1 ) == element"]}, + + {"kind":"and","text":"","code":["t.mut.data.get() == expected","t.rawData == expected"]}, + + {"kind":"when","text":"","code":["t = Tensor.of(type).withShape(shape).andFill(data)"]}, + + {"kind":"and","text":"","code":["t.mut.setItemAt( 1, element )"]}, + + {"kind":"then","text":"","code":["t.item( 1 ) == element"]}, + + {"kind":"and","text":"","code":["t.mut.data.get() == expected","t.rawData == expected"]}, + + {"kind":"where","text":"","code":{"type":["Float","Double","Byte","Short","Long","Integer","Boolean","Character"],"shape":["[2,2]","[2,2]","[2,2]","[2,2]","[2,2]","[2,2]","[2,1]","[2,1]"],"data":["[-42, 24, 9, 3, -34] as float[]","[-42, 24, 9, 3, -34] as double[]","[-42, 24, 9, 3, -34] as byte[]","[-42, 24, 9, 3, -34] as short[]","[-42, 24, 9, 3, -34] as long[]","[-42, 24, 9, 3, -34] as int[]","[false, true, false] as boolean[]","['a', 'b', 'c'] as char[]"],"element":["0.032 as float","0.032 as double","1 as byte","1 as short","1 as long","1 as int","false","'x' as char"],"expected":["[-42.0, 0.032, 9.0, 3.0] as float[]","[-42.0, 0.032, 9.0, 3.0] as double[]","[-42, 1, 9, 3] as byte[]","[-42, 1, 9, 3] as short[]","[-42, 1, 9, 3] as long[]","[-42, 1, 9, 3] as int[]","[false, false] as boolean[]","['a', 'x'] as char[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The tensor data array can be modified by targeting them with an index. [5]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["var t = Tensor.of(type).withShape(shape).andFill(data)"]}, + + {"kind":"when","text":"","code":["t.mut.setDataAt( 1, element )"]}, + + {"kind":"then","text":"","code":["t.getDataAt( 1 ) == element"]}, + + {"kind":"and","text":"","code":["t.mut.data.get() == expected","t.rawData == expected"]}, + + {"kind":"when","text":"","code":["t = Tensor.of(type).withShape(shape).andFill(data)"]}, + + {"kind":"and","text":"","code":["t.mut.setItemAt( 1, element )"]}, + + {"kind":"then","text":"","code":["t.item( 1 ) == element"]}, + + {"kind":"and","text":"","code":["t.mut.data.get() == expected","t.rawData == expected"]}, + + {"kind":"where","text":"","code":{"type":["Float","Double","Byte","Short","Long","Integer","Boolean","Character"],"shape":["[2,2]","[2,2]","[2,2]","[2,2]","[2,2]","[2,2]","[2,1]","[2,1]"],"data":["[-42, 24, 9, 3, -34] as float[]","[-42, 24, 9, 3, -34] as double[]","[-42, 24, 9, 3, -34] as byte[]","[-42, 24, 9, 3, -34] as short[]","[-42, 24, 9, 3, -34] as long[]","[-42, 24, 9, 3, -34] as int[]","[false, true, false] as boolean[]","['a', 'b', 'c'] as char[]"],"element":["0.032 as float","0.032 as double","1 as byte","1 as short","1 as long","1 as int","false","'x' as char"],"expected":["[-42.0, 0.032, 9.0, 3.0] as float[]","[-42.0, 0.032, 9.0, 3.0] as double[]","[-42, 1, 9, 3] as byte[]","[-42, 1, 9, 3] as short[]","[-42, 1, 9, 3] as long[]","[-42, 1, 9, 3] as int[]","[false, false] as boolean[]","['a', 'x'] as char[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The tensor data array can be modified by targeting them with an index. 
[6]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["var t = Tensor.of(type).withShape(shape).andFill(data)"]}, + + {"kind":"when","text":"","code":["t.mut.setDataAt( 1, element )"]}, + + {"kind":"then","text":"","code":["t.getDataAt( 1 ) == element"]}, + + {"kind":"and","text":"","code":["t.mut.data.get() == expected","t.rawData == expected"]}, + + {"kind":"when","text":"","code":["t = Tensor.of(type).withShape(shape).andFill(data)"]}, + + {"kind":"and","text":"","code":["t.mut.setItemAt( 1, element )"]}, + + {"kind":"then","text":"","code":["t.item( 1 ) == element"]}, + + {"kind":"and","text":"","code":["t.mut.data.get() == expected","t.rawData == expected"]}, + + {"kind":"where","text":"","code":{"type":["Float","Double","Byte","Short","Long","Integer","Boolean","Character"],"shape":["[2,2]","[2,2]","[2,2]","[2,2]","[2,2]","[2,2]","[2,1]","[2,1]"],"data":["[-42, 24, 9, 3, -34] as float[]","[-42, 24, 9, 3, -34] as double[]","[-42, 24, 9, 3, -34] as byte[]","[-42, 24, 9, 3, -34] as short[]","[-42, 24, 9, 3, -34] as long[]","[-42, 24, 9, 3, -34] as int[]","[false, true, false] as boolean[]","['a', 'b', 'c'] as char[]"],"element":["0.032 as float","0.032 as double","1 as byte","1 as short","1 as long","1 as int","false","'x' as char"],"expected":["[-42.0, 0.032, 9.0, 3.0] as float[]","[-42.0, 0.032, 9.0, 3.0] as double[]","[-42, 1, 9, 3] as byte[]","[-42, 1, 9, 3] as short[]","[-42, 1, 9, 3] as long[]","[-42, 1, 9, 3] as int[]","[false, false] as boolean[]","['a', 'x'] as char[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The tensor data array can be modified by targeting them with an index. [7]", "result":"PASS", "duration":"0", "iterations":{ diff --git a/docs/spock/reports/ut.tensors.Tensor_Instantiation_Spec.json b/docs/spock/reports/ut.tensors.Tensor_Instantiation_Spec.json index 165f49cc9..0cd1da1b9 100644 --- a/docs/spock/reports/ut.tensors.Tensor_Instantiation_Spec.json +++ b/docs/spock/reports/ut.tensors.Tensor_Instantiation_Spec.json @@ -4,17 +4,63 @@ "narrative":"Tensors are complicated data structures with a wide range of different possible states.\n They can host elements of different types residing on many kinds of different devices.\n Here we want to show how a tensor can be instantiated in different ways.", "subjects":["neureka.Tensor"], "statistics":{ - "runs":"7", + "runs":"25", "successRate":"100.0%", "failures":"0", "errors":"0", "skipped":"0", - "duration":"0.011 seconds" + "duration":"0.027 seconds" }, "headers":[],"tags":{},"see":[], "features":[ { - "id":"Vector tensors can be instantiated via factory methods.", + "id":"Vector tensors can be instantiated via factory methods. [0]", + "result":"PASS", + "duration":"0.002 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a vector tensor using the \"of\" factory method.","code":["Tensor t = Tensor.of(data)"]}, + + {"kind":"expect","text":"The resulting tensor has the expected item type class.","code":["t.itemType == type"]}, + + {"kind":"and","text":"Also the expected shape.","code":["t.shape() == shape"]}, + + {"kind":"and","text":"The tensor contains the expected items.","code":["t.items == data"]}, + + {"kind":"and","text":"The tensor is not virtual nor is it a slice... 
so the underlying data is also as expected.","code":["t.rawData == data","t.mut.data.get() == data // This exposes the internal data array"]}, + + {"kind":"where","text":"The following data arrays will lead to the tensor having the expected type and shape.","code":{"data":["new double[]{1.1, 2.2, 3.3}","new float[]{-0.21, 543.3}","new boolean[]{true, false}","new short[]{1, 2, 99, -123}","new long[]{3, 8, 4, 2, 3, 0}","new int[]{66, 1, 4, 42, -40}"],"type":["Double","Float","Boolean","Short","Long","Integer"],"shape":["[ 3 ]","[ 2 ]","[ 2 ]","[ 4 ]","[ 6 ]","[ 5 ]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Vector tensors can be instantiated via factory methods. [1]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a vector tensor using the \"of\" factory method.","code":["Tensor t = Tensor.of(data)"]}, + + {"kind":"expect","text":"The resulting tensor has the expected item type class.","code":["t.itemType == type"]}, + + {"kind":"and","text":"Also the expected shape.","code":["t.shape() == shape"]}, + + {"kind":"and","text":"The tensor contains the expected items.","code":["t.items == data"]}, + + {"kind":"and","text":"The tensor is not virtual nor is it a slice... so the underlying data is also as expected.","code":["t.rawData == data","t.mut.data.get() == data // This exposes the internal data array"]}, + + {"kind":"where","text":"The following data arrays will lead to the tensor having the expected type and shape.","code":{"data":["new double[]{1.1, 2.2, 3.3}","new float[]{-0.21, 543.3}","new boolean[]{true, false}","new short[]{1, 2, 99, -123}","new long[]{3, 8, 4, 2, 3, 0}","new int[]{66, 1, 4, 42, -40}"],"type":["Double","Float","Boolean","Short","Long","Integer"],"shape":["[ 3 ]","[ 2 ]","[ 2 ]","[ 4 ]","[ 6 ]","[ 5 ]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Vector tensors can be instantiated via factory methods. [2]", "result":"PASS", "duration":"0", "iterations":{ @@ -37,12 +83,81 @@ }, { - "id":"Scalar tensors can be created via static factory methods", + "id":"Vector tensors can be instantiated via factory methods. [3]", "result":"PASS", "duration":"0", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, + "blocks":[ + {"kind":"given","text":"We create a vector tensor using the \"of\" factory method.","code":["Tensor t = Tensor.of(data)"]}, + + {"kind":"expect","text":"The resulting tensor has the expected item type class.","code":["t.itemType == type"]}, + + {"kind":"and","text":"Also the expected shape.","code":["t.shape() == shape"]}, + + {"kind":"and","text":"The tensor contains the expected items.","code":["t.items == data"]}, + + {"kind":"and","text":"The tensor is not virtual nor is it a slice... so the underlying data is also as expected.","code":["t.rawData == data","t.mut.data.get() == data // This exposes the internal data array"]}, + + {"kind":"where","text":"The following data arrays will lead to the tensor having the expected type and shape.","code":{"data":["new double[]{1.1, 2.2, 3.3}","new float[]{-0.21, 543.3}","new boolean[]{true, false}","new short[]{1, 2, 99, -123}","new long[]{3, 8, 4, 2, 3, 0}","new int[]{66, 1, 4, 42, -40}"],"type":["Double","Float","Boolean","Short","Long","Integer"],"shape":["[ 3 ]","[ 2 ]","[ 2 ]","[ 4 ]","[ 6 ]","[ 5 ]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Vector tensors can be instantiated via factory methods. 
[4]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a vector tensor using the \"of\" factory method.","code":["Tensor t = Tensor.of(data)"]}, + + {"kind":"expect","text":"The resulting tensor has the expected item type class.","code":["t.itemType == type"]}, + + {"kind":"and","text":"Also the expected shape.","code":["t.shape() == shape"]}, + + {"kind":"and","text":"The tensor contains the expected items.","code":["t.items == data"]}, + + {"kind":"and","text":"The tensor is not virtual nor is it a slice... so the underlying data is also as expected.","code":["t.rawData == data","t.mut.data.get() == data // This exposes the internal data array"]}, + + {"kind":"where","text":"The following data arrays will lead to the tensor having the expected type and shape.","code":{"data":["new double[]{1.1, 2.2, 3.3}","new float[]{-0.21, 543.3}","new boolean[]{true, false}","new short[]{1, 2, 99, -123}","new long[]{3, 8, 4, 2, 3, 0}","new int[]{66, 1, 4, 42, -40}"],"type":["Double","Float","Boolean","Short","Long","Integer"],"shape":["[ 3 ]","[ 2 ]","[ 2 ]","[ 4 ]","[ 6 ]","[ 5 ]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Vector tensors can be instantiated via factory methods. [5]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a vector tensor using the \"of\" factory method.","code":["Tensor t = Tensor.of(data)"]}, + + {"kind":"expect","text":"The resulting tensor has the expected item type class.","code":["t.itemType == type"]}, + + {"kind":"and","text":"Also the expected shape.","code":["t.shape() == shape"]}, + + {"kind":"and","text":"The tensor contains the expected items.","code":["t.items == data"]}, + + {"kind":"and","text":"The tensor is not virtual nor is it a slice... 
so the underlying data is also as expected.","code":["t.rawData == data","t.mut.data.get() == data // This exposes the internal data array"]}, + + {"kind":"where","text":"The following data arrays will lead to the tensor having the expected type and shape.","code":{"data":["new double[]{1.1, 2.2, 3.3}","new float[]{-0.21, 543.3}","new boolean[]{true, false}","new short[]{1, 2, 99, -123}","new long[]{3, 8, 4, 2, 3, 0}","new int[]{66, 1, 4, 42, -40}"],"type":["Double","Float","Boolean","Short","Long","Integer"],"shape":["[ 3 ]","[ 2 ]","[ 2 ]","[ 4 ]","[ 6 ]","[ 5 ]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Scalar tensors can be created via static factory methods [0]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, "blocks":[ {"kind":"given","text":"We make sure that the data is of the right type (based on the data table):","code":["data = data.asType(type)"]}, @@ -62,10 +177,335 @@ }, { - "id":"A matrix tensor can be instantiated using lists for it's shape and values.", + "id":"Scalar tensors can be created via static factory methods [1]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We make sure that the data is of the right type (based on the data table):","code":["data = data.asType(type)"]}, + + {"kind":"and","text":"We create a scalar tensor using the \"of\" factory method.","code":["Tensor t = Tensor.of(data)"]}, + + {"kind":"expect","text":"The resulting tensor has the expected item type class.","code":["t.itemType == type"]}, + + {"kind":"and","text":"Also the expected shape.","code":["t.shape() == [ 1 ]"]}, + + {"kind":"and","text":"The tensor has the expected data array.","code":["t.mut.data.get() == [data] // Internal data","t.rawData == [data]"]}, + + {"kind":"and","text":"The tensor is not virtual nor is it a slice... so the item array and data array contain the same values.","code":["t.items == [data]"]}, + + {"kind":"where","text":"","code":{"data":["1.1","-0.21","0.1f","-42.9","true","false","99","-123","3L","8L","1","2","-12","3"],"type":["Double","Double","Float","Float","Boolean","Boolean","Integer","Integer","Long","Long","Short","Short","Byte","Byte"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Scalar tensors can be created via static factory methods [2]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We make sure that the data is of the right type (based on the data table):","code":["data = data.asType(type)"]}, + + {"kind":"and","text":"We create a scalar tensor using the \"of\" factory method.","code":["Tensor t = Tensor.of(data)"]}, + + {"kind":"expect","text":"The resulting tensor has the expected item type class.","code":["t.itemType == type"]}, + + {"kind":"and","text":"Also the expected shape.","code":["t.shape() == [ 1 ]"]}, + + {"kind":"and","text":"The tensor has the expected data array.","code":["t.mut.data.get() == [data] // Internal data","t.rawData == [data]"]}, + + {"kind":"and","text":"The tensor is not virtual nor is it a slice... 
so the item array and data array contain the same values.","code":["t.items == [data]"]}, + + {"kind":"where","text":"","code":{"data":["1.1","-0.21","0.1f","-42.9","true","false","99","-123","3L","8L","1","2","-12","3"],"type":["Double","Double","Float","Float","Boolean","Boolean","Integer","Integer","Long","Long","Short","Short","Byte","Byte"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Scalar tensors can be created via static factory methods [3]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We make sure that the data is of the right type (based on the data table):","code":["data = data.asType(type)"]}, + + {"kind":"and","text":"We create a scalar tensor using the \"of\" factory method.","code":["Tensor t = Tensor.of(data)"]}, + + {"kind":"expect","text":"The resulting tensor has the expected item type class.","code":["t.itemType == type"]}, + + {"kind":"and","text":"Also the expected shape.","code":["t.shape() == [ 1 ]"]}, + + {"kind":"and","text":"The tensor has the expected data array.","code":["t.mut.data.get() == [data] // Internal data","t.rawData == [data]"]}, + + {"kind":"and","text":"The tensor is not virtual nor is it a slice... so the item array and data array contain the same values.","code":["t.items == [data]"]}, + + {"kind":"where","text":"","code":{"data":["1.1","-0.21","0.1f","-42.9","true","false","99","-123","3L","8L","1","2","-12","3"],"type":["Double","Double","Float","Float","Boolean","Boolean","Integer","Integer","Long","Long","Short","Short","Byte","Byte"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Scalar tensors can be created via static factory methods [4]", "result":"PASS", "duration":"0", "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We make sure that the data is of the right type (based on the data table):","code":["data = data.asType(type)"]}, + + {"kind":"and","text":"We create a scalar tensor using the \"of\" factory method.","code":["Tensor t = Tensor.of(data)"]}, + + {"kind":"expect","text":"The resulting tensor has the expected item type class.","code":["t.itemType == type"]}, + + {"kind":"and","text":"Also the expected shape.","code":["t.shape() == [ 1 ]"]}, + + {"kind":"and","text":"The tensor has the expected data array.","code":["t.mut.data.get() == [data] // Internal data","t.rawData == [data]"]}, + + {"kind":"and","text":"The tensor is not virtual nor is it a slice... 
so the item array and data array contain the same values.","code":["t.items == [data]"]}, + + {"kind":"where","text":"","code":{"data":["1.1","-0.21","0.1f","-42.9","true","false","99","-123","3L","8L","1","2","-12","3"],"type":["Double","Double","Float","Float","Boolean","Boolean","Integer","Integer","Long","Long","Short","Short","Byte","Byte"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Scalar tensors can be created via static factory methods [5]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We make sure that the data is of the right type (based on the data table):","code":["data = data.asType(type)"]}, + + {"kind":"and","text":"We create a scalar tensor using the \"of\" factory method.","code":["Tensor t = Tensor.of(data)"]}, + + {"kind":"expect","text":"The resulting tensor has the expected item type class.","code":["t.itemType == type"]}, + + {"kind":"and","text":"Also the expected shape.","code":["t.shape() == [ 1 ]"]}, + + {"kind":"and","text":"The tensor has the expected data array.","code":["t.mut.data.get() == [data] // Internal data","t.rawData == [data]"]}, + + {"kind":"and","text":"The tensor is not virtual nor is it a slice... so the item array and data array contain the same values.","code":["t.items == [data]"]}, + + {"kind":"where","text":"","code":{"data":["1.1","-0.21","0.1f","-42.9","true","false","99","-123","3L","8L","1","2","-12","3"],"type":["Double","Double","Float","Float","Boolean","Boolean","Integer","Integer","Long","Long","Short","Short","Byte","Byte"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Scalar tensors can be created via static factory methods [6]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We make sure that the data is of the right type (based on the data table):","code":["data = data.asType(type)"]}, + + {"kind":"and","text":"We create a scalar tensor using the \"of\" factory method.","code":["Tensor t = Tensor.of(data)"]}, + + {"kind":"expect","text":"The resulting tensor has the expected item type class.","code":["t.itemType == type"]}, + + {"kind":"and","text":"Also the expected shape.","code":["t.shape() == [ 1 ]"]}, + + {"kind":"and","text":"The tensor has the expected data array.","code":["t.mut.data.get() == [data] // Internal data","t.rawData == [data]"]}, + + {"kind":"and","text":"The tensor is not virtual nor is it a slice... 
so the item array and data array contain the same values.","code":["t.items == [data]"]}, + + {"kind":"where","text":"","code":{"data":["1.1","-0.21","0.1f","-42.9","true","false","99","-123","3L","8L","1","2","-12","3"],"type":["Double","Double","Float","Float","Boolean","Boolean","Integer","Integer","Long","Long","Short","Short","Byte","Byte"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Scalar tensors can be created via static factory methods [7]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We make sure that the data is of the right type (based on the data table):","code":["data = data.asType(type)"]}, + + {"kind":"and","text":"We create a scalar tensor using the \"of\" factory method.","code":["Tensor t = Tensor.of(data)"]}, + + {"kind":"expect","text":"The resulting tensor has the expected item type class.","code":["t.itemType == type"]}, + + {"kind":"and","text":"Also the expected shape.","code":["t.shape() == [ 1 ]"]}, + + {"kind":"and","text":"The tensor has the expected data array.","code":["t.mut.data.get() == [data] // Internal data","t.rawData == [data]"]}, + + {"kind":"and","text":"The tensor is not virtual nor is it a slice... so the item array and data array contain the same values.","code":["t.items == [data]"]}, + + {"kind":"where","text":"","code":{"data":["1.1","-0.21","0.1f","-42.9","true","false","99","-123","3L","8L","1","2","-12","3"],"type":["Double","Double","Float","Float","Boolean","Boolean","Integer","Integer","Long","Long","Short","Short","Byte","Byte"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Scalar tensors can be created via static factory methods [8]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We make sure that the data is of the right type (based on the data table):","code":["data = data.asType(type)"]}, + + {"kind":"and","text":"We create a scalar tensor using the \"of\" factory method.","code":["Tensor t = Tensor.of(data)"]}, + + {"kind":"expect","text":"The resulting tensor has the expected item type class.","code":["t.itemType == type"]}, + + {"kind":"and","text":"Also the expected shape.","code":["t.shape() == [ 1 ]"]}, + + {"kind":"and","text":"The tensor has the expected data array.","code":["t.mut.data.get() == [data] // Internal data","t.rawData == [data]"]}, + + {"kind":"and","text":"The tensor is not virtual nor is it a slice... 
so the item array and data array contain the same values.","code":["t.items == [data]"]}, + + {"kind":"where","text":"","code":{"data":["1.1","-0.21","0.1f","-42.9","true","false","99","-123","3L","8L","1","2","-12","3"],"type":["Double","Double","Float","Float","Boolean","Boolean","Integer","Integer","Long","Long","Short","Short","Byte","Byte"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Scalar tensors can be created via static factory methods [9]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We make sure that the data is of the right type (based on the data table):","code":["data = data.asType(type)"]}, + + {"kind":"and","text":"We create a scalar tensor using the \"of\" factory method.","code":["Tensor t = Tensor.of(data)"]}, + + {"kind":"expect","text":"The resulting tensor has the expected item type class.","code":["t.itemType == type"]}, + + {"kind":"and","text":"Also the expected shape.","code":["t.shape() == [ 1 ]"]}, + + {"kind":"and","text":"The tensor has the expected data array.","code":["t.mut.data.get() == [data] // Internal data","t.rawData == [data]"]}, + + {"kind":"and","text":"The tensor is not virtual nor is it a slice... so the item array and data array contain the same values.","code":["t.items == [data]"]}, + + {"kind":"where","text":"","code":{"data":["1.1","-0.21","0.1f","-42.9","true","false","99","-123","3L","8L","1","2","-12","3"],"type":["Double","Double","Float","Float","Boolean","Boolean","Integer","Integer","Long","Long","Short","Short","Byte","Byte"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Scalar tensors can be created via static factory methods [10]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We make sure that the data is of the right type (based on the data table):","code":["data = data.asType(type)"]}, + + {"kind":"and","text":"We create a scalar tensor using the \"of\" factory method.","code":["Tensor t = Tensor.of(data)"]}, + + {"kind":"expect","text":"The resulting tensor has the expected item type class.","code":["t.itemType == type"]}, + + {"kind":"and","text":"Also the expected shape.","code":["t.shape() == [ 1 ]"]}, + + {"kind":"and","text":"The tensor has the expected data array.","code":["t.mut.data.get() == [data] // Internal data","t.rawData == [data]"]}, + + {"kind":"and","text":"The tensor is not virtual nor is it a slice... 
so the item array and data array contain the same values.","code":["t.items == [data]"]}, + + {"kind":"where","text":"","code":{"data":["1.1","-0.21","0.1f","-42.9","true","false","99","-123","3L","8L","1","2","-12","3"],"type":["Double","Double","Float","Float","Boolean","Boolean","Integer","Integer","Long","Long","Short","Short","Byte","Byte"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Scalar tensors can be created via static factory methods [11]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We make sure that the data is of the right type (based on the data table):","code":["data = data.asType(type)"]}, + + {"kind":"and","text":"We create a scalar tensor using the \"of\" factory method.","code":["Tensor t = Tensor.of(data)"]}, + + {"kind":"expect","text":"The resulting tensor has the expected item type class.","code":["t.itemType == type"]}, + + {"kind":"and","text":"Also the expected shape.","code":["t.shape() == [ 1 ]"]}, + + {"kind":"and","text":"The tensor has the expected data array.","code":["t.mut.data.get() == [data] // Internal data","t.rawData == [data]"]}, + + {"kind":"and","text":"The tensor is not virtual nor is it a slice... so the item array and data array contain the same values.","code":["t.items == [data]"]}, + + {"kind":"where","text":"","code":{"data":["1.1","-0.21","0.1f","-42.9","true","false","99","-123","3L","8L","1","2","-12","3"],"type":["Double","Double","Float","Float","Boolean","Boolean","Integer","Integer","Long","Long","Short","Short","Byte","Byte"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Scalar tensors can be created via static factory methods [12]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We make sure that the data is of the right type (based on the data table):","code":["data = data.asType(type)"]}, + + {"kind":"and","text":"We create a scalar tensor using the \"of\" factory method.","code":["Tensor t = Tensor.of(data)"]}, + + {"kind":"expect","text":"The resulting tensor has the expected item type class.","code":["t.itemType == type"]}, + + {"kind":"and","text":"Also the expected shape.","code":["t.shape() == [ 1 ]"]}, + + {"kind":"and","text":"The tensor has the expected data array.","code":["t.mut.data.get() == [data] // Internal data","t.rawData == [data]"]}, + + {"kind":"and","text":"The tensor is not virtual nor is it a slice... 
so the item array and data array contain the same values.","code":["t.items == [data]"]}, + + {"kind":"where","text":"","code":{"data":["1.1","-0.21","0.1f","-42.9","true","false","99","-123","3L","8L","1","2","-12","3"],"type":["Double","Double","Float","Float","Boolean","Boolean","Integer","Integer","Long","Long","Short","Short","Byte","Byte"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Scalar tensors can be created via static factory methods [13]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We make sure that the data is of the right type (based on the data table):","code":["data = data.asType(type)"]}, + + {"kind":"and","text":"We create a scalar tensor using the \"of\" factory method.","code":["Tensor t = Tensor.of(data)"]}, + + {"kind":"expect","text":"The resulting tensor has the expected item type class.","code":["t.itemType == type"]}, + + {"kind":"and","text":"Also the expected shape.","code":["t.shape() == [ 1 ]"]}, + + {"kind":"and","text":"The tensor has the expected data array.","code":["t.mut.data.get() == [data] // Internal data","t.rawData == [data]"]}, + + {"kind":"and","text":"The tensor is not virtual nor is it a slice... so the item array and data array contain the same values.","code":["t.items == [data]"]}, + + {"kind":"where","text":"","code":{"data":["1.1","-0.21","0.1f","-42.9","true","false","99","-123","3L","8L","1","2","-12","3"],"type":["Double","Double","Float","Float","Boolean","Boolean","Integer","Integer","Long","Long","Short","Short","Byte","Byte"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"A matrix tensor can be instantiated using lists for it's shape and values.", + "result":"PASS", + "duration":"0.002 seconds", + "iterations":{ "tags":{},"see":[],"extraInfo":["\n Note that the following example of tensor instantiation is \n best suited for when Neureka is used in a scripting environment\n like Groovy or Jython which support square bracket list notation.\n In Java code it is recommended to use the fluent API.\n "] }, "blocks":[ @@ -85,7 +525,7 @@ { "id":"A simple 2D vector can be instantiated using lists for it's shape and values.", "result":"PASS", - "duration":"0", + "duration":"0.002 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":["\n Note that the following example of tensor instantiation is \n best suited for when Neureka is used in a scripting environment\n like Groovy or Jython which support square bracket list notation.\n In Java code it is recommended to use the fluent API.\n "] }, @@ -104,7 +544,7 @@ { "id":"Tensors can be instantiated based on arrays for both shapes and values.", "result":"PASS", - "duration":"0", + "duration":"0.002 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -123,7 +563,7 @@ { "id":"Tensors can be instantiated with String seed.", "result":"PASS", - "duration":"0", + "duration":"0.001 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -138,7 +578,7 @@ { "id":"Passing a seed in the form of a String to a tensor produces pseudo random items.", "result":"PASS", - "duration":"0", + "duration":"0.001 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, diff --git a/docs/spock/reports/ut.tensors.Tensor_Interop_Spec.json b/docs/spock/reports/ut.tensors.Tensor_Interop_Spec.json index ff801bf94..367e316fd 100644 --- a/docs/spock/reports/ut.tensors.Tensor_Interop_Spec.json +++ b/docs/spock/reports/ut.tensors.Tensor_Interop_Spec.json @@ -4,19 
+4,19 @@ "narrative":"Tensors should have good interoperability with other JDK data structures like images.\n In this specification we define these interoperability requirements.", "subjects":[], "statistics":{ - "runs":"2", + "runs":"8", "successRate":"100.0%", "failures":"0", "errors":"0", "skipped":"0", - "duration":"0.008 seconds" + "duration":"0.035 seconds" }, "headers":[],"tags":{},"see":[], "features":[ { - "id":"Tensor can be converted to buffered images.", + "id":"Tensor can be converted to buffered images. [0]", "result":"PASS", - "duration":"0.003 seconds", + "duration":"0.024 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -31,7 +31,115 @@ }, { - "id":"Not all tensor can be converted to images.", + "id":"Tensor can be converted to buffered images. [1]", + "result":"PASS", + "duration":"0.002 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"when","text":"A tensor of the provided type is converted to a buffered image","code":["var asImage = Tensor.of(type).withShape(shape).andFill(42..73).asImage(image)"]}, + + {"kind":"then","text":"The resulting image has the expected shape","code":["asImage.height == shape[0]","asImage.width == shape[1]"]}, + + {"kind":"where","text":"","code":{"type":["Byte","Integer","Byte","Byte"],"image":["Tensor.ImageType.BGR_3BYTE","Tensor.ImageType.ARGB_1INT","Tensor.ImageType.ABGR_4BYTE","Tensor.ImageType.ABGR_PRE_4BYTE"],"shape":["[3, 5, 3]","[7, 5, 1]","[7, 5, 4]","[7, 5, 4]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Tensor can be converted to buffered images. [2]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"when","text":"A tensor of the provided type is converted to a buffered image","code":["var asImage = Tensor.of(type).withShape(shape).andFill(42..73).asImage(image)"]}, + + {"kind":"then","text":"The resulting image has the expected shape","code":["asImage.height == shape[0]","asImage.width == shape[1]"]}, + + {"kind":"where","text":"","code":{"type":["Byte","Integer","Byte","Byte"],"image":["Tensor.ImageType.BGR_3BYTE","Tensor.ImageType.ARGB_1INT","Tensor.ImageType.ABGR_4BYTE","Tensor.ImageType.ABGR_PRE_4BYTE"],"shape":["[3, 5, 3]","[7, 5, 1]","[7, 5, 4]","[7, 5, 4]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Tensor can be converted to buffered images. [3]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"when","text":"A tensor of the provided type is converted to a buffered image","code":["var asImage = Tensor.of(type).withShape(shape).andFill(42..73).asImage(image)"]}, + + {"kind":"then","text":"The resulting image has the expected shape","code":["asImage.height == shape[0]","asImage.width == shape[1]"]}, + + {"kind":"where","text":"","code":{"type":["Byte","Integer","Byte","Byte"],"image":["Tensor.ImageType.BGR_3BYTE","Tensor.ImageType.ARGB_1INT","Tensor.ImageType.ABGR_4BYTE","Tensor.ImageType.ABGR_PRE_4BYTE"],"shape":["[3, 5, 3]","[7, 5, 1]","[7, 5, 4]","[7, 5, 4]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Not all tensor can be converted to images. 
[0]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"when","text":"We try to convert a tensor of the provided type to a buffered image...","code":["Tensor.of(type).withShape(shape).all(-3).asImage(image)"]}, + + {"kind":"then","text":"An exception is thrown!","code":["var exception = thrown(IllegalArgumentException)"]}, + + {"kind":"and","text":"The exception message has a plausible size.","code":["exception.message.length() > 13"]}, + + {"kind":"where","text":"","code":{"type":["Byte","Integer","String","Byte"],"image":["Tensor.ImageType.BGR_3BYTE","Tensor.ImageType.ARGB_1INT","Tensor.ImageType.ARGB_1INT","Tensor.ImageType.ABGR_4BYTE"],"shape":["[3, 5]","[7, 5, 3]","[7, 5, 1]","[2, 9, 3]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Not all tensor can be converted to images. [1]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"when","text":"We try to convert a tensor of the provided type to a buffered image...","code":["Tensor.of(type).withShape(shape).all(-3).asImage(image)"]}, + + {"kind":"then","text":"An exception is thrown!","code":["var exception = thrown(IllegalArgumentException)"]}, + + {"kind":"and","text":"The exception message has a plausible size.","code":["exception.message.length() > 13"]}, + + {"kind":"where","text":"","code":{"type":["Byte","Integer","String","Byte"],"image":["Tensor.ImageType.BGR_3BYTE","Tensor.ImageType.ARGB_1INT","Tensor.ImageType.ARGB_1INT","Tensor.ImageType.ABGR_4BYTE"],"shape":["[3, 5]","[7, 5, 3]","[7, 5, 1]","[2, 9, 3]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Not all tensor can be converted to images. [2]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"when","text":"We try to convert a tensor of the provided type to a buffered image...","code":["Tensor.of(type).withShape(shape).all(-3).asImage(image)"]}, + + {"kind":"then","text":"An exception is thrown!","code":["var exception = thrown(IllegalArgumentException)"]}, + + {"kind":"and","text":"The exception message has a plausible size.","code":["exception.message.length() > 13"]}, + + {"kind":"where","text":"","code":{"type":["Byte","Integer","String","Byte"],"image":["Tensor.ImageType.BGR_3BYTE","Tensor.ImageType.ARGB_1INT","Tensor.ImageType.ARGB_1INT","Tensor.ImageType.ABGR_4BYTE"],"shape":["[3, 5]","[7, 5, 3]","[7, 5, 1]","[2, 9, 3]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Not all tensor can be converted to images. [3]", "result":"PASS", "duration":"0", "iterations":{ diff --git a/docs/spock/reports/ut.tensors.Tensor_Layout_Spec.json b/docs/spock/reports/ut.tensors.Tensor_Layout_Spec.json index 11b859788..c77bd102e 100644 --- a/docs/spock/reports/ut.tensors.Tensor_Layout_Spec.json +++ b/docs/spock/reports/ut.tensors.Tensor_Layout_Spec.json @@ -1,22 +1,22 @@ { "className":"ut.tensors.Tensor_Layout_Spec", "title":"Row or Column Major. 
Why not both?", - "narrative":"Although Neureka exposes tensors as row major tensors from\n a users point of view, it does in fact support both row major and column major\n based tensor layout under the hood.\n Here we cover how the layout of tensors can be modified\n and we ensure the different tensor types still work as expected...\n (The features in this specification involve mutating tensors, be careful when playing around with this yourself)", + "narrative":"Although Neureka exposes tensors as row major tensors from \n a users point of view, it does in fact support both row major and column major \n based tensor layout under the hood.\n Here we cover how the layout of tensors can be modified\n and we ensure the different tensor types still work as expected...\n (The features in this specification involve mutating tensors, be careful when playing around with this yourself)", "subjects":[], "statistics":{ - "runs":"2", + "runs":"3", "successRate":"100.0%", "failures":"0", "errors":"0", "skipped":"0", - "duration":"0.133 seconds" + "duration":"0.184 seconds" }, "headers":[],"tags":{},"see":[], "features":[ { "id":"A new transposed version of a given tensor will be returned by the \"T()\" method.", "result":"PASS", - "duration":"0.029 seconds", + "duration":"0.059 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -37,9 +37,36 @@ }, { - "id":"Matrix multiplication works for both column and row major matrices across devices.", + "id":"Matrix multiplication works for both column and row major matrices across devices. [0]", "result":"PASS", - "duration":"0.101 seconds", + "duration":"0.066 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We want to view tensors in the \"(shape:[value]\" format so we set the corresponding flag.","code":["Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(false)"]}, + + {"kind":"and","text":"","code":["var a = Tensor.ofFloats().withShape(2, 3).andWhere({ it, idx->((7**it)%11-5).floatValue()})","var b = Tensor.ofFloats().withShape(3, 4).andWhere({ it, idx->((5**it)%11-5).floatValue()})","Device.get(device).store(a).store(b)"]}, + + {"kind":"expect","text":"","code":["a.matMul(b).toString({it.hasSlimNumbers = true}) == expectedString"]}, + + {"kind":"when","text":"","code":["a.mut.toLayout(NDConfiguration.Layout.COLUMN_MAJOR)","b.mut.toLayout(NDConfiguration.Layout.COLUMN_MAJOR)"]}, + + {"kind":"then","text":"","code":["a.matMul(b).toString({it.hasSlimNumbers = true}) == expectedString"]}, + + {"kind":"when","text":"","code":["a.mut.toLayout(NDConfiguration.Layout.ROW_MAJOR)","b.mut.toLayout(NDConfiguration.Layout.ROW_MAJOR)"]}, + + {"kind":"then","text":"","code":["a.matMul(b).toString({it.hasSlimNumbers = true}) == expectedString"]}, + + {"kind":"where","text":"","code":{"device":["'CPU'","'GPU'"],"expectedString":["'(2x4):[24, -8, 8, 0, -1, 28, -14, 7]'","'(2x4):[24, -8, 8, 0, -1, 28, -14, 7]'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Matrix multiplication works for both column and row major matrices across devices. 
[1]", + "result":"PASS", + "duration":"0.054 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, diff --git a/docs/spock/reports/ut.tensors.Tensor_Operation_Spec.json b/docs/spock/reports/ut.tensors.Tensor_Operation_Spec.json index 4f072759b..9e3fac91f 100644 --- a/docs/spock/reports/ut.tensors.Tensor_Operation_Spec.json +++ b/docs/spock/reports/ut.tensors.Tensor_Operation_Spec.json @@ -1,22 +1,43 @@ { "className":"ut.tensors.Tensor_Operation_Spec", "title":"Running Tensors through operations", - "narrative":"This specification shows how to use the tensor API to run tensors through various operations.\n Operations are triggered either by simply calling methods on tensors or by using\n `Function` objects which are used to define custom operations in the form\n of a syntax tree.", + "narrative":"This specification shows how to use the tensor API to run tensors through various operations.\n Operations are triggered either by simply calling methods on tensors or by using \n `Function` objects which are used to define custom operations in the form \n of a syntax tree.", "subjects":[], "statistics":{ - "runs":"16", + "runs":"164", "successRate":"100.0%", "failures":"0", "errors":"0", "skipped":"0", - "duration":"5.029 seconds" + "duration":"10.225 seconds" }, "headers":[],"tags":{},"see":[], "features":[ { - "id":"The \"dot\" operation reshapes and produces valid \"x\" operation result.", + "id":"The \"dot\" operation reshapes and produces valid \"x\" operation result. [0]", "result":"PASS", - "duration":"0.059 seconds", + "duration":"0.064 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"Two multi-dimensional tensors.","code":["var a = Tensor.of([1, 4, 4, 1 ], 4f..12f).mut.toType(type)","var b = Tensor.of([1, 3, 5, 2, 1], -5d..3d).mut.toType(type)"]}, + + {"kind":"when","text":"The \"dot\" method is being called on \"a\" receiving \"b\"...","code":["var c = a.convDot(b)"]}, + + {"kind":"then","text":"The result tensor contains the expected shape.","code":["c.toString().contains(\"(4x2x5x2)\")"]}, + + {"kind":"and","text":"","code":["c.itemType == type"]}, + + {"kind":"where","text":"","code":{"type":["Double","Float"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The \"dot\" operation reshapes and produces valid \"x\" operation result. [1]", + "result":"PASS", + "duration":"0.057 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -35,9 +56,9 @@ }, { - "id":"The \"matMul\" operation produces the expected result.", + "id":"The \"matMul\" operation produces the expected result. [0]", "result":"PASS", - "duration":"0.368 seconds", + "duration":"0.054 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -56,14 +77,14 @@ }, { - "id":"You can do matrix multiplication using transposed matrices.", + "id":"The \"matMul\" operation produces the expected result. 
[1]", "result":"PASS", - "duration":"0.386 seconds", + "duration":"0.052 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, "blocks":[ - {"kind":"given","text":"Two 2-dimensional tensors.","code":["var a = Tensor.of(Double.class).withShape(K, M).andFill(A).mut.toType(type).T()","var b = Tensor.of(Double.class).withShape(K, N).andFill(B).mut.toType(type)"]}, + {"kind":"given","text":"Two 2-dimensional tensors.","code":["var a = Tensor.of(Double.class).withShape(M, K).andFill(A).mut.toType(type)","var b = Tensor.of(Double.class).withShape(K, N).andFill(B).mut.toType(type)"]}, {"kind":"when","text":"The \"matMul\" method is being called on \"a\" receiving \"b\"...","code":["var c = a.matMul(b)"]}, @@ -71,20 +92,20 @@ {"kind":"and","text":"","code":["c.itemType == type"]}, - {"kind":"where","text":"","code":{"type":["Double","Double","Double","Double","Double","Float","Float","Float","Long","Long","Long","Integer","Integer","Integer"],"A":["[1d, 1d]","[1d, 1d]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]"],"B":["[2d, 2d]","[2d]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]"],"M":["1","2","2","1","2","2","1","2","2","1","2","2","1","2"],"K":["2","1","2","2","1","2","2","1","2","2","1","2","2","1"],"N":["1","1","2","1","2","2","1","2","2","1","2","2","1","2"],"expectedC":["[ 4 ]","[ 2, 2 ]","[ 0.0, 2.0, -0.5, 2.5 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 0.0, 2.0, -0.5, 2.5 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 2, 0, 1, 1 ]","[ 1 ]","[ 2, 2, -1, -1 ]","[ 2, 0, 1, 1 ]","[ 1 ]","[ 2, 2, -1, -1 ]"]}} + {"kind":"where","text":"We use the following data and matrix dimensions!","code":{"type":["Double","Double","Double","Float","Float","Float","Long","Long","Long","Integer","Integer","Integer"],"A":["[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]"],"B":["[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]"],"M":["2","1","2","2","1","2","2","1","2","2","1","2"],"K":["2","2","1","2","2","1","2","2","1","2","2","1"],"N":["2","1","2","2","1","2","2","1","2","2","1","2"],"expectedC":["[ 1, 0, 0, 1 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 1, 0, 0, 1 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 3, -2, 1, 0 ]","[ 1 ]","[ 2, 2, -1, -1 ]","[ 3, -2, 1, 0 ]","[ 1 ]","[ 2, 2, -1, -1 ]"]}} ], "problems":{"dataValues":[], "errors":[]} }, { - "id":"You can do matrix multiplication using transposed matrices as second operand.", + "id":"The \"matMul\" operation produces the expected result. 
[2]", "result":"PASS", - "duration":"0.497 seconds", + "duration":"0.054 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, "blocks":[ - {"kind":"given","text":"Two 2-dimensional tensors.","code":["var a = Tensor.of(Double.class).withShape(M, K).andFill(A).mut.toType(type)","var b = Tensor.of(Double.class).withShape(N, K).andFill(B).mut.toType(type).T()"]}, + {"kind":"given","text":"Two 2-dimensional tensors.","code":["var a = Tensor.of(Double.class).withShape(M, K).andFill(A).mut.toType(type)","var b = Tensor.of(Double.class).withShape(K, N).andFill(B).mut.toType(type)"]}, {"kind":"when","text":"The \"matMul\" method is being called on \"a\" receiving \"b\"...","code":["var c = a.matMul(b)"]}, @@ -92,281 +113,4296 @@ {"kind":"and","text":"","code":["c.itemType == type"]}, - {"kind":"where","text":"","code":{"type":["Double","Double","Double","Double","Double","Float","Float","Float","Float","Long","Long","Long","Long","Integer","Integer","Integer","Integer"],"A":["[1d, 1d]","[1d, 1d]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[1, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[1, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[1, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]"],"B":["[2d]","[2d, 2d]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[2]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[2]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[2]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]"],"M":["2","1","2","1","2","2","2","1","2","2","2","1","2","2","2","1","2"],"K":["1","2","2","2","1","1","2","2","1","1","2","2","1","1","2","2","1"],"N":["1","1","2","1","2","1","2","1","2","1","2","1","2","1","2","1","2"],"expectedC":["[ 2, 2 ]","[ 4 ]","[ 2.5, -2.0, 0.5, 0.0 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 2, 2 ]","[ 2.5, -2.0, 0.5, 0.0 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 2, 2 ]","[ 3, -2, 1, 0 ]","[ 1 ]","[ 2, 2, -1, -1 ]","[ 2, 2 ]","[ 3, -2, 1, 0 ]","[ 1 ]","[ 2, 2, -1, -1 ]"]}} + {"kind":"where","text":"We use the following data and matrix dimensions!","code":{"type":["Double","Double","Double","Float","Float","Float","Long","Long","Long","Integer","Integer","Integer"],"A":["[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]"],"B":["[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]"],"M":["2","1","2","2","1","2","2","1","2","2","1","2"],"K":["2","2","1","2","2","1","2","2","1","2","2","1"],"N":["2","1","2","2","1","2","2","1","2","2","1","2"],"expectedC":["[ 1, 0, 0, 1 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 1, 0, 0, 1 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 3, -2, 1, 0 ]","[ 1 ]","[ 2, 2, -1, -1 ]","[ 3, -2, 1, 0 ]","[ 1 ]","[ 2, 2, -1, -1 ]"]}} ], "problems":{"dataValues":[], "errors":[]} }, { - "id":"You can do matrix multiplication using 2 transposed matrices.", + "id":"The \"matMul\" operation produces the expected result. 
[3]", "result":"PASS", - "duration":"0.190 seconds", + "duration":"0.052 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, "blocks":[ - {"kind":"given","text":"Two 2-dimensional tensors.","code":["var a = Tensor.of(Double.class).withShape(K, M).andFill(A).mut.toType(type).T()","var b = Tensor.of(Double.class).withShape(N, K).andFill(B).mut.toType(type).T()"]}, + {"kind":"given","text":"Two 2-dimensional tensors.","code":["var a = Tensor.of(Double.class).withShape(M, K).andFill(A).mut.toType(type)","var b = Tensor.of(Double.class).withShape(K, N).andFill(B).mut.toType(type)"]}, - {"kind":"when","text":"The \"matMul\" method is being called on \"a\" receiving \"b\"...","code":["println a.toString({it.isMultiline = true})","println b.toString({it.isMultiline = true})","var c = a.matMul(b)","println c.toString({it.isMultiline = true})"]}, + {"kind":"when","text":"The \"matMul\" method is being called on \"a\" receiving \"b\"...","code":["var c = a.matMul(b)"]}, {"kind":"then","text":"The result tensor contains the expected shape and values.","code":["c.shape == [M, N]","c.items == expectedC as List"]}, {"kind":"and","text":"","code":["c.itemType == type"]}, - {"kind":"where","text":"","code":{"type":["Double","Double","Double","Double","Double","Double","Float"],"A":["[1d, 1d]","[1d, 1d]","[4, 3, 2, 1]","[3, 3, 3, 3]","[-2, 1]","[-2, 1]","[1, 1]"],"B":["[2d]","[2d, 2d]","[-0.5, 1.5, 1, -2]","[0.5, 0.5, 0.5, 0.5]","[-1, -1.5]","[-1, -1.5]","[2]"],"M":["1","2","2","2","1","2","1"],"K":["2","1","2","2","2","1","2"],"N":["1","1","2","2","1","2","1"],"expectedC":["[ 4 ]","[ 2, 2 ]","[ 1.0, 0.0, 0.0, 1.0 ]","[ 3.0, 3.0, 3.0, 3.0 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 4 ]"]}} + {"kind":"where","text":"We use the following data and matrix dimensions!","code":{"type":["Double","Double","Double","Float","Float","Float","Long","Long","Long","Integer","Integer","Integer"],"A":["[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]"],"B":["[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]"],"M":["2","1","2","2","1","2","2","1","2","2","1","2"],"K":["2","2","1","2","2","1","2","2","1","2","2","1"],"N":["2","1","2","2","1","2","2","1","2","2","1","2"],"expectedC":["[ 1, 0, 0, 1 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 1, 0, 0, 1 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 3, -2, 1, 0 ]","[ 1 ]","[ 2, 2, -1, -1 ]","[ 3, -2, 1, 0 ]","[ 1 ]","[ 2, 2, -1, -1 ]"]}} ], "problems":{"dataValues":[], "errors":[]} }, { - "id":"The \"random\" function/operation populates tensors randomly.", + "id":"The \"matMul\" operation produces the expected result. 
[4]", "result":"PASS", "duration":"0.053 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, "blocks":[ - {"kind":"given","text":"","code":["var t = Tensor.of(type).withShape(2,4).all(-42)"]}, - - {"kind":"and","text":"","code":["var f = Function.of('random(I[0])')"]}, + {"kind":"given","text":"Two 2-dimensional tensors.","code":["var a = Tensor.of(Double.class).withShape(M, K).andFill(A).mut.toType(type)","var b = Tensor.of(Double.class).withShape(K, N).andFill(B).mut.toType(type)"]}, - {"kind":"expect","text":"","code":["t.itemType == type"]}, + {"kind":"when","text":"The \"matMul\" method is being called on \"a\" receiving \"b\"...","code":["var c = a.matMul(b)"]}, - {"kind":"when","text":"","code":["var r = f(t)"]}, + {"kind":"then","text":"The result tensor contains the expected shape and values.","code":["c.shape == [M, N]","c.items == expectedC as List"]}, - {"kind":"then","text":"","code":["r === t"]}, + {"kind":"and","text":"","code":["c.itemType == type"]}, - {"kind":"and","text":"","code":["( r.mut.data.get() as float[] ) == [1.0588075, 1.4017555, 1.2537496, -1.3897222, 1.0374786, 0.743316, 1.1692946, 1.3977289] as float[]"]}, + {"kind":"where","text":"We use the following data and matrix dimensions!","code":{"type":["Double","Double","Double","Float","Float","Float","Long","Long","Long","Integer","Integer","Integer"],"A":["[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]"],"B":["[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]"],"M":["2","1","2","2","1","2","2","1","2","2","1","2"],"K":["2","2","1","2","2","1","2","2","1","2","2","1"],"N":["2","1","2","2","1","2","2","1","2","2","1","2"],"expectedC":["[ 1, 0, 0, 1 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 1, 0, 0, 1 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 3, -2, 1, 0 ]","[ 1 ]","[ 2, 2, -1, -1 ]","[ 3, -2, 1, 0 ]","[ 1 ]","[ 2, 2, -1, -1 ]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The \"matMul\" operation produces the expected result. 
[5]", + "result":"PASS", + "duration":"0.052 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"Two 2-dimensional tensors.","code":["var a = Tensor.of(Double.class).withShape(M, K).andFill(A).mut.toType(type)","var b = Tensor.of(Double.class).withShape(K, N).andFill(B).mut.toType(type)"]}, - {"kind":"when","text":"","code":["r = f.with(Arg.Seed.of(42)).call(t)"]}, + {"kind":"when","text":"The \"matMul\" method is being called on \"a\" receiving \"b\"...","code":["var c = a.matMul(b)"]}, - {"kind":"then","text":"","code":["r === t"]}, + {"kind":"then","text":"The result tensor contains the expected shape and values.","code":["c.shape == [M, N]","c.items == expectedC as List"]}, - {"kind":"and","text":"","code":["( r.mut.data.get() as float[] ) == [2.2639139286289724, -0.2763464310754003, 0.3719153742868813, -0.9768504740489802, 0.5154099159307729, 1.1608137295804097, 2.1905023977046336, -0.5449569795660217] as float[]"]}, + {"kind":"and","text":"","code":["c.itemType == type"]}, - {"kind":"where","text":"","code":{"type":["Double","Float"]}} + {"kind":"where","text":"We use the following data and matrix dimensions!","code":{"type":["Double","Double","Double","Float","Float","Float","Long","Long","Long","Integer","Integer","Integer"],"A":["[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]"],"B":["[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]"],"M":["2","1","2","2","1","2","2","1","2","2","1","2"],"K":["2","2","1","2","2","1","2","2","1","2","2","1"],"N":["2","1","2","2","1","2","2","1","2","2","1","2"],"expectedC":["[ 1, 0, 0, 1 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 1, 0, 0, 1 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 3, -2, 1, 0 ]","[ 1 ]","[ 2, 2, -1, -1 ]","[ 3, -2, 1, 0 ]","[ 1 ]","[ 2, 2, -1, -1 ]"]}} ], "problems":{"dataValues":[], "errors":[]} }, { - "id":"The values of a randomly populated tensor seems to adhere to a gaussian distribution.", + "id":"The \"matMul\" operation produces the expected result. 
[6]", "result":"PASS", - "duration":"0.059 seconds", + "duration":"0.052 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, "blocks":[ - {"kind":"given","text":"","code":["var t = Tensor.of(type).withShape(20, 40, 20).all(0)"]}, - - {"kind":"and","text":"","code":["var f = Function.of('random(I[0])')"]}, + {"kind":"given","text":"Two 2-dimensional tensors.","code":["var a = Tensor.of(Double.class).withShape(M, K).andFill(A).mut.toType(type)","var b = Tensor.of(Double.class).withShape(K, N).andFill(B).mut.toType(type)"]}, - {"kind":"when","text":"","code":["f.with(Arg.Seed.of(-73L)).call(t)","var stats = new Statistics( t.mut.data.get() as double[] )"]}, + {"kind":"when","text":"The \"matMul\" method is being called on \"a\" receiving \"b\"...","code":["var c = a.matMul(b)"]}, - {"kind":"then","text":"","code":["-0.05d < stats.mean && stats.mean < 0.05d"]}, + {"kind":"then","text":"The result tensor contains the expected shape and values.","code":["c.shape == [M, N]","c.items == expectedC as List"]}, - {"kind":"and","text":"","code":["0.875d < stats.variance && stats.variance < 1.125d"]}, + {"kind":"and","text":"","code":["c.itemType == type"]}, - {"kind":"where","text":"","code":{"type":["Double","Float"]}} + {"kind":"where","text":"We use the following data and matrix dimensions!","code":{"type":["Double","Double","Double","Float","Float","Float","Long","Long","Long","Integer","Integer","Integer"],"A":["[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]"],"B":["[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]"],"M":["2","1","2","2","1","2","2","1","2","2","1","2"],"K":["2","2","1","2","2","1","2","2","1","2","2","1"],"N":["2","1","2","2","1","2","2","1","2","2","1","2"],"expectedC":["[ 1, 0, 0, 1 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 1, 0, 0, 1 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 3, -2, 1, 0 ]","[ 1 ]","[ 2, 2, -1, -1 ]","[ 3, -2, 1, 0 ]","[ 1 ]","[ 2, 2, -1, -1 ]"]}} ], "problems":{"dataValues":[], "errors":[]} }, { - "id":"New method \"asFunction\" of String added at runtime is callable by groovy and also works.", + "id":"The \"matMul\" operation produces the expected result. 
[7]", "result":"PASS", - "duration":"0.277 seconds", + "duration":"0.053 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, "blocks":[ - {"kind":"given","text":"We create two tensors and convert them to a desired type.","code":["var a = Tensor.of([1, 2], [3d, 2d]).mut.toType(type)","var b = Tensor.of([2, 1], [-1f, 4f]).mut.toType(type)"]}, - - {"kind":"and","text":"We prepare bindings for the Groovy shell.","code":["Binding binding = new Binding()","binding.setVariable('a', a)","binding.setVariable('b', b)"]}, - - {"kind":"expect","text":"The tensors have the type...","code":["a.itemType == type","b.itemType == type"]}, + {"kind":"given","text":"Two 2-dimensional tensors.","code":["var a = Tensor.of(Double.class).withShape(M, K).andFill(A).mut.toType(type)","var b = Tensor.of(Double.class).withShape(K, N).andFill(B).mut.toType(type)"]}, - {"kind":"when","text":"The groovy code is being evaluated.","code":["var c = new GroovyShell(binding).evaluate((code)) as Tensor"]}, + {"kind":"when","text":"The \"matMul\" method is being called on \"a\" receiving \"b\"...","code":["var c = a.matMul(b)"]}, - {"kind":"then","text":"The resulting tensor (toString) will contain the expected String.","code":["c.toString().contains(expected)"]}, + {"kind":"then","text":"The result tensor contains the expected shape and values.","code":["c.shape == [M, N]","c.items == expectedC as List"]}, {"kind":"and","text":"","code":["c.itemType == type"]}, - {"kind":"where","text":"","code":{"type":["Double","Double","Double","Double","Float","Float","Float","Float"],"code":["'\"I[0]xI[1]\".asFunction()([a, b])'","'\"I[0]xI[1]\"[a, b]'","'\"i0 x i1\"%[a, b]'","'\"i0\"%a'","'\"I[0]xI[1]\".asFunction()([a, b])'","'\"I[0]xI[1]\"[a, b]'","'\"i0 x i1\"%[a, b]'","'\"i0\"%a'"],"expected":["\"(2x2):[-3.0, -2.0, 12.0, 8.0]\"","\"(2x2):[-3.0, -2.0, 12.0, 8.0]\"","\"(2x2):[-3.0, -2.0, 12.0, 8.0]\"","\"(1x2):[3.0, 2.0]\"","\"(2x2):[-3.0, -2.0, 12.0, 8.0]\"","\"(2x2):[-3.0, -2.0, 12.0, 8.0]\"","\"(2x2):[-3.0, -2.0, 12.0, 8.0]\"","\"(1x2):[3.0, 2.0]\""]}} + {"kind":"where","text":"We use the following data and matrix dimensions!","code":{"type":["Double","Double","Double","Float","Float","Float","Long","Long","Long","Integer","Integer","Integer"],"A":["[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]"],"B":["[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]"],"M":["2","1","2","2","1","2","2","1","2","2","1","2"],"K":["2","2","1","2","2","1","2","2","1","2","2","1"],"N":["2","1","2","2","1","2","2","1","2","2","1","2"],"expectedC":["[ 1, 0, 0, 1 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 1, 0, 0, 1 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 3, -2, 1, 0 ]","[ 1 ]","[ 2, 2, -1, -1 ]","[ 3, -2, 1, 0 ]","[ 1 ]","[ 2, 2, -1, -1 ]"]}} ], "problems":{"dataValues":[], "errors":[]} }, { - "id":"New operator methods added to \"SDK-types\" at runtime are callable by groovy and also work.", + "id":"The \"matMul\" operation produces the expected result. 
[8]", "result":"PASS", - "duration":"0.577 seconds", + "duration":"0.055 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, "blocks":[ - {"kind":"given","text":"","code":["Neureka.get().settings().view().ndArrays({it.hasSlimNumbers=true})","Tensor a = Tensor.of(5d).mut.toType(type)","Tensor b = Tensor.of(3f).mut.toType(type)","Binding binding = new Binding()","binding.setVariable('a', a)","binding.setVariable('b', b)"]}, + {"kind":"given","text":"Two 2-dimensional tensors.","code":["var a = Tensor.of(Double.class).withShape(M, K).andFill(A).mut.toType(type)","var b = Tensor.of(Double.class).withShape(K, N).andFill(B).mut.toType(type)"]}, - {"kind":"when","text":"...calling methods on types like Double and Integer that receive `Tensor` instances...","code":["Tensor c = new GroovyShell(binding).evaluate((code)) as Tensor"]}, + {"kind":"when","text":"The \"matMul\" method is being called on \"a\" receiving \"b\"...","code":["var c = a.matMul(b)"]}, - {"kind":"then","text":"The resulting tensor (toString) will contain the expected String.","code":["c.toString().endsWith(\"[$expected]\")"]}, + {"kind":"then","text":"The result tensor contains the expected shape and values.","code":["c.shape == [M, N]","c.items == expectedC as List"]}, - {"kind":"where","text":"","code":{"type":["Double","Double","Double","Double","Double","Double","Double","Double","Double","Double","Float","Float","Float","Float","Float","Float","Float","Float","Float","Float"],"code":["'(2+a)'","'(2*b)'","'(6/b)'","'(2**b)'","'(4-a)'","'(2.0+a)'","'(2.0*b)'","'(6.0/b)'","'(2.0**b)'","'(4.0-a)'","'(2+a)'","'(2*b)'","'(6/b)'","'(2**b)'","'(4-a)'","'(2.0+a)'","'(2.0*b)'","'(6.0/b)'","'(2.0**b)'","'(4.0-a)'"],"expected":["\"7\"","\"6\"","\"2\"","\"8\"","\"-1\"","\"7\"","\"6\"","\"2\"","\"8\"","\"-1\"","\"7\"","\"6\"","\"2\"","\"8\"","\"-1\"","\"7\"","\"6\"","\"2\"","\"8\"","\"-1\""]}} + {"kind":"and","text":"","code":["c.itemType == type"]}, + + {"kind":"where","text":"We use the following data and matrix dimensions!","code":{"type":["Double","Double","Double","Float","Float","Float","Long","Long","Long","Integer","Integer","Integer"],"A":["[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]"],"B":["[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]"],"M":["2","1","2","2","1","2","2","1","2","2","1","2"],"K":["2","2","1","2","2","1","2","2","1","2","2","1"],"N":["2","1","2","2","1","2","2","1","2","2","1","2"],"expectedC":["[ 1, 0, 0, 1 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 1, 0, 0, 1 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 3, -2, 1, 0 ]","[ 1 ]","[ 2, 2, -1, -1 ]","[ 3, -2, 1, 0 ]","[ 1 ]","[ 2, 2, -1, -1 ]"]}} ], "problems":{"dataValues":[], "errors":[]} }, { - "id":"Overloaded operation methods on tensors produce expected results when called.", + "id":"The \"matMul\" operation produces the expected result. 
[9]", "result":"PASS", - "duration":"0.071 seconds", + "duration":"0.056 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, "blocks":[ - {"kind":"given","text":"","code":["Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(true)","Tensor a = Tensor.of(2d).setRqsGradient(true)","Tensor b = Tensor.of(-4d)","Tensor c = Tensor.of(3d).setRqsGradient(true)"]}, + {"kind":"given","text":"Two 2-dimensional tensors.","code":["var a = Tensor.of(Double.class).withShape(M, K).andFill(A).mut.toType(type)","var b = Tensor.of(Double.class).withShape(K, N).andFill(B).mut.toType(type)"]}, - {"kind":"expect","text":"","code":["( a / a ).toString().contains(\"[1]:(1.0)\")","( c % a ).toString().contains(\"[1]:(1.0)\")","( ( ( b / b ) ** c % a ) * 3 ).toString().contains(\"[1]:(3.0)\")","( a *= b ).toString().contains(\"(-8.0)\")","( a += -c ).toString().contains(\"(-11.0)\")","( a -= c ).toString().contains(\"(-14.0)\")","( a /= Tensor.of(2d) ).toString().contains(\"(-7.0)\")","( a %= c ).toString().contains(\"(-1.0)\")"]} + {"kind":"when","text":"The \"matMul\" method is being called on \"a\" receiving \"b\"...","code":["var c = a.matMul(b)"]}, + + {"kind":"then","text":"The result tensor contains the expected shape and values.","code":["c.shape == [M, N]","c.items == expectedC as List"]}, + + {"kind":"and","text":"","code":["c.itemType == type"]}, + + {"kind":"where","text":"We use the following data and matrix dimensions!","code":{"type":["Double","Double","Double","Float","Float","Float","Long","Long","Long","Integer","Integer","Integer"],"A":["[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]"],"B":["[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]"],"M":["2","1","2","2","1","2","2","1","2","2","1","2"],"K":["2","2","1","2","2","1","2","2","1","2","2","1"],"N":["2","1","2","2","1","2","2","1","2","2","1","2"],"expectedC":["[ 1, 0, 0, 1 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 1, 0, 0, 1 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 3, -2, 1, 0 ]","[ 1 ]","[ 2, 2, -1, -1 ]","[ 3, -2, 1, 0 ]","[ 1 ]","[ 2, 2, -1, -1 ]"]}} ], "problems":{"dataValues":[], "errors":[]} }, { - "id":"Simple slice addition produces expected result.", + "id":"The \"matMul\" operation produces the expected result. 
[10]", "result":"PASS", - "duration":"0.056 seconds", + "duration":"0.050 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, "blocks":[ - {"kind":"given","text":"We set the experimental \"autoConvertToFloat\" flag to true.","code":["Neureka.get().backend().find(CLBackend).ifPresent({ it.settings.autoConvertToFloat=true })"]}, + {"kind":"given","text":"Two 2-dimensional tensors.","code":["var a = Tensor.of(Double.class).withShape(M, K).andFill(A).mut.toType(type)","var b = Tensor.of(Double.class).withShape(K, N).andFill(B).mut.toType(type)"]}, - {"kind":"and","text":"","code":["Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(false)","Tensor a = Tensor.of([11, 11], 3d..19d).to( device )","Tensor x = a[1..-2,0..-1]","Tensor y = a[0..-3,0..-1]"]}, + {"kind":"when","text":"The \"matMul\" method is being called on \"a\" receiving \"b\"...","code":["var c = a.matMul(b)"]}, - {"kind":"when","text":"","code":["Tensor t = x + y","String tAsStr = t.toString({it.setRowLimit(50)})"]}, + {"kind":"then","text":"The result tensor contains the expected shape and values.","code":["c.shape == [M, N]","c.items == expectedC as List"]}, - {"kind":"then","text":"","code":["tAsStr.contains(\"(9x11):[17.0, 19.0, 21.0, 23.0, 25.0, 27.0, 12.0, 14.0, 16.0, 18.0, 20.0, 22.0, 24.0, \" +"," \"26.0, 28.0, 30.0, 32.0, 17.0, 19.0, 21.0, 23.0, 25.0, 27.0, 12.0, 14.0, 16.0, 18.0, 20.0, 22.0, 24.0, \" +"," \"26.0, 28.0, 30.0, 32.0, 17.0, 19.0, 21.0, 23.0, 25.0, 27.0, 12.0, 14.0, 16.0, 18.0, 20.0, 22.0, 24.0, 26.0, 28.0, 30.0, ... + 49 more]\")"]}, + {"kind":"and","text":"","code":["c.itemType == type"]}, - {"kind":"where","text":"The following data is being used for tensor instantiation :","code":{"device":["CPU.get()","Device.get(\"openCL\")"]}} + {"kind":"where","text":"We use the following data and matrix dimensions!","code":{"type":["Double","Double","Double","Float","Float","Float","Long","Long","Long","Integer","Integer","Integer"],"A":["[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]"],"B":["[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]"],"M":["2","1","2","2","1","2","2","1","2","2","1","2"],"K":["2","2","1","2","2","1","2","2","1","2","2","1"],"N":["2","1","2","2","1","2","2","1","2","2","1","2"],"expectedC":["[ 1, 0, 0, 1 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 1, 0, 0, 1 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 3, -2, 1, 0 ]","[ 1 ]","[ 2, 2, -1, -1 ]","[ 3, -2, 1, 0 ]","[ 1 ]","[ 2, 2, -1, -1 ]"]}} ], "problems":{"dataValues":[], "errors":[]} }, { - "id":"Auto reshaping and broadcasting works and the result can be back propagated.", + "id":"The \"matMul\" operation produces the expected result. [11]", "result":"PASS", - "duration":"1.155 seconds", + "duration":"0.089 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, "blocks":[ - {"kind":"given","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = true }","Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(true)"]}, - - {"kind":"and","text":"","code":["String wValue = whichGrad"," ? \"8\" + ( bShape.inject(1, {x,y->x*y}) > 1 ? 
\", 9\" : \"\" )"," : \"1, 2, 3, 4\""]}, - - {"kind":"and","text":"","code":["def aShape = [2, 2]"]}, + {"kind":"given","text":"Two 2-dimensional tensors.","code":["var a = Tensor.of(Double.class).withShape(M, K).andFill(A).mut.toType(type)","var b = Tensor.of(Double.class).withShape(K, N).andFill(B).mut.toType(type)"]}, - {"kind":"and","text":"","code":["Tensor a = Tensor.of(aShape, 1d..5d).setRqsGradient(!whichGrad).to(Device.get(device))","Tensor b = Tensor.of(bShape, 8d..9d).setRqsGradient(whichGrad).to(Device.get(device))"]}, + {"kind":"when","text":"The \"matMul\" method is being called on \"a\" receiving \"b\"...","code":["var c = a.matMul(b)"]}, - {"kind":"and","text":"","code":["a.mut.toType(type)","b.mut.toType(type)"]}, + {"kind":"then","text":"The result tensor contains the expected shape and values.","code":["c.shape == [M, N]","c.items == expectedC as List"]}, - {"kind":"and","text":"","code":["String wShape = ( whichGrad ? bShape : aShape ).join(\"x\")","Tensor w = ( whichGrad ? b : a )"]}, + {"kind":"and","text":"","code":["c.itemType == type"]}, - {"kind":"expect","text":"","code":["a.itemType == type || device == 'GPU' // The gpu backend will only be floats!","b.itemType == type || device == 'GPU' // This is because kernels only work on floats..."]}, + {"kind":"where","text":"We use the following data and matrix dimensions!","code":{"type":["Double","Double","Double","Float","Float","Float","Long","Long","Long","Integer","Integer","Integer"],"A":["[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]"],"B":["[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]"],"M":["2","1","2","2","1","2","2","1","2","2","1","2"],"K":["2","2","1","2","2","1","2","2","1","2","2","1"],"N":["2","1","2","2","1","2","2","1","2","2","1","2"],"expectedC":["[ 1, 0, 0, 1 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 1, 0, 0, 1 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 3, -2, 1, 0 ]","[ 1 ]","[ 2, 2, -1, -1 ]","[ 3, -2, 1, 0 ]","[ 1 ]","[ 2, 2, -1, -1 ]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"You can do matrix multiplication using transposed matrices. 
[0]", + "result":"PASS", + "duration":"0.055 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"Two 2-dimensional tensors.","code":["var a = Tensor.of(Double.class).withShape(K, M).andFill(A).mut.toType(type).T()","var b = Tensor.of(Double.class).withShape(K, N).andFill(B).mut.toType(type)"]}, - {"kind":"when","text":"","code":["Tensor c = operation.apply(a, b)"]}, + {"kind":"when","text":"The \"matMul\" method is being called on \"a\" receiving \"b\"...","code":["var c = a.matMul(b)"]}, - {"kind":"then","text":"","code":["c.toString({it.hasSlimNumbers = true}).startsWith(\"[2x2]:($cValue)\")","w.toString({it.hasSlimNumbers = true}) == \"[$wShape]:($wValue):g:(null)\""]}, + {"kind":"then","text":"The result tensor contains the expected shape and values.","code":["c.shape == [M, N]","c.items == expectedC as List"]}, - {"kind":"when","text":"","code":["c.backward(Tensor.of([2, 2], [5, -2, 7, 3]).mut.toType(type))"]}, + {"kind":"and","text":"","code":["c.itemType == type"]}, - {"kind":"then","text":"","code":["w.toString({it.hasSlimNumbers = true}) == \"[$wShape]:($wValue):g:($wGradient)\""]}, + {"kind":"where","text":"","code":{"type":["Double","Double","Double","Double","Double","Float","Float","Float","Long","Long","Long","Integer","Integer","Integer"],"A":["[1d, 1d]","[1d, 1d]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]"],"B":["[2d, 2d]","[2d]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]"],"M":["1","2","2","1","2","2","1","2","2","1","2","2","1","2"],"K":["2","1","2","2","1","2","2","1","2","2","1","2","2","1"],"N":["1","1","2","1","2","2","1","2","2","1","2","2","1","2"],"expectedC":["[ 4 ]","[ 2, 2 ]","[ 0.0, 2.0, -0.5, 2.5 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 0.0, 2.0, -0.5, 2.5 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 2, 0, 1, 1 ]","[ 1 ]","[ 2, 2, -1, -1 ]","[ 2, 0, 1, 1 ]","[ 1 ]","[ 2, 2, -1, -1 ]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"You can do matrix multiplication using transposed matrices. 
[1]", + "result":"PASS", + "duration":"0.053 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"Two 2-dimensional tensors.","code":["var a = Tensor.of(Double.class).withShape(K, M).andFill(A).mut.toType(type).T()","var b = Tensor.of(Double.class).withShape(K, N).andFill(B).mut.toType(type)"]}, - {"kind":"when","text":"","code":["Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(false)"]}, + {"kind":"when","text":"The \"matMul\" method is being called on \"a\" receiving \"b\"...","code":["var c = a.matMul(b)"]}, - {"kind":"then","text":"","code":["c.toString({it.hasSlimNumbers = true}) == \"(2x2):[$cValue]\""]}, + {"kind":"then","text":"The result tensor contains the expected shape and values.","code":["c.shape == [M, N]","c.items == expectedC as List"]}, - {"kind":"cleanup","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = false }"]}, + {"kind":"and","text":"","code":["c.itemType == type"]}, - {"kind":"where","text":"","code":{"device":["'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'CPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'"],"type":["Double","Double","Float","Float","Double","Double","Float","Float","Double","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float"],"whichGrad":["false","false","false","false","false","false","false","false","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true"],"bShape":["[1]","[1]","[1]","[1]","[1]","[1]","[1]","[1]","[2,1]","[2,1]","[1]","[1]","[1]","[1]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]"],"operation":["{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }"],"cValue":["\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"9, 10, 12, 13\"","\"9, 10, 12, 13\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, 
-5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\""],"wGradient":["\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"3, 10\"","\"3, 10\"","\"13\"","\"13\"","\"13\"","\"13\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\""]}} + {"kind":"where","text":"","code":{"type":["Double","Double","Double","Double","Double","Float","Float","Float","Long","Long","Long","Integer","Integer","Integer"],"A":["[1d, 1d]","[1d, 1d]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]"],"B":["[2d, 2d]","[2d]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]"],"M":["1","2","2","1","2","2","1","2","2","1","2","2","1","2"],"K":["2","1","2","2","1","2","2","1","2","2","1","2","2","1"],"N":["1","1","2","1","2","2","1","2","2","1","2","2","1","2"],"expectedC":["[ 4 ]","[ 2, 2 ]","[ 0.0, 2.0, -0.5, 2.5 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 0.0, 2.0, -0.5, 2.5 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 2, 0, 1, 1 ]","[ 1 ]","[ 2, 2, -1, -1 ]","[ 2, 0, 1, 1 ]","[ 1 ]","[ 2, 2, -1, -1 ]"]}} ], "problems":{"dataValues":[], "errors":[]} }, { - "id":"Scalar broadcasting works across devices.", + "id":"You can do matrix multiplication using transposed matrices. 
[2]", "result":"PASS", - "duration":"0.139 seconds", + "duration":"0.053 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, "blocks":[ - {"kind":"given","text":"","code":["var a = Tensor.of(type).withShape(3, 2).andFill(-4..4).to(Device.get(device))","var b = Tensor.of(type).withShape(1, 1).andFill(3).to(Device.get(device))"]}, + {"kind":"given","text":"Two 2-dimensional tensors.","code":["var a = Tensor.of(Double.class).withShape(K, M).andFill(A).mut.toType(type).T()","var b = Tensor.of(Double.class).withShape(K, N).andFill(B).mut.toType(type)"]}, - {"kind":"expect","text":"","code":["a.itemType == type","b.itemType == type"]}, + {"kind":"when","text":"The \"matMul\" method is being called on \"a\" receiving \"b\"...","code":["var c = a.matMul(b)"]}, - {"kind":"when","text":"","code":["Tensor c = operation.apply(a, b)"]}, + {"kind":"then","text":"The result tensor contains the expected shape and values.","code":["c.shape == [M, N]","c.items == expectedC as List"]}, - {"kind":"then","text":"","code":["c.toString() == \"(3x2):[$cValue]\""]}, + {"kind":"and","text":"","code":["c.itemType == type"]}, - {"kind":"where","text":"","code":{"device":["'CPU'","'CPU'","'GPU'","'CPU'","'CPU'"],"type":["Double","Float","Float","Long","Integer"],"operation":["{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }"],"cValue":["\"-1.0, 0.0, 1.0, 2.0, 3.0, 4.0\"","\"-1.0, 0.0, 1.0, 2.0, 3.0, 4.0\"","\"-1.0, 0.0, 1.0, 2.0, 3.0, 4.0\"","\"-1, 0, 1, 2, 3, 4\"","\"-1, 0, 1, 2, 3, 4\""]}} + {"kind":"where","text":"","code":{"type":["Double","Double","Double","Double","Double","Float","Float","Float","Long","Long","Long","Integer","Integer","Integer"],"A":["[1d, 1d]","[1d, 1d]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]"],"B":["[2d, 2d]","[2d]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]"],"M":["1","2","2","1","2","2","1","2","2","1","2","2","1","2"],"K":["2","1","2","2","1","2","2","1","2","2","1","2","2","1"],"N":["1","1","2","1","2","2","1","2","2","1","2","2","1","2"],"expectedC":["[ 4 ]","[ 2, 2 ]","[ 0.0, 2.0, -0.5, 2.5 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 0.0, 2.0, -0.5, 2.5 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 2, 0, 1, 1 ]","[ 1 ]","[ 2, 2, -1, -1 ]","[ 2, 0, 1, 1 ]","[ 1 ]","[ 2, 2, -1, -1 ]"]}} ], "problems":{"dataValues":[], "errors":[]} }, { - "id":"Operators \"+,*,**\" produce expected results with gradients which can be accessed via a \"Ig[0]\" Function instance", + "id":"You can do matrix multiplication using transposed matrices. 
[3]", "result":"PASS", - "duration":"0.028 seconds", + "duration":"0.053 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, "blocks":[ - {"kind":"given","text":"Neurekas view is set to legacy and three tensors of which one requires gradients.","code":["Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(true)","Tensor x = Tensor.of(3d).setRqsGradient(true)","Tensor b = Tensor.of(-4d)","Tensor w = Tensor.of(2d)"]}, + {"kind":"given","text":"Two 2-dimensional tensors.","code":["var a = Tensor.of(Double.class).withShape(K, M).andFill(A).mut.toType(type).T()","var b = Tensor.of(Double.class).withShape(K, N).andFill(B).mut.toType(type)"]}, - {"kind":"when","text":"","code":["when : Tensor y = ( (x+b)*w )**2"]}, + {"kind":"when","text":"The \"matMul\" method is being called on \"a\" receiving \"b\"...","code":["var c = a.matMul(b)"]}, - {"kind":"then","text":"","code":["then : y.toString().contains(\"[1]:(4.0); ->d[1]:(-8.0)\")"]}, + {"kind":"then","text":"The result tensor contains the expected shape and values.","code":["c.shape == [M, N]","c.items == expectedC as List"]}, - {"kind":"when","text":"","code":["when : y = ((x+b)*w)**2"]}, + {"kind":"and","text":"","code":["c.itemType == type"]}, - {"kind":"then","text":"","code":["then : y.toString().contains(\"[1]:(4.0); ->d[1]:(-8.0)\")"]}, + {"kind":"where","text":"","code":{"type":["Double","Double","Double","Double","Double","Float","Float","Float","Long","Long","Long","Integer","Integer","Integer"],"A":["[1d, 1d]","[1d, 1d]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]"],"B":["[2d, 2d]","[2d]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]"],"M":["1","2","2","1","2","2","1","2","2","1","2","2","1","2"],"K":["2","1","2","2","1","2","2","1","2","2","1","2","2","1"],"N":["1","1","2","1","2","2","1","2","2","1","2","2","1","2"],"expectedC":["[ 4 ]","[ 2, 2 ]","[ 0.0, 2.0, -0.5, 2.5 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 0.0, 2.0, -0.5, 2.5 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 2, 0, 1, 1 ]","[ 1 ]","[ 2, 2, -1, -1 ]","[ 2, 0, 1, 1 ]","[ 1 ]","[ 2, 2, -1, -1 ]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"You can do matrix multiplication using transposed matrices. 
[4]", + "result":"PASS", + "duration":"0.052 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"Two 2-dimensional tensors.","code":["var a = Tensor.of(Double.class).withShape(K, M).andFill(A).mut.toType(type).T()","var b = Tensor.of(Double.class).withShape(K, N).andFill(B).mut.toType(type)"]}, - {"kind":"and","text":"","code":["and : Neureka.get().settings().debug().setIsKeepingDerivativeTargetPayloads(true)"]}, + {"kind":"when","text":"The \"matMul\" method is being called on \"a\" receiving \"b\"...","code":["var c = a.matMul(b)"]}, - {"kind":"when","text":"","code":["y.backward(Tensor.of(1d))"]}, + {"kind":"then","text":"The result tensor contains the expected shape and values.","code":["c.shape == [M, N]","c.items == expectedC as List"]}, - {"kind":"and","text":"","code":["Tensor t2 = Tensor.of( \"Ig[0]\", [x] )","Tensor t1 = Tensor.of( \"Ig[0]\", [y] ) // The input does not have a gradient!"]}, + {"kind":"and","text":"","code":["c.itemType == type"]}, - {"kind":"then","text":"","code":["thrown(IllegalArgumentException)"]}, + {"kind":"where","text":"","code":{"type":["Double","Double","Double","Double","Double","Float","Float","Float","Long","Long","Long","Integer","Integer","Integer"],"A":["[1d, 1d]","[1d, 1d]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]"],"B":["[2d, 2d]","[2d]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]"],"M":["1","2","2","1","2","2","1","2","2","1","2","2","1","2"],"K":["2","1","2","2","1","2","2","1","2","2","1","2","2","1"],"N":["1","1","2","1","2","2","1","2","2","1","2","2","1","2"],"expectedC":["[ 4 ]","[ 2, 2 ]","[ 0.0, 2.0, -0.5, 2.5 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 0.0, 2.0, -0.5, 2.5 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 2, 0, 1, 1 ]","[ 1 ]","[ 2, 2, -1, -1 ]","[ 2, 0, 1, 1 ]","[ 1 ]","[ 2, 2, -1, -1 ]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"You can do matrix multiplication using transposed matrices. 
[5]", + "result":"PASS", + "duration":"0.053 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"Two 2-dimensional tensors.","code":["var a = Tensor.of(Double.class).withShape(K, M).andFill(A).mut.toType(type).T()","var b = Tensor.of(Double.class).withShape(K, N).andFill(B).mut.toType(type)"]}, - {"kind":"and","text":"","code":["t2.toString() == \"[1]:(-8.0)\""]}, + {"kind":"when","text":"The \"matMul\" method is being called on \"a\" receiving \"b\"...","code":["var c = a.matMul(b)"]}, - {"kind":"and","text":"","code":["t2 == x.gradient.get()"]}, + {"kind":"then","text":"The result tensor contains the expected shape and values.","code":["c.shape == [M, N]","c.items == expectedC as List"]}, - {"kind":"and","text":"","code":["and : Neureka.get().settings().debug().setIsKeepingDerivativeTargetPayloads(false)"]}, + {"kind":"and","text":"","code":["c.itemType == type"]}, - {"kind":"when","text":"","code":["Tensor[] trs = new Tensor[]{x}"]}, + {"kind":"where","text":"","code":{"type":["Double","Double","Double","Double","Double","Float","Float","Float","Long","Long","Long","Integer","Integer","Integer"],"A":["[1d, 1d]","[1d, 1d]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]"],"B":["[2d, 2d]","[2d]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]"],"M":["1","2","2","1","2","2","1","2","2","1","2","2","1","2"],"K":["2","1","2","2","1","2","2","1","2","2","1","2","2","1"],"N":["1","1","2","1","2","2","1","2","2","1","2","2","1","2"],"expectedC":["[ 4 ]","[ 2, 2 ]","[ 0.0, 2.0, -0.5, 2.5 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 0.0, 2.0, -0.5, 2.5 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 2, 0, 1, 1 ]","[ 1 ]","[ 2, 2, -1, -1 ]","[ 2, 0, 1, 1 ]","[ 1 ]","[ 2, 2, -1, -1 ]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"You can do matrix multiplication using transposed matrices. 
[6]", + "result":"PASS", + "duration":"0.052 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"Two 2-dimensional tensors.","code":["var a = Tensor.of(Double.class).withShape(K, M).andFill(A).mut.toType(type).T()","var b = Tensor.of(Double.class).withShape(K, N).andFill(B).mut.toType(type)"]}, - {"kind":"and","text":"","code":["def fun = Function.of(\"Ig[0]\", false)"]}, + {"kind":"when","text":"The \"matMul\" method is being called on \"a\" receiving \"b\"...","code":["var c = a.matMul(b)"]}, - {"kind":"then","text":"","code":["fun(trs).toString() == \"[1]:(-8.0)\""]}, + {"kind":"then","text":"The result tensor contains the expected shape and values.","code":["c.shape == [M, N]","c.items == expectedC as List"]}, - {"kind":"when","text":"","code":["trs[0] = y"]}, + {"kind":"and","text":"","code":["c.itemType == type"]}, + + {"kind":"where","text":"","code":{"type":["Double","Double","Double","Double","Double","Float","Float","Float","Long","Long","Long","Integer","Integer","Integer"],"A":["[1d, 1d]","[1d, 1d]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]"],"B":["[2d, 2d]","[2d]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]"],"M":["1","2","2","1","2","2","1","2","2","1","2","2","1","2"],"K":["2","1","2","2","1","2","2","1","2","2","1","2","2","1"],"N":["1","1","2","1","2","2","1","2","2","1","2","2","1","2"],"expectedC":["[ 4 ]","[ 2, 2 ]","[ 0.0, 2.0, -0.5, 2.5 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 0.0, 2.0, -0.5, 2.5 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 2, 0, 1, 1 ]","[ 1 ]","[ 2, 2, -1, -1 ]","[ 2, 0, 1, 1 ]","[ 1 ]","[ 2, 2, -1, -1 ]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"You can do matrix multiplication using transposed matrices. 
[7]", + "result":"PASS", + "duration":"0.052 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"Two 2-dimensional tensors.","code":["var a = Tensor.of(Double.class).withShape(K, M).andFill(A).mut.toType(type).T()","var b = Tensor.of(Double.class).withShape(K, N).andFill(B).mut.toType(type)"]}, + + {"kind":"when","text":"The \"matMul\" method is being called on \"a\" receiving \"b\"...","code":["var c = a.matMul(b)"]}, + + {"kind":"then","text":"The result tensor contains the expected shape and values.","code":["c.shape == [M, N]","c.items == expectedC as List"]}, + + {"kind":"and","text":"","code":["c.itemType == type"]}, + + {"kind":"where","text":"","code":{"type":["Double","Double","Double","Double","Double","Float","Float","Float","Long","Long","Long","Integer","Integer","Integer"],"A":["[1d, 1d]","[1d, 1d]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]"],"B":["[2d, 2d]","[2d]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]"],"M":["1","2","2","1","2","2","1","2","2","1","2","2","1","2"],"K":["2","1","2","2","1","2","2","1","2","2","1","2","2","1"],"N":["1","1","2","1","2","2","1","2","2","1","2","2","1","2"],"expectedC":["[ 4 ]","[ 2, 2 ]","[ 0.0, 2.0, -0.5, 2.5 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 0.0, 2.0, -0.5, 2.5 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 2, 0, 1, 1 ]","[ 1 ]","[ 2, 2, -1, -1 ]","[ 2, 0, 1, 1 ]","[ 1 ]","[ 2, 2, -1, -1 ]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"You can do matrix multiplication using transposed matrices. 
[8]", + "result":"PASS", + "duration":"0.053 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"Two 2-dimensional tensors.","code":["var a = Tensor.of(Double.class).withShape(K, M).andFill(A).mut.toType(type).T()","var b = Tensor.of(Double.class).withShape(K, N).andFill(B).mut.toType(type)"]}, + + {"kind":"when","text":"The \"matMul\" method is being called on \"a\" receiving \"b\"...","code":["var c = a.matMul(b)"]}, + + {"kind":"then","text":"The result tensor contains the expected shape and values.","code":["c.shape == [M, N]","c.items == expectedC as List"]}, + + {"kind":"and","text":"","code":["c.itemType == type"]}, + + {"kind":"where","text":"","code":{"type":["Double","Double","Double","Double","Double","Float","Float","Float","Long","Long","Long","Integer","Integer","Integer"],"A":["[1d, 1d]","[1d, 1d]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]"],"B":["[2d, 2d]","[2d]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]"],"M":["1","2","2","1","2","2","1","2","2","1","2","2","1","2"],"K":["2","1","2","2","1","2","2","1","2","2","1","2","2","1"],"N":["1","1","2","1","2","2","1","2","2","1","2","2","1","2"],"expectedC":["[ 4 ]","[ 2, 2 ]","[ 0.0, 2.0, -0.5, 2.5 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 0.0, 2.0, -0.5, 2.5 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 2, 0, 1, 1 ]","[ 1 ]","[ 2, 2, -1, -1 ]","[ 2, 0, 1, 1 ]","[ 1 ]","[ 2, 2, -1, -1 ]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"You can do matrix multiplication using transposed matrices. 
[9]", + "result":"PASS", + "duration":"0.051 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"Two 2-dimensional tensors.","code":["var a = Tensor.of(Double.class).withShape(K, M).andFill(A).mut.toType(type).T()","var b = Tensor.of(Double.class).withShape(K, N).andFill(B).mut.toType(type)"]}, + + {"kind":"when","text":"The \"matMul\" method is being called on \"a\" receiving \"b\"...","code":["var c = a.matMul(b)"]}, + + {"kind":"then","text":"The result tensor contains the expected shape and values.","code":["c.shape == [M, N]","c.items == expectedC as List"]}, + + {"kind":"and","text":"","code":["c.itemType == type"]}, + + {"kind":"where","text":"","code":{"type":["Double","Double","Double","Double","Double","Float","Float","Float","Long","Long","Long","Integer","Integer","Integer"],"A":["[1d, 1d]","[1d, 1d]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]"],"B":["[2d, 2d]","[2d]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]"],"M":["1","2","2","1","2","2","1","2","2","1","2","2","1","2"],"K":["2","1","2","2","1","2","2","1","2","2","1","2","2","1"],"N":["1","1","2","1","2","2","1","2","2","1","2","2","1","2"],"expectedC":["[ 4 ]","[ 2, 2 ]","[ 0.0, 2.0, -0.5, 2.5 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 0.0, 2.0, -0.5, 2.5 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 2, 0, 1, 1 ]","[ 1 ]","[ 2, 2, -1, -1 ]","[ 2, 0, 1, 1 ]","[ 1 ]","[ 2, 2, -1, -1 ]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"You can do matrix multiplication using transposed matrices. 
[10]", + "result":"PASS", + "duration":"0.052 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"Two 2-dimensional tensors.","code":["var a = Tensor.of(Double.class).withShape(K, M).andFill(A).mut.toType(type).T()","var b = Tensor.of(Double.class).withShape(K, N).andFill(B).mut.toType(type)"]}, + + {"kind":"when","text":"The \"matMul\" method is being called on \"a\" receiving \"b\"...","code":["var c = a.matMul(b)"]}, + + {"kind":"then","text":"The result tensor contains the expected shape and values.","code":["c.shape == [M, N]","c.items == expectedC as List"]}, + + {"kind":"and","text":"","code":["c.itemType == type"]}, + + {"kind":"where","text":"","code":{"type":["Double","Double","Double","Double","Double","Float","Float","Float","Long","Long","Long","Integer","Integer","Integer"],"A":["[1d, 1d]","[1d, 1d]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]"],"B":["[2d, 2d]","[2d]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]"],"M":["1","2","2","1","2","2","1","2","2","1","2","2","1","2"],"K":["2","1","2","2","1","2","2","1","2","2","1","2","2","1"],"N":["1","1","2","1","2","2","1","2","2","1","2","2","1","2"],"expectedC":["[ 4 ]","[ 2, 2 ]","[ 0.0, 2.0, -0.5, 2.5 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 0.0, 2.0, -0.5, 2.5 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 2, 0, 1, 1 ]","[ 1 ]","[ 2, 2, -1, -1 ]","[ 2, 0, 1, 1 ]","[ 1 ]","[ 2, 2, -1, -1 ]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"You can do matrix multiplication using transposed matrices. 
[11]", + "result":"PASS", + "duration":"0.052 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"Two 2-dimensional tensors.","code":["var a = Tensor.of(Double.class).withShape(K, M).andFill(A).mut.toType(type).T()","var b = Tensor.of(Double.class).withShape(K, N).andFill(B).mut.toType(type)"]}, + + {"kind":"when","text":"The \"matMul\" method is being called on \"a\" receiving \"b\"...","code":["var c = a.matMul(b)"]}, + + {"kind":"then","text":"The result tensor contains the expected shape and values.","code":["c.shape == [M, N]","c.items == expectedC as List"]}, + + {"kind":"and","text":"","code":["c.itemType == type"]}, + + {"kind":"where","text":"","code":{"type":["Double","Double","Double","Double","Double","Float","Float","Float","Long","Long","Long","Integer","Integer","Integer"],"A":["[1d, 1d]","[1d, 1d]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]"],"B":["[2d, 2d]","[2d]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]"],"M":["1","2","2","1","2","2","1","2","2","1","2","2","1","2"],"K":["2","1","2","2","1","2","2","1","2","2","1","2","2","1"],"N":["1","1","2","1","2","2","1","2","2","1","2","2","1","2"],"expectedC":["[ 4 ]","[ 2, 2 ]","[ 0.0, 2.0, -0.5, 2.5 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 0.0, 2.0, -0.5, 2.5 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 2, 0, 1, 1 ]","[ 1 ]","[ 2, 2, -1, -1 ]","[ 2, 0, 1, 1 ]","[ 1 ]","[ 2, 2, -1, -1 ]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"You can do matrix multiplication using transposed matrices. 
[12]", + "result":"PASS", + "duration":"0.050 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"Two 2-dimensional tensors.","code":["var a = Tensor.of(Double.class).withShape(K, M).andFill(A).mut.toType(type).T()","var b = Tensor.of(Double.class).withShape(K, N).andFill(B).mut.toType(type)"]}, + + {"kind":"when","text":"The \"matMul\" method is being called on \"a\" receiving \"b\"...","code":["var c = a.matMul(b)"]}, + + {"kind":"then","text":"The result tensor contains the expected shape and values.","code":["c.shape == [M, N]","c.items == expectedC as List"]}, + + {"kind":"and","text":"","code":["c.itemType == type"]}, + + {"kind":"where","text":"","code":{"type":["Double","Double","Double","Double","Double","Float","Float","Float","Long","Long","Long","Integer","Integer","Integer"],"A":["[1d, 1d]","[1d, 1d]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]"],"B":["[2d, 2d]","[2d]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]"],"M":["1","2","2","1","2","2","1","2","2","1","2","2","1","2"],"K":["2","1","2","2","1","2","2","1","2","2","1","2","2","1"],"N":["1","1","2","1","2","2","1","2","2","1","2","2","1","2"],"expectedC":["[ 4 ]","[ 2, 2 ]","[ 0.0, 2.0, -0.5, 2.5 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 0.0, 2.0, -0.5, 2.5 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 2, 0, 1, 1 ]","[ 1 ]","[ 2, 2, -1, -1 ]","[ 2, 0, 1, 1 ]","[ 1 ]","[ 2, 2, -1, -1 ]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"You can do matrix multiplication using transposed matrices. 
[13]", + "result":"PASS", + "duration":"0.052 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"Two 2-dimensional tensors.","code":["var a = Tensor.of(Double.class).withShape(K, M).andFill(A).mut.toType(type).T()","var b = Tensor.of(Double.class).withShape(K, N).andFill(B).mut.toType(type)"]}, + + {"kind":"when","text":"The \"matMul\" method is being called on \"a\" receiving \"b\"...","code":["var c = a.matMul(b)"]}, + + {"kind":"then","text":"The result tensor contains the expected shape and values.","code":["c.shape == [M, N]","c.items == expectedC as List"]}, + + {"kind":"and","text":"","code":["c.itemType == type"]}, + + {"kind":"where","text":"","code":{"type":["Double","Double","Double","Double","Double","Float","Float","Float","Long","Long","Long","Integer","Integer","Integer"],"A":["[1d, 1d]","[1d, 1d]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]"],"B":["[2d, 2d]","[2d]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]"],"M":["1","2","2","1","2","2","1","2","2","1","2","2","1","2"],"K":["2","1","2","2","1","2","2","1","2","2","1","2","2","1"],"N":["1","1","2","1","2","2","1","2","2","1","2","2","1","2"],"expectedC":["[ 4 ]","[ 2, 2 ]","[ 0.0, 2.0, -0.5, 2.5 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 0.0, 2.0, -0.5, 2.5 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 2, 0, 1, 1 ]","[ 1 ]","[ 2, 2, -1, -1 ]","[ 2, 0, 1, 1 ]","[ 1 ]","[ 2, 2, -1, -1 ]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"You can do matrix multiplication using transposed matrices as second operand. 
[0]", + "result":"PASS", + "duration":"0.053 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"Two 2-dimensional tensors.","code":["var a = Tensor.of(Double.class).withShape(M, K).andFill(A).mut.toType(type)","var b = Tensor.of(Double.class).withShape(N, K).andFill(B).mut.toType(type).T()"]}, + + {"kind":"when","text":"The \"matMul\" method is being called on \"a\" receiving \"b\"...","code":["var c = a.matMul(b)"]}, + + {"kind":"then","text":"The result tensor contains the expected shape and values.","code":["c.shape == [M, N]","c.items == expectedC as List"]}, + + {"kind":"and","text":"","code":["c.itemType == type"]}, + + {"kind":"where","text":"","code":{"type":["Double","Double","Double","Double","Double","Float","Float","Float","Float","Long","Long","Long","Long","Integer","Integer","Integer","Integer"],"A":["[1d, 1d]","[1d, 1d]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[1, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[1, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[1, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]"],"B":["[2d]","[2d, 2d]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[2]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[2]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[2]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]"],"M":["2","1","2","1","2","2","2","1","2","2","2","1","2","2","2","1","2"],"K":["1","2","2","2","1","1","2","2","1","1","2","2","1","1","2","2","1"],"N":["1","1","2","1","2","1","2","1","2","1","2","1","2","1","2","1","2"],"expectedC":["[ 2, 2 ]","[ 4 ]","[ 2.5, -2.0, 0.5, 0.0 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 2, 2 ]","[ 2.5, -2.0, 0.5, 0.0 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 2, 2 ]","[ 3, -2, 1, 0 ]","[ 1 ]","[ 2, 2, -1, -1 ]","[ 2, 2 ]","[ 3, -2, 1, 0 ]","[ 1 ]","[ 2, 2, -1, -1 ]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"You can do matrix multiplication using transposed matrices as second operand. 
[1]", + "result":"PASS", + "duration":"0.050 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"Two 2-dimensional tensors.","code":["var a = Tensor.of(Double.class).withShape(M, K).andFill(A).mut.toType(type)","var b = Tensor.of(Double.class).withShape(N, K).andFill(B).mut.toType(type).T()"]}, + + {"kind":"when","text":"The \"matMul\" method is being called on \"a\" receiving \"b\"...","code":["var c = a.matMul(b)"]}, + + {"kind":"then","text":"The result tensor contains the expected shape and values.","code":["c.shape == [M, N]","c.items == expectedC as List"]}, + + {"kind":"and","text":"","code":["c.itemType == type"]}, + + {"kind":"where","text":"","code":{"type":["Double","Double","Double","Double","Double","Float","Float","Float","Float","Long","Long","Long","Long","Integer","Integer","Integer","Integer"],"A":["[1d, 1d]","[1d, 1d]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[1, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[1, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[1, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]"],"B":["[2d]","[2d, 2d]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[2]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[2]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[2]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]"],"M":["2","1","2","1","2","2","2","1","2","2","2","1","2","2","2","1","2"],"K":["1","2","2","2","1","1","2","2","1","1","2","2","1","1","2","2","1"],"N":["1","1","2","1","2","1","2","1","2","1","2","1","2","1","2","1","2"],"expectedC":["[ 2, 2 ]","[ 4 ]","[ 2.5, -2.0, 0.5, 0.0 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 2, 2 ]","[ 2.5, -2.0, 0.5, 0.0 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 2, 2 ]","[ 3, -2, 1, 0 ]","[ 1 ]","[ 2, 2, -1, -1 ]","[ 2, 2 ]","[ 3, -2, 1, 0 ]","[ 1 ]","[ 2, 2, -1, -1 ]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"You can do matrix multiplication using transposed matrices as second operand. 
[2]", + "result":"PASS", + "duration":"0.072 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"Two 2-dimensional tensors.","code":["var a = Tensor.of(Double.class).withShape(M, K).andFill(A).mut.toType(type)","var b = Tensor.of(Double.class).withShape(N, K).andFill(B).mut.toType(type).T()"]}, + + {"kind":"when","text":"The \"matMul\" method is being called on \"a\" receiving \"b\"...","code":["var c = a.matMul(b)"]}, + + {"kind":"then","text":"The result tensor contains the expected shape and values.","code":["c.shape == [M, N]","c.items == expectedC as List"]}, + + {"kind":"and","text":"","code":["c.itemType == type"]}, + + {"kind":"where","text":"","code":{"type":["Double","Double","Double","Double","Double","Float","Float","Float","Float","Long","Long","Long","Long","Integer","Integer","Integer","Integer"],"A":["[1d, 1d]","[1d, 1d]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[1, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[1, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[1, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]"],"B":["[2d]","[2d, 2d]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[2]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[2]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[2]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]"],"M":["2","1","2","1","2","2","2","1","2","2","2","1","2","2","2","1","2"],"K":["1","2","2","2","1","1","2","2","1","1","2","2","1","1","2","2","1"],"N":["1","1","2","1","2","1","2","1","2","1","2","1","2","1","2","1","2"],"expectedC":["[ 2, 2 ]","[ 4 ]","[ 2.5, -2.0, 0.5, 0.0 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 2, 2 ]","[ 2.5, -2.0, 0.5, 0.0 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 2, 2 ]","[ 3, -2, 1, 0 ]","[ 1 ]","[ 2, 2, -1, -1 ]","[ 2, 2 ]","[ 3, -2, 1, 0 ]","[ 1 ]","[ 2, 2, -1, -1 ]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"You can do matrix multiplication using transposed matrices as second operand. 
[3]", + "result":"PASS", + "duration":"0.050 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"Two 2-dimensional tensors.","code":["var a = Tensor.of(Double.class).withShape(M, K).andFill(A).mut.toType(type)","var b = Tensor.of(Double.class).withShape(N, K).andFill(B).mut.toType(type).T()"]}, + + {"kind":"when","text":"The \"matMul\" method is being called on \"a\" receiving \"b\"...","code":["var c = a.matMul(b)"]}, + + {"kind":"then","text":"The result tensor contains the expected shape and values.","code":["c.shape == [M, N]","c.items == expectedC as List"]}, + + {"kind":"and","text":"","code":["c.itemType == type"]}, + + {"kind":"where","text":"","code":{"type":["Double","Double","Double","Double","Double","Float","Float","Float","Float","Long","Long","Long","Long","Integer","Integer","Integer","Integer"],"A":["[1d, 1d]","[1d, 1d]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[1, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[1, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[1, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]"],"B":["[2d]","[2d, 2d]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[2]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[2]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[2]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]"],"M":["2","1","2","1","2","2","2","1","2","2","2","1","2","2","2","1","2"],"K":["1","2","2","2","1","1","2","2","1","1","2","2","1","1","2","2","1"],"N":["1","1","2","1","2","1","2","1","2","1","2","1","2","1","2","1","2"],"expectedC":["[ 2, 2 ]","[ 4 ]","[ 2.5, -2.0, 0.5, 0.0 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 2, 2 ]","[ 2.5, -2.0, 0.5, 0.0 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 2, 2 ]","[ 3, -2, 1, 0 ]","[ 1 ]","[ 2, 2, -1, -1 ]","[ 2, 2 ]","[ 3, -2, 1, 0 ]","[ 1 ]","[ 2, 2, -1, -1 ]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"You can do matrix multiplication using transposed matrices as second operand. 
[4]", + "result":"PASS", + "duration":"0.053 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"Two 2-dimensional tensors.","code":["var a = Tensor.of(Double.class).withShape(M, K).andFill(A).mut.toType(type)","var b = Tensor.of(Double.class).withShape(N, K).andFill(B).mut.toType(type).T()"]}, + + {"kind":"when","text":"The \"matMul\" method is being called on \"a\" receiving \"b\"...","code":["var c = a.matMul(b)"]}, + + {"kind":"then","text":"The result tensor contains the expected shape and values.","code":["c.shape == [M, N]","c.items == expectedC as List"]}, + + {"kind":"and","text":"","code":["c.itemType == type"]}, + + {"kind":"where","text":"","code":{"type":["Double","Double","Double","Double","Double","Float","Float","Float","Float","Long","Long","Long","Long","Integer","Integer","Integer","Integer"],"A":["[1d, 1d]","[1d, 1d]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[1, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[1, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[1, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]"],"B":["[2d]","[2d, 2d]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[2]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[2]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[2]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]"],"M":["2","1","2","1","2","2","2","1","2","2","2","1","2","2","2","1","2"],"K":["1","2","2","2","1","1","2","2","1","1","2","2","1","1","2","2","1"],"N":["1","1","2","1","2","1","2","1","2","1","2","1","2","1","2","1","2"],"expectedC":["[ 2, 2 ]","[ 4 ]","[ 2.5, -2.0, 0.5, 0.0 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 2, 2 ]","[ 2.5, -2.0, 0.5, 0.0 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 2, 2 ]","[ 3, -2, 1, 0 ]","[ 1 ]","[ 2, 2, -1, -1 ]","[ 2, 2 ]","[ 3, -2, 1, 0 ]","[ 1 ]","[ 2, 2, -1, -1 ]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"You can do matrix multiplication using transposed matrices as second operand. 
[5]", + "result":"PASS", + "duration":"0.051 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"Two 2-dimensional tensors.","code":["var a = Tensor.of(Double.class).withShape(M, K).andFill(A).mut.toType(type)","var b = Tensor.of(Double.class).withShape(N, K).andFill(B).mut.toType(type).T()"]}, + + {"kind":"when","text":"The \"matMul\" method is being called on \"a\" receiving \"b\"...","code":["var c = a.matMul(b)"]}, + + {"kind":"then","text":"The result tensor contains the expected shape and values.","code":["c.shape == [M, N]","c.items == expectedC as List"]}, + + {"kind":"and","text":"","code":["c.itemType == type"]}, + + {"kind":"where","text":"","code":{"type":["Double","Double","Double","Double","Double","Float","Float","Float","Float","Long","Long","Long","Long","Integer","Integer","Integer","Integer"],"A":["[1d, 1d]","[1d, 1d]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[1, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[1, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[1, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]"],"B":["[2d]","[2d, 2d]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[2]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[2]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[2]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]"],"M":["2","1","2","1","2","2","2","1","2","2","2","1","2","2","2","1","2"],"K":["1","2","2","2","1","1","2","2","1","1","2","2","1","1","2","2","1"],"N":["1","1","2","1","2","1","2","1","2","1","2","1","2","1","2","1","2"],"expectedC":["[ 2, 2 ]","[ 4 ]","[ 2.5, -2.0, 0.5, 0.0 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 2, 2 ]","[ 2.5, -2.0, 0.5, 0.0 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 2, 2 ]","[ 3, -2, 1, 0 ]","[ 1 ]","[ 2, 2, -1, -1 ]","[ 2, 2 ]","[ 3, -2, 1, 0 ]","[ 1 ]","[ 2, 2, -1, -1 ]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"You can do matrix multiplication using transposed matrices as second operand. 
[6]", + "result":"PASS", + "duration":"0.053 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"Two 2-dimensional tensors.","code":["var a = Tensor.of(Double.class).withShape(M, K).andFill(A).mut.toType(type)","var b = Tensor.of(Double.class).withShape(N, K).andFill(B).mut.toType(type).T()"]}, + + {"kind":"when","text":"The \"matMul\" method is being called on \"a\" receiving \"b\"...","code":["var c = a.matMul(b)"]}, + + {"kind":"then","text":"The result tensor contains the expected shape and values.","code":["c.shape == [M, N]","c.items == expectedC as List"]}, + + {"kind":"and","text":"","code":["c.itemType == type"]}, + + {"kind":"where","text":"","code":{"type":["Double","Double","Double","Double","Double","Float","Float","Float","Float","Long","Long","Long","Long","Integer","Integer","Integer","Integer"],"A":["[1d, 1d]","[1d, 1d]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[1, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[1, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[1, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]"],"B":["[2d]","[2d, 2d]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[2]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[2]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[2]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]"],"M":["2","1","2","1","2","2","2","1","2","2","2","1","2","2","2","1","2"],"K":["1","2","2","2","1","1","2","2","1","1","2","2","1","1","2","2","1"],"N":["1","1","2","1","2","1","2","1","2","1","2","1","2","1","2","1","2"],"expectedC":["[ 2, 2 ]","[ 4 ]","[ 2.5, -2.0, 0.5, 0.0 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 2, 2 ]","[ 2.5, -2.0, 0.5, 0.0 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 2, 2 ]","[ 3, -2, 1, 0 ]","[ 1 ]","[ 2, 2, -1, -1 ]","[ 2, 2 ]","[ 3, -2, 1, 0 ]","[ 1 ]","[ 2, 2, -1, -1 ]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"You can do matrix multiplication using transposed matrices as second operand. 
[7]", + "result":"PASS", + "duration":"0.053 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"Two 2-dimensional tensors.","code":["var a = Tensor.of(Double.class).withShape(M, K).andFill(A).mut.toType(type)","var b = Tensor.of(Double.class).withShape(N, K).andFill(B).mut.toType(type).T()"]}, + + {"kind":"when","text":"The \"matMul\" method is being called on \"a\" receiving \"b\"...","code":["var c = a.matMul(b)"]}, + + {"kind":"then","text":"The result tensor contains the expected shape and values.","code":["c.shape == [M, N]","c.items == expectedC as List"]}, + + {"kind":"and","text":"","code":["c.itemType == type"]}, + + {"kind":"where","text":"","code":{"type":["Double","Double","Double","Double","Double","Float","Float","Float","Float","Long","Long","Long","Long","Integer","Integer","Integer","Integer"],"A":["[1d, 1d]","[1d, 1d]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[1, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[1, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[1, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]"],"B":["[2d]","[2d, 2d]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[2]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[2]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[2]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]"],"M":["2","1","2","1","2","2","2","1","2","2","2","1","2","2","2","1","2"],"K":["1","2","2","2","1","1","2","2","1","1","2","2","1","1","2","2","1"],"N":["1","1","2","1","2","1","2","1","2","1","2","1","2","1","2","1","2"],"expectedC":["[ 2, 2 ]","[ 4 ]","[ 2.5, -2.0, 0.5, 0.0 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 2, 2 ]","[ 2.5, -2.0, 0.5, 0.0 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 2, 2 ]","[ 3, -2, 1, 0 ]","[ 1 ]","[ 2, 2, -1, -1 ]","[ 2, 2 ]","[ 3, -2, 1, 0 ]","[ 1 ]","[ 2, 2, -1, -1 ]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"You can do matrix multiplication using transposed matrices as second operand. 
[8]", + "result":"PASS", + "duration":"0.053 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"Two 2-dimensional tensors.","code":["var a = Tensor.of(Double.class).withShape(M, K).andFill(A).mut.toType(type)","var b = Tensor.of(Double.class).withShape(N, K).andFill(B).mut.toType(type).T()"]}, + + {"kind":"when","text":"The \"matMul\" method is being called on \"a\" receiving \"b\"...","code":["var c = a.matMul(b)"]}, + + {"kind":"then","text":"The result tensor contains the expected shape and values.","code":["c.shape == [M, N]","c.items == expectedC as List"]}, + + {"kind":"and","text":"","code":["c.itemType == type"]}, + + {"kind":"where","text":"","code":{"type":["Double","Double","Double","Double","Double","Float","Float","Float","Float","Long","Long","Long","Long","Integer","Integer","Integer","Integer"],"A":["[1d, 1d]","[1d, 1d]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[1, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[1, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[1, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]"],"B":["[2d]","[2d, 2d]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[2]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[2]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[2]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]"],"M":["2","1","2","1","2","2","2","1","2","2","2","1","2","2","2","1","2"],"K":["1","2","2","2","1","1","2","2","1","1","2","2","1","1","2","2","1"],"N":["1","1","2","1","2","1","2","1","2","1","2","1","2","1","2","1","2"],"expectedC":["[ 2, 2 ]","[ 4 ]","[ 2.5, -2.0, 0.5, 0.0 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 2, 2 ]","[ 2.5, -2.0, 0.5, 0.0 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 2, 2 ]","[ 3, -2, 1, 0 ]","[ 1 ]","[ 2, 2, -1, -1 ]","[ 2, 2 ]","[ 3, -2, 1, 0 ]","[ 1 ]","[ 2, 2, -1, -1 ]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"You can do matrix multiplication using transposed matrices as second operand. 
[9]", + "result":"PASS", + "duration":"0.052 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"Two 2-dimensional tensors.","code":["var a = Tensor.of(Double.class).withShape(M, K).andFill(A).mut.toType(type)","var b = Tensor.of(Double.class).withShape(N, K).andFill(B).mut.toType(type).T()"]}, + + {"kind":"when","text":"The \"matMul\" method is being called on \"a\" receiving \"b\"...","code":["var c = a.matMul(b)"]}, + + {"kind":"then","text":"The result tensor contains the expected shape and values.","code":["c.shape == [M, N]","c.items == expectedC as List"]}, + + {"kind":"and","text":"","code":["c.itemType == type"]}, + + {"kind":"where","text":"","code":{"type":["Double","Double","Double","Double","Double","Float","Float","Float","Float","Long","Long","Long","Long","Integer","Integer","Integer","Integer"],"A":["[1d, 1d]","[1d, 1d]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[1, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[1, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[1, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]"],"B":["[2d]","[2d, 2d]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[2]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[2]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[2]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]"],"M":["2","1","2","1","2","2","2","1","2","2","2","1","2","2","2","1","2"],"K":["1","2","2","2","1","1","2","2","1","1","2","2","1","1","2","2","1"],"N":["1","1","2","1","2","1","2","1","2","1","2","1","2","1","2","1","2"],"expectedC":["[ 2, 2 ]","[ 4 ]","[ 2.5, -2.0, 0.5, 0.0 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 2, 2 ]","[ 2.5, -2.0, 0.5, 0.0 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 2, 2 ]","[ 3, -2, 1, 0 ]","[ 1 ]","[ 2, 2, -1, -1 ]","[ 2, 2 ]","[ 3, -2, 1, 0 ]","[ 1 ]","[ 2, 2, -1, -1 ]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"You can do matrix multiplication using transposed matrices as second operand. 
[10]", + "result":"PASS", + "duration":"0.053 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"Two 2-dimensional tensors.","code":["var a = Tensor.of(Double.class).withShape(M, K).andFill(A).mut.toType(type)","var b = Tensor.of(Double.class).withShape(N, K).andFill(B).mut.toType(type).T()"]}, + + {"kind":"when","text":"The \"matMul\" method is being called on \"a\" receiving \"b\"...","code":["var c = a.matMul(b)"]}, + + {"kind":"then","text":"The result tensor contains the expected shape and values.","code":["c.shape == [M, N]","c.items == expectedC as List"]}, + + {"kind":"and","text":"","code":["c.itemType == type"]}, + + {"kind":"where","text":"","code":{"type":["Double","Double","Double","Double","Double","Float","Float","Float","Float","Long","Long","Long","Long","Integer","Integer","Integer","Integer"],"A":["[1d, 1d]","[1d, 1d]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[1, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[1, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[1, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]"],"B":["[2d]","[2d, 2d]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[2]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[2]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[2]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]"],"M":["2","1","2","1","2","2","2","1","2","2","2","1","2","2","2","1","2"],"K":["1","2","2","2","1","1","2","2","1","1","2","2","1","1","2","2","1"],"N":["1","1","2","1","2","1","2","1","2","1","2","1","2","1","2","1","2"],"expectedC":["[ 2, 2 ]","[ 4 ]","[ 2.5, -2.0, 0.5, 0.0 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 2, 2 ]","[ 2.5, -2.0, 0.5, 0.0 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 2, 2 ]","[ 3, -2, 1, 0 ]","[ 1 ]","[ 2, 2, -1, -1 ]","[ 2, 2 ]","[ 3, -2, 1, 0 ]","[ 1 ]","[ 2, 2, -1, -1 ]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"You can do matrix multiplication using transposed matrices as second operand. 
[11]", + "result":"PASS", + "duration":"0.054 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"Two 2-dimensional tensors.","code":["var a = Tensor.of(Double.class).withShape(M, K).andFill(A).mut.toType(type)","var b = Tensor.of(Double.class).withShape(N, K).andFill(B).mut.toType(type).T()"]}, + + {"kind":"when","text":"The \"matMul\" method is being called on \"a\" receiving \"b\"...","code":["var c = a.matMul(b)"]}, + + {"kind":"then","text":"The result tensor contains the expected shape and values.","code":["c.shape == [M, N]","c.items == expectedC as List"]}, + + {"kind":"and","text":"","code":["c.itemType == type"]}, + + {"kind":"where","text":"","code":{"type":["Double","Double","Double","Double","Double","Float","Float","Float","Float","Long","Long","Long","Long","Integer","Integer","Integer","Integer"],"A":["[1d, 1d]","[1d, 1d]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[1, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[1, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[1, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]"],"B":["[2d]","[2d, 2d]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[2]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[2]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[2]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]"],"M":["2","1","2","1","2","2","2","1","2","2","2","1","2","2","2","1","2"],"K":["1","2","2","2","1","1","2","2","1","1","2","2","1","1","2","2","1"],"N":["1","1","2","1","2","1","2","1","2","1","2","1","2","1","2","1","2"],"expectedC":["[ 2, 2 ]","[ 4 ]","[ 2.5, -2.0, 0.5, 0.0 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 2, 2 ]","[ 2.5, -2.0, 0.5, 0.0 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 2, 2 ]","[ 3, -2, 1, 0 ]","[ 1 ]","[ 2, 2, -1, -1 ]","[ 2, 2 ]","[ 3, -2, 1, 0 ]","[ 1 ]","[ 2, 2, -1, -1 ]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"You can do matrix multiplication using transposed matrices as second operand. 
[12]", + "result":"PASS", + "duration":"0.054 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"Two 2-dimensional tensors.","code":["var a = Tensor.of(Double.class).withShape(M, K).andFill(A).mut.toType(type)","var b = Tensor.of(Double.class).withShape(N, K).andFill(B).mut.toType(type).T()"]}, + + {"kind":"when","text":"The \"matMul\" method is being called on \"a\" receiving \"b\"...","code":["var c = a.matMul(b)"]}, + + {"kind":"then","text":"The result tensor contains the expected shape and values.","code":["c.shape == [M, N]","c.items == expectedC as List"]}, + + {"kind":"and","text":"","code":["c.itemType == type"]}, + + {"kind":"where","text":"","code":{"type":["Double","Double","Double","Double","Double","Float","Float","Float","Float","Long","Long","Long","Long","Integer","Integer","Integer","Integer"],"A":["[1d, 1d]","[1d, 1d]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[1, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[1, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[1, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]"],"B":["[2d]","[2d, 2d]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[2]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[2]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[2]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]"],"M":["2","1","2","1","2","2","2","1","2","2","2","1","2","2","2","1","2"],"K":["1","2","2","2","1","1","2","2","1","1","2","2","1","1","2","2","1"],"N":["1","1","2","1","2","1","2","1","2","1","2","1","2","1","2","1","2"],"expectedC":["[ 2, 2 ]","[ 4 ]","[ 2.5, -2.0, 0.5, 0.0 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 2, 2 ]","[ 2.5, -2.0, 0.5, 0.0 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 2, 2 ]","[ 3, -2, 1, 0 ]","[ 1 ]","[ 2, 2, -1, -1 ]","[ 2, 2 ]","[ 3, -2, 1, 0 ]","[ 1 ]","[ 2, 2, -1, -1 ]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"You can do matrix multiplication using transposed matrices as second operand. 
[13]", + "result":"PASS", + "duration":"0.054 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"Two 2-dimensional tensors.","code":["var a = Tensor.of(Double.class).withShape(M, K).andFill(A).mut.toType(type)","var b = Tensor.of(Double.class).withShape(N, K).andFill(B).mut.toType(type).T()"]}, + + {"kind":"when","text":"The \"matMul\" method is being called on \"a\" receiving \"b\"...","code":["var c = a.matMul(b)"]}, + + {"kind":"then","text":"The result tensor contains the expected shape and values.","code":["c.shape == [M, N]","c.items == expectedC as List"]}, + + {"kind":"and","text":"","code":["c.itemType == type"]}, + + {"kind":"where","text":"","code":{"type":["Double","Double","Double","Double","Double","Float","Float","Float","Float","Long","Long","Long","Long","Integer","Integer","Integer","Integer"],"A":["[1d, 1d]","[1d, 1d]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[1, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[1, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[1, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]"],"B":["[2d]","[2d, 2d]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[2]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[2]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[2]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]"],"M":["2","1","2","1","2","2","2","1","2","2","2","1","2","2","2","1","2"],"K":["1","2","2","2","1","1","2","2","1","1","2","2","1","1","2","2","1"],"N":["1","1","2","1","2","1","2","1","2","1","2","1","2","1","2","1","2"],"expectedC":["[ 2, 2 ]","[ 4 ]","[ 2.5, -2.0, 0.5, 0.0 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 2, 2 ]","[ 2.5, -2.0, 0.5, 0.0 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 2, 2 ]","[ 3, -2, 1, 0 ]","[ 1 ]","[ 2, 2, -1, -1 ]","[ 2, 2 ]","[ 3, -2, 1, 0 ]","[ 1 ]","[ 2, 2, -1, -1 ]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"You can do matrix multiplication using transposed matrices as second operand. 
[14]", + "result":"PASS", + "duration":"0.054 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"Two 2-dimensional tensors.","code":["var a = Tensor.of(Double.class).withShape(M, K).andFill(A).mut.toType(type)","var b = Tensor.of(Double.class).withShape(N, K).andFill(B).mut.toType(type).T()"]}, + + {"kind":"when","text":"The \"matMul\" method is being called on \"a\" receiving \"b\"...","code":["var c = a.matMul(b)"]}, + + {"kind":"then","text":"The result tensor contains the expected shape and values.","code":["c.shape == [M, N]","c.items == expectedC as List"]}, + + {"kind":"and","text":"","code":["c.itemType == type"]}, + + {"kind":"where","text":"","code":{"type":["Double","Double","Double","Double","Double","Float","Float","Float","Float","Long","Long","Long","Long","Integer","Integer","Integer","Integer"],"A":["[1d, 1d]","[1d, 1d]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[1, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[1, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[1, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]"],"B":["[2d]","[2d, 2d]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[2]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[2]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[2]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]"],"M":["2","1","2","1","2","2","2","1","2","2","2","1","2","2","2","1","2"],"K":["1","2","2","2","1","1","2","2","1","1","2","2","1","1","2","2","1"],"N":["1","1","2","1","2","1","2","1","2","1","2","1","2","1","2","1","2"],"expectedC":["[ 2, 2 ]","[ 4 ]","[ 2.5, -2.0, 0.5, 0.0 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 2, 2 ]","[ 2.5, -2.0, 0.5, 0.0 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 2, 2 ]","[ 3, -2, 1, 0 ]","[ 1 ]","[ 2, 2, -1, -1 ]","[ 2, 2 ]","[ 3, -2, 1, 0 ]","[ 1 ]","[ 2, 2, -1, -1 ]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"You can do matrix multiplication using transposed matrices as second operand. 
[15]", + "result":"PASS", + "duration":"0.055 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"Two 2-dimensional tensors.","code":["var a = Tensor.of(Double.class).withShape(M, K).andFill(A).mut.toType(type)","var b = Tensor.of(Double.class).withShape(N, K).andFill(B).mut.toType(type).T()"]}, + + {"kind":"when","text":"The \"matMul\" method is being called on \"a\" receiving \"b\"...","code":["var c = a.matMul(b)"]}, + + {"kind":"then","text":"The result tensor contains the expected shape and values.","code":["c.shape == [M, N]","c.items == expectedC as List"]}, + + {"kind":"and","text":"","code":["c.itemType == type"]}, + + {"kind":"where","text":"","code":{"type":["Double","Double","Double","Double","Double","Float","Float","Float","Float","Long","Long","Long","Long","Integer","Integer","Integer","Integer"],"A":["[1d, 1d]","[1d, 1d]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[1, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[1, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[1, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]"],"B":["[2d]","[2d, 2d]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[2]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[2]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[2]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]"],"M":["2","1","2","1","2","2","2","1","2","2","2","1","2","2","2","1","2"],"K":["1","2","2","2","1","1","2","2","1","1","2","2","1","1","2","2","1"],"N":["1","1","2","1","2","1","2","1","2","1","2","1","2","1","2","1","2"],"expectedC":["[ 2, 2 ]","[ 4 ]","[ 2.5, -2.0, 0.5, 0.0 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 2, 2 ]","[ 2.5, -2.0, 0.5, 0.0 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 2, 2 ]","[ 3, -2, 1, 0 ]","[ 1 ]","[ 2, 2, -1, -1 ]","[ 2, 2 ]","[ 3, -2, 1, 0 ]","[ 1 ]","[ 2, 2, -1, -1 ]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"You can do matrix multiplication using transposed matrices as second operand. 
[16]", + "result":"PASS", + "duration":"0.052 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"Two 2-dimensional tensors.","code":["var a = Tensor.of(Double.class).withShape(M, K).andFill(A).mut.toType(type)","var b = Tensor.of(Double.class).withShape(N, K).andFill(B).mut.toType(type).T()"]}, + + {"kind":"when","text":"The \"matMul\" method is being called on \"a\" receiving \"b\"...","code":["var c = a.matMul(b)"]}, + + {"kind":"then","text":"The result tensor contains the expected shape and values.","code":["c.shape == [M, N]","c.items == expectedC as List"]}, + + {"kind":"and","text":"","code":["c.itemType == type"]}, + + {"kind":"where","text":"","code":{"type":["Double","Double","Double","Double","Double","Float","Float","Float","Float","Long","Long","Long","Long","Integer","Integer","Integer","Integer"],"A":["[1d, 1d]","[1d, 1d]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[1, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[1, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]","[1, 1]","[4, 3, 2, 1]","[-2, 1]","[-2, 1]"],"B":["[2d]","[2d, 2d]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[2]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[2]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]","[2]","[-0.5, 1.5, 1, -2]","[-1, -1.5]","[-1, -1.5]"],"M":["2","1","2","1","2","2","2","1","2","2","2","1","2","2","2","1","2"],"K":["1","2","2","2","1","1","2","2","1","1","2","2","1","1","2","2","1"],"N":["1","1","2","1","2","1","2","1","2","1","2","1","2","1","2","1","2"],"expectedC":["[ 2, 2 ]","[ 4 ]","[ 2.5, -2.0, 0.5, 0.0 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 2, 2 ]","[ 2.5, -2.0, 0.5, 0.0 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 2, 2 ]","[ 3, -2, 1, 0 ]","[ 1 ]","[ 2, 2, -1, -1 ]","[ 2, 2 ]","[ 3, -2, 1, 0 ]","[ 1 ]","[ 2, 2, -1, -1 ]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"You can do matrix multiplication using 2 transposed matrices. [0]", + "result":"PASS", + "duration":"0.090 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"Two 2-dimensional tensors.","code":["var a = Tensor.of(Double.class).withShape(K, M).andFill(A).mut.toType(type).T()","var b = Tensor.of(Double.class).withShape(N, K).andFill(B).mut.toType(type).T()"]}, + + {"kind":"when","text":"The \"matMul\" method is being called on \"a\" receiving \"b\"...","code":["println a.toString({it.isMultiline = true})","println b.toString({it.isMultiline = true})","var c = a.matMul(b)","println c.toString({it.isMultiline = true})"]}, + + {"kind":"then","text":"The result tensor contains the expected shape and values.","code":["c.shape == [M, N]","c.items == expectedC as List"]}, + + {"kind":"and","text":"","code":["c.itemType == type"]}, + + {"kind":"where","text":"","code":{"type":["Double","Double","Double","Double","Double","Double","Float"],"A":["[1d, 1d]","[1d, 1d]","[4, 3, 2, 1]","[3, 3, 3, 3]","[-2, 1]","[-2, 1]","[1, 1]"],"B":["[2d]","[2d, 2d]","[-0.5, 1.5, 1, -2]","[0.5, 0.5, 0.5, 0.5]","[-1, -1.5]","[-1, -1.5]","[2]"],"M":["1","2","2","2","1","2","1"],"K":["2","1","2","2","2","1","2"],"N":["1","1","2","2","1","2","1"],"expectedC":["[ 4 ]","[ 2, 2 ]","[ 1.0, 0.0, 0.0, 1.0 ]","[ 3.0, 3.0, 3.0, 3.0 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 4 ]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"You can do matrix multiplication using 2 transposed matrices. 
[1]", + "result":"PASS", + "duration":"0.052 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"Two 2-dimensional tensors.","code":["var a = Tensor.of(Double.class).withShape(K, M).andFill(A).mut.toType(type).T()","var b = Tensor.of(Double.class).withShape(N, K).andFill(B).mut.toType(type).T()"]}, + + {"kind":"when","text":"The \"matMul\" method is being called on \"a\" receiving \"b\"...","code":["println a.toString({it.isMultiline = true})","println b.toString({it.isMultiline = true})","var c = a.matMul(b)","println c.toString({it.isMultiline = true})"]}, + + {"kind":"then","text":"The result tensor contains the expected shape and values.","code":["c.shape == [M, N]","c.items == expectedC as List"]}, + + {"kind":"and","text":"","code":["c.itemType == type"]}, + + {"kind":"where","text":"","code":{"type":["Double","Double","Double","Double","Double","Double","Float"],"A":["[1d, 1d]","[1d, 1d]","[4, 3, 2, 1]","[3, 3, 3, 3]","[-2, 1]","[-2, 1]","[1, 1]"],"B":["[2d]","[2d, 2d]","[-0.5, 1.5, 1, -2]","[0.5, 0.5, 0.5, 0.5]","[-1, -1.5]","[-1, -1.5]","[2]"],"M":["1","2","2","2","1","2","1"],"K":["2","1","2","2","2","1","2"],"N":["1","1","2","2","1","2","1"],"expectedC":["[ 4 ]","[ 2, 2 ]","[ 1.0, 0.0, 0.0, 1.0 ]","[ 3.0, 3.0, 3.0, 3.0 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 4 ]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"You can do matrix multiplication using 2 transposed matrices. [2]", + "result":"PASS", + "duration":"0.053 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"Two 2-dimensional tensors.","code":["var a = Tensor.of(Double.class).withShape(K, M).andFill(A).mut.toType(type).T()","var b = Tensor.of(Double.class).withShape(N, K).andFill(B).mut.toType(type).T()"]}, + + {"kind":"when","text":"The \"matMul\" method is being called on \"a\" receiving \"b\"...","code":["println a.toString({it.isMultiline = true})","println b.toString({it.isMultiline = true})","var c = a.matMul(b)","println c.toString({it.isMultiline = true})"]}, + + {"kind":"then","text":"The result tensor contains the expected shape and values.","code":["c.shape == [M, N]","c.items == expectedC as List"]}, + + {"kind":"and","text":"","code":["c.itemType == type"]}, + + {"kind":"where","text":"","code":{"type":["Double","Double","Double","Double","Double","Double","Float"],"A":["[1d, 1d]","[1d, 1d]","[4, 3, 2, 1]","[3, 3, 3, 3]","[-2, 1]","[-2, 1]","[1, 1]"],"B":["[2d]","[2d, 2d]","[-0.5, 1.5, 1, -2]","[0.5, 0.5, 0.5, 0.5]","[-1, -1.5]","[-1, -1.5]","[2]"],"M":["1","2","2","2","1","2","1"],"K":["2","1","2","2","2","1","2"],"N":["1","1","2","2","1","2","1"],"expectedC":["[ 4 ]","[ 2, 2 ]","[ 1.0, 0.0, 0.0, 1.0 ]","[ 3.0, 3.0, 3.0, 3.0 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 4 ]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"You can do matrix multiplication using 2 transposed matrices. 
[3]", + "result":"PASS", + "duration":"0.054 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"Two 2-dimensional tensors.","code":["var a = Tensor.of(Double.class).withShape(K, M).andFill(A).mut.toType(type).T()","var b = Tensor.of(Double.class).withShape(N, K).andFill(B).mut.toType(type).T()"]}, + + {"kind":"when","text":"The \"matMul\" method is being called on \"a\" receiving \"b\"...","code":["println a.toString({it.isMultiline = true})","println b.toString({it.isMultiline = true})","var c = a.matMul(b)","println c.toString({it.isMultiline = true})"]}, + + {"kind":"then","text":"The result tensor contains the expected shape and values.","code":["c.shape == [M, N]","c.items == expectedC as List"]}, + + {"kind":"and","text":"","code":["c.itemType == type"]}, + + {"kind":"where","text":"","code":{"type":["Double","Double","Double","Double","Double","Double","Float"],"A":["[1d, 1d]","[1d, 1d]","[4, 3, 2, 1]","[3, 3, 3, 3]","[-2, 1]","[-2, 1]","[1, 1]"],"B":["[2d]","[2d, 2d]","[-0.5, 1.5, 1, -2]","[0.5, 0.5, 0.5, 0.5]","[-1, -1.5]","[-1, -1.5]","[2]"],"M":["1","2","2","2","1","2","1"],"K":["2","1","2","2","2","1","2"],"N":["1","1","2","2","1","2","1"],"expectedC":["[ 4 ]","[ 2, 2 ]","[ 1.0, 0.0, 0.0, 1.0 ]","[ 3.0, 3.0, 3.0, 3.0 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 4 ]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"You can do matrix multiplication using 2 transposed matrices. [4]", + "result":"PASS", + "duration":"0.052 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"Two 2-dimensional tensors.","code":["var a = Tensor.of(Double.class).withShape(K, M).andFill(A).mut.toType(type).T()","var b = Tensor.of(Double.class).withShape(N, K).andFill(B).mut.toType(type).T()"]}, + + {"kind":"when","text":"The \"matMul\" method is being called on \"a\" receiving \"b\"...","code":["println a.toString({it.isMultiline = true})","println b.toString({it.isMultiline = true})","var c = a.matMul(b)","println c.toString({it.isMultiline = true})"]}, + + {"kind":"then","text":"The result tensor contains the expected shape and values.","code":["c.shape == [M, N]","c.items == expectedC as List"]}, + + {"kind":"and","text":"","code":["c.itemType == type"]}, + + {"kind":"where","text":"","code":{"type":["Double","Double","Double","Double","Double","Double","Float"],"A":["[1d, 1d]","[1d, 1d]","[4, 3, 2, 1]","[3, 3, 3, 3]","[-2, 1]","[-2, 1]","[1, 1]"],"B":["[2d]","[2d, 2d]","[-0.5, 1.5, 1, -2]","[0.5, 0.5, 0.5, 0.5]","[-1, -1.5]","[-1, -1.5]","[2]"],"M":["1","2","2","2","1","2","1"],"K":["2","1","2","2","2","1","2"],"N":["1","1","2","2","1","2","1"],"expectedC":["[ 4 ]","[ 2, 2 ]","[ 1.0, 0.0, 0.0, 1.0 ]","[ 3.0, 3.0, 3.0, 3.0 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 4 ]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"You can do matrix multiplication using 2 transposed matrices. 
[5]", + "result":"PASS", + "duration":"0.053 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"Two 2-dimensional tensors.","code":["var a = Tensor.of(Double.class).withShape(K, M).andFill(A).mut.toType(type).T()","var b = Tensor.of(Double.class).withShape(N, K).andFill(B).mut.toType(type).T()"]}, + + {"kind":"when","text":"The \"matMul\" method is being called on \"a\" receiving \"b\"...","code":["println a.toString({it.isMultiline = true})","println b.toString({it.isMultiline = true})","var c = a.matMul(b)","println c.toString({it.isMultiline = true})"]}, + + {"kind":"then","text":"The result tensor contains the expected shape and values.","code":["c.shape == [M, N]","c.items == expectedC as List"]}, + + {"kind":"and","text":"","code":["c.itemType == type"]}, + + {"kind":"where","text":"","code":{"type":["Double","Double","Double","Double","Double","Double","Float"],"A":["[1d, 1d]","[1d, 1d]","[4, 3, 2, 1]","[3, 3, 3, 3]","[-2, 1]","[-2, 1]","[1, 1]"],"B":["[2d]","[2d, 2d]","[-0.5, 1.5, 1, -2]","[0.5, 0.5, 0.5, 0.5]","[-1, -1.5]","[-1, -1.5]","[2]"],"M":["1","2","2","2","1","2","1"],"K":["2","1","2","2","2","1","2"],"N":["1","1","2","2","1","2","1"],"expectedC":["[ 4 ]","[ 2, 2 ]","[ 1.0, 0.0, 0.0, 1.0 ]","[ 3.0, 3.0, 3.0, 3.0 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 4 ]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"You can do matrix multiplication using 2 transposed matrices. [6]", + "result":"PASS", + "duration":"0.051 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"Two 2-dimensional tensors.","code":["var a = Tensor.of(Double.class).withShape(K, M).andFill(A).mut.toType(type).T()","var b = Tensor.of(Double.class).withShape(N, K).andFill(B).mut.toType(type).T()"]}, + + {"kind":"when","text":"The \"matMul\" method is being called on \"a\" receiving \"b\"...","code":["println a.toString({it.isMultiline = true})","println b.toString({it.isMultiline = true})","var c = a.matMul(b)","println c.toString({it.isMultiline = true})"]}, + + {"kind":"then","text":"The result tensor contains the expected shape and values.","code":["c.shape == [M, N]","c.items == expectedC as List"]}, + + {"kind":"and","text":"","code":["c.itemType == type"]}, + + {"kind":"where","text":"","code":{"type":["Double","Double","Double","Double","Double","Double","Float"],"A":["[1d, 1d]","[1d, 1d]","[4, 3, 2, 1]","[3, 3, 3, 3]","[-2, 1]","[-2, 1]","[1, 1]"],"B":["[2d]","[2d, 2d]","[-0.5, 1.5, 1, -2]","[0.5, 0.5, 0.5, 0.5]","[-1, -1.5]","[-1, -1.5]","[2]"],"M":["1","2","2","2","1","2","1"],"K":["2","1","2","2","2","1","2"],"N":["1","1","2","2","1","2","1"],"expectedC":["[ 4 ]","[ 2, 2 ]","[ 1.0, 0.0, 0.0, 1.0 ]","[ 3.0, 3.0, 3.0, 3.0 ]","[ 0.5 ]","[ 2.0, 3.0, -1.0, -1.5 ]","[ 4 ]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The \"random\" function/operation populates tensors randomly. 
[0]", + "result":"PASS", + "duration":"0.053 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["var t = Tensor.of(type).withShape(2,4).all(-42)"]}, + + {"kind":"and","text":"","code":["var f = Function.of('random(I[0])')"]}, + + {"kind":"expect","text":"","code":["t.itemType == type"]}, + + {"kind":"when","text":"","code":["var r = f(t)"]}, + + {"kind":"then","text":"","code":["r === t"]}, + + {"kind":"and","text":"","code":["( r.mut.data.get() as float[] ) == [1.0588075, 1.4017555, 1.2537496, -1.3897222, 1.0374786, 0.743316, 1.1692946, 1.3977289] as float[]"]}, + + {"kind":"when","text":"","code":["r = f.with(Arg.Seed.of(42)).call(t)"]}, + + {"kind":"then","text":"","code":["r === t"]}, + + {"kind":"and","text":"","code":["( r.mut.data.get() as float[] ) == [2.2639139286289724, -0.2763464310754003, 0.3719153742868813, -0.9768504740489802, 0.5154099159307729, 1.1608137295804097, 2.1905023977046336, -0.5449569795660217] as float[]"]}, + + {"kind":"where","text":"","code":{"type":["Double","Float"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The \"random\" function/operation populates tensors randomly. [1]", + "result":"PASS", + "duration":"0.052 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["var t = Tensor.of(type).withShape(2,4).all(-42)"]}, + + {"kind":"and","text":"","code":["var f = Function.of('random(I[0])')"]}, + + {"kind":"expect","text":"","code":["t.itemType == type"]}, + + {"kind":"when","text":"","code":["var r = f(t)"]}, + + {"kind":"then","text":"","code":["r === t"]}, + + {"kind":"and","text":"","code":["( r.mut.data.get() as float[] ) == [1.0588075, 1.4017555, 1.2537496, -1.3897222, 1.0374786, 0.743316, 1.1692946, 1.3977289] as float[]"]}, + + {"kind":"when","text":"","code":["r = f.with(Arg.Seed.of(42)).call(t)"]}, + + {"kind":"then","text":"","code":["r === t"]}, + + {"kind":"and","text":"","code":["( r.mut.data.get() as float[] ) == [2.2639139286289724, -0.2763464310754003, 0.3719153742868813, -0.9768504740489802, 0.5154099159307729, 1.1608137295804097, 2.1905023977046336, -0.5449569795660217] as float[]"]}, + + {"kind":"where","text":"","code":{"type":["Double","Float"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The values of a randomly populated tensor seems to adhere to a gaussian distribution. [0]", + "result":"PASS", + "duration":"0.055 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["var t = Tensor.of(type).withShape(20, 40, 20).all(0)"]}, + + {"kind":"and","text":"","code":["var f = Function.of('random(I[0])')"]}, + + {"kind":"when","text":"","code":["f.with(Arg.Seed.of(-73L)).call(t)","var stats = new Statistics( t.mut.data.get() as double[] )"]}, + + {"kind":"then","text":"","code":["-0.05d < stats.mean && stats.mean < 0.05d"]}, + + {"kind":"and","text":"","code":["0.875d < stats.variance && stats.variance < 1.125d"]}, + + {"kind":"where","text":"","code":{"type":["Double","Float"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The values of a randomly populated tensor seems to adhere to a gaussian distribution. 
[1]", + "result":"PASS", + "duration":"0.053 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["var t = Tensor.of(type).withShape(20, 40, 20).all(0)"]}, + + {"kind":"and","text":"","code":["var f = Function.of('random(I[0])')"]}, + + {"kind":"when","text":"","code":["f.with(Arg.Seed.of(-73L)).call(t)","var stats = new Statistics( t.mut.data.get() as double[] )"]}, + + {"kind":"then","text":"","code":["-0.05d < stats.mean && stats.mean < 0.05d"]}, + + {"kind":"and","text":"","code":["0.875d < stats.variance && stats.variance < 1.125d"]}, + + {"kind":"where","text":"","code":{"type":["Double","Float"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"New method \"asFunction\" of String added at runtime is callable by groovy and also works. [0]", + "result":"PASS", + "duration":"0.062 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create two tensors and convert them to a desired type.","code":["var a = Tensor.of([1, 2], [3d, 2d]).mut.toType(type)","var b = Tensor.of([2, 1], [-1f, 4f]).mut.toType(type)"]}, + + {"kind":"and","text":"We prepare bindings for the Groovy shell.","code":["Binding binding = new Binding()","binding.setVariable('a', a)","binding.setVariable('b', b)"]}, + + {"kind":"expect","text":"The tensors have the type...","code":["a.itemType == type","b.itemType == type"]}, + + {"kind":"when","text":"The groovy code is being evaluated.","code":["var c = new GroovyShell(binding).evaluate((code)) as Tensor"]}, + + {"kind":"then","text":"The resulting tensor (toString) will contain the expected String.","code":["c.toString().contains(expected)"]}, + + {"kind":"and","text":"","code":["c.itemType == type"]}, + + {"kind":"where","text":"","code":{"type":["Double","Double","Double","Double","Float","Float","Float","Float"],"code":["'\"I[0]xI[1]\".asFunction()([a, b])'","'\"I[0]xI[1]\"[a, b]'","'\"i0 x i1\"%[a, b]'","'\"i0\"%a'","'\"I[0]xI[1]\".asFunction()([a, b])'","'\"I[0]xI[1]\"[a, b]'","'\"i0 x i1\"%[a, b]'","'\"i0\"%a'"],"expected":["\"(2x2):[-3.0, -2.0, 12.0, 8.0]\"","\"(2x2):[-3.0, -2.0, 12.0, 8.0]\"","\"(2x2):[-3.0, -2.0, 12.0, 8.0]\"","\"(1x2):[3.0, 2.0]\"","\"(2x2):[-3.0, -2.0, 12.0, 8.0]\"","\"(2x2):[-3.0, -2.0, 12.0, 8.0]\"","\"(2x2):[-3.0, -2.0, 12.0, 8.0]\"","\"(1x2):[3.0, 2.0]\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"New method \"asFunction\" of String added at runtime is callable by groovy and also works. 
[1]", + "result":"PASS", + "duration":"0.060 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create two tensors and convert them to a desired type.","code":["var a = Tensor.of([1, 2], [3d, 2d]).mut.toType(type)","var b = Tensor.of([2, 1], [-1f, 4f]).mut.toType(type)"]}, + + {"kind":"and","text":"We prepare bindings for the Groovy shell.","code":["Binding binding = new Binding()","binding.setVariable('a', a)","binding.setVariable('b', b)"]}, + + {"kind":"expect","text":"The tensors have the type...","code":["a.itemType == type","b.itemType == type"]}, + + {"kind":"when","text":"The groovy code is being evaluated.","code":["var c = new GroovyShell(binding).evaluate((code)) as Tensor"]}, + + {"kind":"then","text":"The resulting tensor (toString) will contain the expected String.","code":["c.toString().contains(expected)"]}, + + {"kind":"and","text":"","code":["c.itemType == type"]}, + + {"kind":"where","text":"","code":{"type":["Double","Double","Double","Double","Float","Float","Float","Float"],"code":["'\"I[0]xI[1]\".asFunction()([a, b])'","'\"I[0]xI[1]\"[a, b]'","'\"i0 x i1\"%[a, b]'","'\"i0\"%a'","'\"I[0]xI[1]\".asFunction()([a, b])'","'\"I[0]xI[1]\"[a, b]'","'\"i0 x i1\"%[a, b]'","'\"i0\"%a'"],"expected":["\"(2x2):[-3.0, -2.0, 12.0, 8.0]\"","\"(2x2):[-3.0, -2.0, 12.0, 8.0]\"","\"(2x2):[-3.0, -2.0, 12.0, 8.0]\"","\"(1x2):[3.0, 2.0]\"","\"(2x2):[-3.0, -2.0, 12.0, 8.0]\"","\"(2x2):[-3.0, -2.0, 12.0, 8.0]\"","\"(2x2):[-3.0, -2.0, 12.0, 8.0]\"","\"(1x2):[3.0, 2.0]\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"New method \"asFunction\" of String added at runtime is callable by groovy and also works. [2]", + "result":"PASS", + "duration":"0.057 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create two tensors and convert them to a desired type.","code":["var a = Tensor.of([1, 2], [3d, 2d]).mut.toType(type)","var b = Tensor.of([2, 1], [-1f, 4f]).mut.toType(type)"]}, + + {"kind":"and","text":"We prepare bindings for the Groovy shell.","code":["Binding binding = new Binding()","binding.setVariable('a', a)","binding.setVariable('b', b)"]}, + + {"kind":"expect","text":"The tensors have the type...","code":["a.itemType == type","b.itemType == type"]}, + + {"kind":"when","text":"The groovy code is being evaluated.","code":["var c = new GroovyShell(binding).evaluate((code)) as Tensor"]}, + + {"kind":"then","text":"The resulting tensor (toString) will contain the expected String.","code":["c.toString().contains(expected)"]}, + + {"kind":"and","text":"","code":["c.itemType == type"]}, + + {"kind":"where","text":"","code":{"type":["Double","Double","Double","Double","Float","Float","Float","Float"],"code":["'\"I[0]xI[1]\".asFunction()([a, b])'","'\"I[0]xI[1]\"[a, b]'","'\"i0 x i1\"%[a, b]'","'\"i0\"%a'","'\"I[0]xI[1]\".asFunction()([a, b])'","'\"I[0]xI[1]\"[a, b]'","'\"i0 x i1\"%[a, b]'","'\"i0\"%a'"],"expected":["\"(2x2):[-3.0, -2.0, 12.0, 8.0]\"","\"(2x2):[-3.0, -2.0, 12.0, 8.0]\"","\"(2x2):[-3.0, -2.0, 12.0, 8.0]\"","\"(1x2):[3.0, 2.0]\"","\"(2x2):[-3.0, -2.0, 12.0, 8.0]\"","\"(2x2):[-3.0, -2.0, 12.0, 8.0]\"","\"(2x2):[-3.0, -2.0, 12.0, 8.0]\"","\"(1x2):[3.0, 2.0]\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"New method \"asFunction\" of String added at runtime is callable by groovy and also works. 
[3]", + "result":"PASS", + "duration":"0.057 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create two tensors and convert them to a desired type.","code":["var a = Tensor.of([1, 2], [3d, 2d]).mut.toType(type)","var b = Tensor.of([2, 1], [-1f, 4f]).mut.toType(type)"]}, + + {"kind":"and","text":"We prepare bindings for the Groovy shell.","code":["Binding binding = new Binding()","binding.setVariable('a', a)","binding.setVariable('b', b)"]}, + + {"kind":"expect","text":"The tensors have the type...","code":["a.itemType == type","b.itemType == type"]}, + + {"kind":"when","text":"The groovy code is being evaluated.","code":["var c = new GroovyShell(binding).evaluate((code)) as Tensor"]}, + + {"kind":"then","text":"The resulting tensor (toString) will contain the expected String.","code":["c.toString().contains(expected)"]}, + + {"kind":"and","text":"","code":["c.itemType == type"]}, + + {"kind":"where","text":"","code":{"type":["Double","Double","Double","Double","Float","Float","Float","Float"],"code":["'\"I[0]xI[1]\".asFunction()([a, b])'","'\"I[0]xI[1]\"[a, b]'","'\"i0 x i1\"%[a, b]'","'\"i0\"%a'","'\"I[0]xI[1]\".asFunction()([a, b])'","'\"I[0]xI[1]\"[a, b]'","'\"i0 x i1\"%[a, b]'","'\"i0\"%a'"],"expected":["\"(2x2):[-3.0, -2.0, 12.0, 8.0]\"","\"(2x2):[-3.0, -2.0, 12.0, 8.0]\"","\"(2x2):[-3.0, -2.0, 12.0, 8.0]\"","\"(1x2):[3.0, 2.0]\"","\"(2x2):[-3.0, -2.0, 12.0, 8.0]\"","\"(2x2):[-3.0, -2.0, 12.0, 8.0]\"","\"(2x2):[-3.0, -2.0, 12.0, 8.0]\"","\"(1x2):[3.0, 2.0]\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"New method \"asFunction\" of String added at runtime is callable by groovy and also works. [4]", + "result":"PASS", + "duration":"0.056 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create two tensors and convert them to a desired type.","code":["var a = Tensor.of([1, 2], [3d, 2d]).mut.toType(type)","var b = Tensor.of([2, 1], [-1f, 4f]).mut.toType(type)"]}, + + {"kind":"and","text":"We prepare bindings for the Groovy shell.","code":["Binding binding = new Binding()","binding.setVariable('a', a)","binding.setVariable('b', b)"]}, + + {"kind":"expect","text":"The tensors have the type...","code":["a.itemType == type","b.itemType == type"]}, + + {"kind":"when","text":"The groovy code is being evaluated.","code":["var c = new GroovyShell(binding).evaluate((code)) as Tensor"]}, + + {"kind":"then","text":"The resulting tensor (toString) will contain the expected String.","code":["c.toString().contains(expected)"]}, + + {"kind":"and","text":"","code":["c.itemType == type"]}, + + {"kind":"where","text":"","code":{"type":["Double","Double","Double","Double","Float","Float","Float","Float"],"code":["'\"I[0]xI[1]\".asFunction()([a, b])'","'\"I[0]xI[1]\"[a, b]'","'\"i0 x i1\"%[a, b]'","'\"i0\"%a'","'\"I[0]xI[1]\".asFunction()([a, b])'","'\"I[0]xI[1]\"[a, b]'","'\"i0 x i1\"%[a, b]'","'\"i0\"%a'"],"expected":["\"(2x2):[-3.0, -2.0, 12.0, 8.0]\"","\"(2x2):[-3.0, -2.0, 12.0, 8.0]\"","\"(2x2):[-3.0, -2.0, 12.0, 8.0]\"","\"(1x2):[3.0, 2.0]\"","\"(2x2):[-3.0, -2.0, 12.0, 8.0]\"","\"(2x2):[-3.0, -2.0, 12.0, 8.0]\"","\"(2x2):[-3.0, -2.0, 12.0, 8.0]\"","\"(1x2):[3.0, 2.0]\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"New method \"asFunction\" of String added at runtime is callable by groovy and also works. 
[5]", + "result":"PASS", + "duration":"0.058 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create two tensors and convert them to a desired type.","code":["var a = Tensor.of([1, 2], [3d, 2d]).mut.toType(type)","var b = Tensor.of([2, 1], [-1f, 4f]).mut.toType(type)"]}, + + {"kind":"and","text":"We prepare bindings for the Groovy shell.","code":["Binding binding = new Binding()","binding.setVariable('a', a)","binding.setVariable('b', b)"]}, + + {"kind":"expect","text":"The tensors have the type...","code":["a.itemType == type","b.itemType == type"]}, + + {"kind":"when","text":"The groovy code is being evaluated.","code":["var c = new GroovyShell(binding).evaluate((code)) as Tensor"]}, + + {"kind":"then","text":"The resulting tensor (toString) will contain the expected String.","code":["c.toString().contains(expected)"]}, + + {"kind":"and","text":"","code":["c.itemType == type"]}, + + {"kind":"where","text":"","code":{"type":["Double","Double","Double","Double","Float","Float","Float","Float"],"code":["'\"I[0]xI[1]\".asFunction()([a, b])'","'\"I[0]xI[1]\"[a, b]'","'\"i0 x i1\"%[a, b]'","'\"i0\"%a'","'\"I[0]xI[1]\".asFunction()([a, b])'","'\"I[0]xI[1]\"[a, b]'","'\"i0 x i1\"%[a, b]'","'\"i0\"%a'"],"expected":["\"(2x2):[-3.0, -2.0, 12.0, 8.0]\"","\"(2x2):[-3.0, -2.0, 12.0, 8.0]\"","\"(2x2):[-3.0, -2.0, 12.0, 8.0]\"","\"(1x2):[3.0, 2.0]\"","\"(2x2):[-3.0, -2.0, 12.0, 8.0]\"","\"(2x2):[-3.0, -2.0, 12.0, 8.0]\"","\"(2x2):[-3.0, -2.0, 12.0, 8.0]\"","\"(1x2):[3.0, 2.0]\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"New method \"asFunction\" of String added at runtime is callable by groovy and also works. [6]", + "result":"PASS", + "duration":"0.054 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create two tensors and convert them to a desired type.","code":["var a = Tensor.of([1, 2], [3d, 2d]).mut.toType(type)","var b = Tensor.of([2, 1], [-1f, 4f]).mut.toType(type)"]}, + + {"kind":"and","text":"We prepare bindings for the Groovy shell.","code":["Binding binding = new Binding()","binding.setVariable('a', a)","binding.setVariable('b', b)"]}, + + {"kind":"expect","text":"The tensors have the type...","code":["a.itemType == type","b.itemType == type"]}, + + {"kind":"when","text":"The groovy code is being evaluated.","code":["var c = new GroovyShell(binding).evaluate((code)) as Tensor"]}, + + {"kind":"then","text":"The resulting tensor (toString) will contain the expected String.","code":["c.toString().contains(expected)"]}, + + {"kind":"and","text":"","code":["c.itemType == type"]}, + + {"kind":"where","text":"","code":{"type":["Double","Double","Double","Double","Float","Float","Float","Float"],"code":["'\"I[0]xI[1]\".asFunction()([a, b])'","'\"I[0]xI[1]\"[a, b]'","'\"i0 x i1\"%[a, b]'","'\"i0\"%a'","'\"I[0]xI[1]\".asFunction()([a, b])'","'\"I[0]xI[1]\"[a, b]'","'\"i0 x i1\"%[a, b]'","'\"i0\"%a'"],"expected":["\"(2x2):[-3.0, -2.0, 12.0, 8.0]\"","\"(2x2):[-3.0, -2.0, 12.0, 8.0]\"","\"(2x2):[-3.0, -2.0, 12.0, 8.0]\"","\"(1x2):[3.0, 2.0]\"","\"(2x2):[-3.0, -2.0, 12.0, 8.0]\"","\"(2x2):[-3.0, -2.0, 12.0, 8.0]\"","\"(2x2):[-3.0, -2.0, 12.0, 8.0]\"","\"(1x2):[3.0, 2.0]\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"New method \"asFunction\" of String added at runtime is callable by groovy and also works. 
[7]", + "result":"PASS", + "duration":"0.055 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create two tensors and convert them to a desired type.","code":["var a = Tensor.of([1, 2], [3d, 2d]).mut.toType(type)","var b = Tensor.of([2, 1], [-1f, 4f]).mut.toType(type)"]}, + + {"kind":"and","text":"We prepare bindings for the Groovy shell.","code":["Binding binding = new Binding()","binding.setVariable('a', a)","binding.setVariable('b', b)"]}, + + {"kind":"expect","text":"The tensors have the type...","code":["a.itemType == type","b.itemType == type"]}, + + {"kind":"when","text":"The groovy code is being evaluated.","code":["var c = new GroovyShell(binding).evaluate((code)) as Tensor"]}, + + {"kind":"then","text":"The resulting tensor (toString) will contain the expected String.","code":["c.toString().contains(expected)"]}, + + {"kind":"and","text":"","code":["c.itemType == type"]}, + + {"kind":"where","text":"","code":{"type":["Double","Double","Double","Double","Float","Float","Float","Float"],"code":["'\"I[0]xI[1]\".asFunction()([a, b])'","'\"I[0]xI[1]\"[a, b]'","'\"i0 x i1\"%[a, b]'","'\"i0\"%a'","'\"I[0]xI[1]\".asFunction()([a, b])'","'\"I[0]xI[1]\"[a, b]'","'\"i0 x i1\"%[a, b]'","'\"i0\"%a'"],"expected":["\"(2x2):[-3.0, -2.0, 12.0, 8.0]\"","\"(2x2):[-3.0, -2.0, 12.0, 8.0]\"","\"(2x2):[-3.0, -2.0, 12.0, 8.0]\"","\"(1x2):[3.0, 2.0]\"","\"(2x2):[-3.0, -2.0, 12.0, 8.0]\"","\"(2x2):[-3.0, -2.0, 12.0, 8.0]\"","\"(2x2):[-3.0, -2.0, 12.0, 8.0]\"","\"(1x2):[3.0, 2.0]\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"New operator methods added to \"SDK-types\" at runtime are callable by groovy and also work. [0]", + "result":"PASS", + "duration":"0.059 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Neureka.get().settings().view().ndArrays({it.hasSlimNumbers=true})","Tensor a = Tensor.of(5d).mut.toType(type)","Tensor b = Tensor.of(3f).mut.toType(type)","Binding binding = new Binding()","binding.setVariable('a', a)","binding.setVariable('b', b)"]}, + + {"kind":"when","text":"...calling methods on types like Double and Integer that receive `Tensor` instances...","code":["Tensor c = new GroovyShell(binding).evaluate((code)) as Tensor"]}, + + {"kind":"then","text":"The resulting tensor (toString) will contain the expected String.","code":["c.toString().endsWith(\"[$expected]\")"]}, + + {"kind":"where","text":"","code":{"type":["Double","Double","Double","Double","Double","Double","Double","Double","Double","Double","Float","Float","Float","Float","Float","Float","Float","Float","Float","Float"],"code":["'(2+a)'","'(2*b)'","'(6/b)'","'(2**b)'","'(4-a)'","'(2.0+a)'","'(2.0*b)'","'(6.0/b)'","'(2.0**b)'","'(4.0-a)'","'(2+a)'","'(2*b)'","'(6/b)'","'(2**b)'","'(4-a)'","'(2.0+a)'","'(2.0*b)'","'(6.0/b)'","'(2.0**b)'","'(4.0-a)'"],"expected":["\"7\"","\"6\"","\"2\"","\"8\"","\"-1\"","\"7\"","\"6\"","\"2\"","\"8\"","\"-1\"","\"7\"","\"6\"","\"2\"","\"8\"","\"-1\"","\"7\"","\"6\"","\"2\"","\"8\"","\"-1\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"New operator methods added to \"SDK-types\" at runtime are callable by groovy and also work. 
[1]", + "result":"PASS", + "duration":"0.055 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Neureka.get().settings().view().ndArrays({it.hasSlimNumbers=true})","Tensor a = Tensor.of(5d).mut.toType(type)","Tensor b = Tensor.of(3f).mut.toType(type)","Binding binding = new Binding()","binding.setVariable('a', a)","binding.setVariable('b', b)"]}, + + {"kind":"when","text":"...calling methods on types like Double and Integer that receive `Tensor` instances...","code":["Tensor c = new GroovyShell(binding).evaluate((code)) as Tensor"]}, + + {"kind":"then","text":"The resulting tensor (toString) will contain the expected String.","code":["c.toString().endsWith(\"[$expected]\")"]}, + + {"kind":"where","text":"","code":{"type":["Double","Double","Double","Double","Double","Double","Double","Double","Double","Double","Float","Float","Float","Float","Float","Float","Float","Float","Float","Float"],"code":["'(2+a)'","'(2*b)'","'(6/b)'","'(2**b)'","'(4-a)'","'(2.0+a)'","'(2.0*b)'","'(6.0/b)'","'(2.0**b)'","'(4.0-a)'","'(2+a)'","'(2*b)'","'(6/b)'","'(2**b)'","'(4-a)'","'(2.0+a)'","'(2.0*b)'","'(6.0/b)'","'(2.0**b)'","'(4.0-a)'"],"expected":["\"7\"","\"6\"","\"2\"","\"8\"","\"-1\"","\"7\"","\"6\"","\"2\"","\"8\"","\"-1\"","\"7\"","\"6\"","\"2\"","\"8\"","\"-1\"","\"7\"","\"6\"","\"2\"","\"8\"","\"-1\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"New operator methods added to \"SDK-types\" at runtime are callable by groovy and also work. [2]", + "result":"PASS", + "duration":"0.057 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Neureka.get().settings().view().ndArrays({it.hasSlimNumbers=true})","Tensor a = Tensor.of(5d).mut.toType(type)","Tensor b = Tensor.of(3f).mut.toType(type)","Binding binding = new Binding()","binding.setVariable('a', a)","binding.setVariable('b', b)"]}, + + {"kind":"when","text":"...calling methods on types like Double and Integer that receive `Tensor` instances...","code":["Tensor c = new GroovyShell(binding).evaluate((code)) as Tensor"]}, + + {"kind":"then","text":"The resulting tensor (toString) will contain the expected String.","code":["c.toString().endsWith(\"[$expected]\")"]}, + + {"kind":"where","text":"","code":{"type":["Double","Double","Double","Double","Double","Double","Double","Double","Double","Double","Float","Float","Float","Float","Float","Float","Float","Float","Float","Float"],"code":["'(2+a)'","'(2*b)'","'(6/b)'","'(2**b)'","'(4-a)'","'(2.0+a)'","'(2.0*b)'","'(6.0/b)'","'(2.0**b)'","'(4.0-a)'","'(2+a)'","'(2*b)'","'(6/b)'","'(2**b)'","'(4-a)'","'(2.0+a)'","'(2.0*b)'","'(6.0/b)'","'(2.0**b)'","'(4.0-a)'"],"expected":["\"7\"","\"6\"","\"2\"","\"8\"","\"-1\"","\"7\"","\"6\"","\"2\"","\"8\"","\"-1\"","\"7\"","\"6\"","\"2\"","\"8\"","\"-1\"","\"7\"","\"6\"","\"2\"","\"8\"","\"-1\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"New operator methods added to \"SDK-types\" at runtime are callable by groovy and also work. 
[3]", + "result":"PASS", + "duration":"0.057 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Neureka.get().settings().view().ndArrays({it.hasSlimNumbers=true})","Tensor a = Tensor.of(5d).mut.toType(type)","Tensor b = Tensor.of(3f).mut.toType(type)","Binding binding = new Binding()","binding.setVariable('a', a)","binding.setVariable('b', b)"]}, + + {"kind":"when","text":"...calling methods on types like Double and Integer that receive `Tensor` instances...","code":["Tensor c = new GroovyShell(binding).evaluate((code)) as Tensor"]}, + + {"kind":"then","text":"The resulting tensor (toString) will contain the expected String.","code":["c.toString().endsWith(\"[$expected]\")"]}, + + {"kind":"where","text":"","code":{"type":["Double","Double","Double","Double","Double","Double","Double","Double","Double","Double","Float","Float","Float","Float","Float","Float","Float","Float","Float","Float"],"code":["'(2+a)'","'(2*b)'","'(6/b)'","'(2**b)'","'(4-a)'","'(2.0+a)'","'(2.0*b)'","'(6.0/b)'","'(2.0**b)'","'(4.0-a)'","'(2+a)'","'(2*b)'","'(6/b)'","'(2**b)'","'(4-a)'","'(2.0+a)'","'(2.0*b)'","'(6.0/b)'","'(2.0**b)'","'(4.0-a)'"],"expected":["\"7\"","\"6\"","\"2\"","\"8\"","\"-1\"","\"7\"","\"6\"","\"2\"","\"8\"","\"-1\"","\"7\"","\"6\"","\"2\"","\"8\"","\"-1\"","\"7\"","\"6\"","\"2\"","\"8\"","\"-1\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"New operator methods added to \"SDK-types\" at runtime are callable by groovy and also work. [4]", + "result":"PASS", + "duration":"0.054 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Neureka.get().settings().view().ndArrays({it.hasSlimNumbers=true})","Tensor a = Tensor.of(5d).mut.toType(type)","Tensor b = Tensor.of(3f).mut.toType(type)","Binding binding = new Binding()","binding.setVariable('a', a)","binding.setVariable('b', b)"]}, + + {"kind":"when","text":"...calling methods on types like Double and Integer that receive `Tensor` instances...","code":["Tensor c = new GroovyShell(binding).evaluate((code)) as Tensor"]}, + + {"kind":"then","text":"The resulting tensor (toString) will contain the expected String.","code":["c.toString().endsWith(\"[$expected]\")"]}, + + {"kind":"where","text":"","code":{"type":["Double","Double","Double","Double","Double","Double","Double","Double","Double","Double","Float","Float","Float","Float","Float","Float","Float","Float","Float","Float"],"code":["'(2+a)'","'(2*b)'","'(6/b)'","'(2**b)'","'(4-a)'","'(2.0+a)'","'(2.0*b)'","'(6.0/b)'","'(2.0**b)'","'(4.0-a)'","'(2+a)'","'(2*b)'","'(6/b)'","'(2**b)'","'(4-a)'","'(2.0+a)'","'(2.0*b)'","'(6.0/b)'","'(2.0**b)'","'(4.0-a)'"],"expected":["\"7\"","\"6\"","\"2\"","\"8\"","\"-1\"","\"7\"","\"6\"","\"2\"","\"8\"","\"-1\"","\"7\"","\"6\"","\"2\"","\"8\"","\"-1\"","\"7\"","\"6\"","\"2\"","\"8\"","\"-1\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"New operator methods added to \"SDK-types\" at runtime are callable by groovy and also work. 
[5]", + "result":"PASS", + "duration":"0.054 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Neureka.get().settings().view().ndArrays({it.hasSlimNumbers=true})","Tensor a = Tensor.of(5d).mut.toType(type)","Tensor b = Tensor.of(3f).mut.toType(type)","Binding binding = new Binding()","binding.setVariable('a', a)","binding.setVariable('b', b)"]}, + + {"kind":"when","text":"...calling methods on types like Double and Integer that receive `Tensor` instances...","code":["Tensor c = new GroovyShell(binding).evaluate((code)) as Tensor"]}, + + {"kind":"then","text":"The resulting tensor (toString) will contain the expected String.","code":["c.toString().endsWith(\"[$expected]\")"]}, + + {"kind":"where","text":"","code":{"type":["Double","Double","Double","Double","Double","Double","Double","Double","Double","Double","Float","Float","Float","Float","Float","Float","Float","Float","Float","Float"],"code":["'(2+a)'","'(2*b)'","'(6/b)'","'(2**b)'","'(4-a)'","'(2.0+a)'","'(2.0*b)'","'(6.0/b)'","'(2.0**b)'","'(4.0-a)'","'(2+a)'","'(2*b)'","'(6/b)'","'(2**b)'","'(4-a)'","'(2.0+a)'","'(2.0*b)'","'(6.0/b)'","'(2.0**b)'","'(4.0-a)'"],"expected":["\"7\"","\"6\"","\"2\"","\"8\"","\"-1\"","\"7\"","\"6\"","\"2\"","\"8\"","\"-1\"","\"7\"","\"6\"","\"2\"","\"8\"","\"-1\"","\"7\"","\"6\"","\"2\"","\"8\"","\"-1\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"New operator methods added to \"SDK-types\" at runtime are callable by groovy and also work. [6]", + "result":"PASS", + "duration":"0.096 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Neureka.get().settings().view().ndArrays({it.hasSlimNumbers=true})","Tensor a = Tensor.of(5d).mut.toType(type)","Tensor b = Tensor.of(3f).mut.toType(type)","Binding binding = new Binding()","binding.setVariable('a', a)","binding.setVariable('b', b)"]}, + + {"kind":"when","text":"...calling methods on types like Double and Integer that receive `Tensor` instances...","code":["Tensor c = new GroovyShell(binding).evaluate((code)) as Tensor"]}, + + {"kind":"then","text":"The resulting tensor (toString) will contain the expected String.","code":["c.toString().endsWith(\"[$expected]\")"]}, + + {"kind":"where","text":"","code":{"type":["Double","Double","Double","Double","Double","Double","Double","Double","Double","Double","Float","Float","Float","Float","Float","Float","Float","Float","Float","Float"],"code":["'(2+a)'","'(2*b)'","'(6/b)'","'(2**b)'","'(4-a)'","'(2.0+a)'","'(2.0*b)'","'(6.0/b)'","'(2.0**b)'","'(4.0-a)'","'(2+a)'","'(2*b)'","'(6/b)'","'(2**b)'","'(4-a)'","'(2.0+a)'","'(2.0*b)'","'(6.0/b)'","'(2.0**b)'","'(4.0-a)'"],"expected":["\"7\"","\"6\"","\"2\"","\"8\"","\"-1\"","\"7\"","\"6\"","\"2\"","\"8\"","\"-1\"","\"7\"","\"6\"","\"2\"","\"8\"","\"-1\"","\"7\"","\"6\"","\"2\"","\"8\"","\"-1\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"New operator methods added to \"SDK-types\" at runtime are callable by groovy and also work. 
[7]", + "result":"PASS", + "duration":"0.056 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Neureka.get().settings().view().ndArrays({it.hasSlimNumbers=true})","Tensor a = Tensor.of(5d).mut.toType(type)","Tensor b = Tensor.of(3f).mut.toType(type)","Binding binding = new Binding()","binding.setVariable('a', a)","binding.setVariable('b', b)"]}, + + {"kind":"when","text":"...calling methods on types like Double and Integer that receive `Tensor` instances...","code":["Tensor c = new GroovyShell(binding).evaluate((code)) as Tensor"]}, + + {"kind":"then","text":"The resulting tensor (toString) will contain the expected String.","code":["c.toString().endsWith(\"[$expected]\")"]}, + + {"kind":"where","text":"","code":{"type":["Double","Double","Double","Double","Double","Double","Double","Double","Double","Double","Float","Float","Float","Float","Float","Float","Float","Float","Float","Float"],"code":["'(2+a)'","'(2*b)'","'(6/b)'","'(2**b)'","'(4-a)'","'(2.0+a)'","'(2.0*b)'","'(6.0/b)'","'(2.0**b)'","'(4.0-a)'","'(2+a)'","'(2*b)'","'(6/b)'","'(2**b)'","'(4-a)'","'(2.0+a)'","'(2.0*b)'","'(6.0/b)'","'(2.0**b)'","'(4.0-a)'"],"expected":["\"7\"","\"6\"","\"2\"","\"8\"","\"-1\"","\"7\"","\"6\"","\"2\"","\"8\"","\"-1\"","\"7\"","\"6\"","\"2\"","\"8\"","\"-1\"","\"7\"","\"6\"","\"2\"","\"8\"","\"-1\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"New operator methods added to \"SDK-types\" at runtime are callable by groovy and also work. [8]", + "result":"PASS", + "duration":"0.058 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Neureka.get().settings().view().ndArrays({it.hasSlimNumbers=true})","Tensor a = Tensor.of(5d).mut.toType(type)","Tensor b = Tensor.of(3f).mut.toType(type)","Binding binding = new Binding()","binding.setVariable('a', a)","binding.setVariable('b', b)"]}, + + {"kind":"when","text":"...calling methods on types like Double and Integer that receive `Tensor` instances...","code":["Tensor c = new GroovyShell(binding).evaluate((code)) as Tensor"]}, + + {"kind":"then","text":"The resulting tensor (toString) will contain the expected String.","code":["c.toString().endsWith(\"[$expected]\")"]}, + + {"kind":"where","text":"","code":{"type":["Double","Double","Double","Double","Double","Double","Double","Double","Double","Double","Float","Float","Float","Float","Float","Float","Float","Float","Float","Float"],"code":["'(2+a)'","'(2*b)'","'(6/b)'","'(2**b)'","'(4-a)'","'(2.0+a)'","'(2.0*b)'","'(6.0/b)'","'(2.0**b)'","'(4.0-a)'","'(2+a)'","'(2*b)'","'(6/b)'","'(2**b)'","'(4-a)'","'(2.0+a)'","'(2.0*b)'","'(6.0/b)'","'(2.0**b)'","'(4.0-a)'"],"expected":["\"7\"","\"6\"","\"2\"","\"8\"","\"-1\"","\"7\"","\"6\"","\"2\"","\"8\"","\"-1\"","\"7\"","\"6\"","\"2\"","\"8\"","\"-1\"","\"7\"","\"6\"","\"2\"","\"8\"","\"-1\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"New operator methods added to \"SDK-types\" at runtime are callable by groovy and also work. 
[9]", + "result":"PASS", + "duration":"0.059 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Neureka.get().settings().view().ndArrays({it.hasSlimNumbers=true})","Tensor a = Tensor.of(5d).mut.toType(type)","Tensor b = Tensor.of(3f).mut.toType(type)","Binding binding = new Binding()","binding.setVariable('a', a)","binding.setVariable('b', b)"]}, + + {"kind":"when","text":"...calling methods on types like Double and Integer that receive `Tensor` instances...","code":["Tensor c = new GroovyShell(binding).evaluate((code)) as Tensor"]}, + + {"kind":"then","text":"The resulting tensor (toString) will contain the expected String.","code":["c.toString().endsWith(\"[$expected]\")"]}, + + {"kind":"where","text":"","code":{"type":["Double","Double","Double","Double","Double","Double","Double","Double","Double","Double","Float","Float","Float","Float","Float","Float","Float","Float","Float","Float"],"code":["'(2+a)'","'(2*b)'","'(6/b)'","'(2**b)'","'(4-a)'","'(2.0+a)'","'(2.0*b)'","'(6.0/b)'","'(2.0**b)'","'(4.0-a)'","'(2+a)'","'(2*b)'","'(6/b)'","'(2**b)'","'(4-a)'","'(2.0+a)'","'(2.0*b)'","'(6.0/b)'","'(2.0**b)'","'(4.0-a)'"],"expected":["\"7\"","\"6\"","\"2\"","\"8\"","\"-1\"","\"7\"","\"6\"","\"2\"","\"8\"","\"-1\"","\"7\"","\"6\"","\"2\"","\"8\"","\"-1\"","\"7\"","\"6\"","\"2\"","\"8\"","\"-1\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"New operator methods added to \"SDK-types\" at runtime are callable by groovy and also work. [10]", + "result":"PASS", + "duration":"0.062 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Neureka.get().settings().view().ndArrays({it.hasSlimNumbers=true})","Tensor a = Tensor.of(5d).mut.toType(type)","Tensor b = Tensor.of(3f).mut.toType(type)","Binding binding = new Binding()","binding.setVariable('a', a)","binding.setVariable('b', b)"]}, + + {"kind":"when","text":"...calling methods on types like Double and Integer that receive `Tensor` instances...","code":["Tensor c = new GroovyShell(binding).evaluate((code)) as Tensor"]}, + + {"kind":"then","text":"The resulting tensor (toString) will contain the expected String.","code":["c.toString().endsWith(\"[$expected]\")"]}, + + {"kind":"where","text":"","code":{"type":["Double","Double","Double","Double","Double","Double","Double","Double","Double","Double","Float","Float","Float","Float","Float","Float","Float","Float","Float","Float"],"code":["'(2+a)'","'(2*b)'","'(6/b)'","'(2**b)'","'(4-a)'","'(2.0+a)'","'(2.0*b)'","'(6.0/b)'","'(2.0**b)'","'(4.0-a)'","'(2+a)'","'(2*b)'","'(6/b)'","'(2**b)'","'(4-a)'","'(2.0+a)'","'(2.0*b)'","'(6.0/b)'","'(2.0**b)'","'(4.0-a)'"],"expected":["\"7\"","\"6\"","\"2\"","\"8\"","\"-1\"","\"7\"","\"6\"","\"2\"","\"8\"","\"-1\"","\"7\"","\"6\"","\"2\"","\"8\"","\"-1\"","\"7\"","\"6\"","\"2\"","\"8\"","\"-1\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"New operator methods added to \"SDK-types\" at runtime are callable by groovy and also work. 
[11]", + "result":"PASS", + "duration":"0.061 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Neureka.get().settings().view().ndArrays({it.hasSlimNumbers=true})","Tensor a = Tensor.of(5d).mut.toType(type)","Tensor b = Tensor.of(3f).mut.toType(type)","Binding binding = new Binding()","binding.setVariable('a', a)","binding.setVariable('b', b)"]}, + + {"kind":"when","text":"...calling methods on types like Double and Integer that receive `Tensor` instances...","code":["Tensor c = new GroovyShell(binding).evaluate((code)) as Tensor"]}, + + {"kind":"then","text":"The resulting tensor (toString) will contain the expected String.","code":["c.toString().endsWith(\"[$expected]\")"]}, + + {"kind":"where","text":"","code":{"type":["Double","Double","Double","Double","Double","Double","Double","Double","Double","Double","Float","Float","Float","Float","Float","Float","Float","Float","Float","Float"],"code":["'(2+a)'","'(2*b)'","'(6/b)'","'(2**b)'","'(4-a)'","'(2.0+a)'","'(2.0*b)'","'(6.0/b)'","'(2.0**b)'","'(4.0-a)'","'(2+a)'","'(2*b)'","'(6/b)'","'(2**b)'","'(4-a)'","'(2.0+a)'","'(2.0*b)'","'(6.0/b)'","'(2.0**b)'","'(4.0-a)'"],"expected":["\"7\"","\"6\"","\"2\"","\"8\"","\"-1\"","\"7\"","\"6\"","\"2\"","\"8\"","\"-1\"","\"7\"","\"6\"","\"2\"","\"8\"","\"-1\"","\"7\"","\"6\"","\"2\"","\"8\"","\"-1\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"New operator methods added to \"SDK-types\" at runtime are callable by groovy and also work. [12]", + "result":"PASS", + "duration":"0.053 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Neureka.get().settings().view().ndArrays({it.hasSlimNumbers=true})","Tensor a = Tensor.of(5d).mut.toType(type)","Tensor b = Tensor.of(3f).mut.toType(type)","Binding binding = new Binding()","binding.setVariable('a', a)","binding.setVariable('b', b)"]}, + + {"kind":"when","text":"...calling methods on types like Double and Integer that receive `Tensor` instances...","code":["Tensor c = new GroovyShell(binding).evaluate((code)) as Tensor"]}, + + {"kind":"then","text":"The resulting tensor (toString) will contain the expected String.","code":["c.toString().endsWith(\"[$expected]\")"]}, + + {"kind":"where","text":"","code":{"type":["Double","Double","Double","Double","Double","Double","Double","Double","Double","Double","Float","Float","Float","Float","Float","Float","Float","Float","Float","Float"],"code":["'(2+a)'","'(2*b)'","'(6/b)'","'(2**b)'","'(4-a)'","'(2.0+a)'","'(2.0*b)'","'(6.0/b)'","'(2.0**b)'","'(4.0-a)'","'(2+a)'","'(2*b)'","'(6/b)'","'(2**b)'","'(4-a)'","'(2.0+a)'","'(2.0*b)'","'(6.0/b)'","'(2.0**b)'","'(4.0-a)'"],"expected":["\"7\"","\"6\"","\"2\"","\"8\"","\"-1\"","\"7\"","\"6\"","\"2\"","\"8\"","\"-1\"","\"7\"","\"6\"","\"2\"","\"8\"","\"-1\"","\"7\"","\"6\"","\"2\"","\"8\"","\"-1\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"New operator methods added to \"SDK-types\" at runtime are callable by groovy and also work. 
[13]", + "result":"PASS", + "duration":"0.085 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Neureka.get().settings().view().ndArrays({it.hasSlimNumbers=true})","Tensor a = Tensor.of(5d).mut.toType(type)","Tensor b = Tensor.of(3f).mut.toType(type)","Binding binding = new Binding()","binding.setVariable('a', a)","binding.setVariable('b', b)"]}, + + {"kind":"when","text":"...calling methods on types like Double and Integer that receive `Tensor` instances...","code":["Tensor c = new GroovyShell(binding).evaluate((code)) as Tensor"]}, + + {"kind":"then","text":"The resulting tensor (toString) will contain the expected String.","code":["c.toString().endsWith(\"[$expected]\")"]}, + + {"kind":"where","text":"","code":{"type":["Double","Double","Double","Double","Double","Double","Double","Double","Double","Double","Float","Float","Float","Float","Float","Float","Float","Float","Float","Float"],"code":["'(2+a)'","'(2*b)'","'(6/b)'","'(2**b)'","'(4-a)'","'(2.0+a)'","'(2.0*b)'","'(6.0/b)'","'(2.0**b)'","'(4.0-a)'","'(2+a)'","'(2*b)'","'(6/b)'","'(2**b)'","'(4-a)'","'(2.0+a)'","'(2.0*b)'","'(6.0/b)'","'(2.0**b)'","'(4.0-a)'"],"expected":["\"7\"","\"6\"","\"2\"","\"8\"","\"-1\"","\"7\"","\"6\"","\"2\"","\"8\"","\"-1\"","\"7\"","\"6\"","\"2\"","\"8\"","\"-1\"","\"7\"","\"6\"","\"2\"","\"8\"","\"-1\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"New operator methods added to \"SDK-types\" at runtime are callable by groovy and also work. [14]", + "result":"PASS", + "duration":"0.053 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Neureka.get().settings().view().ndArrays({it.hasSlimNumbers=true})","Tensor a = Tensor.of(5d).mut.toType(type)","Tensor b = Tensor.of(3f).mut.toType(type)","Binding binding = new Binding()","binding.setVariable('a', a)","binding.setVariable('b', b)"]}, + + {"kind":"when","text":"...calling methods on types like Double and Integer that receive `Tensor` instances...","code":["Tensor c = new GroovyShell(binding).evaluate((code)) as Tensor"]}, + + {"kind":"then","text":"The resulting tensor (toString) will contain the expected String.","code":["c.toString().endsWith(\"[$expected]\")"]}, + + {"kind":"where","text":"","code":{"type":["Double","Double","Double","Double","Double","Double","Double","Double","Double","Double","Float","Float","Float","Float","Float","Float","Float","Float","Float","Float"],"code":["'(2+a)'","'(2*b)'","'(6/b)'","'(2**b)'","'(4-a)'","'(2.0+a)'","'(2.0*b)'","'(6.0/b)'","'(2.0**b)'","'(4.0-a)'","'(2+a)'","'(2*b)'","'(6/b)'","'(2**b)'","'(4-a)'","'(2.0+a)'","'(2.0*b)'","'(6.0/b)'","'(2.0**b)'","'(4.0-a)'"],"expected":["\"7\"","\"6\"","\"2\"","\"8\"","\"-1\"","\"7\"","\"6\"","\"2\"","\"8\"","\"-1\"","\"7\"","\"6\"","\"2\"","\"8\"","\"-1\"","\"7\"","\"6\"","\"2\"","\"8\"","\"-1\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"New operator methods added to \"SDK-types\" at runtime are callable by groovy and also work. 
[15]", + "result":"PASS", + "duration":"0.056 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Neureka.get().settings().view().ndArrays({it.hasSlimNumbers=true})","Tensor a = Tensor.of(5d).mut.toType(type)","Tensor b = Tensor.of(3f).mut.toType(type)","Binding binding = new Binding()","binding.setVariable('a', a)","binding.setVariable('b', b)"]}, + + {"kind":"when","text":"...calling methods on types like Double and Integer that receive `Tensor` instances...","code":["Tensor c = new GroovyShell(binding).evaluate((code)) as Tensor"]}, + + {"kind":"then","text":"The resulting tensor (toString) will contain the expected String.","code":["c.toString().endsWith(\"[$expected]\")"]}, + + {"kind":"where","text":"","code":{"type":["Double","Double","Double","Double","Double","Double","Double","Double","Double","Double","Float","Float","Float","Float","Float","Float","Float","Float","Float","Float"],"code":["'(2+a)'","'(2*b)'","'(6/b)'","'(2**b)'","'(4-a)'","'(2.0+a)'","'(2.0*b)'","'(6.0/b)'","'(2.0**b)'","'(4.0-a)'","'(2+a)'","'(2*b)'","'(6/b)'","'(2**b)'","'(4-a)'","'(2.0+a)'","'(2.0*b)'","'(6.0/b)'","'(2.0**b)'","'(4.0-a)'"],"expected":["\"7\"","\"6\"","\"2\"","\"8\"","\"-1\"","\"7\"","\"6\"","\"2\"","\"8\"","\"-1\"","\"7\"","\"6\"","\"2\"","\"8\"","\"-1\"","\"7\"","\"6\"","\"2\"","\"8\"","\"-1\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"New operator methods added to \"SDK-types\" at runtime are callable by groovy and also work. [16]", + "result":"PASS", + "duration":"0.060 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Neureka.get().settings().view().ndArrays({it.hasSlimNumbers=true})","Tensor a = Tensor.of(5d).mut.toType(type)","Tensor b = Tensor.of(3f).mut.toType(type)","Binding binding = new Binding()","binding.setVariable('a', a)","binding.setVariable('b', b)"]}, + + {"kind":"when","text":"...calling methods on types like Double and Integer that receive `Tensor` instances...","code":["Tensor c = new GroovyShell(binding).evaluate((code)) as Tensor"]}, + + {"kind":"then","text":"The resulting tensor (toString) will contain the expected String.","code":["c.toString().endsWith(\"[$expected]\")"]}, + + {"kind":"where","text":"","code":{"type":["Double","Double","Double","Double","Double","Double","Double","Double","Double","Double","Float","Float","Float","Float","Float","Float","Float","Float","Float","Float"],"code":["'(2+a)'","'(2*b)'","'(6/b)'","'(2**b)'","'(4-a)'","'(2.0+a)'","'(2.0*b)'","'(6.0/b)'","'(2.0**b)'","'(4.0-a)'","'(2+a)'","'(2*b)'","'(6/b)'","'(2**b)'","'(4-a)'","'(2.0+a)'","'(2.0*b)'","'(6.0/b)'","'(2.0**b)'","'(4.0-a)'"],"expected":["\"7\"","\"6\"","\"2\"","\"8\"","\"-1\"","\"7\"","\"6\"","\"2\"","\"8\"","\"-1\"","\"7\"","\"6\"","\"2\"","\"8\"","\"-1\"","\"7\"","\"6\"","\"2\"","\"8\"","\"-1\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"New operator methods added to \"SDK-types\" at runtime are callable by groovy and also work. 
[17]", + "result":"PASS", + "duration":"0.061 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Neureka.get().settings().view().ndArrays({it.hasSlimNumbers=true})","Tensor a = Tensor.of(5d).mut.toType(type)","Tensor b = Tensor.of(3f).mut.toType(type)","Binding binding = new Binding()","binding.setVariable('a', a)","binding.setVariable('b', b)"]}, + + {"kind":"when","text":"...calling methods on types like Double and Integer that receive `Tensor` instances...","code":["Tensor c = new GroovyShell(binding).evaluate((code)) as Tensor"]}, + + {"kind":"then","text":"The resulting tensor (toString) will contain the expected String.","code":["c.toString().endsWith(\"[$expected]\")"]}, + + {"kind":"where","text":"","code":{"type":["Double","Double","Double","Double","Double","Double","Double","Double","Double","Double","Float","Float","Float","Float","Float","Float","Float","Float","Float","Float"],"code":["'(2+a)'","'(2*b)'","'(6/b)'","'(2**b)'","'(4-a)'","'(2.0+a)'","'(2.0*b)'","'(6.0/b)'","'(2.0**b)'","'(4.0-a)'","'(2+a)'","'(2*b)'","'(6/b)'","'(2**b)'","'(4-a)'","'(2.0+a)'","'(2.0*b)'","'(6.0/b)'","'(2.0**b)'","'(4.0-a)'"],"expected":["\"7\"","\"6\"","\"2\"","\"8\"","\"-1\"","\"7\"","\"6\"","\"2\"","\"8\"","\"-1\"","\"7\"","\"6\"","\"2\"","\"8\"","\"-1\"","\"7\"","\"6\"","\"2\"","\"8\"","\"-1\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"New operator methods added to \"SDK-types\" at runtime are callable by groovy and also work. [18]", + "result":"PASS", + "duration":"0.056 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Neureka.get().settings().view().ndArrays({it.hasSlimNumbers=true})","Tensor a = Tensor.of(5d).mut.toType(type)","Tensor b = Tensor.of(3f).mut.toType(type)","Binding binding = new Binding()","binding.setVariable('a', a)","binding.setVariable('b', b)"]}, + + {"kind":"when","text":"...calling methods on types like Double and Integer that receive `Tensor` instances...","code":["Tensor c = new GroovyShell(binding).evaluate((code)) as Tensor"]}, + + {"kind":"then","text":"The resulting tensor (toString) will contain the expected String.","code":["c.toString().endsWith(\"[$expected]\")"]}, + + {"kind":"where","text":"","code":{"type":["Double","Double","Double","Double","Double","Double","Double","Double","Double","Double","Float","Float","Float","Float","Float","Float","Float","Float","Float","Float"],"code":["'(2+a)'","'(2*b)'","'(6/b)'","'(2**b)'","'(4-a)'","'(2.0+a)'","'(2.0*b)'","'(6.0/b)'","'(2.0**b)'","'(4.0-a)'","'(2+a)'","'(2*b)'","'(6/b)'","'(2**b)'","'(4-a)'","'(2.0+a)'","'(2.0*b)'","'(6.0/b)'","'(2.0**b)'","'(4.0-a)'"],"expected":["\"7\"","\"6\"","\"2\"","\"8\"","\"-1\"","\"7\"","\"6\"","\"2\"","\"8\"","\"-1\"","\"7\"","\"6\"","\"2\"","\"8\"","\"-1\"","\"7\"","\"6\"","\"2\"","\"8\"","\"-1\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"New operator methods added to \"SDK-types\" at runtime are callable by groovy and also work. 
[19]", + "result":"PASS", + "duration":"0.057 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Neureka.get().settings().view().ndArrays({it.hasSlimNumbers=true})","Tensor a = Tensor.of(5d).mut.toType(type)","Tensor b = Tensor.of(3f).mut.toType(type)","Binding binding = new Binding()","binding.setVariable('a', a)","binding.setVariable('b', b)"]}, + + {"kind":"when","text":"...calling methods on types like Double and Integer that receive `Tensor` instances...","code":["Tensor c = new GroovyShell(binding).evaluate((code)) as Tensor"]}, + + {"kind":"then","text":"The resulting tensor (toString) will contain the expected String.","code":["c.toString().endsWith(\"[$expected]\")"]}, + + {"kind":"where","text":"","code":{"type":["Double","Double","Double","Double","Double","Double","Double","Double","Double","Double","Float","Float","Float","Float","Float","Float","Float","Float","Float","Float"],"code":["'(2+a)'","'(2*b)'","'(6/b)'","'(2**b)'","'(4-a)'","'(2.0+a)'","'(2.0*b)'","'(6.0/b)'","'(2.0**b)'","'(4.0-a)'","'(2+a)'","'(2*b)'","'(6/b)'","'(2**b)'","'(4-a)'","'(2.0+a)'","'(2.0*b)'","'(6.0/b)'","'(2.0**b)'","'(4.0-a)'"],"expected":["\"7\"","\"6\"","\"2\"","\"8\"","\"-1\"","\"7\"","\"6\"","\"2\"","\"8\"","\"-1\"","\"7\"","\"6\"","\"2\"","\"8\"","\"-1\"","\"7\"","\"6\"","\"2\"","\"8\"","\"-1\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Overloaded operation methods on tensors produce expected results when called.", + "result":"PASS", + "duration":"0.061 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(true)","Tensor a = Tensor.of(2d).setRqsGradient(true)","Tensor b = Tensor.of(-4d)","Tensor c = Tensor.of(3d).setRqsGradient(true)"]}, + + {"kind":"expect","text":"","code":["( a / a ).toString().contains(\"[1]:(1.0)\")","( c % a ).toString().contains(\"[1]:(1.0)\")","( ( ( b / b ) ** c % a ) * 3 ).toString().contains(\"[1]:(3.0)\")","( a *= b ).toString().contains(\"(-8.0)\")","( a += -c ).toString().contains(\"(-11.0)\")","( a -= c ).toString().contains(\"(-14.0)\")","( a /= Tensor.of(2d) ).toString().contains(\"(-7.0)\")","( a %= c ).toString().contains(\"(-1.0)\")"]} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Simple slice addition produces expected result. [0]", + "result":"PASS", + "duration":"0.053 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We set the experimental \"autoConvertToFloat\" flag to true.","code":["Neureka.get().backend().find(CLBackend).ifPresent({ it.settings.autoConvertToFloat=true })"]}, + + {"kind":"and","text":"","code":["Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(false)","Tensor a = Tensor.of([11, 11], 3d..19d).to( device )","Tensor x = a[1..-2,0..-1]","Tensor y = a[0..-3,0..-1]"]}, + + {"kind":"when","text":"","code":["Tensor t = x + y","String tAsStr = t.toString({it.setRowLimit(50)})"]}, + + {"kind":"then","text":"","code":["tAsStr.contains(\"(9x11):[17.0, 19.0, 21.0, 23.0, 25.0, 27.0, 12.0, 14.0, 16.0, 18.0, 20.0, 22.0, 24.0, \" +"," \"26.0, 28.0, 30.0, 32.0, 17.0, 19.0, 21.0, 23.0, 25.0, 27.0, 12.0, 14.0, 16.0, 18.0, 20.0, 22.0, 24.0, \" +"," \"26.0, 28.0, 30.0, 32.0, 17.0, 19.0, 21.0, 23.0, 25.0, 27.0, 12.0, 14.0, 16.0, 18.0, 20.0, 22.0, 24.0, 26.0, 28.0, 30.0, ... 
+ 49 more]\")"]}, + + {"kind":"where","text":"The following data is being used for tensor instantiation :","code":{"device":["CPU.get()","Device.get(\"openCL\")"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Simple slice addition produces expected result. [1]", + "result":"PASS", + "duration":"0.050 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We set the experimental \"autoConvertToFloat\" flag to true.","code":["Neureka.get().backend().find(CLBackend).ifPresent({ it.settings.autoConvertToFloat=true })"]}, + + {"kind":"and","text":"","code":["Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(false)","Tensor a = Tensor.of([11, 11], 3d..19d).to( device )","Tensor x = a[1..-2,0..-1]","Tensor y = a[0..-3,0..-1]"]}, + + {"kind":"when","text":"","code":["Tensor t = x + y","String tAsStr = t.toString({it.setRowLimit(50)})"]}, + + {"kind":"then","text":"","code":["tAsStr.contains(\"(9x11):[17.0, 19.0, 21.0, 23.0, 25.0, 27.0, 12.0, 14.0, 16.0, 18.0, 20.0, 22.0, 24.0, \" +"," \"26.0, 28.0, 30.0, 32.0, 17.0, 19.0, 21.0, 23.0, 25.0, 27.0, 12.0, 14.0, 16.0, 18.0, 20.0, 22.0, 24.0, \" +"," \"26.0, 28.0, 30.0, 32.0, 17.0, 19.0, 21.0, 23.0, 25.0, 27.0, 12.0, 14.0, 16.0, 18.0, 20.0, 22.0, 24.0, 26.0, 28.0, 30.0, ... + 49 more]\")"]}, + + {"kind":"where","text":"The following data is being used for tensor instantiation :","code":{"device":["CPU.get()","Device.get(\"openCL\")"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Auto reshaping and broadcasting works and the result can be back propagated. [0]", + "result":"PASS", + "duration":"0.057 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = true }","Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(true)"]}, + + {"kind":"and","text":"","code":["String wValue = whichGrad"," ? \"8\" + ( bShape.inject(1, {x,y->x*y}) > 1 ? \", 9\" : \"\" )"," : \"1, 2, 3, 4\""]}, + + {"kind":"and","text":"","code":["def aShape = [2, 2]"]}, + + {"kind":"and","text":"","code":["Tensor a = Tensor.of(aShape, 1d..5d).setRqsGradient(!whichGrad).to(Device.get(device))","Tensor b = Tensor.of(bShape, 8d..9d).setRqsGradient(whichGrad).to(Device.get(device))"]}, + + {"kind":"and","text":"","code":["a.mut.toType(type)","b.mut.toType(type)"]}, + + {"kind":"and","text":"","code":["String wShape = ( whichGrad ? bShape : aShape ).join(\"x\")","Tensor w = ( whichGrad ? 
b : a )"]}, + + {"kind":"expect","text":"","code":["a.itemType == type || device == 'GPU' // The gpu backend will only be floats!","b.itemType == type || device == 'GPU' // This is because kernels only work on floats..."]}, + + {"kind":"when","text":"","code":["Tensor c = operation.apply(a, b)"]}, + + {"kind":"then","text":"","code":["c.toString({it.hasSlimNumbers = true}).startsWith(\"[2x2]:($cValue)\")","w.toString({it.hasSlimNumbers = true}) == \"[$wShape]:($wValue):g:(null)\""]}, + + {"kind":"when","text":"","code":["c.backward(Tensor.of([2, 2], [5, -2, 7, 3]).mut.toType(type))"]}, + + {"kind":"then","text":"","code":["w.toString({it.hasSlimNumbers = true}) == \"[$wShape]:($wValue):g:($wGradient)\""]}, + + {"kind":"when","text":"","code":["Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(false)"]}, + + {"kind":"then","text":"","code":["c.toString({it.hasSlimNumbers = true}) == \"(2x2):[$cValue]\""]}, + + {"kind":"cleanup","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = false }"]}, + + {"kind":"where","text":"","code":{"device":["'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'CPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'"],"type":["Double","Double","Float","Float","Double","Double","Float","Float","Double","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float"],"whichGrad":["false","false","false","false","false","false","false","false","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true"],"bShape":["[1]","[1]","[1]","[1]","[1]","[1]","[1]","[1]","[2,1]","[2,1]","[1]","[1]","[1]","[1]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]"],"operation":["{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }"],"cValue":["\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"9, 10, 12, 13\"","\"9, 10, 12, 13\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"-7, -7, -5, 
-5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\""],"wGradient":["\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"3, 10\"","\"3, 10\"","\"13\"","\"13\"","\"13\"","\"13\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Auto reshaping and broadcasting works and the result can be back propagated. [1]", + "result":"PASS", + "duration":"0.051 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = true }","Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(true)"]}, + + {"kind":"and","text":"","code":["String wValue = whichGrad"," ? \"8\" + ( bShape.inject(1, {x,y->x*y}) > 1 ? \", 9\" : \"\" )"," : \"1, 2, 3, 4\""]}, + + {"kind":"and","text":"","code":["def aShape = [2, 2]"]}, + + {"kind":"and","text":"","code":["Tensor a = Tensor.of(aShape, 1d..5d).setRqsGradient(!whichGrad).to(Device.get(device))","Tensor b = Tensor.of(bShape, 8d..9d).setRqsGradient(whichGrad).to(Device.get(device))"]}, + + {"kind":"and","text":"","code":["a.mut.toType(type)","b.mut.toType(type)"]}, + + {"kind":"and","text":"","code":["String wShape = ( whichGrad ? bShape : aShape ).join(\"x\")","Tensor w = ( whichGrad ? 
b : a )"]}, + + {"kind":"expect","text":"","code":["a.itemType == type || device == 'GPU' // The gpu backend will only be floats!","b.itemType == type || device == 'GPU' // This is because kernels only work on floats..."]}, + + {"kind":"when","text":"","code":["Tensor c = operation.apply(a, b)"]}, + + {"kind":"then","text":"","code":["c.toString({it.hasSlimNumbers = true}).startsWith(\"[2x2]:($cValue)\")","w.toString({it.hasSlimNumbers = true}) == \"[$wShape]:($wValue):g:(null)\""]}, + + {"kind":"when","text":"","code":["c.backward(Tensor.of([2, 2], [5, -2, 7, 3]).mut.toType(type))"]}, + + {"kind":"then","text":"","code":["w.toString({it.hasSlimNumbers = true}) == \"[$wShape]:($wValue):g:($wGradient)\""]}, + + {"kind":"when","text":"","code":["Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(false)"]}, + + {"kind":"then","text":"","code":["c.toString({it.hasSlimNumbers = true}) == \"(2x2):[$cValue]\""]}, + + {"kind":"cleanup","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = false }"]}, + + {"kind":"where","text":"","code":{"device":["'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'CPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'"],"type":["Double","Double","Float","Float","Double","Double","Float","Float","Double","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float"],"whichGrad":["false","false","false","false","false","false","false","false","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true"],"bShape":["[1]","[1]","[1]","[1]","[1]","[1]","[1]","[1]","[2,1]","[2,1]","[1]","[1]","[1]","[1]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]"],"operation":["{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }"],"cValue":["\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"9, 10, 12, 13\"","\"9, 10, 12, 13\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"-7, -7, -5, 
-5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\""],"wGradient":["\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"3, 10\"","\"3, 10\"","\"13\"","\"13\"","\"13\"","\"13\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Auto reshaping and broadcasting works and the result can be back propagated. [2]", + "result":"PASS", + "duration":"0.053 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = true }","Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(true)"]}, + + {"kind":"and","text":"","code":["String wValue = whichGrad"," ? \"8\" + ( bShape.inject(1, {x,y->x*y}) > 1 ? \", 9\" : \"\" )"," : \"1, 2, 3, 4\""]}, + + {"kind":"and","text":"","code":["def aShape = [2, 2]"]}, + + {"kind":"and","text":"","code":["Tensor a = Tensor.of(aShape, 1d..5d).setRqsGradient(!whichGrad).to(Device.get(device))","Tensor b = Tensor.of(bShape, 8d..9d).setRqsGradient(whichGrad).to(Device.get(device))"]}, + + {"kind":"and","text":"","code":["a.mut.toType(type)","b.mut.toType(type)"]}, + + {"kind":"and","text":"","code":["String wShape = ( whichGrad ? bShape : aShape ).join(\"x\")","Tensor w = ( whichGrad ? 
b : a )"]}, + + {"kind":"expect","text":"","code":["a.itemType == type || device == 'GPU' // The gpu backend will only be floats!","b.itemType == type || device == 'GPU' // This is because kernels only work on floats..."]}, + + {"kind":"when","text":"","code":["Tensor c = operation.apply(a, b)"]}, + + {"kind":"then","text":"","code":["c.toString({it.hasSlimNumbers = true}).startsWith(\"[2x2]:($cValue)\")","w.toString({it.hasSlimNumbers = true}) == \"[$wShape]:($wValue):g:(null)\""]}, + + {"kind":"when","text":"","code":["c.backward(Tensor.of([2, 2], [5, -2, 7, 3]).mut.toType(type))"]}, + + {"kind":"then","text":"","code":["w.toString({it.hasSlimNumbers = true}) == \"[$wShape]:($wValue):g:($wGradient)\""]}, + + {"kind":"when","text":"","code":["Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(false)"]}, + + {"kind":"then","text":"","code":["c.toString({it.hasSlimNumbers = true}) == \"(2x2):[$cValue]\""]}, + + {"kind":"cleanup","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = false }"]}, + + {"kind":"where","text":"","code":{"device":["'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'CPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'"],"type":["Double","Double","Float","Float","Double","Double","Float","Float","Double","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float"],"whichGrad":["false","false","false","false","false","false","false","false","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true"],"bShape":["[1]","[1]","[1]","[1]","[1]","[1]","[1]","[1]","[2,1]","[2,1]","[1]","[1]","[1]","[1]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]"],"operation":["{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }"],"cValue":["\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"9, 10, 12, 13\"","\"9, 10, 12, 13\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"-7, -7, -5, 
-5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\""],"wGradient":["\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"3, 10\"","\"3, 10\"","\"13\"","\"13\"","\"13\"","\"13\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Auto reshaping and broadcasting works and the result can be back propagated. [3]", + "result":"PASS", + "duration":"0.052 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = true }","Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(true)"]}, + + {"kind":"and","text":"","code":["String wValue = whichGrad"," ? \"8\" + ( bShape.inject(1, {x,y->x*y}) > 1 ? \", 9\" : \"\" )"," : \"1, 2, 3, 4\""]}, + + {"kind":"and","text":"","code":["def aShape = [2, 2]"]}, + + {"kind":"and","text":"","code":["Tensor a = Tensor.of(aShape, 1d..5d).setRqsGradient(!whichGrad).to(Device.get(device))","Tensor b = Tensor.of(bShape, 8d..9d).setRqsGradient(whichGrad).to(Device.get(device))"]}, + + {"kind":"and","text":"","code":["a.mut.toType(type)","b.mut.toType(type)"]}, + + {"kind":"and","text":"","code":["String wShape = ( whichGrad ? bShape : aShape ).join(\"x\")","Tensor w = ( whichGrad ? 
b : a )"]}, + + {"kind":"expect","text":"","code":["a.itemType == type || device == 'GPU' // The gpu backend will only be floats!","b.itemType == type || device == 'GPU' // This is because kernels only work on floats..."]}, + + {"kind":"when","text":"","code":["Tensor c = operation.apply(a, b)"]}, + + {"kind":"then","text":"","code":["c.toString({it.hasSlimNumbers = true}).startsWith(\"[2x2]:($cValue)\")","w.toString({it.hasSlimNumbers = true}) == \"[$wShape]:($wValue):g:(null)\""]}, + + {"kind":"when","text":"","code":["c.backward(Tensor.of([2, 2], [5, -2, 7, 3]).mut.toType(type))"]}, + + {"kind":"then","text":"","code":["w.toString({it.hasSlimNumbers = true}) == \"[$wShape]:($wValue):g:($wGradient)\""]}, + + {"kind":"when","text":"","code":["Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(false)"]}, + + {"kind":"then","text":"","code":["c.toString({it.hasSlimNumbers = true}) == \"(2x2):[$cValue]\""]}, + + {"kind":"cleanup","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = false }"]}, + + {"kind":"where","text":"","code":{"device":["'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'CPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'"],"type":["Double","Double","Float","Float","Double","Double","Float","Float","Double","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float"],"whichGrad":["false","false","false","false","false","false","false","false","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true"],"bShape":["[1]","[1]","[1]","[1]","[1]","[1]","[1]","[1]","[2,1]","[2,1]","[1]","[1]","[1]","[1]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]"],"operation":["{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }"],"cValue":["\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"9, 10, 12, 13\"","\"9, 10, 12, 13\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"-7, -7, -5, 
-5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\""],"wGradient":["\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"3, 10\"","\"3, 10\"","\"13\"","\"13\"","\"13\"","\"13\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Auto reshaping and broadcasting works and the result can be back propagated. [4]", + "result":"PASS", + "duration":"0.053 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = true }","Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(true)"]}, + + {"kind":"and","text":"","code":["String wValue = whichGrad"," ? \"8\" + ( bShape.inject(1, {x,y->x*y}) > 1 ? \", 9\" : \"\" )"," : \"1, 2, 3, 4\""]}, + + {"kind":"and","text":"","code":["def aShape = [2, 2]"]}, + + {"kind":"and","text":"","code":["Tensor a = Tensor.of(aShape, 1d..5d).setRqsGradient(!whichGrad).to(Device.get(device))","Tensor b = Tensor.of(bShape, 8d..9d).setRqsGradient(whichGrad).to(Device.get(device))"]}, + + {"kind":"and","text":"","code":["a.mut.toType(type)","b.mut.toType(type)"]}, + + {"kind":"and","text":"","code":["String wShape = ( whichGrad ? bShape : aShape ).join(\"x\")","Tensor w = ( whichGrad ? 
b : a )"]}, + + {"kind":"expect","text":"","code":["a.itemType == type || device == 'GPU' // The gpu backend will only be floats!","b.itemType == type || device == 'GPU' // This is because kernels only work on floats..."]}, + + {"kind":"when","text":"","code":["Tensor c = operation.apply(a, b)"]}, + + {"kind":"then","text":"","code":["c.toString({it.hasSlimNumbers = true}).startsWith(\"[2x2]:($cValue)\")","w.toString({it.hasSlimNumbers = true}) == \"[$wShape]:($wValue):g:(null)\""]}, + + {"kind":"when","text":"","code":["c.backward(Tensor.of([2, 2], [5, -2, 7, 3]).mut.toType(type))"]}, + + {"kind":"then","text":"","code":["w.toString({it.hasSlimNumbers = true}) == \"[$wShape]:($wValue):g:($wGradient)\""]}, + + {"kind":"when","text":"","code":["Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(false)"]}, + + {"kind":"then","text":"","code":["c.toString({it.hasSlimNumbers = true}) == \"(2x2):[$cValue]\""]}, + + {"kind":"cleanup","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = false }"]}, + + {"kind":"where","text":"","code":{"device":["'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'CPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'"],"type":["Double","Double","Float","Float","Double","Double","Float","Float","Double","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float"],"whichGrad":["false","false","false","false","false","false","false","false","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true"],"bShape":["[1]","[1]","[1]","[1]","[1]","[1]","[1]","[1]","[2,1]","[2,1]","[1]","[1]","[1]","[1]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]"],"operation":["{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }"],"cValue":["\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"9, 10, 12, 13\"","\"9, 10, 12, 13\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"-7, -7, -5, 
-5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\""],"wGradient":["\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"3, 10\"","\"3, 10\"","\"13\"","\"13\"","\"13\"","\"13\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Auto reshaping and broadcasting works and the result can be back propagated. [5]", + "result":"PASS", + "duration":"0.052 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = true }","Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(true)"]}, + + {"kind":"and","text":"","code":["String wValue = whichGrad"," ? \"8\" + ( bShape.inject(1, {x,y->x*y}) > 1 ? \", 9\" : \"\" )"," : \"1, 2, 3, 4\""]}, + + {"kind":"and","text":"","code":["def aShape = [2, 2]"]}, + + {"kind":"and","text":"","code":["Tensor a = Tensor.of(aShape, 1d..5d).setRqsGradient(!whichGrad).to(Device.get(device))","Tensor b = Tensor.of(bShape, 8d..9d).setRqsGradient(whichGrad).to(Device.get(device))"]}, + + {"kind":"and","text":"","code":["a.mut.toType(type)","b.mut.toType(type)"]}, + + {"kind":"and","text":"","code":["String wShape = ( whichGrad ? bShape : aShape ).join(\"x\")","Tensor w = ( whichGrad ? 
b : a )"]}, + + {"kind":"expect","text":"","code":["a.itemType == type || device == 'GPU' // The gpu backend will only be floats!","b.itemType == type || device == 'GPU' // This is because kernels only work on floats..."]}, + + {"kind":"when","text":"","code":["Tensor c = operation.apply(a, b)"]}, + + {"kind":"then","text":"","code":["c.toString({it.hasSlimNumbers = true}).startsWith(\"[2x2]:($cValue)\")","w.toString({it.hasSlimNumbers = true}) == \"[$wShape]:($wValue):g:(null)\""]}, + + {"kind":"when","text":"","code":["c.backward(Tensor.of([2, 2], [5, -2, 7, 3]).mut.toType(type))"]}, + + {"kind":"then","text":"","code":["w.toString({it.hasSlimNumbers = true}) == \"[$wShape]:($wValue):g:($wGradient)\""]}, + + {"kind":"when","text":"","code":["Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(false)"]}, + + {"kind":"then","text":"","code":["c.toString({it.hasSlimNumbers = true}) == \"(2x2):[$cValue]\""]}, + + {"kind":"cleanup","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = false }"]}, + + {"kind":"where","text":"","code":{"device":["'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'CPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'"],"type":["Double","Double","Float","Float","Double","Double","Float","Float","Double","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float"],"whichGrad":["false","false","false","false","false","false","false","false","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true"],"bShape":["[1]","[1]","[1]","[1]","[1]","[1]","[1]","[1]","[2,1]","[2,1]","[1]","[1]","[1]","[1]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]"],"operation":["{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }"],"cValue":["\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"9, 10, 12, 13\"","\"9, 10, 12, 13\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"-7, -7, -5, 
-5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\""],"wGradient":["\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"3, 10\"","\"3, 10\"","\"13\"","\"13\"","\"13\"","\"13\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Auto reshaping and broadcasting works and the result can be back propagated. [6]", + "result":"PASS", + "duration":"0.055 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = true }","Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(true)"]}, + + {"kind":"and","text":"","code":["String wValue = whichGrad"," ? \"8\" + ( bShape.inject(1, {x,y->x*y}) > 1 ? \", 9\" : \"\" )"," : \"1, 2, 3, 4\""]}, + + {"kind":"and","text":"","code":["def aShape = [2, 2]"]}, + + {"kind":"and","text":"","code":["Tensor a = Tensor.of(aShape, 1d..5d).setRqsGradient(!whichGrad).to(Device.get(device))","Tensor b = Tensor.of(bShape, 8d..9d).setRqsGradient(whichGrad).to(Device.get(device))"]}, + + {"kind":"and","text":"","code":["a.mut.toType(type)","b.mut.toType(type)"]}, + + {"kind":"and","text":"","code":["String wShape = ( whichGrad ? bShape : aShape ).join(\"x\")","Tensor w = ( whichGrad ? 
b : a )"]}, + + {"kind":"expect","text":"","code":["a.itemType == type || device == 'GPU' // The gpu backend will only be floats!","b.itemType == type || device == 'GPU' // This is because kernels only work on floats..."]}, + + {"kind":"when","text":"","code":["Tensor c = operation.apply(a, b)"]}, + + {"kind":"then","text":"","code":["c.toString({it.hasSlimNumbers = true}).startsWith(\"[2x2]:($cValue)\")","w.toString({it.hasSlimNumbers = true}) == \"[$wShape]:($wValue):g:(null)\""]}, + + {"kind":"when","text":"","code":["c.backward(Tensor.of([2, 2], [5, -2, 7, 3]).mut.toType(type))"]}, + + {"kind":"then","text":"","code":["w.toString({it.hasSlimNumbers = true}) == \"[$wShape]:($wValue):g:($wGradient)\""]}, + + {"kind":"when","text":"","code":["Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(false)"]}, + + {"kind":"then","text":"","code":["c.toString({it.hasSlimNumbers = true}) == \"(2x2):[$cValue]\""]}, + + {"kind":"cleanup","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = false }"]}, + + {"kind":"where","text":"","code":{"device":["'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'CPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'"],"type":["Double","Double","Float","Float","Double","Double","Float","Float","Double","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float"],"whichGrad":["false","false","false","false","false","false","false","false","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true"],"bShape":["[1]","[1]","[1]","[1]","[1]","[1]","[1]","[1]","[2,1]","[2,1]","[1]","[1]","[1]","[1]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]"],"operation":["{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }"],"cValue":["\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"9, 10, 12, 13\"","\"9, 10, 12, 13\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"-7, -7, -5, 
-5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\""],"wGradient":["\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"3, 10\"","\"3, 10\"","\"13\"","\"13\"","\"13\"","\"13\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Auto reshaping and broadcasting works and the result can be back propagated. [7]", + "result":"PASS", + "duration":"0.051 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = true }","Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(true)"]}, + + {"kind":"and","text":"","code":["String wValue = whichGrad"," ? \"8\" + ( bShape.inject(1, {x,y->x*y}) > 1 ? \", 9\" : \"\" )"," : \"1, 2, 3, 4\""]}, + + {"kind":"and","text":"","code":["def aShape = [2, 2]"]}, + + {"kind":"and","text":"","code":["Tensor a = Tensor.of(aShape, 1d..5d).setRqsGradient(!whichGrad).to(Device.get(device))","Tensor b = Tensor.of(bShape, 8d..9d).setRqsGradient(whichGrad).to(Device.get(device))"]}, + + {"kind":"and","text":"","code":["a.mut.toType(type)","b.mut.toType(type)"]}, + + {"kind":"and","text":"","code":["String wShape = ( whichGrad ? bShape : aShape ).join(\"x\")","Tensor w = ( whichGrad ? 
b : a )"]}, + + {"kind":"expect","text":"","code":["a.itemType == type || device == 'GPU' // The gpu backend will only be floats!","b.itemType == type || device == 'GPU' // This is because kernels only work on floats..."]}, + + {"kind":"when","text":"","code":["Tensor c = operation.apply(a, b)"]}, + + {"kind":"then","text":"","code":["c.toString({it.hasSlimNumbers = true}).startsWith(\"[2x2]:($cValue)\")","w.toString({it.hasSlimNumbers = true}) == \"[$wShape]:($wValue):g:(null)\""]}, + + {"kind":"when","text":"","code":["c.backward(Tensor.of([2, 2], [5, -2, 7, 3]).mut.toType(type))"]}, + + {"kind":"then","text":"","code":["w.toString({it.hasSlimNumbers = true}) == \"[$wShape]:($wValue):g:($wGradient)\""]}, + + {"kind":"when","text":"","code":["Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(false)"]}, + + {"kind":"then","text":"","code":["c.toString({it.hasSlimNumbers = true}) == \"(2x2):[$cValue]\""]}, + + {"kind":"cleanup","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = false }"]}, + + {"kind":"where","text":"","code":{"device":["'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'CPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'"],"type":["Double","Double","Float","Float","Double","Double","Float","Float","Double","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float"],"whichGrad":["false","false","false","false","false","false","false","false","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true"],"bShape":["[1]","[1]","[1]","[1]","[1]","[1]","[1]","[1]","[2,1]","[2,1]","[1]","[1]","[1]","[1]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]"],"operation":["{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }"],"cValue":["\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"9, 10, 12, 13\"","\"9, 10, 12, 13\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"-7, -7, -5, 
-5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\""],"wGradient":["\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"3, 10\"","\"3, 10\"","\"13\"","\"13\"","\"13\"","\"13\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Auto reshaping and broadcasting works and the result can be back propagated. [8]", + "result":"PASS", + "duration":"0.090 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = true }","Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(true)"]}, + + {"kind":"and","text":"","code":["String wValue = whichGrad"," ? \"8\" + ( bShape.inject(1, {x,y->x*y}) > 1 ? \", 9\" : \"\" )"," : \"1, 2, 3, 4\""]}, + + {"kind":"and","text":"","code":["def aShape = [2, 2]"]}, + + {"kind":"and","text":"","code":["Tensor a = Tensor.of(aShape, 1d..5d).setRqsGradient(!whichGrad).to(Device.get(device))","Tensor b = Tensor.of(bShape, 8d..9d).setRqsGradient(whichGrad).to(Device.get(device))"]}, + + {"kind":"and","text":"","code":["a.mut.toType(type)","b.mut.toType(type)"]}, + + {"kind":"and","text":"","code":["String wShape = ( whichGrad ? bShape : aShape ).join(\"x\")","Tensor w = ( whichGrad ? 
b : a )"]}, + + {"kind":"expect","text":"","code":["a.itemType == type || device == 'GPU' // The gpu backend will only be floats!","b.itemType == type || device == 'GPU' // This is because kernels only work on floats..."]}, + + {"kind":"when","text":"","code":["Tensor c = operation.apply(a, b)"]}, + + {"kind":"then","text":"","code":["c.toString({it.hasSlimNumbers = true}).startsWith(\"[2x2]:($cValue)\")","w.toString({it.hasSlimNumbers = true}) == \"[$wShape]:($wValue):g:(null)\""]}, + + {"kind":"when","text":"","code":["c.backward(Tensor.of([2, 2], [5, -2, 7, 3]).mut.toType(type))"]}, + + {"kind":"then","text":"","code":["w.toString({it.hasSlimNumbers = true}) == \"[$wShape]:($wValue):g:($wGradient)\""]}, + + {"kind":"when","text":"","code":["Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(false)"]}, + + {"kind":"then","text":"","code":["c.toString({it.hasSlimNumbers = true}) == \"(2x2):[$cValue]\""]}, + + {"kind":"cleanup","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = false }"]}, + + {"kind":"where","text":"","code":{"device":["'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'CPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'"],"type":["Double","Double","Float","Float","Double","Double","Float","Float","Double","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float"],"whichGrad":["false","false","false","false","false","false","false","false","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true"],"bShape":["[1]","[1]","[1]","[1]","[1]","[1]","[1]","[1]","[2,1]","[2,1]","[1]","[1]","[1]","[1]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]"],"operation":["{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }"],"cValue":["\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"9, 10, 12, 13\"","\"9, 10, 12, 13\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"-7, -7, -5, 
-5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\""],"wGradient":["\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"3, 10\"","\"3, 10\"","\"13\"","\"13\"","\"13\"","\"13\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Auto reshaping and broadcasting works and the result can be back propagated. [9]", + "result":"PASS", + "duration":"0.052 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = true }","Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(true)"]}, + + {"kind":"and","text":"","code":["String wValue = whichGrad"," ? \"8\" + ( bShape.inject(1, {x,y->x*y}) > 1 ? \", 9\" : \"\" )"," : \"1, 2, 3, 4\""]}, + + {"kind":"and","text":"","code":["def aShape = [2, 2]"]}, + + {"kind":"and","text":"","code":["Tensor a = Tensor.of(aShape, 1d..5d).setRqsGradient(!whichGrad).to(Device.get(device))","Tensor b = Tensor.of(bShape, 8d..9d).setRqsGradient(whichGrad).to(Device.get(device))"]}, + + {"kind":"and","text":"","code":["a.mut.toType(type)","b.mut.toType(type)"]}, + + {"kind":"and","text":"","code":["String wShape = ( whichGrad ? bShape : aShape ).join(\"x\")","Tensor w = ( whichGrad ? 
b : a )"]}, + + {"kind":"expect","text":"","code":["a.itemType == type || device == 'GPU' // The gpu backend will only be floats!","b.itemType == type || device == 'GPU' // This is because kernels only work on floats..."]}, + + {"kind":"when","text":"","code":["Tensor c = operation.apply(a, b)"]}, + + {"kind":"then","text":"","code":["c.toString({it.hasSlimNumbers = true}).startsWith(\"[2x2]:($cValue)\")","w.toString({it.hasSlimNumbers = true}) == \"[$wShape]:($wValue):g:(null)\""]}, + + {"kind":"when","text":"","code":["c.backward(Tensor.of([2, 2], [5, -2, 7, 3]).mut.toType(type))"]}, + + {"kind":"then","text":"","code":["w.toString({it.hasSlimNumbers = true}) == \"[$wShape]:($wValue):g:($wGradient)\""]}, + + {"kind":"when","text":"","code":["Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(false)"]}, + + {"kind":"then","text":"","code":["c.toString({it.hasSlimNumbers = true}) == \"(2x2):[$cValue]\""]}, + + {"kind":"cleanup","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = false }"]}, + + {"kind":"where","text":"","code":{"device":["'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'CPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'"],"type":["Double","Double","Float","Float","Double","Double","Float","Float","Double","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float"],"whichGrad":["false","false","false","false","false","false","false","false","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true"],"bShape":["[1]","[1]","[1]","[1]","[1]","[1]","[1]","[1]","[2,1]","[2,1]","[1]","[1]","[1]","[1]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]"],"operation":["{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }"],"cValue":["\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"9, 10, 12, 13\"","\"9, 10, 12, 13\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"-7, -7, -5, 
-5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\""],"wGradient":["\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"3, 10\"","\"3, 10\"","\"13\"","\"13\"","\"13\"","\"13\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Auto reshaping and broadcasting works and the result can be back propagated. [10]", + "result":"PASS", + "duration":"0.052 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = true }","Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(true)"]}, + + {"kind":"and","text":"","code":["String wValue = whichGrad"," ? \"8\" + ( bShape.inject(1, {x,y->x*y}) > 1 ? \", 9\" : \"\" )"," : \"1, 2, 3, 4\""]}, + + {"kind":"and","text":"","code":["def aShape = [2, 2]"]}, + + {"kind":"and","text":"","code":["Tensor a = Tensor.of(aShape, 1d..5d).setRqsGradient(!whichGrad).to(Device.get(device))","Tensor b = Tensor.of(bShape, 8d..9d).setRqsGradient(whichGrad).to(Device.get(device))"]}, + + {"kind":"and","text":"","code":["a.mut.toType(type)","b.mut.toType(type)"]}, + + {"kind":"and","text":"","code":["String wShape = ( whichGrad ? bShape : aShape ).join(\"x\")","Tensor w = ( whichGrad ? 
b : a )"]}, + + {"kind":"expect","text":"","code":["a.itemType == type || device == 'GPU' // The gpu backend will only be floats!","b.itemType == type || device == 'GPU' // This is because kernels only work on floats..."]}, + + {"kind":"when","text":"","code":["Tensor c = operation.apply(a, b)"]}, + + {"kind":"then","text":"","code":["c.toString({it.hasSlimNumbers = true}).startsWith(\"[2x2]:($cValue)\")","w.toString({it.hasSlimNumbers = true}) == \"[$wShape]:($wValue):g:(null)\""]}, + + {"kind":"when","text":"","code":["c.backward(Tensor.of([2, 2], [5, -2, 7, 3]).mut.toType(type))"]}, + + {"kind":"then","text":"","code":["w.toString({it.hasSlimNumbers = true}) == \"[$wShape]:($wValue):g:($wGradient)\""]}, + + {"kind":"when","text":"","code":["Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(false)"]}, + + {"kind":"then","text":"","code":["c.toString({it.hasSlimNumbers = true}) == \"(2x2):[$cValue]\""]}, + + {"kind":"cleanup","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = false }"]}, + + {"kind":"where","text":"","code":{"device":["'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'CPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'"],"type":["Double","Double","Float","Float","Double","Double","Float","Float","Double","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float"],"whichGrad":["false","false","false","false","false","false","false","false","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true"],"bShape":["[1]","[1]","[1]","[1]","[1]","[1]","[1]","[1]","[2,1]","[2,1]","[1]","[1]","[1]","[1]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]"],"operation":["{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }"],"cValue":["\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"9, 10, 12, 13\"","\"9, 10, 12, 13\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"-7, -7, -5, 
-5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\""],"wGradient":["\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"3, 10\"","\"3, 10\"","\"13\"","\"13\"","\"13\"","\"13\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Auto reshaping and broadcasting works and the result can be back propagated. [11]", + "result":"PASS", + "duration":"0.054 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = true }","Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(true)"]}, + + {"kind":"and","text":"","code":["String wValue = whichGrad"," ? \"8\" + ( bShape.inject(1, {x,y->x*y}) > 1 ? \", 9\" : \"\" )"," : \"1, 2, 3, 4\""]}, + + {"kind":"and","text":"","code":["def aShape = [2, 2]"]}, + + {"kind":"and","text":"","code":["Tensor a = Tensor.of(aShape, 1d..5d).setRqsGradient(!whichGrad).to(Device.get(device))","Tensor b = Tensor.of(bShape, 8d..9d).setRqsGradient(whichGrad).to(Device.get(device))"]}, + + {"kind":"and","text":"","code":["a.mut.toType(type)","b.mut.toType(type)"]}, + + {"kind":"and","text":"","code":["String wShape = ( whichGrad ? bShape : aShape ).join(\"x\")","Tensor w = ( whichGrad ? 
b : a )"]}, + + {"kind":"expect","text":"","code":["a.itemType == type || device == 'GPU' // The gpu backend will only be floats!","b.itemType == type || device == 'GPU' // This is because kernels only work on floats..."]}, + + {"kind":"when","text":"","code":["Tensor c = operation.apply(a, b)"]}, + + {"kind":"then","text":"","code":["c.toString({it.hasSlimNumbers = true}).startsWith(\"[2x2]:($cValue)\")","w.toString({it.hasSlimNumbers = true}) == \"[$wShape]:($wValue):g:(null)\""]}, + + {"kind":"when","text":"","code":["c.backward(Tensor.of([2, 2], [5, -2, 7, 3]).mut.toType(type))"]}, + + {"kind":"then","text":"","code":["w.toString({it.hasSlimNumbers = true}) == \"[$wShape]:($wValue):g:($wGradient)\""]}, + + {"kind":"when","text":"","code":["Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(false)"]}, + + {"kind":"then","text":"","code":["c.toString({it.hasSlimNumbers = true}) == \"(2x2):[$cValue]\""]}, + + {"kind":"cleanup","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = false }"]}, + + {"kind":"where","text":"","code":{"device":["'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'CPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'"],"type":["Double","Double","Float","Float","Double","Double","Float","Float","Double","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float"],"whichGrad":["false","false","false","false","false","false","false","false","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true"],"bShape":["[1]","[1]","[1]","[1]","[1]","[1]","[1]","[1]","[2,1]","[2,1]","[1]","[1]","[1]","[1]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]"],"operation":["{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }"],"cValue":["\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"9, 10, 12, 13\"","\"9, 10, 12, 13\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"-7, -7, -5, 
-5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\""],"wGradient":["\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"3, 10\"","\"3, 10\"","\"13\"","\"13\"","\"13\"","\"13\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Auto reshaping and broadcasting works and the result can be back propagated. [12]", + "result":"PASS", + "duration":"0.052 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = true }","Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(true)"]}, + + {"kind":"and","text":"","code":["String wValue = whichGrad"," ? \"8\" + ( bShape.inject(1, {x,y->x*y}) > 1 ? \", 9\" : \"\" )"," : \"1, 2, 3, 4\""]}, + + {"kind":"and","text":"","code":["def aShape = [2, 2]"]}, + + {"kind":"and","text":"","code":["Tensor a = Tensor.of(aShape, 1d..5d).setRqsGradient(!whichGrad).to(Device.get(device))","Tensor b = Tensor.of(bShape, 8d..9d).setRqsGradient(whichGrad).to(Device.get(device))"]}, + + {"kind":"and","text":"","code":["a.mut.toType(type)","b.mut.toType(type)"]}, + + {"kind":"and","text":"","code":["String wShape = ( whichGrad ? bShape : aShape ).join(\"x\")","Tensor w = ( whichGrad ? 
b : a )"]}, + + {"kind":"expect","text":"","code":["a.itemType == type || device == 'GPU' // The gpu backend will only be floats!","b.itemType == type || device == 'GPU' // This is because kernels only work on floats..."]}, + + {"kind":"when","text":"","code":["Tensor c = operation.apply(a, b)"]}, + + {"kind":"then","text":"","code":["c.toString({it.hasSlimNumbers = true}).startsWith(\"[2x2]:($cValue)\")","w.toString({it.hasSlimNumbers = true}) == \"[$wShape]:($wValue):g:(null)\""]}, + + {"kind":"when","text":"","code":["c.backward(Tensor.of([2, 2], [5, -2, 7, 3]).mut.toType(type))"]}, + + {"kind":"then","text":"","code":["w.toString({it.hasSlimNumbers = true}) == \"[$wShape]:($wValue):g:($wGradient)\""]}, + + {"kind":"when","text":"","code":["Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(false)"]}, + + {"kind":"then","text":"","code":["c.toString({it.hasSlimNumbers = true}) == \"(2x2):[$cValue]\""]}, + + {"kind":"cleanup","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = false }"]}, + + {"kind":"where","text":"","code":{"device":["'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'CPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'"],"type":["Double","Double","Float","Float","Double","Double","Float","Float","Double","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float"],"whichGrad":["false","false","false","false","false","false","false","false","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true"],"bShape":["[1]","[1]","[1]","[1]","[1]","[1]","[1]","[1]","[2,1]","[2,1]","[1]","[1]","[1]","[1]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]"],"operation":["{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }"],"cValue":["\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"9, 10, 12, 13\"","\"9, 10, 12, 13\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"-7, -7, -5, 
-5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\""],"wGradient":["\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"3, 10\"","\"3, 10\"","\"13\"","\"13\"","\"13\"","\"13\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Auto reshaping and broadcasting works and the result can be back propagated. [13]", + "result":"PASS", + "duration":"0.052 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = true }","Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(true)"]}, + + {"kind":"and","text":"","code":["String wValue = whichGrad"," ? \"8\" + ( bShape.inject(1, {x,y->x*y}) > 1 ? \", 9\" : \"\" )"," : \"1, 2, 3, 4\""]}, + + {"kind":"and","text":"","code":["def aShape = [2, 2]"]}, + + {"kind":"and","text":"","code":["Tensor a = Tensor.of(aShape, 1d..5d).setRqsGradient(!whichGrad).to(Device.get(device))","Tensor b = Tensor.of(bShape, 8d..9d).setRqsGradient(whichGrad).to(Device.get(device))"]}, + + {"kind":"and","text":"","code":["a.mut.toType(type)","b.mut.toType(type)"]}, + + {"kind":"and","text":"","code":["String wShape = ( whichGrad ? bShape : aShape ).join(\"x\")","Tensor w = ( whichGrad ? 
b : a )"]}, + + {"kind":"expect","text":"","code":["a.itemType == type || device == 'GPU' // The gpu backend will only be floats!","b.itemType == type || device == 'GPU' // This is because kernels only work on floats..."]}, + + {"kind":"when","text":"","code":["Tensor c = operation.apply(a, b)"]}, + + {"kind":"then","text":"","code":["c.toString({it.hasSlimNumbers = true}).startsWith(\"[2x2]:($cValue)\")","w.toString({it.hasSlimNumbers = true}) == \"[$wShape]:($wValue):g:(null)\""]}, + + {"kind":"when","text":"","code":["c.backward(Tensor.of([2, 2], [5, -2, 7, 3]).mut.toType(type))"]}, + + {"kind":"then","text":"","code":["w.toString({it.hasSlimNumbers = true}) == \"[$wShape]:($wValue):g:($wGradient)\""]}, + + {"kind":"when","text":"","code":["Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(false)"]}, + + {"kind":"then","text":"","code":["c.toString({it.hasSlimNumbers = true}) == \"(2x2):[$cValue]\""]}, + + {"kind":"cleanup","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = false }"]}, + + {"kind":"where","text":"","code":{"device":["'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'CPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'"],"type":["Double","Double","Float","Float","Double","Double","Float","Float","Double","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float"],"whichGrad":["false","false","false","false","false","false","false","false","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true"],"bShape":["[1]","[1]","[1]","[1]","[1]","[1]","[1]","[1]","[2,1]","[2,1]","[1]","[1]","[1]","[1]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]"],"operation":["{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }"],"cValue":["\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"9, 10, 12, 13\"","\"9, 10, 12, 13\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"-7, -7, -5, 
-5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\""],"wGradient":["\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"3, 10\"","\"3, 10\"","\"13\"","\"13\"","\"13\"","\"13\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Auto reshaping and broadcasting works and the result can be back propagated. [14]", + "result":"PASS", + "duration":"0.052 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = true }","Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(true)"]}, + + {"kind":"and","text":"","code":["String wValue = whichGrad"," ? \"8\" + ( bShape.inject(1, {x,y->x*y}) > 1 ? \", 9\" : \"\" )"," : \"1, 2, 3, 4\""]}, + + {"kind":"and","text":"","code":["def aShape = [2, 2]"]}, + + {"kind":"and","text":"","code":["Tensor a = Tensor.of(aShape, 1d..5d).setRqsGradient(!whichGrad).to(Device.get(device))","Tensor b = Tensor.of(bShape, 8d..9d).setRqsGradient(whichGrad).to(Device.get(device))"]}, + + {"kind":"and","text":"","code":["a.mut.toType(type)","b.mut.toType(type)"]}, + + {"kind":"and","text":"","code":["String wShape = ( whichGrad ? bShape : aShape ).join(\"x\")","Tensor w = ( whichGrad ? 
b : a )"]}, + + {"kind":"expect","text":"","code":["a.itemType == type || device == 'GPU' // The gpu backend will only be floats!","b.itemType == type || device == 'GPU' // This is because kernels only work on floats..."]}, + + {"kind":"when","text":"","code":["Tensor c = operation.apply(a, b)"]}, + + {"kind":"then","text":"","code":["c.toString({it.hasSlimNumbers = true}).startsWith(\"[2x2]:($cValue)\")","w.toString({it.hasSlimNumbers = true}) == \"[$wShape]:($wValue):g:(null)\""]}, + + {"kind":"when","text":"","code":["c.backward(Tensor.of([2, 2], [5, -2, 7, 3]).mut.toType(type))"]}, + + {"kind":"then","text":"","code":["w.toString({it.hasSlimNumbers = true}) == \"[$wShape]:($wValue):g:($wGradient)\""]}, + + {"kind":"when","text":"","code":["Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(false)"]}, + + {"kind":"then","text":"","code":["c.toString({it.hasSlimNumbers = true}) == \"(2x2):[$cValue]\""]}, + + {"kind":"cleanup","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = false }"]}, + + {"kind":"where","text":"","code":{"device":["'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'CPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'"],"type":["Double","Double","Float","Float","Double","Double","Float","Float","Double","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float"],"whichGrad":["false","false","false","false","false","false","false","false","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true"],"bShape":["[1]","[1]","[1]","[1]","[1]","[1]","[1]","[1]","[2,1]","[2,1]","[1]","[1]","[1]","[1]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]"],"operation":["{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }"],"cValue":["\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"9, 10, 12, 13\"","\"9, 10, 12, 13\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"-7, -7, -5, 
-5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\""],"wGradient":["\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"3, 10\"","\"3, 10\"","\"13\"","\"13\"","\"13\"","\"13\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Auto reshaping and broadcasting works and the result can be back propagated. [15]", + "result":"PASS", + "duration":"0.051 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = true }","Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(true)"]}, + + {"kind":"and","text":"","code":["String wValue = whichGrad"," ? \"8\" + ( bShape.inject(1, {x,y->x*y}) > 1 ? \", 9\" : \"\" )"," : \"1, 2, 3, 4\""]}, + + {"kind":"and","text":"","code":["def aShape = [2, 2]"]}, + + {"kind":"and","text":"","code":["Tensor a = Tensor.of(aShape, 1d..5d).setRqsGradient(!whichGrad).to(Device.get(device))","Tensor b = Tensor.of(bShape, 8d..9d).setRqsGradient(whichGrad).to(Device.get(device))"]}, + + {"kind":"and","text":"","code":["a.mut.toType(type)","b.mut.toType(type)"]}, + + {"kind":"and","text":"","code":["String wShape = ( whichGrad ? bShape : aShape ).join(\"x\")","Tensor w = ( whichGrad ? 
b : a )"]}, + + {"kind":"expect","text":"","code":["a.itemType == type || device == 'GPU' // The gpu backend will only be floats!","b.itemType == type || device == 'GPU' // This is because kernels only work on floats..."]}, + + {"kind":"when","text":"","code":["Tensor c = operation.apply(a, b)"]}, + + {"kind":"then","text":"","code":["c.toString({it.hasSlimNumbers = true}).startsWith(\"[2x2]:($cValue)\")","w.toString({it.hasSlimNumbers = true}) == \"[$wShape]:($wValue):g:(null)\""]}, + + {"kind":"when","text":"","code":["c.backward(Tensor.of([2, 2], [5, -2, 7, 3]).mut.toType(type))"]}, + + {"kind":"then","text":"","code":["w.toString({it.hasSlimNumbers = true}) == \"[$wShape]:($wValue):g:($wGradient)\""]}, + + {"kind":"when","text":"","code":["Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(false)"]}, + + {"kind":"then","text":"","code":["c.toString({it.hasSlimNumbers = true}) == \"(2x2):[$cValue]\""]}, + + {"kind":"cleanup","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = false }"]}, + + {"kind":"where","text":"","code":{"device":["'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'CPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'"],"type":["Double","Double","Float","Float","Double","Double","Float","Float","Double","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float"],"whichGrad":["false","false","false","false","false","false","false","false","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true"],"bShape":["[1]","[1]","[1]","[1]","[1]","[1]","[1]","[1]","[2,1]","[2,1]","[1]","[1]","[1]","[1]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]"],"operation":["{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }"],"cValue":["\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"9, 10, 12, 13\"","\"9, 10, 12, 13\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"-7, -7, -5, 
-5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\""],"wGradient":["\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"3, 10\"","\"3, 10\"","\"13\"","\"13\"","\"13\"","\"13\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Auto reshaping and broadcasting works and the result can be back propagated. [16]", + "result":"PASS", + "duration":"0.051 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = true }","Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(true)"]}, + + {"kind":"and","text":"","code":["String wValue = whichGrad"," ? \"8\" + ( bShape.inject(1, {x,y->x*y}) > 1 ? \", 9\" : \"\" )"," : \"1, 2, 3, 4\""]}, + + {"kind":"and","text":"","code":["def aShape = [2, 2]"]}, + + {"kind":"and","text":"","code":["Tensor a = Tensor.of(aShape, 1d..5d).setRqsGradient(!whichGrad).to(Device.get(device))","Tensor b = Tensor.of(bShape, 8d..9d).setRqsGradient(whichGrad).to(Device.get(device))"]}, + + {"kind":"and","text":"","code":["a.mut.toType(type)","b.mut.toType(type)"]}, + + {"kind":"and","text":"","code":["String wShape = ( whichGrad ? bShape : aShape ).join(\"x\")","Tensor w = ( whichGrad ? 
b : a )"]}, + + {"kind":"expect","text":"","code":["a.itemType == type || device == 'GPU' // The gpu backend will only be floats!","b.itemType == type || device == 'GPU' // This is because kernels only work on floats..."]}, + + {"kind":"when","text":"","code":["Tensor c = operation.apply(a, b)"]}, + + {"kind":"then","text":"","code":["c.toString({it.hasSlimNumbers = true}).startsWith(\"[2x2]:($cValue)\")","w.toString({it.hasSlimNumbers = true}) == \"[$wShape]:($wValue):g:(null)\""]}, + + {"kind":"when","text":"","code":["c.backward(Tensor.of([2, 2], [5, -2, 7, 3]).mut.toType(type))"]}, + + {"kind":"then","text":"","code":["w.toString({it.hasSlimNumbers = true}) == \"[$wShape]:($wValue):g:($wGradient)\""]}, + + {"kind":"when","text":"","code":["Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(false)"]}, + + {"kind":"then","text":"","code":["c.toString({it.hasSlimNumbers = true}) == \"(2x2):[$cValue]\""]}, + + {"kind":"cleanup","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = false }"]}, + + {"kind":"where","text":"","code":{"device":["'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'CPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'"],"type":["Double","Double","Float","Float","Double","Double","Float","Float","Double","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float"],"whichGrad":["false","false","false","false","false","false","false","false","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true"],"bShape":["[1]","[1]","[1]","[1]","[1]","[1]","[1]","[1]","[2,1]","[2,1]","[1]","[1]","[1]","[1]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]"],"operation":["{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }"],"cValue":["\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"9, 10, 12, 13\"","\"9, 10, 12, 13\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"-7, -7, -5, 
-5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\""],"wGradient":["\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"3, 10\"","\"3, 10\"","\"13\"","\"13\"","\"13\"","\"13\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Auto reshaping and broadcasting works and the result can be back propagated. [17]", + "result":"PASS", + "duration":"0.048 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = true }","Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(true)"]}, + + {"kind":"and","text":"","code":["String wValue = whichGrad"," ? \"8\" + ( bShape.inject(1, {x,y->x*y}) > 1 ? \", 9\" : \"\" )"," : \"1, 2, 3, 4\""]}, + + {"kind":"and","text":"","code":["def aShape = [2, 2]"]}, + + {"kind":"and","text":"","code":["Tensor a = Tensor.of(aShape, 1d..5d).setRqsGradient(!whichGrad).to(Device.get(device))","Tensor b = Tensor.of(bShape, 8d..9d).setRqsGradient(whichGrad).to(Device.get(device))"]}, + + {"kind":"and","text":"","code":["a.mut.toType(type)","b.mut.toType(type)"]}, + + {"kind":"and","text":"","code":["String wShape = ( whichGrad ? bShape : aShape ).join(\"x\")","Tensor w = ( whichGrad ? 
b : a )"]}, + + {"kind":"expect","text":"","code":["a.itemType == type || device == 'GPU' // The gpu backend will only be floats!","b.itemType == type || device == 'GPU' // This is because kernels only work on floats..."]}, + + {"kind":"when","text":"","code":["Tensor c = operation.apply(a, b)"]}, + + {"kind":"then","text":"","code":["c.toString({it.hasSlimNumbers = true}).startsWith(\"[2x2]:($cValue)\")","w.toString({it.hasSlimNumbers = true}) == \"[$wShape]:($wValue):g:(null)\""]}, + + {"kind":"when","text":"","code":["c.backward(Tensor.of([2, 2], [5, -2, 7, 3]).mut.toType(type))"]}, + + {"kind":"then","text":"","code":["w.toString({it.hasSlimNumbers = true}) == \"[$wShape]:($wValue):g:($wGradient)\""]}, + + {"kind":"when","text":"","code":["Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(false)"]}, + + {"kind":"then","text":"","code":["c.toString({it.hasSlimNumbers = true}) == \"(2x2):[$cValue]\""]}, + + {"kind":"cleanup","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = false }"]}, + + {"kind":"where","text":"","code":{"device":["'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'CPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'"],"type":["Double","Double","Float","Float","Double","Double","Float","Float","Double","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float"],"whichGrad":["false","false","false","false","false","false","false","false","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true"],"bShape":["[1]","[1]","[1]","[1]","[1]","[1]","[1]","[1]","[2,1]","[2,1]","[1]","[1]","[1]","[1]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]"],"operation":["{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }"],"cValue":["\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"9, 10, 12, 13\"","\"9, 10, 12, 13\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"-7, -7, -5, 
-5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\""],"wGradient":["\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"3, 10\"","\"3, 10\"","\"13\"","\"13\"","\"13\"","\"13\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Auto reshaping and broadcasting works and the result can be back propagated. [18]", + "result":"PASS", + "duration":"0.062 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = true }","Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(true)"]}, + + {"kind":"and","text":"","code":["String wValue = whichGrad"," ? \"8\" + ( bShape.inject(1, {x,y->x*y}) > 1 ? \", 9\" : \"\" )"," : \"1, 2, 3, 4\""]}, + + {"kind":"and","text":"","code":["def aShape = [2, 2]"]}, + + {"kind":"and","text":"","code":["Tensor a = Tensor.of(aShape, 1d..5d).setRqsGradient(!whichGrad).to(Device.get(device))","Tensor b = Tensor.of(bShape, 8d..9d).setRqsGradient(whichGrad).to(Device.get(device))"]}, + + {"kind":"and","text":"","code":["a.mut.toType(type)","b.mut.toType(type)"]}, + + {"kind":"and","text":"","code":["String wShape = ( whichGrad ? bShape : aShape ).join(\"x\")","Tensor w = ( whichGrad ? 
b : a )"]}, + + {"kind":"expect","text":"","code":["a.itemType == type || device == 'GPU' // The gpu backend will only be floats!","b.itemType == type || device == 'GPU' // This is because kernels only work on floats..."]}, + + {"kind":"when","text":"","code":["Tensor c = operation.apply(a, b)"]}, + + {"kind":"then","text":"","code":["c.toString({it.hasSlimNumbers = true}).startsWith(\"[2x2]:($cValue)\")","w.toString({it.hasSlimNumbers = true}) == \"[$wShape]:($wValue):g:(null)\""]}, + + {"kind":"when","text":"","code":["c.backward(Tensor.of([2, 2], [5, -2, 7, 3]).mut.toType(type))"]}, + + {"kind":"then","text":"","code":["w.toString({it.hasSlimNumbers = true}) == \"[$wShape]:($wValue):g:($wGradient)\""]}, + + {"kind":"when","text":"","code":["Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(false)"]}, + + {"kind":"then","text":"","code":["c.toString({it.hasSlimNumbers = true}) == \"(2x2):[$cValue]\""]}, + + {"kind":"cleanup","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = false }"]}, + + {"kind":"where","text":"","code":{"device":["'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'CPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'"],"type":["Double","Double","Float","Float","Double","Double","Float","Float","Double","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float"],"whichGrad":["false","false","false","false","false","false","false","false","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true"],"bShape":["[1]","[1]","[1]","[1]","[1]","[1]","[1]","[1]","[2,1]","[2,1]","[1]","[1]","[1]","[1]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]"],"operation":["{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }"],"cValue":["\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"9, 10, 12, 13\"","\"9, 10, 12, 13\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"-7, -7, -5, 
-5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\""],"wGradient":["\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"3, 10\"","\"3, 10\"","\"13\"","\"13\"","\"13\"","\"13\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Auto reshaping and broadcasting works and the result can be back propagated. [19]", + "result":"PASS", + "duration":"0.062 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = true }","Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(true)"]}, + + {"kind":"and","text":"","code":["String wValue = whichGrad"," ? \"8\" + ( bShape.inject(1, {x,y->x*y}) > 1 ? \", 9\" : \"\" )"," : \"1, 2, 3, 4\""]}, + + {"kind":"and","text":"","code":["def aShape = [2, 2]"]}, + + {"kind":"and","text":"","code":["Tensor a = Tensor.of(aShape, 1d..5d).setRqsGradient(!whichGrad).to(Device.get(device))","Tensor b = Tensor.of(bShape, 8d..9d).setRqsGradient(whichGrad).to(Device.get(device))"]}, + + {"kind":"and","text":"","code":["a.mut.toType(type)","b.mut.toType(type)"]}, + + {"kind":"and","text":"","code":["String wShape = ( whichGrad ? bShape : aShape ).join(\"x\")","Tensor w = ( whichGrad ? 
b : a )"]}, + + {"kind":"expect","text":"","code":["a.itemType == type || device == 'GPU' // The gpu backend will only be floats!","b.itemType == type || device == 'GPU' // This is because kernels only work on floats..."]}, + + {"kind":"when","text":"","code":["Tensor c = operation.apply(a, b)"]}, + + {"kind":"then","text":"","code":["c.toString({it.hasSlimNumbers = true}).startsWith(\"[2x2]:($cValue)\")","w.toString({it.hasSlimNumbers = true}) == \"[$wShape]:($wValue):g:(null)\""]}, + + {"kind":"when","text":"","code":["c.backward(Tensor.of([2, 2], [5, -2, 7, 3]).mut.toType(type))"]}, + + {"kind":"then","text":"","code":["w.toString({it.hasSlimNumbers = true}) == \"[$wShape]:($wValue):g:($wGradient)\""]}, + + {"kind":"when","text":"","code":["Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(false)"]}, + + {"kind":"then","text":"","code":["c.toString({it.hasSlimNumbers = true}) == \"(2x2):[$cValue]\""]}, + + {"kind":"cleanup","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = false }"]}, + + {"kind":"where","text":"","code":{"device":["'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'CPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'"],"type":["Double","Double","Float","Float","Double","Double","Float","Float","Double","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float"],"whichGrad":["false","false","false","false","false","false","false","false","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true"],"bShape":["[1]","[1]","[1]","[1]","[1]","[1]","[1]","[1]","[2,1]","[2,1]","[1]","[1]","[1]","[1]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]"],"operation":["{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }"],"cValue":["\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"9, 10, 12, 13\"","\"9, 10, 12, 13\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"-7, -7, -5, 
-5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\""],"wGradient":["\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"3, 10\"","\"3, 10\"","\"13\"","\"13\"","\"13\"","\"13\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Auto reshaping and broadcasting works and the result can be back propagated. [20]", + "result":"PASS", + "duration":"0.067 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = true }","Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(true)"]}, + + {"kind":"and","text":"","code":["String wValue = whichGrad"," ? \"8\" + ( bShape.inject(1, {x,y->x*y}) > 1 ? \", 9\" : \"\" )"," : \"1, 2, 3, 4\""]}, + + {"kind":"and","text":"","code":["def aShape = [2, 2]"]}, + + {"kind":"and","text":"","code":["Tensor a = Tensor.of(aShape, 1d..5d).setRqsGradient(!whichGrad).to(Device.get(device))","Tensor b = Tensor.of(bShape, 8d..9d).setRqsGradient(whichGrad).to(Device.get(device))"]}, + + {"kind":"and","text":"","code":["a.mut.toType(type)","b.mut.toType(type)"]}, + + {"kind":"and","text":"","code":["String wShape = ( whichGrad ? bShape : aShape ).join(\"x\")","Tensor w = ( whichGrad ? 
b : a )"]}, + + {"kind":"expect","text":"","code":["a.itemType == type || device == 'GPU' // The gpu backend will only be floats!","b.itemType == type || device == 'GPU' // This is because kernels only work on floats..."]}, + + {"kind":"when","text":"","code":["Tensor c = operation.apply(a, b)"]}, + + {"kind":"then","text":"","code":["c.toString({it.hasSlimNumbers = true}).startsWith(\"[2x2]:($cValue)\")","w.toString({it.hasSlimNumbers = true}) == \"[$wShape]:($wValue):g:(null)\""]}, + + {"kind":"when","text":"","code":["c.backward(Tensor.of([2, 2], [5, -2, 7, 3]).mut.toType(type))"]}, + + {"kind":"then","text":"","code":["w.toString({it.hasSlimNumbers = true}) == \"[$wShape]:($wValue):g:($wGradient)\""]}, + + {"kind":"when","text":"","code":["Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(false)"]}, + + {"kind":"then","text":"","code":["c.toString({it.hasSlimNumbers = true}) == \"(2x2):[$cValue]\""]}, + + {"kind":"cleanup","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = false }"]}, + + {"kind":"where","text":"","code":{"device":["'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'CPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'"],"type":["Double","Double","Float","Float","Double","Double","Float","Float","Double","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float"],"whichGrad":["false","false","false","false","false","false","false","false","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true"],"bShape":["[1]","[1]","[1]","[1]","[1]","[1]","[1]","[1]","[2,1]","[2,1]","[1]","[1]","[1]","[1]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]"],"operation":["{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }"],"cValue":["\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"9, 10, 12, 13\"","\"9, 10, 12, 13\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"-7, -7, -5, 
-5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\""],"wGradient":["\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"3, 10\"","\"3, 10\"","\"13\"","\"13\"","\"13\"","\"13\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Auto reshaping and broadcasting works and the result can be back propagated. [21]", + "result":"PASS", + "duration":"0.065 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = true }","Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(true)"]}, + + {"kind":"and","text":"","code":["String wValue = whichGrad"," ? \"8\" + ( bShape.inject(1, {x,y->x*y}) > 1 ? \", 9\" : \"\" )"," : \"1, 2, 3, 4\""]}, + + {"kind":"and","text":"","code":["def aShape = [2, 2]"]}, + + {"kind":"and","text":"","code":["Tensor a = Tensor.of(aShape, 1d..5d).setRqsGradient(!whichGrad).to(Device.get(device))","Tensor b = Tensor.of(bShape, 8d..9d).setRqsGradient(whichGrad).to(Device.get(device))"]}, + + {"kind":"and","text":"","code":["a.mut.toType(type)","b.mut.toType(type)"]}, + + {"kind":"and","text":"","code":["String wShape = ( whichGrad ? bShape : aShape ).join(\"x\")","Tensor w = ( whichGrad ? 
b : a )"]}, + + {"kind":"expect","text":"","code":["a.itemType == type || device == 'GPU' // The gpu backend will only be floats!","b.itemType == type || device == 'GPU' // This is because kernels only work on floats..."]}, + + {"kind":"when","text":"","code":["Tensor c = operation.apply(a, b)"]}, + + {"kind":"then","text":"","code":["c.toString({it.hasSlimNumbers = true}).startsWith(\"[2x2]:($cValue)\")","w.toString({it.hasSlimNumbers = true}) == \"[$wShape]:($wValue):g:(null)\""]}, + + {"kind":"when","text":"","code":["c.backward(Tensor.of([2, 2], [5, -2, 7, 3]).mut.toType(type))"]}, + + {"kind":"then","text":"","code":["w.toString({it.hasSlimNumbers = true}) == \"[$wShape]:($wValue):g:($wGradient)\""]}, + + {"kind":"when","text":"","code":["Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(false)"]}, + + {"kind":"then","text":"","code":["c.toString({it.hasSlimNumbers = true}) == \"(2x2):[$cValue]\""]}, + + {"kind":"cleanup","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = false }"]}, + + {"kind":"where","text":"","code":{"device":["'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'CPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'"],"type":["Double","Double","Float","Float","Double","Double","Float","Float","Double","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float"],"whichGrad":["false","false","false","false","false","false","false","false","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true"],"bShape":["[1]","[1]","[1]","[1]","[1]","[1]","[1]","[1]","[2,1]","[2,1]","[1]","[1]","[1]","[1]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]"],"operation":["{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }"],"cValue":["\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"9, 10, 12, 13\"","\"9, 10, 12, 13\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"-7, -7, -5, 
-5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\""],"wGradient":["\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"3, 10\"","\"3, 10\"","\"13\"","\"13\"","\"13\"","\"13\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Auto reshaping and broadcasting works and the result can be back propagated. [22]", + "result":"PASS", + "duration":"0.068 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = true }","Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(true)"]}, + + {"kind":"and","text":"","code":["String wValue = whichGrad"," ? \"8\" + ( bShape.inject(1, {x,y->x*y}) > 1 ? \", 9\" : \"\" )"," : \"1, 2, 3, 4\""]}, + + {"kind":"and","text":"","code":["def aShape = [2, 2]"]}, + + {"kind":"and","text":"","code":["Tensor a = Tensor.of(aShape, 1d..5d).setRqsGradient(!whichGrad).to(Device.get(device))","Tensor b = Tensor.of(bShape, 8d..9d).setRqsGradient(whichGrad).to(Device.get(device))"]}, + + {"kind":"and","text":"","code":["a.mut.toType(type)","b.mut.toType(type)"]}, + + {"kind":"and","text":"","code":["String wShape = ( whichGrad ? bShape : aShape ).join(\"x\")","Tensor w = ( whichGrad ? 
b : a )"]}, + + {"kind":"expect","text":"","code":["a.itemType == type || device == 'GPU' // The gpu backend will only be floats!","b.itemType == type || device == 'GPU' // This is because kernels only work on floats..."]}, + + {"kind":"when","text":"","code":["Tensor c = operation.apply(a, b)"]}, + + {"kind":"then","text":"","code":["c.toString({it.hasSlimNumbers = true}).startsWith(\"[2x2]:($cValue)\")","w.toString({it.hasSlimNumbers = true}) == \"[$wShape]:($wValue):g:(null)\""]}, + + {"kind":"when","text":"","code":["c.backward(Tensor.of([2, 2], [5, -2, 7, 3]).mut.toType(type))"]}, + + {"kind":"then","text":"","code":["w.toString({it.hasSlimNumbers = true}) == \"[$wShape]:($wValue):g:($wGradient)\""]}, + + {"kind":"when","text":"","code":["Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(false)"]}, + + {"kind":"then","text":"","code":["c.toString({it.hasSlimNumbers = true}) == \"(2x2):[$cValue]\""]}, + + {"kind":"cleanup","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = false }"]}, + + {"kind":"where","text":"","code":{"device":["'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'CPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'"],"type":["Double","Double","Float","Float","Double","Double","Float","Float","Double","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float"],"whichGrad":["false","false","false","false","false","false","false","false","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true"],"bShape":["[1]","[1]","[1]","[1]","[1]","[1]","[1]","[1]","[2,1]","[2,1]","[1]","[1]","[1]","[1]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]"],"operation":["{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }"],"cValue":["\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"9, 10, 12, 13\"","\"9, 10, 12, 13\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"-7, -7, -5, 
-5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\""],"wGradient":["\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"3, 10\"","\"3, 10\"","\"13\"","\"13\"","\"13\"","\"13\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Auto reshaping and broadcasting works and the result can be back propagated. [23]", + "result":"PASS", + "duration":"0.065 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = true }","Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(true)"]}, + + {"kind":"and","text":"","code":["String wValue = whichGrad"," ? \"8\" + ( bShape.inject(1, {x,y->x*y}) > 1 ? \", 9\" : \"\" )"," : \"1, 2, 3, 4\""]}, + + {"kind":"and","text":"","code":["def aShape = [2, 2]"]}, + + {"kind":"and","text":"","code":["Tensor a = Tensor.of(aShape, 1d..5d).setRqsGradient(!whichGrad).to(Device.get(device))","Tensor b = Tensor.of(bShape, 8d..9d).setRqsGradient(whichGrad).to(Device.get(device))"]}, + + {"kind":"and","text":"","code":["a.mut.toType(type)","b.mut.toType(type)"]}, + + {"kind":"and","text":"","code":["String wShape = ( whichGrad ? bShape : aShape ).join(\"x\")","Tensor w = ( whichGrad ? 
b : a )"]}, + + {"kind":"expect","text":"","code":["a.itemType == type || device == 'GPU' // The gpu backend will only be floats!","b.itemType == type || device == 'GPU' // This is because kernels only work on floats..."]}, + + {"kind":"when","text":"","code":["Tensor c = operation.apply(a, b)"]}, + + {"kind":"then","text":"","code":["c.toString({it.hasSlimNumbers = true}).startsWith(\"[2x2]:($cValue)\")","w.toString({it.hasSlimNumbers = true}) == \"[$wShape]:($wValue):g:(null)\""]}, + + {"kind":"when","text":"","code":["c.backward(Tensor.of([2, 2], [5, -2, 7, 3]).mut.toType(type))"]}, + + {"kind":"then","text":"","code":["w.toString({it.hasSlimNumbers = true}) == \"[$wShape]:($wValue):g:($wGradient)\""]}, + + {"kind":"when","text":"","code":["Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(false)"]}, + + {"kind":"then","text":"","code":["c.toString({it.hasSlimNumbers = true}) == \"(2x2):[$cValue]\""]}, + + {"kind":"cleanup","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = false }"]}, + + {"kind":"where","text":"","code":{"device":["'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'CPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'"],"type":["Double","Double","Float","Float","Double","Double","Float","Float","Double","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float"],"whichGrad":["false","false","false","false","false","false","false","false","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true"],"bShape":["[1]","[1]","[1]","[1]","[1]","[1]","[1]","[1]","[2,1]","[2,1]","[1]","[1]","[1]","[1]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]"],"operation":["{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }"],"cValue":["\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"9, 10, 12, 13\"","\"9, 10, 12, 13\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"-7, -7, -5, 
-5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\""],"wGradient":["\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"3, 10\"","\"3, 10\"","\"13\"","\"13\"","\"13\"","\"13\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Auto reshaping and broadcasting works and the result can be back propagated. [24]", + "result":"PASS", + "duration":"0.066 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = true }","Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(true)"]}, + + {"kind":"and","text":"","code":["String wValue = whichGrad"," ? \"8\" + ( bShape.inject(1, {x,y->x*y}) > 1 ? \", 9\" : \"\" )"," : \"1, 2, 3, 4\""]}, + + {"kind":"and","text":"","code":["def aShape = [2, 2]"]}, + + {"kind":"and","text":"","code":["Tensor a = Tensor.of(aShape, 1d..5d).setRqsGradient(!whichGrad).to(Device.get(device))","Tensor b = Tensor.of(bShape, 8d..9d).setRqsGradient(whichGrad).to(Device.get(device))"]}, + + {"kind":"and","text":"","code":["a.mut.toType(type)","b.mut.toType(type)"]}, + + {"kind":"and","text":"","code":["String wShape = ( whichGrad ? bShape : aShape ).join(\"x\")","Tensor w = ( whichGrad ? 
b : a )"]}, + + {"kind":"expect","text":"","code":["a.itemType == type || device == 'GPU' // The gpu backend will only be floats!","b.itemType == type || device == 'GPU' // This is because kernels only work on floats..."]}, + + {"kind":"when","text":"","code":["Tensor c = operation.apply(a, b)"]}, + + {"kind":"then","text":"","code":["c.toString({it.hasSlimNumbers = true}).startsWith(\"[2x2]:($cValue)\")","w.toString({it.hasSlimNumbers = true}) == \"[$wShape]:($wValue):g:(null)\""]}, + + {"kind":"when","text":"","code":["c.backward(Tensor.of([2, 2], [5, -2, 7, 3]).mut.toType(type))"]}, + + {"kind":"then","text":"","code":["w.toString({it.hasSlimNumbers = true}) == \"[$wShape]:($wValue):g:($wGradient)\""]}, + + {"kind":"when","text":"","code":["Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(false)"]}, + + {"kind":"then","text":"","code":["c.toString({it.hasSlimNumbers = true}) == \"(2x2):[$cValue]\""]}, + + {"kind":"cleanup","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = false }"]}, + + {"kind":"where","text":"","code":{"device":["'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'CPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'"],"type":["Double","Double","Float","Float","Double","Double","Float","Float","Double","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float"],"whichGrad":["false","false","false","false","false","false","false","false","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true"],"bShape":["[1]","[1]","[1]","[1]","[1]","[1]","[1]","[1]","[2,1]","[2,1]","[1]","[1]","[1]","[1]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]"],"operation":["{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }"],"cValue":["\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"9, 10, 12, 13\"","\"9, 10, 12, 13\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"-7, -7, -5, 
-5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\""],"wGradient":["\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"3, 10\"","\"3, 10\"","\"13\"","\"13\"","\"13\"","\"13\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Auto reshaping and broadcasting works and the result can be back propagated. [25]", + "result":"PASS", + "duration":"0.060 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = true }","Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(true)"]}, + + {"kind":"and","text":"","code":["String wValue = whichGrad"," ? \"8\" + ( bShape.inject(1, {x,y->x*y}) > 1 ? \", 9\" : \"\" )"," : \"1, 2, 3, 4\""]}, + + {"kind":"and","text":"","code":["def aShape = [2, 2]"]}, + + {"kind":"and","text":"","code":["Tensor a = Tensor.of(aShape, 1d..5d).setRqsGradient(!whichGrad).to(Device.get(device))","Tensor b = Tensor.of(bShape, 8d..9d).setRqsGradient(whichGrad).to(Device.get(device))"]}, + + {"kind":"and","text":"","code":["a.mut.toType(type)","b.mut.toType(type)"]}, + + {"kind":"and","text":"","code":["String wShape = ( whichGrad ? bShape : aShape ).join(\"x\")","Tensor w = ( whichGrad ? 
b : a )"]}, + + {"kind":"expect","text":"","code":["a.itemType == type || device == 'GPU' // The gpu backend will only be floats!","b.itemType == type || device == 'GPU' // This is because kernels only work on floats..."]}, + + {"kind":"when","text":"","code":["Tensor c = operation.apply(a, b)"]}, + + {"kind":"then","text":"","code":["c.toString({it.hasSlimNumbers = true}).startsWith(\"[2x2]:($cValue)\")","w.toString({it.hasSlimNumbers = true}) == \"[$wShape]:($wValue):g:(null)\""]}, + + {"kind":"when","text":"","code":["c.backward(Tensor.of([2, 2], [5, -2, 7, 3]).mut.toType(type))"]}, + + {"kind":"then","text":"","code":["w.toString({it.hasSlimNumbers = true}) == \"[$wShape]:($wValue):g:($wGradient)\""]}, + + {"kind":"when","text":"","code":["Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(false)"]}, + + {"kind":"then","text":"","code":["c.toString({it.hasSlimNumbers = true}) == \"(2x2):[$cValue]\""]}, + + {"kind":"cleanup","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = false }"]}, + + {"kind":"where","text":"","code":{"device":["'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'CPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'"],"type":["Double","Double","Float","Float","Double","Double","Float","Float","Double","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float"],"whichGrad":["false","false","false","false","false","false","false","false","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true"],"bShape":["[1]","[1]","[1]","[1]","[1]","[1]","[1]","[1]","[2,1]","[2,1]","[1]","[1]","[1]","[1]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]"],"operation":["{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }"],"cValue":["\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"9, 10, 12, 13\"","\"9, 10, 12, 13\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"-7, -7, -5, 
-5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\""],"wGradient":["\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"3, 10\"","\"3, 10\"","\"13\"","\"13\"","\"13\"","\"13\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Auto reshaping and broadcasting works and the result can be back propagated. [26]", + "result":"PASS", + "duration":"0.090 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = true }","Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(true)"]}, + + {"kind":"and","text":"","code":["String wValue = whichGrad"," ? \"8\" + ( bShape.inject(1, {x,y->x*y}) > 1 ? \", 9\" : \"\" )"," : \"1, 2, 3, 4\""]}, + + {"kind":"and","text":"","code":["def aShape = [2, 2]"]}, + + {"kind":"and","text":"","code":["Tensor a = Tensor.of(aShape, 1d..5d).setRqsGradient(!whichGrad).to(Device.get(device))","Tensor b = Tensor.of(bShape, 8d..9d).setRqsGradient(whichGrad).to(Device.get(device))"]}, + + {"kind":"and","text":"","code":["a.mut.toType(type)","b.mut.toType(type)"]}, + + {"kind":"and","text":"","code":["String wShape = ( whichGrad ? bShape : aShape ).join(\"x\")","Tensor w = ( whichGrad ? 
b : a )"]}, + + {"kind":"expect","text":"","code":["a.itemType == type || device == 'GPU' // The gpu backend will only be floats!","b.itemType == type || device == 'GPU' // This is because kernels only work on floats..."]}, + + {"kind":"when","text":"","code":["Tensor c = operation.apply(a, b)"]}, + + {"kind":"then","text":"","code":["c.toString({it.hasSlimNumbers = true}).startsWith(\"[2x2]:($cValue)\")","w.toString({it.hasSlimNumbers = true}) == \"[$wShape]:($wValue):g:(null)\""]}, + + {"kind":"when","text":"","code":["c.backward(Tensor.of([2, 2], [5, -2, 7, 3]).mut.toType(type))"]}, + + {"kind":"then","text":"","code":["w.toString({it.hasSlimNumbers = true}) == \"[$wShape]:($wValue):g:($wGradient)\""]}, + + {"kind":"when","text":"","code":["Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(false)"]}, + + {"kind":"then","text":"","code":["c.toString({it.hasSlimNumbers = true}) == \"(2x2):[$cValue]\""]}, + + {"kind":"cleanup","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = false }"]}, + + {"kind":"where","text":"","code":{"device":["'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'CPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'"],"type":["Double","Double","Float","Float","Double","Double","Float","Float","Double","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float"],"whichGrad":["false","false","false","false","false","false","false","false","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true"],"bShape":["[1]","[1]","[1]","[1]","[1]","[1]","[1]","[1]","[2,1]","[2,1]","[1]","[1]","[1]","[1]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]"],"operation":["{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }"],"cValue":["\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"9, 10, 12, 13\"","\"9, 10, 12, 13\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"-7, -7, -5, 
-5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\""],"wGradient":["\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"3, 10\"","\"3, 10\"","\"13\"","\"13\"","\"13\"","\"13\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Auto reshaping and broadcasting works and the result can be back propagated. [27]", + "result":"PASS", + "duration":"0.130 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = true }","Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(true)"]}, + + {"kind":"and","text":"","code":["String wValue = whichGrad"," ? \"8\" + ( bShape.inject(1, {x,y->x*y}) > 1 ? \", 9\" : \"\" )"," : \"1, 2, 3, 4\""]}, + + {"kind":"and","text":"","code":["def aShape = [2, 2]"]}, + + {"kind":"and","text":"","code":["Tensor a = Tensor.of(aShape, 1d..5d).setRqsGradient(!whichGrad).to(Device.get(device))","Tensor b = Tensor.of(bShape, 8d..9d).setRqsGradient(whichGrad).to(Device.get(device))"]}, + + {"kind":"and","text":"","code":["a.mut.toType(type)","b.mut.toType(type)"]}, + + {"kind":"and","text":"","code":["String wShape = ( whichGrad ? bShape : aShape ).join(\"x\")","Tensor w = ( whichGrad ? 
b : a )"]}, + + {"kind":"expect","text":"","code":["a.itemType == type || device == 'GPU' // The gpu backend will only be floats!","b.itemType == type || device == 'GPU' // This is because kernels only work on floats..."]}, + + {"kind":"when","text":"","code":["Tensor c = operation.apply(a, b)"]}, + + {"kind":"then","text":"","code":["c.toString({it.hasSlimNumbers = true}).startsWith(\"[2x2]:($cValue)\")","w.toString({it.hasSlimNumbers = true}) == \"[$wShape]:($wValue):g:(null)\""]}, + + {"kind":"when","text":"","code":["c.backward(Tensor.of([2, 2], [5, -2, 7, 3]).mut.toType(type))"]}, + + {"kind":"then","text":"","code":["w.toString({it.hasSlimNumbers = true}) == \"[$wShape]:($wValue):g:($wGradient)\""]}, + + {"kind":"when","text":"","code":["Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(false)"]}, + + {"kind":"then","text":"","code":["c.toString({it.hasSlimNumbers = true}) == \"(2x2):[$cValue]\""]}, + + {"kind":"cleanup","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = false }"]}, + + {"kind":"where","text":"","code":{"device":["'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'CPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'"],"type":["Double","Double","Float","Float","Double","Double","Float","Float","Double","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float"],"whichGrad":["false","false","false","false","false","false","false","false","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true"],"bShape":["[1]","[1]","[1]","[1]","[1]","[1]","[1]","[1]","[2,1]","[2,1]","[1]","[1]","[1]","[1]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]"],"operation":["{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }"],"cValue":["\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"9, 10, 12, 13\"","\"9, 10, 12, 13\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"-7, -7, -5, 
-5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\""],"wGradient":["\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"3, 10\"","\"3, 10\"","\"13\"","\"13\"","\"13\"","\"13\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Auto reshaping and broadcasting works and the result can be back propagated. [28]", + "result":"PASS", + "duration":"0.121 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = true }","Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(true)"]}, + + {"kind":"and","text":"","code":["String wValue = whichGrad"," ? \"8\" + ( bShape.inject(1, {x,y->x*y}) > 1 ? \", 9\" : \"\" )"," : \"1, 2, 3, 4\""]}, + + {"kind":"and","text":"","code":["def aShape = [2, 2]"]}, + + {"kind":"and","text":"","code":["Tensor a = Tensor.of(aShape, 1d..5d).setRqsGradient(!whichGrad).to(Device.get(device))","Tensor b = Tensor.of(bShape, 8d..9d).setRqsGradient(whichGrad).to(Device.get(device))"]}, + + {"kind":"and","text":"","code":["a.mut.toType(type)","b.mut.toType(type)"]}, + + {"kind":"and","text":"","code":["String wShape = ( whichGrad ? bShape : aShape ).join(\"x\")","Tensor w = ( whichGrad ? 
b : a )"]}, + + {"kind":"expect","text":"","code":["a.itemType == type || device == 'GPU' // The gpu backend will only be floats!","b.itemType == type || device == 'GPU' // This is because kernels only work on floats..."]}, + + {"kind":"when","text":"","code":["Tensor c = operation.apply(a, b)"]}, + + {"kind":"then","text":"","code":["c.toString({it.hasSlimNumbers = true}).startsWith(\"[2x2]:($cValue)\")","w.toString({it.hasSlimNumbers = true}) == \"[$wShape]:($wValue):g:(null)\""]}, + + {"kind":"when","text":"","code":["c.backward(Tensor.of([2, 2], [5, -2, 7, 3]).mut.toType(type))"]}, + + {"kind":"then","text":"","code":["w.toString({it.hasSlimNumbers = true}) == \"[$wShape]:($wValue):g:($wGradient)\""]}, + + {"kind":"when","text":"","code":["Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(false)"]}, + + {"kind":"then","text":"","code":["c.toString({it.hasSlimNumbers = true}) == \"(2x2):[$cValue]\""]}, + + {"kind":"cleanup","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = false }"]}, + + {"kind":"where","text":"","code":{"device":["'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'CPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'"],"type":["Double","Double","Float","Float","Double","Double","Float","Float","Double","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float"],"whichGrad":["false","false","false","false","false","false","false","false","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true"],"bShape":["[1]","[1]","[1]","[1]","[1]","[1]","[1]","[1]","[2,1]","[2,1]","[1]","[1]","[1]","[1]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]"],"operation":["{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }"],"cValue":["\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"9, 10, 12, 13\"","\"9, 10, 12, 13\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"-7, -7, -5, 
-5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\""],"wGradient":["\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"3, 10\"","\"3, 10\"","\"13\"","\"13\"","\"13\"","\"13\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Auto reshaping and broadcasting works and the result can be back propagated. [29]", + "result":"PASS", + "duration":"0.075 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = true }","Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(true)"]}, + + {"kind":"and","text":"","code":["String wValue = whichGrad"," ? \"8\" + ( bShape.inject(1, {x,y->x*y}) > 1 ? \", 9\" : \"\" )"," : \"1, 2, 3, 4\""]}, + + {"kind":"and","text":"","code":["def aShape = [2, 2]"]}, + + {"kind":"and","text":"","code":["Tensor a = Tensor.of(aShape, 1d..5d).setRqsGradient(!whichGrad).to(Device.get(device))","Tensor b = Tensor.of(bShape, 8d..9d).setRqsGradient(whichGrad).to(Device.get(device))"]}, + + {"kind":"and","text":"","code":["a.mut.toType(type)","b.mut.toType(type)"]}, + + {"kind":"and","text":"","code":["String wShape = ( whichGrad ? bShape : aShape ).join(\"x\")","Tensor w = ( whichGrad ? 
b : a )"]}, + + {"kind":"expect","text":"","code":["a.itemType == type || device == 'GPU' // The gpu backend will only be floats!","b.itemType == type || device == 'GPU' // This is because kernels only work on floats..."]}, + + {"kind":"when","text":"","code":["Tensor c = operation.apply(a, b)"]}, + + {"kind":"then","text":"","code":["c.toString({it.hasSlimNumbers = true}).startsWith(\"[2x2]:($cValue)\")","w.toString({it.hasSlimNumbers = true}) == \"[$wShape]:($wValue):g:(null)\""]}, + + {"kind":"when","text":"","code":["c.backward(Tensor.of([2, 2], [5, -2, 7, 3]).mut.toType(type))"]}, + + {"kind":"then","text":"","code":["w.toString({it.hasSlimNumbers = true}) == \"[$wShape]:($wValue):g:($wGradient)\""]}, + + {"kind":"when","text":"","code":["Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(false)"]}, + + {"kind":"then","text":"","code":["c.toString({it.hasSlimNumbers = true}) == \"(2x2):[$cValue]\""]}, + + {"kind":"cleanup","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = false }"]}, + + {"kind":"where","text":"","code":{"device":["'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'CPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'"],"type":["Double","Double","Float","Float","Double","Double","Float","Float","Double","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float"],"whichGrad":["false","false","false","false","false","false","false","false","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true"],"bShape":["[1]","[1]","[1]","[1]","[1]","[1]","[1]","[1]","[2,1]","[2,1]","[1]","[1]","[1]","[1]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]"],"operation":["{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }"],"cValue":["\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"9, 10, 12, 13\"","\"9, 10, 12, 13\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"-7, -7, -5, 
-5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\""],"wGradient":["\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"3, 10\"","\"3, 10\"","\"13\"","\"13\"","\"13\"","\"13\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Auto reshaping and broadcasting works and the result can be back propagated. [30]", + "result":"PASS", + "duration":"0.074 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = true }","Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(true)"]}, + + {"kind":"and","text":"","code":["String wValue = whichGrad"," ? \"8\" + ( bShape.inject(1, {x,y->x*y}) > 1 ? \", 9\" : \"\" )"," : \"1, 2, 3, 4\""]}, + + {"kind":"and","text":"","code":["def aShape = [2, 2]"]}, + + {"kind":"and","text":"","code":["Tensor a = Tensor.of(aShape, 1d..5d).setRqsGradient(!whichGrad).to(Device.get(device))","Tensor b = Tensor.of(bShape, 8d..9d).setRqsGradient(whichGrad).to(Device.get(device))"]}, + + {"kind":"and","text":"","code":["a.mut.toType(type)","b.mut.toType(type)"]}, + + {"kind":"and","text":"","code":["String wShape = ( whichGrad ? bShape : aShape ).join(\"x\")","Tensor w = ( whichGrad ? 
b : a )"]}, + + {"kind":"expect","text":"","code":["a.itemType == type || device == 'GPU' // The gpu backend will only be floats!","b.itemType == type || device == 'GPU' // This is because kernels only work on floats..."]}, + + {"kind":"when","text":"","code":["Tensor c = operation.apply(a, b)"]}, + + {"kind":"then","text":"","code":["c.toString({it.hasSlimNumbers = true}).startsWith(\"[2x2]:($cValue)\")","w.toString({it.hasSlimNumbers = true}) == \"[$wShape]:($wValue):g:(null)\""]}, + + {"kind":"when","text":"","code":["c.backward(Tensor.of([2, 2], [5, -2, 7, 3]).mut.toType(type))"]}, + + {"kind":"then","text":"","code":["w.toString({it.hasSlimNumbers = true}) == \"[$wShape]:($wValue):g:($wGradient)\""]}, + + {"kind":"when","text":"","code":["Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(false)"]}, + + {"kind":"then","text":"","code":["c.toString({it.hasSlimNumbers = true}) == \"(2x2):[$cValue]\""]}, + + {"kind":"cleanup","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = false }"]}, + + {"kind":"where","text":"","code":{"device":["'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'CPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'"],"type":["Double","Double","Float","Float","Double","Double","Float","Float","Double","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float"],"whichGrad":["false","false","false","false","false","false","false","false","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true"],"bShape":["[1]","[1]","[1]","[1]","[1]","[1]","[1]","[1]","[2,1]","[2,1]","[1]","[1]","[1]","[1]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]"],"operation":["{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }"],"cValue":["\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"9, 10, 12, 13\"","\"9, 10, 12, 13\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"-7, -7, -5, 
-5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\""],"wGradient":["\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"3, 10\"","\"3, 10\"","\"13\"","\"13\"","\"13\"","\"13\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Auto reshaping and broadcasting works and the result can be back propagated. [31]", + "result":"PASS", + "duration":"0.069 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = true }","Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(true)"]}, + + {"kind":"and","text":"","code":["String wValue = whichGrad"," ? \"8\" + ( bShape.inject(1, {x,y->x*y}) > 1 ? \", 9\" : \"\" )"," : \"1, 2, 3, 4\""]}, + + {"kind":"and","text":"","code":["def aShape = [2, 2]"]}, + + {"kind":"and","text":"","code":["Tensor a = Tensor.of(aShape, 1d..5d).setRqsGradient(!whichGrad).to(Device.get(device))","Tensor b = Tensor.of(bShape, 8d..9d).setRqsGradient(whichGrad).to(Device.get(device))"]}, + + {"kind":"and","text":"","code":["a.mut.toType(type)","b.mut.toType(type)"]}, + + {"kind":"and","text":"","code":["String wShape = ( whichGrad ? bShape : aShape ).join(\"x\")","Tensor w = ( whichGrad ? 
b : a )"]}, + + {"kind":"expect","text":"","code":["a.itemType == type || device == 'GPU' // The gpu backend will only be floats!","b.itemType == type || device == 'GPU' // This is because kernels only work on floats..."]}, + + {"kind":"when","text":"","code":["Tensor c = operation.apply(a, b)"]}, + + {"kind":"then","text":"","code":["c.toString({it.hasSlimNumbers = true}).startsWith(\"[2x2]:($cValue)\")","w.toString({it.hasSlimNumbers = true}) == \"[$wShape]:($wValue):g:(null)\""]}, + + {"kind":"when","text":"","code":["c.backward(Tensor.of([2, 2], [5, -2, 7, 3]).mut.toType(type))"]}, + + {"kind":"then","text":"","code":["w.toString({it.hasSlimNumbers = true}) == \"[$wShape]:($wValue):g:($wGradient)\""]}, + + {"kind":"when","text":"","code":["Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(false)"]}, + + {"kind":"then","text":"","code":["c.toString({it.hasSlimNumbers = true}) == \"(2x2):[$cValue]\""]}, + + {"kind":"cleanup","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = false }"]}, + + {"kind":"where","text":"","code":{"device":["'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'CPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'"],"type":["Double","Double","Float","Float","Double","Double","Float","Float","Double","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float"],"whichGrad":["false","false","false","false","false","false","false","false","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true"],"bShape":["[1]","[1]","[1]","[1]","[1]","[1]","[1]","[1]","[2,1]","[2,1]","[1]","[1]","[1]","[1]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]"],"operation":["{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }"],"cValue":["\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"9, 10, 12, 13\"","\"9, 10, 12, 13\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"-7, -7, -5, 
-5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\""],"wGradient":["\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"3, 10\"","\"3, 10\"","\"13\"","\"13\"","\"13\"","\"13\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Auto reshaping and broadcasting works and the result can be back propagated. [32]", + "result":"PASS", + "duration":"0.062 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = true }","Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(true)"]}, + + {"kind":"and","text":"","code":["String wValue = whichGrad"," ? \"8\" + ( bShape.inject(1, {x,y->x*y}) > 1 ? \", 9\" : \"\" )"," : \"1, 2, 3, 4\""]}, + + {"kind":"and","text":"","code":["def aShape = [2, 2]"]}, + + {"kind":"and","text":"","code":["Tensor a = Tensor.of(aShape, 1d..5d).setRqsGradient(!whichGrad).to(Device.get(device))","Tensor b = Tensor.of(bShape, 8d..9d).setRqsGradient(whichGrad).to(Device.get(device))"]}, + + {"kind":"and","text":"","code":["a.mut.toType(type)","b.mut.toType(type)"]}, + + {"kind":"and","text":"","code":["String wShape = ( whichGrad ? bShape : aShape ).join(\"x\")","Tensor w = ( whichGrad ? 
b : a )"]}, + + {"kind":"expect","text":"","code":["a.itemType == type || device == 'GPU' // The gpu backend will only be floats!","b.itemType == type || device == 'GPU' // This is because kernels only work on floats..."]}, + + {"kind":"when","text":"","code":["Tensor c = operation.apply(a, b)"]}, + + {"kind":"then","text":"","code":["c.toString({it.hasSlimNumbers = true}).startsWith(\"[2x2]:($cValue)\")","w.toString({it.hasSlimNumbers = true}) == \"[$wShape]:($wValue):g:(null)\""]}, + + {"kind":"when","text":"","code":["c.backward(Tensor.of([2, 2], [5, -2, 7, 3]).mut.toType(type))"]}, + + {"kind":"then","text":"","code":["w.toString({it.hasSlimNumbers = true}) == \"[$wShape]:($wValue):g:($wGradient)\""]}, + + {"kind":"when","text":"","code":["Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(false)"]}, + + {"kind":"then","text":"","code":["c.toString({it.hasSlimNumbers = true}) == \"(2x2):[$cValue]\""]}, + + {"kind":"cleanup","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = false }"]}, + + {"kind":"where","text":"","code":{"device":["'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'CPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'"],"type":["Double","Double","Float","Float","Double","Double","Float","Float","Double","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float"],"whichGrad":["false","false","false","false","false","false","false","false","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true"],"bShape":["[1]","[1]","[1]","[1]","[1]","[1]","[1]","[1]","[2,1]","[2,1]","[1]","[1]","[1]","[1]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]"],"operation":["{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }"],"cValue":["\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"9, 10, 12, 13\"","\"9, 10, 12, 13\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"-7, -7, -5, 
-5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\""],"wGradient":["\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"3, 10\"","\"3, 10\"","\"13\"","\"13\"","\"13\"","\"13\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Auto reshaping and broadcasting works and the result can be back propagated. [33]", + "result":"PASS", + "duration":"0.058 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = true }","Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(true)"]}, + + {"kind":"and","text":"","code":["String wValue = whichGrad"," ? \"8\" + ( bShape.inject(1, {x,y->x*y}) > 1 ? \", 9\" : \"\" )"," : \"1, 2, 3, 4\""]}, + + {"kind":"and","text":"","code":["def aShape = [2, 2]"]}, + + {"kind":"and","text":"","code":["Tensor a = Tensor.of(aShape, 1d..5d).setRqsGradient(!whichGrad).to(Device.get(device))","Tensor b = Tensor.of(bShape, 8d..9d).setRqsGradient(whichGrad).to(Device.get(device))"]}, + + {"kind":"and","text":"","code":["a.mut.toType(type)","b.mut.toType(type)"]}, + + {"kind":"and","text":"","code":["String wShape = ( whichGrad ? bShape : aShape ).join(\"x\")","Tensor w = ( whichGrad ? 
b : a )"]}, + + {"kind":"expect","text":"","code":["a.itemType == type || device == 'GPU' // The gpu backend will only be floats!","b.itemType == type || device == 'GPU' // This is because kernels only work on floats..."]}, + + {"kind":"when","text":"","code":["Tensor c = operation.apply(a, b)"]}, + + {"kind":"then","text":"","code":["c.toString({it.hasSlimNumbers = true}).startsWith(\"[2x2]:($cValue)\")","w.toString({it.hasSlimNumbers = true}) == \"[$wShape]:($wValue):g:(null)\""]}, + + {"kind":"when","text":"","code":["c.backward(Tensor.of([2, 2], [5, -2, 7, 3]).mut.toType(type))"]}, + + {"kind":"then","text":"","code":["w.toString({it.hasSlimNumbers = true}) == \"[$wShape]:($wValue):g:($wGradient)\""]}, + + {"kind":"when","text":"","code":["Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(false)"]}, + + {"kind":"then","text":"","code":["c.toString({it.hasSlimNumbers = true}) == \"(2x2):[$cValue]\""]}, + + {"kind":"cleanup","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = false }"]}, + + {"kind":"where","text":"","code":{"device":["'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'CPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'"],"type":["Double","Double","Float","Float","Double","Double","Float","Float","Double","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float"],"whichGrad":["false","false","false","false","false","false","false","false","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true"],"bShape":["[1]","[1]","[1]","[1]","[1]","[1]","[1]","[1]","[2,1]","[2,1]","[1]","[1]","[1]","[1]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]"],"operation":["{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }"],"cValue":["\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"9, 10, 12, 13\"","\"9, 10, 12, 13\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"-7, -7, -5, 
-5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\""],"wGradient":["\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"3, 10\"","\"3, 10\"","\"13\"","\"13\"","\"13\"","\"13\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Auto reshaping and broadcasting works and the result can be back propagated. [34]", + "result":"PASS", + "duration":"0.065 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = true }","Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(true)"]}, + + {"kind":"and","text":"","code":["String wValue = whichGrad"," ? \"8\" + ( bShape.inject(1, {x,y->x*y}) > 1 ? \", 9\" : \"\" )"," : \"1, 2, 3, 4\""]}, + + {"kind":"and","text":"","code":["def aShape = [2, 2]"]}, + + {"kind":"and","text":"","code":["Tensor a = Tensor.of(aShape, 1d..5d).setRqsGradient(!whichGrad).to(Device.get(device))","Tensor b = Tensor.of(bShape, 8d..9d).setRqsGradient(whichGrad).to(Device.get(device))"]}, + + {"kind":"and","text":"","code":["a.mut.toType(type)","b.mut.toType(type)"]}, + + {"kind":"and","text":"","code":["String wShape = ( whichGrad ? bShape : aShape ).join(\"x\")","Tensor w = ( whichGrad ? 
b : a )"]}, + + {"kind":"expect","text":"","code":["a.itemType == type || device == 'GPU' // The gpu backend will only be floats!","b.itemType == type || device == 'GPU' // This is because kernels only work on floats..."]}, + + {"kind":"when","text":"","code":["Tensor c = operation.apply(a, b)"]}, + + {"kind":"then","text":"","code":["c.toString({it.hasSlimNumbers = true}).startsWith(\"[2x2]:($cValue)\")","w.toString({it.hasSlimNumbers = true}) == \"[$wShape]:($wValue):g:(null)\""]}, + + {"kind":"when","text":"","code":["c.backward(Tensor.of([2, 2], [5, -2, 7, 3]).mut.toType(type))"]}, + + {"kind":"then","text":"","code":["w.toString({it.hasSlimNumbers = true}) == \"[$wShape]:($wValue):g:($wGradient)\""]}, + + {"kind":"when","text":"","code":["Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(false)"]}, + + {"kind":"then","text":"","code":["c.toString({it.hasSlimNumbers = true}) == \"(2x2):[$cValue]\""]}, + + {"kind":"cleanup","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = false }"]}, + + {"kind":"where","text":"","code":{"device":["'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'CPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'"],"type":["Double","Double","Float","Float","Double","Double","Float","Float","Double","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float"],"whichGrad":["false","false","false","false","false","false","false","false","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true"],"bShape":["[1]","[1]","[1]","[1]","[1]","[1]","[1]","[1]","[2,1]","[2,1]","[1]","[1]","[1]","[1]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]"],"operation":["{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }"],"cValue":["\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"9, 10, 12, 13\"","\"9, 10, 12, 13\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"-7, -7, -5, 
-5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\""],"wGradient":["\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"3, 10\"","\"3, 10\"","\"13\"","\"13\"","\"13\"","\"13\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Auto reshaping and broadcasting works and the result can be back propagated. [35]", + "result":"PASS", + "duration":"0.055 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = true }","Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(true)"]}, + + {"kind":"and","text":"","code":["String wValue = whichGrad"," ? \"8\" + ( bShape.inject(1, {x,y->x*y}) > 1 ? \", 9\" : \"\" )"," : \"1, 2, 3, 4\""]}, + + {"kind":"and","text":"","code":["def aShape = [2, 2]"]}, + + {"kind":"and","text":"","code":["Tensor a = Tensor.of(aShape, 1d..5d).setRqsGradient(!whichGrad).to(Device.get(device))","Tensor b = Tensor.of(bShape, 8d..9d).setRqsGradient(whichGrad).to(Device.get(device))"]}, + + {"kind":"and","text":"","code":["a.mut.toType(type)","b.mut.toType(type)"]}, + + {"kind":"and","text":"","code":["String wShape = ( whichGrad ? bShape : aShape ).join(\"x\")","Tensor w = ( whichGrad ? 
b : a )"]}, + + {"kind":"expect","text":"","code":["a.itemType == type || device == 'GPU' // The gpu backend will only be floats!","b.itemType == type || device == 'GPU' // This is because kernels only work on floats..."]}, + + {"kind":"when","text":"","code":["Tensor c = operation.apply(a, b)"]}, + + {"kind":"then","text":"","code":["c.toString({it.hasSlimNumbers = true}).startsWith(\"[2x2]:($cValue)\")","w.toString({it.hasSlimNumbers = true}) == \"[$wShape]:($wValue):g:(null)\""]}, + + {"kind":"when","text":"","code":["c.backward(Tensor.of([2, 2], [5, -2, 7, 3]).mut.toType(type))"]}, + + {"kind":"then","text":"","code":["w.toString({it.hasSlimNumbers = true}) == \"[$wShape]:($wValue):g:($wGradient)\""]}, + + {"kind":"when","text":"","code":["Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(false)"]}, + + {"kind":"then","text":"","code":["c.toString({it.hasSlimNumbers = true}) == \"(2x2):[$cValue]\""]}, + + {"kind":"cleanup","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = false }"]}, + + {"kind":"where","text":"","code":{"device":["'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'CPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'"],"type":["Double","Double","Float","Float","Double","Double","Float","Float","Double","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float"],"whichGrad":["false","false","false","false","false","false","false","false","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true"],"bShape":["[1]","[1]","[1]","[1]","[1]","[1]","[1]","[1]","[2,1]","[2,1]","[1]","[1]","[1]","[1]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]"],"operation":["{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }"],"cValue":["\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"9, 10, 12, 13\"","\"9, 10, 12, 13\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"-7, -7, -5, 
-5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\""],"wGradient":["\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"3, 10\"","\"3, 10\"","\"13\"","\"13\"","\"13\"","\"13\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Auto reshaping and broadcasting works and the result can be back propagated. [36]", + "result":"PASS", + "duration":"0.055 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = true }","Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(true)"]}, + + {"kind":"and","text":"","code":["String wValue = whichGrad"," ? \"8\" + ( bShape.inject(1, {x,y->x*y}) > 1 ? \", 9\" : \"\" )"," : \"1, 2, 3, 4\""]}, + + {"kind":"and","text":"","code":["def aShape = [2, 2]"]}, + + {"kind":"and","text":"","code":["Tensor a = Tensor.of(aShape, 1d..5d).setRqsGradient(!whichGrad).to(Device.get(device))","Tensor b = Tensor.of(bShape, 8d..9d).setRqsGradient(whichGrad).to(Device.get(device))"]}, + + {"kind":"and","text":"","code":["a.mut.toType(type)","b.mut.toType(type)"]}, + + {"kind":"and","text":"","code":["String wShape = ( whichGrad ? bShape : aShape ).join(\"x\")","Tensor w = ( whichGrad ? 
b : a )"]}, + + {"kind":"expect","text":"","code":["a.itemType == type || device == 'GPU' // The gpu backend will only be floats!","b.itemType == type || device == 'GPU' // This is because kernels only work on floats..."]}, + + {"kind":"when","text":"","code":["Tensor c = operation.apply(a, b)"]}, + + {"kind":"then","text":"","code":["c.toString({it.hasSlimNumbers = true}).startsWith(\"[2x2]:($cValue)\")","w.toString({it.hasSlimNumbers = true}) == \"[$wShape]:($wValue):g:(null)\""]}, + + {"kind":"when","text":"","code":["c.backward(Tensor.of([2, 2], [5, -2, 7, 3]).mut.toType(type))"]}, + + {"kind":"then","text":"","code":["w.toString({it.hasSlimNumbers = true}) == \"[$wShape]:($wValue):g:($wGradient)\""]}, + + {"kind":"when","text":"","code":["Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(false)"]}, + + {"kind":"then","text":"","code":["c.toString({it.hasSlimNumbers = true}) == \"(2x2):[$cValue]\""]}, + + {"kind":"cleanup","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = false }"]}, + + {"kind":"where","text":"","code":{"device":["'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'CPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'"],"type":["Double","Double","Float","Float","Double","Double","Float","Float","Double","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float"],"whichGrad":["false","false","false","false","false","false","false","false","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true"],"bShape":["[1]","[1]","[1]","[1]","[1]","[1]","[1]","[1]","[2,1]","[2,1]","[1]","[1]","[1]","[1]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]"],"operation":["{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }"],"cValue":["\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"9, 10, 12, 13\"","\"9, 10, 12, 13\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"-7, -7, -5, 
-5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\""],"wGradient":["\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"3, 10\"","\"3, 10\"","\"13\"","\"13\"","\"13\"","\"13\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Auto reshaping and broadcasting works and the result can be back propagated. [37]", + "result":"PASS", + "duration":"0.051 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = true }","Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(true)"]}, + + {"kind":"and","text":"","code":["String wValue = whichGrad"," ? \"8\" + ( bShape.inject(1, {x,y->x*y}) > 1 ? \", 9\" : \"\" )"," : \"1, 2, 3, 4\""]}, + + {"kind":"and","text":"","code":["def aShape = [2, 2]"]}, + + {"kind":"and","text":"","code":["Tensor a = Tensor.of(aShape, 1d..5d).setRqsGradient(!whichGrad).to(Device.get(device))","Tensor b = Tensor.of(bShape, 8d..9d).setRqsGradient(whichGrad).to(Device.get(device))"]}, + + {"kind":"and","text":"","code":["a.mut.toType(type)","b.mut.toType(type)"]}, + + {"kind":"and","text":"","code":["String wShape = ( whichGrad ? bShape : aShape ).join(\"x\")","Tensor w = ( whichGrad ? 
b : a )"]}, + + {"kind":"expect","text":"","code":["a.itemType == type || device == 'GPU' // The gpu backend will only be floats!","b.itemType == type || device == 'GPU' // This is because kernels only work on floats..."]}, + + {"kind":"when","text":"","code":["Tensor c = operation.apply(a, b)"]}, + + {"kind":"then","text":"","code":["c.toString({it.hasSlimNumbers = true}).startsWith(\"[2x2]:($cValue)\")","w.toString({it.hasSlimNumbers = true}) == \"[$wShape]:($wValue):g:(null)\""]}, + + {"kind":"when","text":"","code":["c.backward(Tensor.of([2, 2], [5, -2, 7, 3]).mut.toType(type))"]}, + + {"kind":"then","text":"","code":["w.toString({it.hasSlimNumbers = true}) == \"[$wShape]:($wValue):g:($wGradient)\""]}, + + {"kind":"when","text":"","code":["Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(false)"]}, + + {"kind":"then","text":"","code":["c.toString({it.hasSlimNumbers = true}) == \"(2x2):[$cValue]\""]}, + + {"kind":"cleanup","text":"","code":["Neureka.get().backend.find(CLBackend).ifPresent { it.settings.autoConvertToFloat = false }"]}, + + {"kind":"where","text":"","code":{"device":["'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'CPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'","'CPU'","'GPU'"],"type":["Double","Double","Float","Float","Double","Double","Float","Float","Double","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float","Double","Double","Float","Float"],"whichGrad":["false","false","false","false","false","false","false","false","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true"],"bShape":["[1]","[1]","[1]","[1]","[1]","[1]","[1]","[1]","[2,1]","[2,1]","[1]","[1]","[1]","[1]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]","[1,2]","[1,2]","[1,2]","[1,2]","[2]","[2]","[2]","[2]"],"operation":["{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x * y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> x - y }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }","{ x, y -> y - x }"],"cValue":["\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"8, 16, 24, 32\"","\"9, 10, 12, 13\"","\"9, 10, 12, 13\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 10, 11, 12\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"9, 11, 11, 13\"","\"-7, -7, -5, 
-5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"-7, -7, -5, -5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\"","\"7, 7, 5, 5\""],"wGradient":["\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"5, -2, 7, 3\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"40, -16, 56, 24\"","\"3, 10\"","\"3, 10\"","\"13\"","\"13\"","\"13\"","\"13\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"-12, -1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\"","\"12, 1\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Scalar broadcasting works across devices. [0]", + "result":"PASS", + "duration":"0.054 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["var a = Tensor.of(type).withShape(3, 2).andFill(-4..4).to(Device.get(device))","var b = Tensor.of(type).withShape(1, 1).andFill(3).to(Device.get(device))"]}, + + {"kind":"expect","text":"","code":["a.itemType == type","b.itemType == type"]}, + + {"kind":"when","text":"","code":["Tensor c = operation.apply(a, b)"]}, + + {"kind":"then","text":"","code":["c.toString() == \"(3x2):[$cValue]\""]}, + + {"kind":"where","text":"","code":{"device":["'CPU'","'CPU'","'GPU'","'CPU'","'CPU'"],"type":["Double","Float","Float","Long","Integer"],"operation":["{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }"],"cValue":["\"-1.0, 0.0, 1.0, 2.0, 3.0, 4.0\"","\"-1.0, 0.0, 1.0, 2.0, 3.0, 4.0\"","\"-1.0, 0.0, 1.0, 2.0, 3.0, 4.0\"","\"-1, 0, 1, 2, 3, 4\"","\"-1, 0, 1, 2, 3, 4\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Scalar broadcasting works across devices. [1]", + "result":"PASS", + "duration":"0.054 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["var a = Tensor.of(type).withShape(3, 2).andFill(-4..4).to(Device.get(device))","var b = Tensor.of(type).withShape(1, 1).andFill(3).to(Device.get(device))"]}, + + {"kind":"expect","text":"","code":["a.itemType == type","b.itemType == type"]}, + + {"kind":"when","text":"","code":["Tensor c = operation.apply(a, b)"]}, + + {"kind":"then","text":"","code":["c.toString() == \"(3x2):[$cValue]\""]}, + + {"kind":"where","text":"","code":{"device":["'CPU'","'CPU'","'GPU'","'CPU'","'CPU'"],"type":["Double","Float","Float","Long","Integer"],"operation":["{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }"],"cValue":["\"-1.0, 0.0, 1.0, 2.0, 3.0, 4.0\"","\"-1.0, 0.0, 1.0, 2.0, 3.0, 4.0\"","\"-1.0, 0.0, 1.0, 2.0, 3.0, 4.0\"","\"-1, 0, 1, 2, 3, 4\"","\"-1, 0, 1, 2, 3, 4\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Scalar broadcasting works across devices. 
[2]", + "result":"PASS", + "duration":"0.120 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["var a = Tensor.of(type).withShape(3, 2).andFill(-4..4).to(Device.get(device))","var b = Tensor.of(type).withShape(1, 1).andFill(3).to(Device.get(device))"]}, + + {"kind":"expect","text":"","code":["a.itemType == type","b.itemType == type"]}, + + {"kind":"when","text":"","code":["Tensor c = operation.apply(a, b)"]}, + + {"kind":"then","text":"","code":["c.toString() == \"(3x2):[$cValue]\""]}, + + {"kind":"where","text":"","code":{"device":["'CPU'","'CPU'","'GPU'","'CPU'","'CPU'"],"type":["Double","Float","Float","Long","Integer"],"operation":["{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }"],"cValue":["\"-1.0, 0.0, 1.0, 2.0, 3.0, 4.0\"","\"-1.0, 0.0, 1.0, 2.0, 3.0, 4.0\"","\"-1.0, 0.0, 1.0, 2.0, 3.0, 4.0\"","\"-1, 0, 1, 2, 3, 4\"","\"-1, 0, 1, 2, 3, 4\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Scalar broadcasting works across devices. [3]", + "result":"PASS", + "duration":"0.050 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["var a = Tensor.of(type).withShape(3, 2).andFill(-4..4).to(Device.get(device))","var b = Tensor.of(type).withShape(1, 1).andFill(3).to(Device.get(device))"]}, + + {"kind":"expect","text":"","code":["a.itemType == type","b.itemType == type"]}, + + {"kind":"when","text":"","code":["Tensor c = operation.apply(a, b)"]}, + + {"kind":"then","text":"","code":["c.toString() == \"(3x2):[$cValue]\""]}, + + {"kind":"where","text":"","code":{"device":["'CPU'","'CPU'","'GPU'","'CPU'","'CPU'"],"type":["Double","Float","Float","Long","Integer"],"operation":["{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }"],"cValue":["\"-1.0, 0.0, 1.0, 2.0, 3.0, 4.0\"","\"-1.0, 0.0, 1.0, 2.0, 3.0, 4.0\"","\"-1.0, 0.0, 1.0, 2.0, 3.0, 4.0\"","\"-1, 0, 1, 2, 3, 4\"","\"-1, 0, 1, 2, 3, 4\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Scalar broadcasting works across devices. 
[4]", + "result":"PASS", + "duration":"0.051 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["var a = Tensor.of(type).withShape(3, 2).andFill(-4..4).to(Device.get(device))","var b = Tensor.of(type).withShape(1, 1).andFill(3).to(Device.get(device))"]}, + + {"kind":"expect","text":"","code":["a.itemType == type","b.itemType == type"]}, + + {"kind":"when","text":"","code":["Tensor c = operation.apply(a, b)"]}, + + {"kind":"then","text":"","code":["c.toString() == \"(3x2):[$cValue]\""]}, + + {"kind":"where","text":"","code":{"device":["'CPU'","'CPU'","'GPU'","'CPU'","'CPU'"],"type":["Double","Float","Float","Long","Integer"],"operation":["{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }","{ x, y -> x + y }"],"cValue":["\"-1.0, 0.0, 1.0, 2.0, 3.0, 4.0\"","\"-1.0, 0.0, 1.0, 2.0, 3.0, 4.0\"","\"-1.0, 0.0, 1.0, 2.0, 3.0, 4.0\"","\"-1, 0, 1, 2, 3, 4\"","\"-1, 0, 1, 2, 3, 4\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Operators \"+,*,**\" produce expected results with gradients which can be accessed via a \"Ig[0]\" Function instance", + "result":"PASS", + "duration":"0.059 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"Neurekas view is set to legacy and three tensors of which one requires gradients.","code":["Neureka.get().settings().view().getNDPrintSettings().setIsLegacy(true)","Tensor x = Tensor.of(3d).setRqsGradient(true)","Tensor b = Tensor.of(-4d)","Tensor w = Tensor.of(2d)"]}, + + {"kind":"when","text":"","code":["when : Tensor y = ( (x+b)*w )**2"]}, + + {"kind":"then","text":"","code":["then : y.toString().contains(\"[1]:(4.0); ->d[1]:(-8.0)\")"]}, + + {"kind":"when","text":"","code":["when : y = ((x+b)*w)**2"]}, + + {"kind":"then","text":"","code":["then : y.toString().contains(\"[1]:(4.0); ->d[1]:(-8.0)\")"]}, + + {"kind":"and","text":"","code":["and : Neureka.get().settings().debug().setIsKeepingDerivativeTargetPayloads(true)"]}, + + {"kind":"when","text":"","code":["y.backward(Tensor.of(1d))"]}, + + {"kind":"and","text":"","code":["Tensor t2 = Tensor.of( \"Ig[0]\", [x] )","Tensor t1 = Tensor.of( \"Ig[0]\", [y] ) // The input does not have a gradient!"]}, + + {"kind":"then","text":"","code":["thrown(IllegalArgumentException)"]}, + + {"kind":"and","text":"","code":["t2.toString() == \"[1]:(-8.0)\""]}, + + {"kind":"and","text":"","code":["t2 == x.gradient.get()"]}, + + {"kind":"and","text":"","code":["and : Neureka.get().settings().debug().setIsKeepingDerivativeTargetPayloads(false)"]}, + + {"kind":"when","text":"","code":["Tensor[] trs = new Tensor[]{x}"]}, + + {"kind":"and","text":"","code":["def fun = Function.of(\"Ig[0]\", false)"]}, + + {"kind":"then","text":"","code":["fun(trs).toString() == \"[1]:(-8.0)\""]}, + + {"kind":"when","text":"","code":["trs[0] = y"]}, {"kind":"and","text":"","code":["fun = Function.of(\"Ig[0]\", false)"]}, - {"kind":"and","text":"","code":["fun(trs)"]}, + {"kind":"and","text":"","code":["fun(trs)"]}, + + {"kind":"then","text":"","code":["thrown(IllegalArgumentException)"]} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation functions work across types on slices and non sliced tensors. 
[0]", + "result":"PASS", + "duration":"0.062 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a function based on the provided expression.","code":["var func = Function.of(funExpression)"]}, + + {"kind":"and","text":"We create 2 tensors storing the same values, one sliced and the other a normal tensor.","code":["var t1 = Tensor.of(type).withShape(2, 3).andSeed(\"Tempeh\")","var t2 = Tensor.of(type).withShape(4, 5).all(0)[1..2, 1..3]","t2.mut[0..1, 0..2] = t1"]}, + + {"kind":"expect","text":"The types of both tensors should match what was provided during instantiation.","code":["t1.dataType == DataType.of(type)","t1.itemType == type","t2.dataType == DataType.of(type)","t2.itemType == type"]}, + + {"kind":"when","text":"We apply the function to both tensors...","code":["var result1 = func(t1)","var result2 = func(t2)"]}, + + {"kind":"then","text":"","code":["result1.itemType == type","result2.itemType == type"]}, + + {"kind":"and","text":"The data of the first (non slice) tensor should be as expected.","code":["result1.mut.data.get() == expected instanceof Map ? expected['r1'] : expected"]}, + + {"kind":"and","text":"As well the value of the slice tensor (Its data would be a sparse array).","code":["result2.items == expected instanceof Map ? expected['r2'] : expected"]}, + + {"kind":"where","text":"","code":{"type":["Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float"],"funExpression":["'tanh(i0)'","'tanh(i0)'","'tanh(i0)'","'relu(i0)'","'relu(i0)'","'relu(i0)'","'relu(i0*i0)'","'relu(i0*i0)'","'relu(i0*i0)'","'relu(i0-i0)'","'relu(i0-i0)'","'relu(i0-i0)'","'relu(i0)-i0'","'relu(i0)-i0'","'relu(i0)-i0'","'relu(-i0)+i0'","'relu(-i0)+i0'","'relu(-i0)+i0'","'relu(-i0)/i0'","'relu(-i0)/i0'","'relu(-i0)/i0'","'relu(-i0-5)+i0*3'","'relu(-i0-5)+i0*3'","'relu(-i0-5)+i0*3'","'abs(i0*10)%3'","'abs(i0*10)%3'","'abs(i0*10)%3'","'gaus(i0)*100%i0'","'gaus(i0)*100%i0'","'gaus(i0)*100%i0'","'random(i0)'","'random(i0)'"],"expected":["[-0.2608431635405718, -0.6400224689534015, -0.15255723053856546, 0.1566537867655921, 0.5489211983894932, -0.17031712209680225] as double[]","[-0.26084316, -0.64002246, -0.15255724, 0.15665378, 0.54892117, -0.17031713] as float[]","[-1, -1, 1, -1, 1, -1] as int[]","[-0.0027019706408068795, -0.008329762613111082, -0.001543641184315801, 0.15861207834235577, 0.6567031992927272, -0.001728424711189524] as double[]","[-0.0027019705, -0.008329763, -0.0015436412, 0.15861207, 0.6567032, -0.0017284247] as float[]","[-7156386, -18495716, 248181051, -13634228, 919305478, -15169971] as int[]","[0.07300645343782339, 0.6938494519078316, 0.023828281059158886, 0.025157791396081604, 0.43125909196130346, 0.029874519822505895] as double[]","[0.07300645, 0.6938495, 0.023828283, 0.025157789, 0.43125907, 0.02987452] as float[]","[988699588, -17870520, 141304729, 1260971300, 210951204, 1018550276] as int[]","[0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as double[]","[0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as float[]","[0, 0, 0, 0, 0, 0] as int[]","[0.26749509343988104, 0.8246464986979971, 0.1528204772472643, 0.0, 0.0, 0.17111404640776287] as double[]","[0.2674951, 0.82464653, 0.15282048, 0.0, 0.0, 0.17111404] as float[]","[708482220, 1831075919, 0, 1349788550, 0, 1501827147] as int[]","[0.0, 0.0, 0.0, 0.15702595755893223, 
0.6501361672998, 0.0] as double[]","[0.0, 0.0, 0.0, 0.15702595174312592, 0.650136142373085, 0.0] as float[]","[0, 0, 245699240, 0, 910112423, 0] as int[]","[-1.0, -1.0, -1.0, -0.01, -0.01, -1.0] as double[]","[-1.0, -1.0, -1.0, -0.01, -0.01, -1.0] as float[]","[-1, -1, 0, -1, 0, -1] as int[]","[-0.857889221601257, -2.540599021320214, -0.5115487141104245, 0.42425011424364373, 1.9135425658852545, -0.5667989886456676] as double[]","[-0.85788924, -2.540599, -0.51154876, 0.4242501, 1.9135424, -0.566799] as float[]","[-1431277217, 595824021, 742061342, 1568121735, -1546243917, 1260973055] as int[]","[2.7019706408068793, 2.3297626131110825, 1.5436411843158009, 1.5861207834235578, 0.5670319929272729, 1.7284247111895241] as double[]","[2.7019706, 2.3297625, 1.5436412, 1.5861207, 0.56703186, 1.7284248] as float[]","[2, 0, 1, 1, 2, 1] as int[]","[0.011693048642643422, 0.8192993419907051, 0.08721424132459399, 0.1277867634692304, 0.6121424058303924, 0.09210498892686086] as double[]","[0.011690378, 0.81929654, 0.087213635, 0.12778962, 0.6121441, 0.09210494] as float[]","[0, 0, 0, 0, 0, 0] as int[]","['r2':[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.242396559265774, 0.23980663860290638, 0.4667980401594514, 0.0, 0.0, -1.0840395336123059, 0.43090823203242123, 1.0381081218392283, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as double[], 'r1':[2.242396559265774, 0.23980663860290638, 0.4667980401594514, -1.0840395336123059, 0.43090823203242123, 1.0381081218392283] as double[]]","['r2':[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.2423966, 0.23980664, 0.46679804, 0.0, 0.0, -1.0840396, 0.43090823, 1.0381081, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as float[], 'r1':[2.2423966, 0.23980664, 0.46679804, -1.0840396, 0.43090823, 1.0381081] as float[]]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation functions work across types on slices and non sliced tensors. [1]", + "result":"PASS", + "duration":"0.053 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a function based on the provided expression.","code":["var func = Function.of(funExpression)"]}, + + {"kind":"and","text":"We create 2 tensors storing the same values, one sliced and the other a normal tensor.","code":["var t1 = Tensor.of(type).withShape(2, 3).andSeed(\"Tempeh\")","var t2 = Tensor.of(type).withShape(4, 5).all(0)[1..2, 1..3]","t2.mut[0..1, 0..2] = t1"]}, + + {"kind":"expect","text":"The types of both tensors should match what was provided during instantiation.","code":["t1.dataType == DataType.of(type)","t1.itemType == type","t2.dataType == DataType.of(type)","t2.itemType == type"]}, + + {"kind":"when","text":"We apply the function to both tensors...","code":["var result1 = func(t1)","var result2 = func(t2)"]}, + + {"kind":"then","text":"","code":["result1.itemType == type","result2.itemType == type"]}, + + {"kind":"and","text":"The data of the first (non slice) tensor should be as expected.","code":["result1.mut.data.get() == expected instanceof Map ? expected['r1'] : expected"]}, + + {"kind":"and","text":"As well the value of the slice tensor (Its data would be a sparse array).","code":["result2.items == expected instanceof Map ? 
expected['r2'] : expected"]}, + + {"kind":"where","text":"","code":{"type":["Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float"],"funExpression":["'tanh(i0)'","'tanh(i0)'","'tanh(i0)'","'relu(i0)'","'relu(i0)'","'relu(i0)'","'relu(i0*i0)'","'relu(i0*i0)'","'relu(i0*i0)'","'relu(i0-i0)'","'relu(i0-i0)'","'relu(i0-i0)'","'relu(i0)-i0'","'relu(i0)-i0'","'relu(i0)-i0'","'relu(-i0)+i0'","'relu(-i0)+i0'","'relu(-i0)+i0'","'relu(-i0)/i0'","'relu(-i0)/i0'","'relu(-i0)/i0'","'relu(-i0-5)+i0*3'","'relu(-i0-5)+i0*3'","'relu(-i0-5)+i0*3'","'abs(i0*10)%3'","'abs(i0*10)%3'","'abs(i0*10)%3'","'gaus(i0)*100%i0'","'gaus(i0)*100%i0'","'gaus(i0)*100%i0'","'random(i0)'","'random(i0)'"],"expected":["[-0.2608431635405718, -0.6400224689534015, -0.15255723053856546, 0.1566537867655921, 0.5489211983894932, -0.17031712209680225] as double[]","[-0.26084316, -0.64002246, -0.15255724, 0.15665378, 0.54892117, -0.17031713] as float[]","[-1, -1, 1, -1, 1, -1] as int[]","[-0.0027019706408068795, -0.008329762613111082, -0.001543641184315801, 0.15861207834235577, 0.6567031992927272, -0.001728424711189524] as double[]","[-0.0027019705, -0.008329763, -0.0015436412, 0.15861207, 0.6567032, -0.0017284247] as float[]","[-7156386, -18495716, 248181051, -13634228, 919305478, -15169971] as int[]","[0.07300645343782339, 0.6938494519078316, 0.023828281059158886, 0.025157791396081604, 0.43125909196130346, 0.029874519822505895] as double[]","[0.07300645, 0.6938495, 0.023828283, 0.025157789, 0.43125907, 0.02987452] as float[]","[988699588, -17870520, 141304729, 1260971300, 210951204, 1018550276] as int[]","[0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as double[]","[0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as float[]","[0, 0, 0, 0, 0, 0] as int[]","[0.26749509343988104, 0.8246464986979971, 0.1528204772472643, 0.0, 0.0, 0.17111404640776287] as double[]","[0.2674951, 0.82464653, 0.15282048, 0.0, 0.0, 0.17111404] as float[]","[708482220, 1831075919, 0, 1349788550, 0, 1501827147] as int[]","[0.0, 0.0, 0.0, 0.15702595755893223, 0.6501361672998, 0.0] as double[]","[0.0, 0.0, 0.0, 0.15702595174312592, 0.650136142373085, 0.0] as float[]","[0, 0, 245699240, 0, 910112423, 0] as int[]","[-1.0, -1.0, -1.0, -0.01, -0.01, -1.0] as double[]","[-1.0, -1.0, -1.0, -0.01, -0.01, -1.0] as float[]","[-1, -1, 0, -1, 0, -1] as int[]","[-0.857889221601257, -2.540599021320214, -0.5115487141104245, 0.42425011424364373, 1.9135425658852545, -0.5667989886456676] as double[]","[-0.85788924, -2.540599, -0.51154876, 0.4242501, 1.9135424, -0.566799] as float[]","[-1431277217, 595824021, 742061342, 1568121735, -1546243917, 1260973055] as int[]","[2.7019706408068793, 2.3297626131110825, 1.5436411843158009, 1.5861207834235578, 0.5670319929272729, 1.7284247111895241] as double[]","[2.7019706, 2.3297625, 1.5436412, 1.5861207, 0.56703186, 1.7284248] as float[]","[2, 0, 1, 1, 2, 1] as int[]","[0.011693048642643422, 0.8192993419907051, 0.08721424132459399, 0.1277867634692304, 0.6121424058303924, 0.09210498892686086] as double[]","[0.011690378, 0.81929654, 0.087213635, 0.12778962, 0.6121441, 0.09210494] as float[]","[0, 0, 0, 0, 0, 0] as int[]","['r2':[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.242396559265774, 0.23980663860290638, 0.4667980401594514, 0.0, 0.0, -1.0840395336123059, 0.43090823203242123, 1.0381081218392283, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as double[], 
'r1':[2.242396559265774, 0.23980663860290638, 0.4667980401594514, -1.0840395336123059, 0.43090823203242123, 1.0381081218392283] as double[]]","['r2':[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.2423966, 0.23980664, 0.46679804, 0.0, 0.0, -1.0840396, 0.43090823, 1.0381081, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as float[], 'r1':[2.2423966, 0.23980664, 0.46679804, -1.0840396, 0.43090823, 1.0381081] as float[]]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation functions work across types on slices and non sliced tensors. [2]", + "result":"PASS", + "duration":"0.054 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a function based on the provided expression.","code":["var func = Function.of(funExpression)"]}, + + {"kind":"and","text":"We create 2 tensors storing the same values, one sliced and the other a normal tensor.","code":["var t1 = Tensor.of(type).withShape(2, 3).andSeed(\"Tempeh\")","var t2 = Tensor.of(type).withShape(4, 5).all(0)[1..2, 1..3]","t2.mut[0..1, 0..2] = t1"]}, + + {"kind":"expect","text":"The types of both tensors should match what was provided during instantiation.","code":["t1.dataType == DataType.of(type)","t1.itemType == type","t2.dataType == DataType.of(type)","t2.itemType == type"]}, + + {"kind":"when","text":"We apply the function to both tensors...","code":["var result1 = func(t1)","var result2 = func(t2)"]}, + + {"kind":"then","text":"","code":["result1.itemType == type","result2.itemType == type"]}, + + {"kind":"and","text":"The data of the first (non slice) tensor should be as expected.","code":["result1.mut.data.get() == expected instanceof Map ? expected['r1'] : expected"]}, + + {"kind":"and","text":"As well the value of the slice tensor (Its data would be a sparse array).","code":["result2.items == expected instanceof Map ? 
expected['r2'] : expected"]}, + + {"kind":"where","text":"","code":{"type":["Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float"],"funExpression":["'tanh(i0)'","'tanh(i0)'","'tanh(i0)'","'relu(i0)'","'relu(i0)'","'relu(i0)'","'relu(i0*i0)'","'relu(i0*i0)'","'relu(i0*i0)'","'relu(i0-i0)'","'relu(i0-i0)'","'relu(i0-i0)'","'relu(i0)-i0'","'relu(i0)-i0'","'relu(i0)-i0'","'relu(-i0)+i0'","'relu(-i0)+i0'","'relu(-i0)+i0'","'relu(-i0)/i0'","'relu(-i0)/i0'","'relu(-i0)/i0'","'relu(-i0-5)+i0*3'","'relu(-i0-5)+i0*3'","'relu(-i0-5)+i0*3'","'abs(i0*10)%3'","'abs(i0*10)%3'","'abs(i0*10)%3'","'gaus(i0)*100%i0'","'gaus(i0)*100%i0'","'gaus(i0)*100%i0'","'random(i0)'","'random(i0)'"],"expected":["[-0.2608431635405718, -0.6400224689534015, -0.15255723053856546, 0.1566537867655921, 0.5489211983894932, -0.17031712209680225] as double[]","[-0.26084316, -0.64002246, -0.15255724, 0.15665378, 0.54892117, -0.17031713] as float[]","[-1, -1, 1, -1, 1, -1] as int[]","[-0.0027019706408068795, -0.008329762613111082, -0.001543641184315801, 0.15861207834235577, 0.6567031992927272, -0.001728424711189524] as double[]","[-0.0027019705, -0.008329763, -0.0015436412, 0.15861207, 0.6567032, -0.0017284247] as float[]","[-7156386, -18495716, 248181051, -13634228, 919305478, -15169971] as int[]","[0.07300645343782339, 0.6938494519078316, 0.023828281059158886, 0.025157791396081604, 0.43125909196130346, 0.029874519822505895] as double[]","[0.07300645, 0.6938495, 0.023828283, 0.025157789, 0.43125907, 0.02987452] as float[]","[988699588, -17870520, 141304729, 1260971300, 210951204, 1018550276] as int[]","[0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as double[]","[0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as float[]","[0, 0, 0, 0, 0, 0] as int[]","[0.26749509343988104, 0.8246464986979971, 0.1528204772472643, 0.0, 0.0, 0.17111404640776287] as double[]","[0.2674951, 0.82464653, 0.15282048, 0.0, 0.0, 0.17111404] as float[]","[708482220, 1831075919, 0, 1349788550, 0, 1501827147] as int[]","[0.0, 0.0, 0.0, 0.15702595755893223, 0.6501361672998, 0.0] as double[]","[0.0, 0.0, 0.0, 0.15702595174312592, 0.650136142373085, 0.0] as float[]","[0, 0, 245699240, 0, 910112423, 0] as int[]","[-1.0, -1.0, -1.0, -0.01, -0.01, -1.0] as double[]","[-1.0, -1.0, -1.0, -0.01, -0.01, -1.0] as float[]","[-1, -1, 0, -1, 0, -1] as int[]","[-0.857889221601257, -2.540599021320214, -0.5115487141104245, 0.42425011424364373, 1.9135425658852545, -0.5667989886456676] as double[]","[-0.85788924, -2.540599, -0.51154876, 0.4242501, 1.9135424, -0.566799] as float[]","[-1431277217, 595824021, 742061342, 1568121735, -1546243917, 1260973055] as int[]","[2.7019706408068793, 2.3297626131110825, 1.5436411843158009, 1.5861207834235578, 0.5670319929272729, 1.7284247111895241] as double[]","[2.7019706, 2.3297625, 1.5436412, 1.5861207, 0.56703186, 1.7284248] as float[]","[2, 0, 1, 1, 2, 1] as int[]","[0.011693048642643422, 0.8192993419907051, 0.08721424132459399, 0.1277867634692304, 0.6121424058303924, 0.09210498892686086] as double[]","[0.011690378, 0.81929654, 0.087213635, 0.12778962, 0.6121441, 0.09210494] as float[]","[0, 0, 0, 0, 0, 0] as int[]","['r2':[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.242396559265774, 0.23980663860290638, 0.4667980401594514, 0.0, 0.0, -1.0840395336123059, 0.43090823203242123, 1.0381081218392283, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as double[], 
'r1':[2.242396559265774, 0.23980663860290638, 0.4667980401594514, -1.0840395336123059, 0.43090823203242123, 1.0381081218392283] as double[]]","['r2':[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.2423966, 0.23980664, 0.46679804, 0.0, 0.0, -1.0840396, 0.43090823, 1.0381081, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as float[], 'r1':[2.2423966, 0.23980664, 0.46679804, -1.0840396, 0.43090823, 1.0381081] as float[]]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation functions work across types on slices and non sliced tensors. [3]", + "result":"PASS", + "duration":"0.053 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a function based on the provided expression.","code":["var func = Function.of(funExpression)"]}, + + {"kind":"and","text":"We create 2 tensors storing the same values, one sliced and the other a normal tensor.","code":["var t1 = Tensor.of(type).withShape(2, 3).andSeed(\"Tempeh\")","var t2 = Tensor.of(type).withShape(4, 5).all(0)[1..2, 1..3]","t2.mut[0..1, 0..2] = t1"]}, + + {"kind":"expect","text":"The types of both tensors should match what was provided during instantiation.","code":["t1.dataType == DataType.of(type)","t1.itemType == type","t2.dataType == DataType.of(type)","t2.itemType == type"]}, + + {"kind":"when","text":"We apply the function to both tensors...","code":["var result1 = func(t1)","var result2 = func(t2)"]}, + + {"kind":"then","text":"","code":["result1.itemType == type","result2.itemType == type"]}, + + {"kind":"and","text":"The data of the first (non slice) tensor should be as expected.","code":["result1.mut.data.get() == expected instanceof Map ? expected['r1'] : expected"]}, + + {"kind":"and","text":"As well the value of the slice tensor (Its data would be a sparse array).","code":["result2.items == expected instanceof Map ? 
expected['r2'] : expected"]}, + + {"kind":"where","text":"","code":{"type":["Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float"],"funExpression":["'tanh(i0)'","'tanh(i0)'","'tanh(i0)'","'relu(i0)'","'relu(i0)'","'relu(i0)'","'relu(i0*i0)'","'relu(i0*i0)'","'relu(i0*i0)'","'relu(i0-i0)'","'relu(i0-i0)'","'relu(i0-i0)'","'relu(i0)-i0'","'relu(i0)-i0'","'relu(i0)-i0'","'relu(-i0)+i0'","'relu(-i0)+i0'","'relu(-i0)+i0'","'relu(-i0)/i0'","'relu(-i0)/i0'","'relu(-i0)/i0'","'relu(-i0-5)+i0*3'","'relu(-i0-5)+i0*3'","'relu(-i0-5)+i0*3'","'abs(i0*10)%3'","'abs(i0*10)%3'","'abs(i0*10)%3'","'gaus(i0)*100%i0'","'gaus(i0)*100%i0'","'gaus(i0)*100%i0'","'random(i0)'","'random(i0)'"],"expected":["[-0.2608431635405718, -0.6400224689534015, -0.15255723053856546, 0.1566537867655921, 0.5489211983894932, -0.17031712209680225] as double[]","[-0.26084316, -0.64002246, -0.15255724, 0.15665378, 0.54892117, -0.17031713] as float[]","[-1, -1, 1, -1, 1, -1] as int[]","[-0.0027019706408068795, -0.008329762613111082, -0.001543641184315801, 0.15861207834235577, 0.6567031992927272, -0.001728424711189524] as double[]","[-0.0027019705, -0.008329763, -0.0015436412, 0.15861207, 0.6567032, -0.0017284247] as float[]","[-7156386, -18495716, 248181051, -13634228, 919305478, -15169971] as int[]","[0.07300645343782339, 0.6938494519078316, 0.023828281059158886, 0.025157791396081604, 0.43125909196130346, 0.029874519822505895] as double[]","[0.07300645, 0.6938495, 0.023828283, 0.025157789, 0.43125907, 0.02987452] as float[]","[988699588, -17870520, 141304729, 1260971300, 210951204, 1018550276] as int[]","[0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as double[]","[0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as float[]","[0, 0, 0, 0, 0, 0] as int[]","[0.26749509343988104, 0.8246464986979971, 0.1528204772472643, 0.0, 0.0, 0.17111404640776287] as double[]","[0.2674951, 0.82464653, 0.15282048, 0.0, 0.0, 0.17111404] as float[]","[708482220, 1831075919, 0, 1349788550, 0, 1501827147] as int[]","[0.0, 0.0, 0.0, 0.15702595755893223, 0.6501361672998, 0.0] as double[]","[0.0, 0.0, 0.0, 0.15702595174312592, 0.650136142373085, 0.0] as float[]","[0, 0, 245699240, 0, 910112423, 0] as int[]","[-1.0, -1.0, -1.0, -0.01, -0.01, -1.0] as double[]","[-1.0, -1.0, -1.0, -0.01, -0.01, -1.0] as float[]","[-1, -1, 0, -1, 0, -1] as int[]","[-0.857889221601257, -2.540599021320214, -0.5115487141104245, 0.42425011424364373, 1.9135425658852545, -0.5667989886456676] as double[]","[-0.85788924, -2.540599, -0.51154876, 0.4242501, 1.9135424, -0.566799] as float[]","[-1431277217, 595824021, 742061342, 1568121735, -1546243917, 1260973055] as int[]","[2.7019706408068793, 2.3297626131110825, 1.5436411843158009, 1.5861207834235578, 0.5670319929272729, 1.7284247111895241] as double[]","[2.7019706, 2.3297625, 1.5436412, 1.5861207, 0.56703186, 1.7284248] as float[]","[2, 0, 1, 1, 2, 1] as int[]","[0.011693048642643422, 0.8192993419907051, 0.08721424132459399, 0.1277867634692304, 0.6121424058303924, 0.09210498892686086] as double[]","[0.011690378, 0.81929654, 0.087213635, 0.12778962, 0.6121441, 0.09210494] as float[]","[0, 0, 0, 0, 0, 0] as int[]","['r2':[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.242396559265774, 0.23980663860290638, 0.4667980401594514, 0.0, 0.0, -1.0840395336123059, 0.43090823203242123, 1.0381081218392283, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as double[], 
'r1':[2.242396559265774, 0.23980663860290638, 0.4667980401594514, -1.0840395336123059, 0.43090823203242123, 1.0381081218392283] as double[]]","['r2':[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.2423966, 0.23980664, 0.46679804, 0.0, 0.0, -1.0840396, 0.43090823, 1.0381081, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as float[], 'r1':[2.2423966, 0.23980664, 0.46679804, -1.0840396, 0.43090823, 1.0381081] as float[]]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation functions work across types on slices and non sliced tensors. [4]", + "result":"PASS", + "duration":"0.052 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a function based on the provided expression.","code":["var func = Function.of(funExpression)"]}, + + {"kind":"and","text":"We create 2 tensors storing the same values, one sliced and the other a normal tensor.","code":["var t1 = Tensor.of(type).withShape(2, 3).andSeed(\"Tempeh\")","var t2 = Tensor.of(type).withShape(4, 5).all(0)[1..2, 1..3]","t2.mut[0..1, 0..2] = t1"]}, + + {"kind":"expect","text":"The types of both tensors should match what was provided during instantiation.","code":["t1.dataType == DataType.of(type)","t1.itemType == type","t2.dataType == DataType.of(type)","t2.itemType == type"]}, + + {"kind":"when","text":"We apply the function to both tensors...","code":["var result1 = func(t1)","var result2 = func(t2)"]}, + + {"kind":"then","text":"","code":["result1.itemType == type","result2.itemType == type"]}, + + {"kind":"and","text":"The data of the first (non slice) tensor should be as expected.","code":["result1.mut.data.get() == expected instanceof Map ? expected['r1'] : expected"]}, + + {"kind":"and","text":"As well the value of the slice tensor (Its data would be a sparse array).","code":["result2.items == expected instanceof Map ? 
expected['r2'] : expected"]}, + + {"kind":"where","text":"","code":{"type":["Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float"],"funExpression":["'tanh(i0)'","'tanh(i0)'","'tanh(i0)'","'relu(i0)'","'relu(i0)'","'relu(i0)'","'relu(i0*i0)'","'relu(i0*i0)'","'relu(i0*i0)'","'relu(i0-i0)'","'relu(i0-i0)'","'relu(i0-i0)'","'relu(i0)-i0'","'relu(i0)-i0'","'relu(i0)-i0'","'relu(-i0)+i0'","'relu(-i0)+i0'","'relu(-i0)+i0'","'relu(-i0)/i0'","'relu(-i0)/i0'","'relu(-i0)/i0'","'relu(-i0-5)+i0*3'","'relu(-i0-5)+i0*3'","'relu(-i0-5)+i0*3'","'abs(i0*10)%3'","'abs(i0*10)%3'","'abs(i0*10)%3'","'gaus(i0)*100%i0'","'gaus(i0)*100%i0'","'gaus(i0)*100%i0'","'random(i0)'","'random(i0)'"],"expected":["[-0.2608431635405718, -0.6400224689534015, -0.15255723053856546, 0.1566537867655921, 0.5489211983894932, -0.17031712209680225] as double[]","[-0.26084316, -0.64002246, -0.15255724, 0.15665378, 0.54892117, -0.17031713] as float[]","[-1, -1, 1, -1, 1, -1] as int[]","[-0.0027019706408068795, -0.008329762613111082, -0.001543641184315801, 0.15861207834235577, 0.6567031992927272, -0.001728424711189524] as double[]","[-0.0027019705, -0.008329763, -0.0015436412, 0.15861207, 0.6567032, -0.0017284247] as float[]","[-7156386, -18495716, 248181051, -13634228, 919305478, -15169971] as int[]","[0.07300645343782339, 0.6938494519078316, 0.023828281059158886, 0.025157791396081604, 0.43125909196130346, 0.029874519822505895] as double[]","[0.07300645, 0.6938495, 0.023828283, 0.025157789, 0.43125907, 0.02987452] as float[]","[988699588, -17870520, 141304729, 1260971300, 210951204, 1018550276] as int[]","[0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as double[]","[0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as float[]","[0, 0, 0, 0, 0, 0] as int[]","[0.26749509343988104, 0.8246464986979971, 0.1528204772472643, 0.0, 0.0, 0.17111404640776287] as double[]","[0.2674951, 0.82464653, 0.15282048, 0.0, 0.0, 0.17111404] as float[]","[708482220, 1831075919, 0, 1349788550, 0, 1501827147] as int[]","[0.0, 0.0, 0.0, 0.15702595755893223, 0.6501361672998, 0.0] as double[]","[0.0, 0.0, 0.0, 0.15702595174312592, 0.650136142373085, 0.0] as float[]","[0, 0, 245699240, 0, 910112423, 0] as int[]","[-1.0, -1.0, -1.0, -0.01, -0.01, -1.0] as double[]","[-1.0, -1.0, -1.0, -0.01, -0.01, -1.0] as float[]","[-1, -1, 0, -1, 0, -1] as int[]","[-0.857889221601257, -2.540599021320214, -0.5115487141104245, 0.42425011424364373, 1.9135425658852545, -0.5667989886456676] as double[]","[-0.85788924, -2.540599, -0.51154876, 0.4242501, 1.9135424, -0.566799] as float[]","[-1431277217, 595824021, 742061342, 1568121735, -1546243917, 1260973055] as int[]","[2.7019706408068793, 2.3297626131110825, 1.5436411843158009, 1.5861207834235578, 0.5670319929272729, 1.7284247111895241] as double[]","[2.7019706, 2.3297625, 1.5436412, 1.5861207, 0.56703186, 1.7284248] as float[]","[2, 0, 1, 1, 2, 1] as int[]","[0.011693048642643422, 0.8192993419907051, 0.08721424132459399, 0.1277867634692304, 0.6121424058303924, 0.09210498892686086] as double[]","[0.011690378, 0.81929654, 0.087213635, 0.12778962, 0.6121441, 0.09210494] as float[]","[0, 0, 0, 0, 0, 0] as int[]","['r2':[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.242396559265774, 0.23980663860290638, 0.4667980401594514, 0.0, 0.0, -1.0840395336123059, 0.43090823203242123, 1.0381081218392283, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as double[], 
'r1':[2.242396559265774, 0.23980663860290638, 0.4667980401594514, -1.0840395336123059, 0.43090823203242123, 1.0381081218392283] as double[]]","['r2':[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.2423966, 0.23980664, 0.46679804, 0.0, 0.0, -1.0840396, 0.43090823, 1.0381081, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as float[], 'r1':[2.2423966, 0.23980664, 0.46679804, -1.0840396, 0.43090823, 1.0381081] as float[]]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation functions work across types on slices and non sliced tensors. [5]", + "result":"PASS", + "duration":"0.052 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a function based on the provided expression.","code":["var func = Function.of(funExpression)"]}, + + {"kind":"and","text":"We create 2 tensors storing the same values, one sliced and the other a normal tensor.","code":["var t1 = Tensor.of(type).withShape(2, 3).andSeed(\"Tempeh\")","var t2 = Tensor.of(type).withShape(4, 5).all(0)[1..2, 1..3]","t2.mut[0..1, 0..2] = t1"]}, + + {"kind":"expect","text":"The types of both tensors should match what was provided during instantiation.","code":["t1.dataType == DataType.of(type)","t1.itemType == type","t2.dataType == DataType.of(type)","t2.itemType == type"]}, + + {"kind":"when","text":"We apply the function to both tensors...","code":["var result1 = func(t1)","var result2 = func(t2)"]}, + + {"kind":"then","text":"","code":["result1.itemType == type","result2.itemType == type"]}, + + {"kind":"and","text":"The data of the first (non slice) tensor should be as expected.","code":["result1.mut.data.get() == expected instanceof Map ? expected['r1'] : expected"]}, + + {"kind":"and","text":"As well the value of the slice tensor (Its data would be a sparse array).","code":["result2.items == expected instanceof Map ? 
expected['r2'] : expected"]}, + + {"kind":"where","text":"","code":{"type":["Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float"],"funExpression":["'tanh(i0)'","'tanh(i0)'","'tanh(i0)'","'relu(i0)'","'relu(i0)'","'relu(i0)'","'relu(i0*i0)'","'relu(i0*i0)'","'relu(i0*i0)'","'relu(i0-i0)'","'relu(i0-i0)'","'relu(i0-i0)'","'relu(i0)-i0'","'relu(i0)-i0'","'relu(i0)-i0'","'relu(-i0)+i0'","'relu(-i0)+i0'","'relu(-i0)+i0'","'relu(-i0)/i0'","'relu(-i0)/i0'","'relu(-i0)/i0'","'relu(-i0-5)+i0*3'","'relu(-i0-5)+i0*3'","'relu(-i0-5)+i0*3'","'abs(i0*10)%3'","'abs(i0*10)%3'","'abs(i0*10)%3'","'gaus(i0)*100%i0'","'gaus(i0)*100%i0'","'gaus(i0)*100%i0'","'random(i0)'","'random(i0)'"],"expected":["[-0.2608431635405718, -0.6400224689534015, -0.15255723053856546, 0.1566537867655921, 0.5489211983894932, -0.17031712209680225] as double[]","[-0.26084316, -0.64002246, -0.15255724, 0.15665378, 0.54892117, -0.17031713] as float[]","[-1, -1, 1, -1, 1, -1] as int[]","[-0.0027019706408068795, -0.008329762613111082, -0.001543641184315801, 0.15861207834235577, 0.6567031992927272, -0.001728424711189524] as double[]","[-0.0027019705, -0.008329763, -0.0015436412, 0.15861207, 0.6567032, -0.0017284247] as float[]","[-7156386, -18495716, 248181051, -13634228, 919305478, -15169971] as int[]","[0.07300645343782339, 0.6938494519078316, 0.023828281059158886, 0.025157791396081604, 0.43125909196130346, 0.029874519822505895] as double[]","[0.07300645, 0.6938495, 0.023828283, 0.025157789, 0.43125907, 0.02987452] as float[]","[988699588, -17870520, 141304729, 1260971300, 210951204, 1018550276] as int[]","[0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as double[]","[0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as float[]","[0, 0, 0, 0, 0, 0] as int[]","[0.26749509343988104, 0.8246464986979971, 0.1528204772472643, 0.0, 0.0, 0.17111404640776287] as double[]","[0.2674951, 0.82464653, 0.15282048, 0.0, 0.0, 0.17111404] as float[]","[708482220, 1831075919, 0, 1349788550, 0, 1501827147] as int[]","[0.0, 0.0, 0.0, 0.15702595755893223, 0.6501361672998, 0.0] as double[]","[0.0, 0.0, 0.0, 0.15702595174312592, 0.650136142373085, 0.0] as float[]","[0, 0, 245699240, 0, 910112423, 0] as int[]","[-1.0, -1.0, -1.0, -0.01, -0.01, -1.0] as double[]","[-1.0, -1.0, -1.0, -0.01, -0.01, -1.0] as float[]","[-1, -1, 0, -1, 0, -1] as int[]","[-0.857889221601257, -2.540599021320214, -0.5115487141104245, 0.42425011424364373, 1.9135425658852545, -0.5667989886456676] as double[]","[-0.85788924, -2.540599, -0.51154876, 0.4242501, 1.9135424, -0.566799] as float[]","[-1431277217, 595824021, 742061342, 1568121735, -1546243917, 1260973055] as int[]","[2.7019706408068793, 2.3297626131110825, 1.5436411843158009, 1.5861207834235578, 0.5670319929272729, 1.7284247111895241] as double[]","[2.7019706, 2.3297625, 1.5436412, 1.5861207, 0.56703186, 1.7284248] as float[]","[2, 0, 1, 1, 2, 1] as int[]","[0.011693048642643422, 0.8192993419907051, 0.08721424132459399, 0.1277867634692304, 0.6121424058303924, 0.09210498892686086] as double[]","[0.011690378, 0.81929654, 0.087213635, 0.12778962, 0.6121441, 0.09210494] as float[]","[0, 0, 0, 0, 0, 0] as int[]","['r2':[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.242396559265774, 0.23980663860290638, 0.4667980401594514, 0.0, 0.0, -1.0840395336123059, 0.43090823203242123, 1.0381081218392283, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as double[], 
'r1':[2.242396559265774, 0.23980663860290638, 0.4667980401594514, -1.0840395336123059, 0.43090823203242123, 1.0381081218392283] as double[]]","['r2':[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.2423966, 0.23980664, 0.46679804, 0.0, 0.0, -1.0840396, 0.43090823, 1.0381081, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as float[], 'r1':[2.2423966, 0.23980664, 0.46679804, -1.0840396, 0.43090823, 1.0381081] as float[]]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation functions work across types on slices and non sliced tensors. [6]", + "result":"PASS", + "duration":"0.054 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a function based on the provided expression.","code":["var func = Function.of(funExpression)"]}, + + {"kind":"and","text":"We create 2 tensors storing the same values, one sliced and the other a normal tensor.","code":["var t1 = Tensor.of(type).withShape(2, 3).andSeed(\"Tempeh\")","var t2 = Tensor.of(type).withShape(4, 5).all(0)[1..2, 1..3]","t2.mut[0..1, 0..2] = t1"]}, + + {"kind":"expect","text":"The types of both tensors should match what was provided during instantiation.","code":["t1.dataType == DataType.of(type)","t1.itemType == type","t2.dataType == DataType.of(type)","t2.itemType == type"]}, + + {"kind":"when","text":"We apply the function to both tensors...","code":["var result1 = func(t1)","var result2 = func(t2)"]}, + + {"kind":"then","text":"","code":["result1.itemType == type","result2.itemType == type"]}, + + {"kind":"and","text":"The data of the first (non slice) tensor should be as expected.","code":["result1.mut.data.get() == expected instanceof Map ? expected['r1'] : expected"]}, + + {"kind":"and","text":"As well the value of the slice tensor (Its data would be a sparse array).","code":["result2.items == expected instanceof Map ? 
expected['r2'] : expected"]}, + + {"kind":"where","text":"","code":{"type":["Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float"],"funExpression":["'tanh(i0)'","'tanh(i0)'","'tanh(i0)'","'relu(i0)'","'relu(i0)'","'relu(i0)'","'relu(i0*i0)'","'relu(i0*i0)'","'relu(i0*i0)'","'relu(i0-i0)'","'relu(i0-i0)'","'relu(i0-i0)'","'relu(i0)-i0'","'relu(i0)-i0'","'relu(i0)-i0'","'relu(-i0)+i0'","'relu(-i0)+i0'","'relu(-i0)+i0'","'relu(-i0)/i0'","'relu(-i0)/i0'","'relu(-i0)/i0'","'relu(-i0-5)+i0*3'","'relu(-i0-5)+i0*3'","'relu(-i0-5)+i0*3'","'abs(i0*10)%3'","'abs(i0*10)%3'","'abs(i0*10)%3'","'gaus(i0)*100%i0'","'gaus(i0)*100%i0'","'gaus(i0)*100%i0'","'random(i0)'","'random(i0)'"],"expected":["[-0.2608431635405718, -0.6400224689534015, -0.15255723053856546, 0.1566537867655921, 0.5489211983894932, -0.17031712209680225] as double[]","[-0.26084316, -0.64002246, -0.15255724, 0.15665378, 0.54892117, -0.17031713] as float[]","[-1, -1, 1, -1, 1, -1] as int[]","[-0.0027019706408068795, -0.008329762613111082, -0.001543641184315801, 0.15861207834235577, 0.6567031992927272, -0.001728424711189524] as double[]","[-0.0027019705, -0.008329763, -0.0015436412, 0.15861207, 0.6567032, -0.0017284247] as float[]","[-7156386, -18495716, 248181051, -13634228, 919305478, -15169971] as int[]","[0.07300645343782339, 0.6938494519078316, 0.023828281059158886, 0.025157791396081604, 0.43125909196130346, 0.029874519822505895] as double[]","[0.07300645, 0.6938495, 0.023828283, 0.025157789, 0.43125907, 0.02987452] as float[]","[988699588, -17870520, 141304729, 1260971300, 210951204, 1018550276] as int[]","[0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as double[]","[0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as float[]","[0, 0, 0, 0, 0, 0] as int[]","[0.26749509343988104, 0.8246464986979971, 0.1528204772472643, 0.0, 0.0, 0.17111404640776287] as double[]","[0.2674951, 0.82464653, 0.15282048, 0.0, 0.0, 0.17111404] as float[]","[708482220, 1831075919, 0, 1349788550, 0, 1501827147] as int[]","[0.0, 0.0, 0.0, 0.15702595755893223, 0.6501361672998, 0.0] as double[]","[0.0, 0.0, 0.0, 0.15702595174312592, 0.650136142373085, 0.0] as float[]","[0, 0, 245699240, 0, 910112423, 0] as int[]","[-1.0, -1.0, -1.0, -0.01, -0.01, -1.0] as double[]","[-1.0, -1.0, -1.0, -0.01, -0.01, -1.0] as float[]","[-1, -1, 0, -1, 0, -1] as int[]","[-0.857889221601257, -2.540599021320214, -0.5115487141104245, 0.42425011424364373, 1.9135425658852545, -0.5667989886456676] as double[]","[-0.85788924, -2.540599, -0.51154876, 0.4242501, 1.9135424, -0.566799] as float[]","[-1431277217, 595824021, 742061342, 1568121735, -1546243917, 1260973055] as int[]","[2.7019706408068793, 2.3297626131110825, 1.5436411843158009, 1.5861207834235578, 0.5670319929272729, 1.7284247111895241] as double[]","[2.7019706, 2.3297625, 1.5436412, 1.5861207, 0.56703186, 1.7284248] as float[]","[2, 0, 1, 1, 2, 1] as int[]","[0.011693048642643422, 0.8192993419907051, 0.08721424132459399, 0.1277867634692304, 0.6121424058303924, 0.09210498892686086] as double[]","[0.011690378, 0.81929654, 0.087213635, 0.12778962, 0.6121441, 0.09210494] as float[]","[0, 0, 0, 0, 0, 0] as int[]","['r2':[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.242396559265774, 0.23980663860290638, 0.4667980401594514, 0.0, 0.0, -1.0840395336123059, 0.43090823203242123, 1.0381081218392283, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as double[], 
'r1':[2.242396559265774, 0.23980663860290638, 0.4667980401594514, -1.0840395336123059, 0.43090823203242123, 1.0381081218392283] as double[]]","['r2':[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.2423966, 0.23980664, 0.46679804, 0.0, 0.0, -1.0840396, 0.43090823, 1.0381081, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as float[], 'r1':[2.2423966, 0.23980664, 0.46679804, -1.0840396, 0.43090823, 1.0381081] as float[]]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation functions work across types on slices and non sliced tensors. [7]", + "result":"PASS", + "duration":"0.655 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a function based on the provided expression.","code":["var func = Function.of(funExpression)"]}, + + {"kind":"and","text":"We create 2 tensors storing the same values, one sliced and the other a normal tensor.","code":["var t1 = Tensor.of(type).withShape(2, 3).andSeed(\"Tempeh\")","var t2 = Tensor.of(type).withShape(4, 5).all(0)[1..2, 1..3]","t2.mut[0..1, 0..2] = t1"]}, + + {"kind":"expect","text":"The types of both tensors should match what was provided during instantiation.","code":["t1.dataType == DataType.of(type)","t1.itemType == type","t2.dataType == DataType.of(type)","t2.itemType == type"]}, + + {"kind":"when","text":"We apply the function to both tensors...","code":["var result1 = func(t1)","var result2 = func(t2)"]}, + + {"kind":"then","text":"","code":["result1.itemType == type","result2.itemType == type"]}, + + {"kind":"and","text":"The data of the first (non slice) tensor should be as expected.","code":["result1.mut.data.get() == expected instanceof Map ? expected['r1'] : expected"]}, + + {"kind":"and","text":"As well the value of the slice tensor (Its data would be a sparse array).","code":["result2.items == expected instanceof Map ? 
expected['r2'] : expected"]}, + + {"kind":"where","text":"","code":{"type":["Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float"],"funExpression":["'tanh(i0)'","'tanh(i0)'","'tanh(i0)'","'relu(i0)'","'relu(i0)'","'relu(i0)'","'relu(i0*i0)'","'relu(i0*i0)'","'relu(i0*i0)'","'relu(i0-i0)'","'relu(i0-i0)'","'relu(i0-i0)'","'relu(i0)-i0'","'relu(i0)-i0'","'relu(i0)-i0'","'relu(-i0)+i0'","'relu(-i0)+i0'","'relu(-i0)+i0'","'relu(-i0)/i0'","'relu(-i0)/i0'","'relu(-i0)/i0'","'relu(-i0-5)+i0*3'","'relu(-i0-5)+i0*3'","'relu(-i0-5)+i0*3'","'abs(i0*10)%3'","'abs(i0*10)%3'","'abs(i0*10)%3'","'gaus(i0)*100%i0'","'gaus(i0)*100%i0'","'gaus(i0)*100%i0'","'random(i0)'","'random(i0)'"],"expected":["[-0.2608431635405718, -0.6400224689534015, -0.15255723053856546, 0.1566537867655921, 0.5489211983894932, -0.17031712209680225] as double[]","[-0.26084316, -0.64002246, -0.15255724, 0.15665378, 0.54892117, -0.17031713] as float[]","[-1, -1, 1, -1, 1, -1] as int[]","[-0.0027019706408068795, -0.008329762613111082, -0.001543641184315801, 0.15861207834235577, 0.6567031992927272, -0.001728424711189524] as double[]","[-0.0027019705, -0.008329763, -0.0015436412, 0.15861207, 0.6567032, -0.0017284247] as float[]","[-7156386, -18495716, 248181051, -13634228, 919305478, -15169971] as int[]","[0.07300645343782339, 0.6938494519078316, 0.023828281059158886, 0.025157791396081604, 0.43125909196130346, 0.029874519822505895] as double[]","[0.07300645, 0.6938495, 0.023828283, 0.025157789, 0.43125907, 0.02987452] as float[]","[988699588, -17870520, 141304729, 1260971300, 210951204, 1018550276] as int[]","[0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as double[]","[0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as float[]","[0, 0, 0, 0, 0, 0] as int[]","[0.26749509343988104, 0.8246464986979971, 0.1528204772472643, 0.0, 0.0, 0.17111404640776287] as double[]","[0.2674951, 0.82464653, 0.15282048, 0.0, 0.0, 0.17111404] as float[]","[708482220, 1831075919, 0, 1349788550, 0, 1501827147] as int[]","[0.0, 0.0, 0.0, 0.15702595755893223, 0.6501361672998, 0.0] as double[]","[0.0, 0.0, 0.0, 0.15702595174312592, 0.650136142373085, 0.0] as float[]","[0, 0, 245699240, 0, 910112423, 0] as int[]","[-1.0, -1.0, -1.0, -0.01, -0.01, -1.0] as double[]","[-1.0, -1.0, -1.0, -0.01, -0.01, -1.0] as float[]","[-1, -1, 0, -1, 0, -1] as int[]","[-0.857889221601257, -2.540599021320214, -0.5115487141104245, 0.42425011424364373, 1.9135425658852545, -0.5667989886456676] as double[]","[-0.85788924, -2.540599, -0.51154876, 0.4242501, 1.9135424, -0.566799] as float[]","[-1431277217, 595824021, 742061342, 1568121735, -1546243917, 1260973055] as int[]","[2.7019706408068793, 2.3297626131110825, 1.5436411843158009, 1.5861207834235578, 0.5670319929272729, 1.7284247111895241] as double[]","[2.7019706, 2.3297625, 1.5436412, 1.5861207, 0.56703186, 1.7284248] as float[]","[2, 0, 1, 1, 2, 1] as int[]","[0.011693048642643422, 0.8192993419907051, 0.08721424132459399, 0.1277867634692304, 0.6121424058303924, 0.09210498892686086] as double[]","[0.011690378, 0.81929654, 0.087213635, 0.12778962, 0.6121441, 0.09210494] as float[]","[0, 0, 0, 0, 0, 0] as int[]","['r2':[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.242396559265774, 0.23980663860290638, 0.4667980401594514, 0.0, 0.0, -1.0840395336123059, 0.43090823203242123, 1.0381081218392283, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as double[], 
'r1':[2.242396559265774, 0.23980663860290638, 0.4667980401594514, -1.0840395336123059, 0.43090823203242123, 1.0381081218392283] as double[]]","['r2':[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.2423966, 0.23980664, 0.46679804, 0.0, 0.0, -1.0840396, 0.43090823, 1.0381081, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as float[], 'r1':[2.2423966, 0.23980664, 0.46679804, -1.0840396, 0.43090823, 1.0381081] as float[]]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation functions work across types on slices and non sliced tensors. [8]", + "result":"PASS", + "duration":"0.053 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a function based on the provided expression.","code":["var func = Function.of(funExpression)"]}, + + {"kind":"and","text":"We create 2 tensors storing the same values, one sliced and the other a normal tensor.","code":["var t1 = Tensor.of(type).withShape(2, 3).andSeed(\"Tempeh\")","var t2 = Tensor.of(type).withShape(4, 5).all(0)[1..2, 1..3]","t2.mut[0..1, 0..2] = t1"]}, + + {"kind":"expect","text":"The types of both tensors should match what was provided during instantiation.","code":["t1.dataType == DataType.of(type)","t1.itemType == type","t2.dataType == DataType.of(type)","t2.itemType == type"]}, + + {"kind":"when","text":"We apply the function to both tensors...","code":["var result1 = func(t1)","var result2 = func(t2)"]}, + + {"kind":"then","text":"","code":["result1.itemType == type","result2.itemType == type"]}, + + {"kind":"and","text":"The data of the first (non slice) tensor should be as expected.","code":["result1.mut.data.get() == expected instanceof Map ? expected['r1'] : expected"]}, + + {"kind":"and","text":"As well the value of the slice tensor (Its data would be a sparse array).","code":["result2.items == expected instanceof Map ? 
expected['r2'] : expected"]}, + + {"kind":"where","text":"","code":{"type":["Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float"],"funExpression":["'tanh(i0)'","'tanh(i0)'","'tanh(i0)'","'relu(i0)'","'relu(i0)'","'relu(i0)'","'relu(i0*i0)'","'relu(i0*i0)'","'relu(i0*i0)'","'relu(i0-i0)'","'relu(i0-i0)'","'relu(i0-i0)'","'relu(i0)-i0'","'relu(i0)-i0'","'relu(i0)-i0'","'relu(-i0)+i0'","'relu(-i0)+i0'","'relu(-i0)+i0'","'relu(-i0)/i0'","'relu(-i0)/i0'","'relu(-i0)/i0'","'relu(-i0-5)+i0*3'","'relu(-i0-5)+i0*3'","'relu(-i0-5)+i0*3'","'abs(i0*10)%3'","'abs(i0*10)%3'","'abs(i0*10)%3'","'gaus(i0)*100%i0'","'gaus(i0)*100%i0'","'gaus(i0)*100%i0'","'random(i0)'","'random(i0)'"],"expected":["[-0.2608431635405718, -0.6400224689534015, -0.15255723053856546, 0.1566537867655921, 0.5489211983894932, -0.17031712209680225] as double[]","[-0.26084316, -0.64002246, -0.15255724, 0.15665378, 0.54892117, -0.17031713] as float[]","[-1, -1, 1, -1, 1, -1] as int[]","[-0.0027019706408068795, -0.008329762613111082, -0.001543641184315801, 0.15861207834235577, 0.6567031992927272, -0.001728424711189524] as double[]","[-0.0027019705, -0.008329763, -0.0015436412, 0.15861207, 0.6567032, -0.0017284247] as float[]","[-7156386, -18495716, 248181051, -13634228, 919305478, -15169971] as int[]","[0.07300645343782339, 0.6938494519078316, 0.023828281059158886, 0.025157791396081604, 0.43125909196130346, 0.029874519822505895] as double[]","[0.07300645, 0.6938495, 0.023828283, 0.025157789, 0.43125907, 0.02987452] as float[]","[988699588, -17870520, 141304729, 1260971300, 210951204, 1018550276] as int[]","[0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as double[]","[0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as float[]","[0, 0, 0, 0, 0, 0] as int[]","[0.26749509343988104, 0.8246464986979971, 0.1528204772472643, 0.0, 0.0, 0.17111404640776287] as double[]","[0.2674951, 0.82464653, 0.15282048, 0.0, 0.0, 0.17111404] as float[]","[708482220, 1831075919, 0, 1349788550, 0, 1501827147] as int[]","[0.0, 0.0, 0.0, 0.15702595755893223, 0.6501361672998, 0.0] as double[]","[0.0, 0.0, 0.0, 0.15702595174312592, 0.650136142373085, 0.0] as float[]","[0, 0, 245699240, 0, 910112423, 0] as int[]","[-1.0, -1.0, -1.0, -0.01, -0.01, -1.0] as double[]","[-1.0, -1.0, -1.0, -0.01, -0.01, -1.0] as float[]","[-1, -1, 0, -1, 0, -1] as int[]","[-0.857889221601257, -2.540599021320214, -0.5115487141104245, 0.42425011424364373, 1.9135425658852545, -0.5667989886456676] as double[]","[-0.85788924, -2.540599, -0.51154876, 0.4242501, 1.9135424, -0.566799] as float[]","[-1431277217, 595824021, 742061342, 1568121735, -1546243917, 1260973055] as int[]","[2.7019706408068793, 2.3297626131110825, 1.5436411843158009, 1.5861207834235578, 0.5670319929272729, 1.7284247111895241] as double[]","[2.7019706, 2.3297625, 1.5436412, 1.5861207, 0.56703186, 1.7284248] as float[]","[2, 0, 1, 1, 2, 1] as int[]","[0.011693048642643422, 0.8192993419907051, 0.08721424132459399, 0.1277867634692304, 0.6121424058303924, 0.09210498892686086] as double[]","[0.011690378, 0.81929654, 0.087213635, 0.12778962, 0.6121441, 0.09210494] as float[]","[0, 0, 0, 0, 0, 0] as int[]","['r2':[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.242396559265774, 0.23980663860290638, 0.4667980401594514, 0.0, 0.0, -1.0840395336123059, 0.43090823203242123, 1.0381081218392283, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as double[], 
'r1':[2.242396559265774, 0.23980663860290638, 0.4667980401594514, -1.0840395336123059, 0.43090823203242123, 1.0381081218392283] as double[]]","['r2':[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.2423966, 0.23980664, 0.46679804, 0.0, 0.0, -1.0840396, 0.43090823, 1.0381081, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as float[], 'r1':[2.2423966, 0.23980664, 0.46679804, -1.0840396, 0.43090823, 1.0381081] as float[]]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation functions work across types on slices and non sliced tensors. [9]", + "result":"PASS", + "duration":"0.051 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a function based on the provided expression.","code":["var func = Function.of(funExpression)"]}, + + {"kind":"and","text":"We create 2 tensors storing the same values, one sliced and the other a normal tensor.","code":["var t1 = Tensor.of(type).withShape(2, 3).andSeed(\"Tempeh\")","var t2 = Tensor.of(type).withShape(4, 5).all(0)[1..2, 1..3]","t2.mut[0..1, 0..2] = t1"]}, + + {"kind":"expect","text":"The types of both tensors should match what was provided during instantiation.","code":["t1.dataType == DataType.of(type)","t1.itemType == type","t2.dataType == DataType.of(type)","t2.itemType == type"]}, + + {"kind":"when","text":"We apply the function to both tensors...","code":["var result1 = func(t1)","var result2 = func(t2)"]}, + + {"kind":"then","text":"","code":["result1.itemType == type","result2.itemType == type"]}, + + {"kind":"and","text":"The data of the first (non slice) tensor should be as expected.","code":["result1.mut.data.get() == expected instanceof Map ? expected['r1'] : expected"]}, + + {"kind":"and","text":"As well the value of the slice tensor (Its data would be a sparse array).","code":["result2.items == expected instanceof Map ? 
expected['r2'] : expected"]}, + + {"kind":"where","text":"","code":{"type":["Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float"],"funExpression":["'tanh(i0)'","'tanh(i0)'","'tanh(i0)'","'relu(i0)'","'relu(i0)'","'relu(i0)'","'relu(i0*i0)'","'relu(i0*i0)'","'relu(i0*i0)'","'relu(i0-i0)'","'relu(i0-i0)'","'relu(i0-i0)'","'relu(i0)-i0'","'relu(i0)-i0'","'relu(i0)-i0'","'relu(-i0)+i0'","'relu(-i0)+i0'","'relu(-i0)+i0'","'relu(-i0)/i0'","'relu(-i0)/i0'","'relu(-i0)/i0'","'relu(-i0-5)+i0*3'","'relu(-i0-5)+i0*3'","'relu(-i0-5)+i0*3'","'abs(i0*10)%3'","'abs(i0*10)%3'","'abs(i0*10)%3'","'gaus(i0)*100%i0'","'gaus(i0)*100%i0'","'gaus(i0)*100%i0'","'random(i0)'","'random(i0)'"],"expected":["[-0.2608431635405718, -0.6400224689534015, -0.15255723053856546, 0.1566537867655921, 0.5489211983894932, -0.17031712209680225] as double[]","[-0.26084316, -0.64002246, -0.15255724, 0.15665378, 0.54892117, -0.17031713] as float[]","[-1, -1, 1, -1, 1, -1] as int[]","[-0.0027019706408068795, -0.008329762613111082, -0.001543641184315801, 0.15861207834235577, 0.6567031992927272, -0.001728424711189524] as double[]","[-0.0027019705, -0.008329763, -0.0015436412, 0.15861207, 0.6567032, -0.0017284247] as float[]","[-7156386, -18495716, 248181051, -13634228, 919305478, -15169971] as int[]","[0.07300645343782339, 0.6938494519078316, 0.023828281059158886, 0.025157791396081604, 0.43125909196130346, 0.029874519822505895] as double[]","[0.07300645, 0.6938495, 0.023828283, 0.025157789, 0.43125907, 0.02987452] as float[]","[988699588, -17870520, 141304729, 1260971300, 210951204, 1018550276] as int[]","[0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as double[]","[0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as float[]","[0, 0, 0, 0, 0, 0] as int[]","[0.26749509343988104, 0.8246464986979971, 0.1528204772472643, 0.0, 0.0, 0.17111404640776287] as double[]","[0.2674951, 0.82464653, 0.15282048, 0.0, 0.0, 0.17111404] as float[]","[708482220, 1831075919, 0, 1349788550, 0, 1501827147] as int[]","[0.0, 0.0, 0.0, 0.15702595755893223, 0.6501361672998, 0.0] as double[]","[0.0, 0.0, 0.0, 0.15702595174312592, 0.650136142373085, 0.0] as float[]","[0, 0, 245699240, 0, 910112423, 0] as int[]","[-1.0, -1.0, -1.0, -0.01, -0.01, -1.0] as double[]","[-1.0, -1.0, -1.0, -0.01, -0.01, -1.0] as float[]","[-1, -1, 0, -1, 0, -1] as int[]","[-0.857889221601257, -2.540599021320214, -0.5115487141104245, 0.42425011424364373, 1.9135425658852545, -0.5667989886456676] as double[]","[-0.85788924, -2.540599, -0.51154876, 0.4242501, 1.9135424, -0.566799] as float[]","[-1431277217, 595824021, 742061342, 1568121735, -1546243917, 1260973055] as int[]","[2.7019706408068793, 2.3297626131110825, 1.5436411843158009, 1.5861207834235578, 0.5670319929272729, 1.7284247111895241] as double[]","[2.7019706, 2.3297625, 1.5436412, 1.5861207, 0.56703186, 1.7284248] as float[]","[2, 0, 1, 1, 2, 1] as int[]","[0.011693048642643422, 0.8192993419907051, 0.08721424132459399, 0.1277867634692304, 0.6121424058303924, 0.09210498892686086] as double[]","[0.011690378, 0.81929654, 0.087213635, 0.12778962, 0.6121441, 0.09210494] as float[]","[0, 0, 0, 0, 0, 0] as int[]","['r2':[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.242396559265774, 0.23980663860290638, 0.4667980401594514, 0.0, 0.0, -1.0840395336123059, 0.43090823203242123, 1.0381081218392283, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as double[], 
'r1':[2.242396559265774, 0.23980663860290638, 0.4667980401594514, -1.0840395336123059, 0.43090823203242123, 1.0381081218392283] as double[]]","['r2':[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.2423966, 0.23980664, 0.46679804, 0.0, 0.0, -1.0840396, 0.43090823, 1.0381081, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as float[], 'r1':[2.2423966, 0.23980664, 0.46679804, -1.0840396, 0.43090823, 1.0381081] as float[]]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation functions work across types on slices and non sliced tensors. [10]", + "result":"PASS", + "duration":"0.053 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a function based on the provided expression.","code":["var func = Function.of(funExpression)"]}, + + {"kind":"and","text":"We create 2 tensors storing the same values, one sliced and the other a normal tensor.","code":["var t1 = Tensor.of(type).withShape(2, 3).andSeed(\"Tempeh\")","var t2 = Tensor.of(type).withShape(4, 5).all(0)[1..2, 1..3]","t2.mut[0..1, 0..2] = t1"]}, + + {"kind":"expect","text":"The types of both tensors should match what was provided during instantiation.","code":["t1.dataType == DataType.of(type)","t1.itemType == type","t2.dataType == DataType.of(type)","t2.itemType == type"]}, + + {"kind":"when","text":"We apply the function to both tensors...","code":["var result1 = func(t1)","var result2 = func(t2)"]}, + + {"kind":"then","text":"","code":["result1.itemType == type","result2.itemType == type"]}, + + {"kind":"and","text":"The data of the first (non slice) tensor should be as expected.","code":["result1.mut.data.get() == expected instanceof Map ? expected['r1'] : expected"]}, + + {"kind":"and","text":"As well the value of the slice tensor (Its data would be a sparse array).","code":["result2.items == expected instanceof Map ? 
expected['r2'] : expected"]}, + + {"kind":"where","text":"","code":{"type":["Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float"],"funExpression":["'tanh(i0)'","'tanh(i0)'","'tanh(i0)'","'relu(i0)'","'relu(i0)'","'relu(i0)'","'relu(i0*i0)'","'relu(i0*i0)'","'relu(i0*i0)'","'relu(i0-i0)'","'relu(i0-i0)'","'relu(i0-i0)'","'relu(i0)-i0'","'relu(i0)-i0'","'relu(i0)-i0'","'relu(-i0)+i0'","'relu(-i0)+i0'","'relu(-i0)+i0'","'relu(-i0)/i0'","'relu(-i0)/i0'","'relu(-i0)/i0'","'relu(-i0-5)+i0*3'","'relu(-i0-5)+i0*3'","'relu(-i0-5)+i0*3'","'abs(i0*10)%3'","'abs(i0*10)%3'","'abs(i0*10)%3'","'gaus(i0)*100%i0'","'gaus(i0)*100%i0'","'gaus(i0)*100%i0'","'random(i0)'","'random(i0)'"],"expected":["[-0.2608431635405718, -0.6400224689534015, -0.15255723053856546, 0.1566537867655921, 0.5489211983894932, -0.17031712209680225] as double[]","[-0.26084316, -0.64002246, -0.15255724, 0.15665378, 0.54892117, -0.17031713] as float[]","[-1, -1, 1, -1, 1, -1] as int[]","[-0.0027019706408068795, -0.008329762613111082, -0.001543641184315801, 0.15861207834235577, 0.6567031992927272, -0.001728424711189524] as double[]","[-0.0027019705, -0.008329763, -0.0015436412, 0.15861207, 0.6567032, -0.0017284247] as float[]","[-7156386, -18495716, 248181051, -13634228, 919305478, -15169971] as int[]","[0.07300645343782339, 0.6938494519078316, 0.023828281059158886, 0.025157791396081604, 0.43125909196130346, 0.029874519822505895] as double[]","[0.07300645, 0.6938495, 0.023828283, 0.025157789, 0.43125907, 0.02987452] as float[]","[988699588, -17870520, 141304729, 1260971300, 210951204, 1018550276] as int[]","[0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as double[]","[0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as float[]","[0, 0, 0, 0, 0, 0] as int[]","[0.26749509343988104, 0.8246464986979971, 0.1528204772472643, 0.0, 0.0, 0.17111404640776287] as double[]","[0.2674951, 0.82464653, 0.15282048, 0.0, 0.0, 0.17111404] as float[]","[708482220, 1831075919, 0, 1349788550, 0, 1501827147] as int[]","[0.0, 0.0, 0.0, 0.15702595755893223, 0.6501361672998, 0.0] as double[]","[0.0, 0.0, 0.0, 0.15702595174312592, 0.650136142373085, 0.0] as float[]","[0, 0, 245699240, 0, 910112423, 0] as int[]","[-1.0, -1.0, -1.0, -0.01, -0.01, -1.0] as double[]","[-1.0, -1.0, -1.0, -0.01, -0.01, -1.0] as float[]","[-1, -1, 0, -1, 0, -1] as int[]","[-0.857889221601257, -2.540599021320214, -0.5115487141104245, 0.42425011424364373, 1.9135425658852545, -0.5667989886456676] as double[]","[-0.85788924, -2.540599, -0.51154876, 0.4242501, 1.9135424, -0.566799] as float[]","[-1431277217, 595824021, 742061342, 1568121735, -1546243917, 1260973055] as int[]","[2.7019706408068793, 2.3297626131110825, 1.5436411843158009, 1.5861207834235578, 0.5670319929272729, 1.7284247111895241] as double[]","[2.7019706, 2.3297625, 1.5436412, 1.5861207, 0.56703186, 1.7284248] as float[]","[2, 0, 1, 1, 2, 1] as int[]","[0.011693048642643422, 0.8192993419907051, 0.08721424132459399, 0.1277867634692304, 0.6121424058303924, 0.09210498892686086] as double[]","[0.011690378, 0.81929654, 0.087213635, 0.12778962, 0.6121441, 0.09210494] as float[]","[0, 0, 0, 0, 0, 0] as int[]","['r2':[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.242396559265774, 0.23980663860290638, 0.4667980401594514, 0.0, 0.0, -1.0840395336123059, 0.43090823203242123, 1.0381081218392283, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as double[], 
'r1':[2.242396559265774, 0.23980663860290638, 0.4667980401594514, -1.0840395336123059, 0.43090823203242123, 1.0381081218392283] as double[]]","['r2':[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.2423966, 0.23980664, 0.46679804, 0.0, 0.0, -1.0840396, 0.43090823, 1.0381081, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as float[], 'r1':[2.2423966, 0.23980664, 0.46679804, -1.0840396, 0.43090823, 1.0381081] as float[]]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation functions work across types on slices and non sliced tensors. [11]", + "result":"PASS", + "duration":"0.051 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a function based on the provided expression.","code":["var func = Function.of(funExpression)"]}, + + {"kind":"and","text":"We create 2 tensors storing the same values, one sliced and the other a normal tensor.","code":["var t1 = Tensor.of(type).withShape(2, 3).andSeed(\"Tempeh\")","var t2 = Tensor.of(type).withShape(4, 5).all(0)[1..2, 1..3]","t2.mut[0..1, 0..2] = t1"]}, + + {"kind":"expect","text":"The types of both tensors should match what was provided during instantiation.","code":["t1.dataType == DataType.of(type)","t1.itemType == type","t2.dataType == DataType.of(type)","t2.itemType == type"]}, + + {"kind":"when","text":"We apply the function to both tensors...","code":["var result1 = func(t1)","var result2 = func(t2)"]}, + + {"kind":"then","text":"","code":["result1.itemType == type","result2.itemType == type"]}, + + {"kind":"and","text":"The data of the first (non slice) tensor should be as expected.","code":["result1.mut.data.get() == expected instanceof Map ? expected['r1'] : expected"]}, + + {"kind":"and","text":"As well the value of the slice tensor (Its data would be a sparse array).","code":["result2.items == expected instanceof Map ? 
expected['r2'] : expected"]}, + + {"kind":"where","text":"","code":{"type":["Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float"],"funExpression":["'tanh(i0)'","'tanh(i0)'","'tanh(i0)'","'relu(i0)'","'relu(i0)'","'relu(i0)'","'relu(i0*i0)'","'relu(i0*i0)'","'relu(i0*i0)'","'relu(i0-i0)'","'relu(i0-i0)'","'relu(i0-i0)'","'relu(i0)-i0'","'relu(i0)-i0'","'relu(i0)-i0'","'relu(-i0)+i0'","'relu(-i0)+i0'","'relu(-i0)+i0'","'relu(-i0)/i0'","'relu(-i0)/i0'","'relu(-i0)/i0'","'relu(-i0-5)+i0*3'","'relu(-i0-5)+i0*3'","'relu(-i0-5)+i0*3'","'abs(i0*10)%3'","'abs(i0*10)%3'","'abs(i0*10)%3'","'gaus(i0)*100%i0'","'gaus(i0)*100%i0'","'gaus(i0)*100%i0'","'random(i0)'","'random(i0)'"],"expected":["[-0.2608431635405718, -0.6400224689534015, -0.15255723053856546, 0.1566537867655921, 0.5489211983894932, -0.17031712209680225] as double[]","[-0.26084316, -0.64002246, -0.15255724, 0.15665378, 0.54892117, -0.17031713] as float[]","[-1, -1, 1, -1, 1, -1] as int[]","[-0.0027019706408068795, -0.008329762613111082, -0.001543641184315801, 0.15861207834235577, 0.6567031992927272, -0.001728424711189524] as double[]","[-0.0027019705, -0.008329763, -0.0015436412, 0.15861207, 0.6567032, -0.0017284247] as float[]","[-7156386, -18495716, 248181051, -13634228, 919305478, -15169971] as int[]","[0.07300645343782339, 0.6938494519078316, 0.023828281059158886, 0.025157791396081604, 0.43125909196130346, 0.029874519822505895] as double[]","[0.07300645, 0.6938495, 0.023828283, 0.025157789, 0.43125907, 0.02987452] as float[]","[988699588, -17870520, 141304729, 1260971300, 210951204, 1018550276] as int[]","[0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as double[]","[0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as float[]","[0, 0, 0, 0, 0, 0] as int[]","[0.26749509343988104, 0.8246464986979971, 0.1528204772472643, 0.0, 0.0, 0.17111404640776287] as double[]","[0.2674951, 0.82464653, 0.15282048, 0.0, 0.0, 0.17111404] as float[]","[708482220, 1831075919, 0, 1349788550, 0, 1501827147] as int[]","[0.0, 0.0, 0.0, 0.15702595755893223, 0.6501361672998, 0.0] as double[]","[0.0, 0.0, 0.0, 0.15702595174312592, 0.650136142373085, 0.0] as float[]","[0, 0, 245699240, 0, 910112423, 0] as int[]","[-1.0, -1.0, -1.0, -0.01, -0.01, -1.0] as double[]","[-1.0, -1.0, -1.0, -0.01, -0.01, -1.0] as float[]","[-1, -1, 0, -1, 0, -1] as int[]","[-0.857889221601257, -2.540599021320214, -0.5115487141104245, 0.42425011424364373, 1.9135425658852545, -0.5667989886456676] as double[]","[-0.85788924, -2.540599, -0.51154876, 0.4242501, 1.9135424, -0.566799] as float[]","[-1431277217, 595824021, 742061342, 1568121735, -1546243917, 1260973055] as int[]","[2.7019706408068793, 2.3297626131110825, 1.5436411843158009, 1.5861207834235578, 0.5670319929272729, 1.7284247111895241] as double[]","[2.7019706, 2.3297625, 1.5436412, 1.5861207, 0.56703186, 1.7284248] as float[]","[2, 0, 1, 1, 2, 1] as int[]","[0.011693048642643422, 0.8192993419907051, 0.08721424132459399, 0.1277867634692304, 0.6121424058303924, 0.09210498892686086] as double[]","[0.011690378, 0.81929654, 0.087213635, 0.12778962, 0.6121441, 0.09210494] as float[]","[0, 0, 0, 0, 0, 0] as int[]","['r2':[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.242396559265774, 0.23980663860290638, 0.4667980401594514, 0.0, 0.0, -1.0840395336123059, 0.43090823203242123, 1.0381081218392283, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as double[], 
'r1':[2.242396559265774, 0.23980663860290638, 0.4667980401594514, -1.0840395336123059, 0.43090823203242123, 1.0381081218392283] as double[]]","['r2':[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.2423966, 0.23980664, 0.46679804, 0.0, 0.0, -1.0840396, 0.43090823, 1.0381081, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as float[], 'r1':[2.2423966, 0.23980664, 0.46679804, -1.0840396, 0.43090823, 1.0381081] as float[]]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation functions work across types on slices and non sliced tensors. [12]", + "result":"PASS", + "duration":"0.051 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a function based on the provided expression.","code":["var func = Function.of(funExpression)"]}, + + {"kind":"and","text":"We create 2 tensors storing the same values, one sliced and the other a normal tensor.","code":["var t1 = Tensor.of(type).withShape(2, 3).andSeed(\"Tempeh\")","var t2 = Tensor.of(type).withShape(4, 5).all(0)[1..2, 1..3]","t2.mut[0..1, 0..2] = t1"]}, + + {"kind":"expect","text":"The types of both tensors should match what was provided during instantiation.","code":["t1.dataType == DataType.of(type)","t1.itemType == type","t2.dataType == DataType.of(type)","t2.itemType == type"]}, + + {"kind":"when","text":"We apply the function to both tensors...","code":["var result1 = func(t1)","var result2 = func(t2)"]}, + + {"kind":"then","text":"","code":["result1.itemType == type","result2.itemType == type"]}, + + {"kind":"and","text":"The data of the first (non slice) tensor should be as expected.","code":["result1.mut.data.get() == expected instanceof Map ? expected['r1'] : expected"]}, + + {"kind":"and","text":"As well the value of the slice tensor (Its data would be a sparse array).","code":["result2.items == expected instanceof Map ? 
expected['r2'] : expected"]}, + + {"kind":"where","text":"","code":{"type":["Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float"],"funExpression":["'tanh(i0)'","'tanh(i0)'","'tanh(i0)'","'relu(i0)'","'relu(i0)'","'relu(i0)'","'relu(i0*i0)'","'relu(i0*i0)'","'relu(i0*i0)'","'relu(i0-i0)'","'relu(i0-i0)'","'relu(i0-i0)'","'relu(i0)-i0'","'relu(i0)-i0'","'relu(i0)-i0'","'relu(-i0)+i0'","'relu(-i0)+i0'","'relu(-i0)+i0'","'relu(-i0)/i0'","'relu(-i0)/i0'","'relu(-i0)/i0'","'relu(-i0-5)+i0*3'","'relu(-i0-5)+i0*3'","'relu(-i0-5)+i0*3'","'abs(i0*10)%3'","'abs(i0*10)%3'","'abs(i0*10)%3'","'gaus(i0)*100%i0'","'gaus(i0)*100%i0'","'gaus(i0)*100%i0'","'random(i0)'","'random(i0)'"],"expected":["[-0.2608431635405718, -0.6400224689534015, -0.15255723053856546, 0.1566537867655921, 0.5489211983894932, -0.17031712209680225] as double[]","[-0.26084316, -0.64002246, -0.15255724, 0.15665378, 0.54892117, -0.17031713] as float[]","[-1, -1, 1, -1, 1, -1] as int[]","[-0.0027019706408068795, -0.008329762613111082, -0.001543641184315801, 0.15861207834235577, 0.6567031992927272, -0.001728424711189524] as double[]","[-0.0027019705, -0.008329763, -0.0015436412, 0.15861207, 0.6567032, -0.0017284247] as float[]","[-7156386, -18495716, 248181051, -13634228, 919305478, -15169971] as int[]","[0.07300645343782339, 0.6938494519078316, 0.023828281059158886, 0.025157791396081604, 0.43125909196130346, 0.029874519822505895] as double[]","[0.07300645, 0.6938495, 0.023828283, 0.025157789, 0.43125907, 0.02987452] as float[]","[988699588, -17870520, 141304729, 1260971300, 210951204, 1018550276] as int[]","[0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as double[]","[0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as float[]","[0, 0, 0, 0, 0, 0] as int[]","[0.26749509343988104, 0.8246464986979971, 0.1528204772472643, 0.0, 0.0, 0.17111404640776287] as double[]","[0.2674951, 0.82464653, 0.15282048, 0.0, 0.0, 0.17111404] as float[]","[708482220, 1831075919, 0, 1349788550, 0, 1501827147] as int[]","[0.0, 0.0, 0.0, 0.15702595755893223, 0.6501361672998, 0.0] as double[]","[0.0, 0.0, 0.0, 0.15702595174312592, 0.650136142373085, 0.0] as float[]","[0, 0, 245699240, 0, 910112423, 0] as int[]","[-1.0, -1.0, -1.0, -0.01, -0.01, -1.0] as double[]","[-1.0, -1.0, -1.0, -0.01, -0.01, -1.0] as float[]","[-1, -1, 0, -1, 0, -1] as int[]","[-0.857889221601257, -2.540599021320214, -0.5115487141104245, 0.42425011424364373, 1.9135425658852545, -0.5667989886456676] as double[]","[-0.85788924, -2.540599, -0.51154876, 0.4242501, 1.9135424, -0.566799] as float[]","[-1431277217, 595824021, 742061342, 1568121735, -1546243917, 1260973055] as int[]","[2.7019706408068793, 2.3297626131110825, 1.5436411843158009, 1.5861207834235578, 0.5670319929272729, 1.7284247111895241] as double[]","[2.7019706, 2.3297625, 1.5436412, 1.5861207, 0.56703186, 1.7284248] as float[]","[2, 0, 1, 1, 2, 1] as int[]","[0.011693048642643422, 0.8192993419907051, 0.08721424132459399, 0.1277867634692304, 0.6121424058303924, 0.09210498892686086] as double[]","[0.011690378, 0.81929654, 0.087213635, 0.12778962, 0.6121441, 0.09210494] as float[]","[0, 0, 0, 0, 0, 0] as int[]","['r2':[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.242396559265774, 0.23980663860290638, 0.4667980401594514, 0.0, 0.0, -1.0840395336123059, 0.43090823203242123, 1.0381081218392283, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as double[], 
'r1':[2.242396559265774, 0.23980663860290638, 0.4667980401594514, -1.0840395336123059, 0.43090823203242123, 1.0381081218392283] as double[]]","['r2':[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.2423966, 0.23980664, 0.46679804, 0.0, 0.0, -1.0840396, 0.43090823, 1.0381081, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as float[], 'r1':[2.2423966, 0.23980664, 0.46679804, -1.0840396, 0.43090823, 1.0381081] as float[]]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation functions work across types on slices and non sliced tensors. [13]", + "result":"PASS", + "duration":"0.055 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a function based on the provided expression.","code":["var func = Function.of(funExpression)"]}, + + {"kind":"and","text":"We create 2 tensors storing the same values, one sliced and the other a normal tensor.","code":["var t1 = Tensor.of(type).withShape(2, 3).andSeed(\"Tempeh\")","var t2 = Tensor.of(type).withShape(4, 5).all(0)[1..2, 1..3]","t2.mut[0..1, 0..2] = t1"]}, + + {"kind":"expect","text":"The types of both tensors should match what was provided during instantiation.","code":["t1.dataType == DataType.of(type)","t1.itemType == type","t2.dataType == DataType.of(type)","t2.itemType == type"]}, + + {"kind":"when","text":"We apply the function to both tensors...","code":["var result1 = func(t1)","var result2 = func(t2)"]}, + + {"kind":"then","text":"","code":["result1.itemType == type","result2.itemType == type"]}, + + {"kind":"and","text":"The data of the first (non slice) tensor should be as expected.","code":["result1.mut.data.get() == expected instanceof Map ? expected['r1'] : expected"]}, + + {"kind":"and","text":"As well the value of the slice tensor (Its data would be a sparse array).","code":["result2.items == expected instanceof Map ? 
expected['r2'] : expected"]}, + + {"kind":"where","text":"","code":{"type":["Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float"],"funExpression":["'tanh(i0)'","'tanh(i0)'","'tanh(i0)'","'relu(i0)'","'relu(i0)'","'relu(i0)'","'relu(i0*i0)'","'relu(i0*i0)'","'relu(i0*i0)'","'relu(i0-i0)'","'relu(i0-i0)'","'relu(i0-i0)'","'relu(i0)-i0'","'relu(i0)-i0'","'relu(i0)-i0'","'relu(-i0)+i0'","'relu(-i0)+i0'","'relu(-i0)+i0'","'relu(-i0)/i0'","'relu(-i0)/i0'","'relu(-i0)/i0'","'relu(-i0-5)+i0*3'","'relu(-i0-5)+i0*3'","'relu(-i0-5)+i0*3'","'abs(i0*10)%3'","'abs(i0*10)%3'","'abs(i0*10)%3'","'gaus(i0)*100%i0'","'gaus(i0)*100%i0'","'gaus(i0)*100%i0'","'random(i0)'","'random(i0)'"],"expected":["[-0.2608431635405718, -0.6400224689534015, -0.15255723053856546, 0.1566537867655921, 0.5489211983894932, -0.17031712209680225] as double[]","[-0.26084316, -0.64002246, -0.15255724, 0.15665378, 0.54892117, -0.17031713] as float[]","[-1, -1, 1, -1, 1, -1] as int[]","[-0.0027019706408068795, -0.008329762613111082, -0.001543641184315801, 0.15861207834235577, 0.6567031992927272, -0.001728424711189524] as double[]","[-0.0027019705, -0.008329763, -0.0015436412, 0.15861207, 0.6567032, -0.0017284247] as float[]","[-7156386, -18495716, 248181051, -13634228, 919305478, -15169971] as int[]","[0.07300645343782339, 0.6938494519078316, 0.023828281059158886, 0.025157791396081604, 0.43125909196130346, 0.029874519822505895] as double[]","[0.07300645, 0.6938495, 0.023828283, 0.025157789, 0.43125907, 0.02987452] as float[]","[988699588, -17870520, 141304729, 1260971300, 210951204, 1018550276] as int[]","[0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as double[]","[0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as float[]","[0, 0, 0, 0, 0, 0] as int[]","[0.26749509343988104, 0.8246464986979971, 0.1528204772472643, 0.0, 0.0, 0.17111404640776287] as double[]","[0.2674951, 0.82464653, 0.15282048, 0.0, 0.0, 0.17111404] as float[]","[708482220, 1831075919, 0, 1349788550, 0, 1501827147] as int[]","[0.0, 0.0, 0.0, 0.15702595755893223, 0.6501361672998, 0.0] as double[]","[0.0, 0.0, 0.0, 0.15702595174312592, 0.650136142373085, 0.0] as float[]","[0, 0, 245699240, 0, 910112423, 0] as int[]","[-1.0, -1.0, -1.0, -0.01, -0.01, -1.0] as double[]","[-1.0, -1.0, -1.0, -0.01, -0.01, -1.0] as float[]","[-1, -1, 0, -1, 0, -1] as int[]","[-0.857889221601257, -2.540599021320214, -0.5115487141104245, 0.42425011424364373, 1.9135425658852545, -0.5667989886456676] as double[]","[-0.85788924, -2.540599, -0.51154876, 0.4242501, 1.9135424, -0.566799] as float[]","[-1431277217, 595824021, 742061342, 1568121735, -1546243917, 1260973055] as int[]","[2.7019706408068793, 2.3297626131110825, 1.5436411843158009, 1.5861207834235578, 0.5670319929272729, 1.7284247111895241] as double[]","[2.7019706, 2.3297625, 1.5436412, 1.5861207, 0.56703186, 1.7284248] as float[]","[2, 0, 1, 1, 2, 1] as int[]","[0.011693048642643422, 0.8192993419907051, 0.08721424132459399, 0.1277867634692304, 0.6121424058303924, 0.09210498892686086] as double[]","[0.011690378, 0.81929654, 0.087213635, 0.12778962, 0.6121441, 0.09210494] as float[]","[0, 0, 0, 0, 0, 0] as int[]","['r2':[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.242396559265774, 0.23980663860290638, 0.4667980401594514, 0.0, 0.0, -1.0840395336123059, 0.43090823203242123, 1.0381081218392283, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as double[], 
'r1':[2.242396559265774, 0.23980663860290638, 0.4667980401594514, -1.0840395336123059, 0.43090823203242123, 1.0381081218392283] as double[]]","['r2':[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.2423966, 0.23980664, 0.46679804, 0.0, 0.0, -1.0840396, 0.43090823, 1.0381081, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as float[], 'r1':[2.2423966, 0.23980664, 0.46679804, -1.0840396, 0.43090823, 1.0381081] as float[]]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation functions work across types on slices and non sliced tensors. [14]", + "result":"PASS", + "duration":"0.051 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a function based on the provided expression.","code":["var func = Function.of(funExpression)"]}, + + {"kind":"and","text":"We create 2 tensors storing the same values, one sliced and the other a normal tensor.","code":["var t1 = Tensor.of(type).withShape(2, 3).andSeed(\"Tempeh\")","var t2 = Tensor.of(type).withShape(4, 5).all(0)[1..2, 1..3]","t2.mut[0..1, 0..2] = t1"]}, + + {"kind":"expect","text":"The types of both tensors should match what was provided during instantiation.","code":["t1.dataType == DataType.of(type)","t1.itemType == type","t2.dataType == DataType.of(type)","t2.itemType == type"]}, + + {"kind":"when","text":"We apply the function to both tensors...","code":["var result1 = func(t1)","var result2 = func(t2)"]}, + + {"kind":"then","text":"","code":["result1.itemType == type","result2.itemType == type"]}, + + {"kind":"and","text":"The data of the first (non slice) tensor should be as expected.","code":["result1.mut.data.get() == expected instanceof Map ? expected['r1'] : expected"]}, + + {"kind":"and","text":"As well the value of the slice tensor (Its data would be a sparse array).","code":["result2.items == expected instanceof Map ? 
expected['r2'] : expected"]}, + + {"kind":"where","text":"","code":{"type":["Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float"],"funExpression":["'tanh(i0)'","'tanh(i0)'","'tanh(i0)'","'relu(i0)'","'relu(i0)'","'relu(i0)'","'relu(i0*i0)'","'relu(i0*i0)'","'relu(i0*i0)'","'relu(i0-i0)'","'relu(i0-i0)'","'relu(i0-i0)'","'relu(i0)-i0'","'relu(i0)-i0'","'relu(i0)-i0'","'relu(-i0)+i0'","'relu(-i0)+i0'","'relu(-i0)+i0'","'relu(-i0)/i0'","'relu(-i0)/i0'","'relu(-i0)/i0'","'relu(-i0-5)+i0*3'","'relu(-i0-5)+i0*3'","'relu(-i0-5)+i0*3'","'abs(i0*10)%3'","'abs(i0*10)%3'","'abs(i0*10)%3'","'gaus(i0)*100%i0'","'gaus(i0)*100%i0'","'gaus(i0)*100%i0'","'random(i0)'","'random(i0)'"],"expected":["[-0.2608431635405718, -0.6400224689534015, -0.15255723053856546, 0.1566537867655921, 0.5489211983894932, -0.17031712209680225] as double[]","[-0.26084316, -0.64002246, -0.15255724, 0.15665378, 0.54892117, -0.17031713] as float[]","[-1, -1, 1, -1, 1, -1] as int[]","[-0.0027019706408068795, -0.008329762613111082, -0.001543641184315801, 0.15861207834235577, 0.6567031992927272, -0.001728424711189524] as double[]","[-0.0027019705, -0.008329763, -0.0015436412, 0.15861207, 0.6567032, -0.0017284247] as float[]","[-7156386, -18495716, 248181051, -13634228, 919305478, -15169971] as int[]","[0.07300645343782339, 0.6938494519078316, 0.023828281059158886, 0.025157791396081604, 0.43125909196130346, 0.029874519822505895] as double[]","[0.07300645, 0.6938495, 0.023828283, 0.025157789, 0.43125907, 0.02987452] as float[]","[988699588, -17870520, 141304729, 1260971300, 210951204, 1018550276] as int[]","[0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as double[]","[0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as float[]","[0, 0, 0, 0, 0, 0] as int[]","[0.26749509343988104, 0.8246464986979971, 0.1528204772472643, 0.0, 0.0, 0.17111404640776287] as double[]","[0.2674951, 0.82464653, 0.15282048, 0.0, 0.0, 0.17111404] as float[]","[708482220, 1831075919, 0, 1349788550, 0, 1501827147] as int[]","[0.0, 0.0, 0.0, 0.15702595755893223, 0.6501361672998, 0.0] as double[]","[0.0, 0.0, 0.0, 0.15702595174312592, 0.650136142373085, 0.0] as float[]","[0, 0, 245699240, 0, 910112423, 0] as int[]","[-1.0, -1.0, -1.0, -0.01, -0.01, -1.0] as double[]","[-1.0, -1.0, -1.0, -0.01, -0.01, -1.0] as float[]","[-1, -1, 0, -1, 0, -1] as int[]","[-0.857889221601257, -2.540599021320214, -0.5115487141104245, 0.42425011424364373, 1.9135425658852545, -0.5667989886456676] as double[]","[-0.85788924, -2.540599, -0.51154876, 0.4242501, 1.9135424, -0.566799] as float[]","[-1431277217, 595824021, 742061342, 1568121735, -1546243917, 1260973055] as int[]","[2.7019706408068793, 2.3297626131110825, 1.5436411843158009, 1.5861207834235578, 0.5670319929272729, 1.7284247111895241] as double[]","[2.7019706, 2.3297625, 1.5436412, 1.5861207, 0.56703186, 1.7284248] as float[]","[2, 0, 1, 1, 2, 1] as int[]","[0.011693048642643422, 0.8192993419907051, 0.08721424132459399, 0.1277867634692304, 0.6121424058303924, 0.09210498892686086] as double[]","[0.011690378, 0.81929654, 0.087213635, 0.12778962, 0.6121441, 0.09210494] as float[]","[0, 0, 0, 0, 0, 0] as int[]","['r2':[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.242396559265774, 0.23980663860290638, 0.4667980401594514, 0.0, 0.0, -1.0840395336123059, 0.43090823203242123, 1.0381081218392283, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as double[], 
'r1':[2.242396559265774, 0.23980663860290638, 0.4667980401594514, -1.0840395336123059, 0.43090823203242123, 1.0381081218392283] as double[]]","['r2':[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.2423966, 0.23980664, 0.46679804, 0.0, 0.0, -1.0840396, 0.43090823, 1.0381081, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as float[], 'r1':[2.2423966, 0.23980664, 0.46679804, -1.0840396, 0.43090823, 1.0381081] as float[]]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation functions work across types on slices and non sliced tensors. [15]", + "result":"PASS", + "duration":"0.052 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a function based on the provided expression.","code":["var func = Function.of(funExpression)"]}, + + {"kind":"and","text":"We create 2 tensors storing the same values, one sliced and the other a normal tensor.","code":["var t1 = Tensor.of(type).withShape(2, 3).andSeed(\"Tempeh\")","var t2 = Tensor.of(type).withShape(4, 5).all(0)[1..2, 1..3]","t2.mut[0..1, 0..2] = t1"]}, + + {"kind":"expect","text":"The types of both tensors should match what was provided during instantiation.","code":["t1.dataType == DataType.of(type)","t1.itemType == type","t2.dataType == DataType.of(type)","t2.itemType == type"]}, + + {"kind":"when","text":"We apply the function to both tensors...","code":["var result1 = func(t1)","var result2 = func(t2)"]}, + + {"kind":"then","text":"","code":["result1.itemType == type","result2.itemType == type"]}, + + {"kind":"and","text":"The data of the first (non slice) tensor should be as expected.","code":["result1.mut.data.get() == expected instanceof Map ? expected['r1'] : expected"]}, + + {"kind":"and","text":"As well the value of the slice tensor (Its data would be a sparse array).","code":["result2.items == expected instanceof Map ? 
expected['r2'] : expected"]}, + + {"kind":"where","text":"","code":{"type":["Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float"],"funExpression":["'tanh(i0)'","'tanh(i0)'","'tanh(i0)'","'relu(i0)'","'relu(i0)'","'relu(i0)'","'relu(i0*i0)'","'relu(i0*i0)'","'relu(i0*i0)'","'relu(i0-i0)'","'relu(i0-i0)'","'relu(i0-i0)'","'relu(i0)-i0'","'relu(i0)-i0'","'relu(i0)-i0'","'relu(-i0)+i0'","'relu(-i0)+i0'","'relu(-i0)+i0'","'relu(-i0)/i0'","'relu(-i0)/i0'","'relu(-i0)/i0'","'relu(-i0-5)+i0*3'","'relu(-i0-5)+i0*3'","'relu(-i0-5)+i0*3'","'abs(i0*10)%3'","'abs(i0*10)%3'","'abs(i0*10)%3'","'gaus(i0)*100%i0'","'gaus(i0)*100%i0'","'gaus(i0)*100%i0'","'random(i0)'","'random(i0)'"],"expected":["[-0.2608431635405718, -0.6400224689534015, -0.15255723053856546, 0.1566537867655921, 0.5489211983894932, -0.17031712209680225] as double[]","[-0.26084316, -0.64002246, -0.15255724, 0.15665378, 0.54892117, -0.17031713] as float[]","[-1, -1, 1, -1, 1, -1] as int[]","[-0.0027019706408068795, -0.008329762613111082, -0.001543641184315801, 0.15861207834235577, 0.6567031992927272, -0.001728424711189524] as double[]","[-0.0027019705, -0.008329763, -0.0015436412, 0.15861207, 0.6567032, -0.0017284247] as float[]","[-7156386, -18495716, 248181051, -13634228, 919305478, -15169971] as int[]","[0.07300645343782339, 0.6938494519078316, 0.023828281059158886, 0.025157791396081604, 0.43125909196130346, 0.029874519822505895] as double[]","[0.07300645, 0.6938495, 0.023828283, 0.025157789, 0.43125907, 0.02987452] as float[]","[988699588, -17870520, 141304729, 1260971300, 210951204, 1018550276] as int[]","[0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as double[]","[0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as float[]","[0, 0, 0, 0, 0, 0] as int[]","[0.26749509343988104, 0.8246464986979971, 0.1528204772472643, 0.0, 0.0, 0.17111404640776287] as double[]","[0.2674951, 0.82464653, 0.15282048, 0.0, 0.0, 0.17111404] as float[]","[708482220, 1831075919, 0, 1349788550, 0, 1501827147] as int[]","[0.0, 0.0, 0.0, 0.15702595755893223, 0.6501361672998, 0.0] as double[]","[0.0, 0.0, 0.0, 0.15702595174312592, 0.650136142373085, 0.0] as float[]","[0, 0, 245699240, 0, 910112423, 0] as int[]","[-1.0, -1.0, -1.0, -0.01, -0.01, -1.0] as double[]","[-1.0, -1.0, -1.0, -0.01, -0.01, -1.0] as float[]","[-1, -1, 0, -1, 0, -1] as int[]","[-0.857889221601257, -2.540599021320214, -0.5115487141104245, 0.42425011424364373, 1.9135425658852545, -0.5667989886456676] as double[]","[-0.85788924, -2.540599, -0.51154876, 0.4242501, 1.9135424, -0.566799] as float[]","[-1431277217, 595824021, 742061342, 1568121735, -1546243917, 1260973055] as int[]","[2.7019706408068793, 2.3297626131110825, 1.5436411843158009, 1.5861207834235578, 0.5670319929272729, 1.7284247111895241] as double[]","[2.7019706, 2.3297625, 1.5436412, 1.5861207, 0.56703186, 1.7284248] as float[]","[2, 0, 1, 1, 2, 1] as int[]","[0.011693048642643422, 0.8192993419907051, 0.08721424132459399, 0.1277867634692304, 0.6121424058303924, 0.09210498892686086] as double[]","[0.011690378, 0.81929654, 0.087213635, 0.12778962, 0.6121441, 0.09210494] as float[]","[0, 0, 0, 0, 0, 0] as int[]","['r2':[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.242396559265774, 0.23980663860290638, 0.4667980401594514, 0.0, 0.0, -1.0840395336123059, 0.43090823203242123, 1.0381081218392283, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as double[], 
'r1':[2.242396559265774, 0.23980663860290638, 0.4667980401594514, -1.0840395336123059, 0.43090823203242123, 1.0381081218392283] as double[]]","['r2':[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.2423966, 0.23980664, 0.46679804, 0.0, 0.0, -1.0840396, 0.43090823, 1.0381081, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as float[], 'r1':[2.2423966, 0.23980664, 0.46679804, -1.0840396, 0.43090823, 1.0381081] as float[]]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation functions work across types on slices and non sliced tensors. [16]", + "result":"PASS", + "duration":"0.052 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a function based on the provided expression.","code":["var func = Function.of(funExpression)"]}, + + {"kind":"and","text":"We create 2 tensors storing the same values, one sliced and the other a normal tensor.","code":["var t1 = Tensor.of(type).withShape(2, 3).andSeed(\"Tempeh\")","var t2 = Tensor.of(type).withShape(4, 5).all(0)[1..2, 1..3]","t2.mut[0..1, 0..2] = t1"]}, + + {"kind":"expect","text":"The types of both tensors should match what was provided during instantiation.","code":["t1.dataType == DataType.of(type)","t1.itemType == type","t2.dataType == DataType.of(type)","t2.itemType == type"]}, + + {"kind":"when","text":"We apply the function to both tensors...","code":["var result1 = func(t1)","var result2 = func(t2)"]}, + + {"kind":"then","text":"","code":["result1.itemType == type","result2.itemType == type"]}, + + {"kind":"and","text":"The data of the first (non slice) tensor should be as expected.","code":["result1.mut.data.get() == expected instanceof Map ? expected['r1'] : expected"]}, + + {"kind":"and","text":"As well the value of the slice tensor (Its data would be a sparse array).","code":["result2.items == expected instanceof Map ? 
expected['r2'] : expected"]}, + + {"kind":"where","text":"","code":{"type":["Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float"],"funExpression":["'tanh(i0)'","'tanh(i0)'","'tanh(i0)'","'relu(i0)'","'relu(i0)'","'relu(i0)'","'relu(i0*i0)'","'relu(i0*i0)'","'relu(i0*i0)'","'relu(i0-i0)'","'relu(i0-i0)'","'relu(i0-i0)'","'relu(i0)-i0'","'relu(i0)-i0'","'relu(i0)-i0'","'relu(-i0)+i0'","'relu(-i0)+i0'","'relu(-i0)+i0'","'relu(-i0)/i0'","'relu(-i0)/i0'","'relu(-i0)/i0'","'relu(-i0-5)+i0*3'","'relu(-i0-5)+i0*3'","'relu(-i0-5)+i0*3'","'abs(i0*10)%3'","'abs(i0*10)%3'","'abs(i0*10)%3'","'gaus(i0)*100%i0'","'gaus(i0)*100%i0'","'gaus(i0)*100%i0'","'random(i0)'","'random(i0)'"],"expected":["[-0.2608431635405718, -0.6400224689534015, -0.15255723053856546, 0.1566537867655921, 0.5489211983894932, -0.17031712209680225] as double[]","[-0.26084316, -0.64002246, -0.15255724, 0.15665378, 0.54892117, -0.17031713] as float[]","[-1, -1, 1, -1, 1, -1] as int[]","[-0.0027019706408068795, -0.008329762613111082, -0.001543641184315801, 0.15861207834235577, 0.6567031992927272, -0.001728424711189524] as double[]","[-0.0027019705, -0.008329763, -0.0015436412, 0.15861207, 0.6567032, -0.0017284247] as float[]","[-7156386, -18495716, 248181051, -13634228, 919305478, -15169971] as int[]","[0.07300645343782339, 0.6938494519078316, 0.023828281059158886, 0.025157791396081604, 0.43125909196130346, 0.029874519822505895] as double[]","[0.07300645, 0.6938495, 0.023828283, 0.025157789, 0.43125907, 0.02987452] as float[]","[988699588, -17870520, 141304729, 1260971300, 210951204, 1018550276] as int[]","[0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as double[]","[0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as float[]","[0, 0, 0, 0, 0, 0] as int[]","[0.26749509343988104, 0.8246464986979971, 0.1528204772472643, 0.0, 0.0, 0.17111404640776287] as double[]","[0.2674951, 0.82464653, 0.15282048, 0.0, 0.0, 0.17111404] as float[]","[708482220, 1831075919, 0, 1349788550, 0, 1501827147] as int[]","[0.0, 0.0, 0.0, 0.15702595755893223, 0.6501361672998, 0.0] as double[]","[0.0, 0.0, 0.0, 0.15702595174312592, 0.650136142373085, 0.0] as float[]","[0, 0, 245699240, 0, 910112423, 0] as int[]","[-1.0, -1.0, -1.0, -0.01, -0.01, -1.0] as double[]","[-1.0, -1.0, -1.0, -0.01, -0.01, -1.0] as float[]","[-1, -1, 0, -1, 0, -1] as int[]","[-0.857889221601257, -2.540599021320214, -0.5115487141104245, 0.42425011424364373, 1.9135425658852545, -0.5667989886456676] as double[]","[-0.85788924, -2.540599, -0.51154876, 0.4242501, 1.9135424, -0.566799] as float[]","[-1431277217, 595824021, 742061342, 1568121735, -1546243917, 1260973055] as int[]","[2.7019706408068793, 2.3297626131110825, 1.5436411843158009, 1.5861207834235578, 0.5670319929272729, 1.7284247111895241] as double[]","[2.7019706, 2.3297625, 1.5436412, 1.5861207, 0.56703186, 1.7284248] as float[]","[2, 0, 1, 1, 2, 1] as int[]","[0.011693048642643422, 0.8192993419907051, 0.08721424132459399, 0.1277867634692304, 0.6121424058303924, 0.09210498892686086] as double[]","[0.011690378, 0.81929654, 0.087213635, 0.12778962, 0.6121441, 0.09210494] as float[]","[0, 0, 0, 0, 0, 0] as int[]","['r2':[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.242396559265774, 0.23980663860290638, 0.4667980401594514, 0.0, 0.0, -1.0840395336123059, 0.43090823203242123, 1.0381081218392283, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as double[], 
'r1':[2.242396559265774, 0.23980663860290638, 0.4667980401594514, -1.0840395336123059, 0.43090823203242123, 1.0381081218392283] as double[]]","['r2':[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.2423966, 0.23980664, 0.46679804, 0.0, 0.0, -1.0840396, 0.43090823, 1.0381081, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as float[], 'r1':[2.2423966, 0.23980664, 0.46679804, -1.0840396, 0.43090823, 1.0381081] as float[]]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation functions work across types on slices and non sliced tensors. [17]", + "result":"PASS", + "duration":"0.054 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a function based on the provided expression.","code":["var func = Function.of(funExpression)"]}, + + {"kind":"and","text":"We create 2 tensors storing the same values, one sliced and the other a normal tensor.","code":["var t1 = Tensor.of(type).withShape(2, 3).andSeed(\"Tempeh\")","var t2 = Tensor.of(type).withShape(4, 5).all(0)[1..2, 1..3]","t2.mut[0..1, 0..2] = t1"]}, + + {"kind":"expect","text":"The types of both tensors should match what was provided during instantiation.","code":["t1.dataType == DataType.of(type)","t1.itemType == type","t2.dataType == DataType.of(type)","t2.itemType == type"]}, + + {"kind":"when","text":"We apply the function to both tensors...","code":["var result1 = func(t1)","var result2 = func(t2)"]}, + + {"kind":"then","text":"","code":["result1.itemType == type","result2.itemType == type"]}, + + {"kind":"and","text":"The data of the first (non slice) tensor should be as expected.","code":["result1.mut.data.get() == expected instanceof Map ? expected['r1'] : expected"]}, + + {"kind":"and","text":"As well the value of the slice tensor (Its data would be a sparse array).","code":["result2.items == expected instanceof Map ? 
expected['r2'] : expected"]}, + + {"kind":"where","text":"","code":{"type":["Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float"],"funExpression":["'tanh(i0)'","'tanh(i0)'","'tanh(i0)'","'relu(i0)'","'relu(i0)'","'relu(i0)'","'relu(i0*i0)'","'relu(i0*i0)'","'relu(i0*i0)'","'relu(i0-i0)'","'relu(i0-i0)'","'relu(i0-i0)'","'relu(i0)-i0'","'relu(i0)-i0'","'relu(i0)-i0'","'relu(-i0)+i0'","'relu(-i0)+i0'","'relu(-i0)+i0'","'relu(-i0)/i0'","'relu(-i0)/i0'","'relu(-i0)/i0'","'relu(-i0-5)+i0*3'","'relu(-i0-5)+i0*3'","'relu(-i0-5)+i0*3'","'abs(i0*10)%3'","'abs(i0*10)%3'","'abs(i0*10)%3'","'gaus(i0)*100%i0'","'gaus(i0)*100%i0'","'gaus(i0)*100%i0'","'random(i0)'","'random(i0)'"],"expected":["[-0.2608431635405718, -0.6400224689534015, -0.15255723053856546, 0.1566537867655921, 0.5489211983894932, -0.17031712209680225] as double[]","[-0.26084316, -0.64002246, -0.15255724, 0.15665378, 0.54892117, -0.17031713] as float[]","[-1, -1, 1, -1, 1, -1] as int[]","[-0.0027019706408068795, -0.008329762613111082, -0.001543641184315801, 0.15861207834235577, 0.6567031992927272, -0.001728424711189524] as double[]","[-0.0027019705, -0.008329763, -0.0015436412, 0.15861207, 0.6567032, -0.0017284247] as float[]","[-7156386, -18495716, 248181051, -13634228, 919305478, -15169971] as int[]","[0.07300645343782339, 0.6938494519078316, 0.023828281059158886, 0.025157791396081604, 0.43125909196130346, 0.029874519822505895] as double[]","[0.07300645, 0.6938495, 0.023828283, 0.025157789, 0.43125907, 0.02987452] as float[]","[988699588, -17870520, 141304729, 1260971300, 210951204, 1018550276] as int[]","[0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as double[]","[0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as float[]","[0, 0, 0, 0, 0, 0] as int[]","[0.26749509343988104, 0.8246464986979971, 0.1528204772472643, 0.0, 0.0, 0.17111404640776287] as double[]","[0.2674951, 0.82464653, 0.15282048, 0.0, 0.0, 0.17111404] as float[]","[708482220, 1831075919, 0, 1349788550, 0, 1501827147] as int[]","[0.0, 0.0, 0.0, 0.15702595755893223, 0.6501361672998, 0.0] as double[]","[0.0, 0.0, 0.0, 0.15702595174312592, 0.650136142373085, 0.0] as float[]","[0, 0, 245699240, 0, 910112423, 0] as int[]","[-1.0, -1.0, -1.0, -0.01, -0.01, -1.0] as double[]","[-1.0, -1.0, -1.0, -0.01, -0.01, -1.0] as float[]","[-1, -1, 0, -1, 0, -1] as int[]","[-0.857889221601257, -2.540599021320214, -0.5115487141104245, 0.42425011424364373, 1.9135425658852545, -0.5667989886456676] as double[]","[-0.85788924, -2.540599, -0.51154876, 0.4242501, 1.9135424, -0.566799] as float[]","[-1431277217, 595824021, 742061342, 1568121735, -1546243917, 1260973055] as int[]","[2.7019706408068793, 2.3297626131110825, 1.5436411843158009, 1.5861207834235578, 0.5670319929272729, 1.7284247111895241] as double[]","[2.7019706, 2.3297625, 1.5436412, 1.5861207, 0.56703186, 1.7284248] as float[]","[2, 0, 1, 1, 2, 1] as int[]","[0.011693048642643422, 0.8192993419907051, 0.08721424132459399, 0.1277867634692304, 0.6121424058303924, 0.09210498892686086] as double[]","[0.011690378, 0.81929654, 0.087213635, 0.12778962, 0.6121441, 0.09210494] as float[]","[0, 0, 0, 0, 0, 0] as int[]","['r2':[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.242396559265774, 0.23980663860290638, 0.4667980401594514, 0.0, 0.0, -1.0840395336123059, 0.43090823203242123, 1.0381081218392283, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as double[], 
'r1':[2.242396559265774, 0.23980663860290638, 0.4667980401594514, -1.0840395336123059, 0.43090823203242123, 1.0381081218392283] as double[]]","['r2':[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.2423966, 0.23980664, 0.46679804, 0.0, 0.0, -1.0840396, 0.43090823, 1.0381081, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as float[], 'r1':[2.2423966, 0.23980664, 0.46679804, -1.0840396, 0.43090823, 1.0381081] as float[]]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation functions work across types on slices and non sliced tensors. [18]", + "result":"PASS", + "duration":"0.053 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a function based on the provided expression.","code":["var func = Function.of(funExpression)"]}, + + {"kind":"and","text":"We create 2 tensors storing the same values, one sliced and the other a normal tensor.","code":["var t1 = Tensor.of(type).withShape(2, 3).andSeed(\"Tempeh\")","var t2 = Tensor.of(type).withShape(4, 5).all(0)[1..2, 1..3]","t2.mut[0..1, 0..2] = t1"]}, + + {"kind":"expect","text":"The types of both tensors should match what was provided during instantiation.","code":["t1.dataType == DataType.of(type)","t1.itemType == type","t2.dataType == DataType.of(type)","t2.itemType == type"]}, + + {"kind":"when","text":"We apply the function to both tensors...","code":["var result1 = func(t1)","var result2 = func(t2)"]}, + + {"kind":"then","text":"","code":["result1.itemType == type","result2.itemType == type"]}, + + {"kind":"and","text":"The data of the first (non slice) tensor should be as expected.","code":["result1.mut.data.get() == expected instanceof Map ? expected['r1'] : expected"]}, + + {"kind":"and","text":"As well the value of the slice tensor (Its data would be a sparse array).","code":["result2.items == expected instanceof Map ? 
expected['r2'] : expected"]}, + + {"kind":"where","text":"","code":{"type":["Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float"],"funExpression":["'tanh(i0)'","'tanh(i0)'","'tanh(i0)'","'relu(i0)'","'relu(i0)'","'relu(i0)'","'relu(i0*i0)'","'relu(i0*i0)'","'relu(i0*i0)'","'relu(i0-i0)'","'relu(i0-i0)'","'relu(i0-i0)'","'relu(i0)-i0'","'relu(i0)-i0'","'relu(i0)-i0'","'relu(-i0)+i0'","'relu(-i0)+i0'","'relu(-i0)+i0'","'relu(-i0)/i0'","'relu(-i0)/i0'","'relu(-i0)/i0'","'relu(-i0-5)+i0*3'","'relu(-i0-5)+i0*3'","'relu(-i0-5)+i0*3'","'abs(i0*10)%3'","'abs(i0*10)%3'","'abs(i0*10)%3'","'gaus(i0)*100%i0'","'gaus(i0)*100%i0'","'gaus(i0)*100%i0'","'random(i0)'","'random(i0)'"],"expected":["[-0.2608431635405718, -0.6400224689534015, -0.15255723053856546, 0.1566537867655921, 0.5489211983894932, -0.17031712209680225] as double[]","[-0.26084316, -0.64002246, -0.15255724, 0.15665378, 0.54892117, -0.17031713] as float[]","[-1, -1, 1, -1, 1, -1] as int[]","[-0.0027019706408068795, -0.008329762613111082, -0.001543641184315801, 0.15861207834235577, 0.6567031992927272, -0.001728424711189524] as double[]","[-0.0027019705, -0.008329763, -0.0015436412, 0.15861207, 0.6567032, -0.0017284247] as float[]","[-7156386, -18495716, 248181051, -13634228, 919305478, -15169971] as int[]","[0.07300645343782339, 0.6938494519078316, 0.023828281059158886, 0.025157791396081604, 0.43125909196130346, 0.029874519822505895] as double[]","[0.07300645, 0.6938495, 0.023828283, 0.025157789, 0.43125907, 0.02987452] as float[]","[988699588, -17870520, 141304729, 1260971300, 210951204, 1018550276] as int[]","[0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as double[]","[0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as float[]","[0, 0, 0, 0, 0, 0] as int[]","[0.26749509343988104, 0.8246464986979971, 0.1528204772472643, 0.0, 0.0, 0.17111404640776287] as double[]","[0.2674951, 0.82464653, 0.15282048, 0.0, 0.0, 0.17111404] as float[]","[708482220, 1831075919, 0, 1349788550, 0, 1501827147] as int[]","[0.0, 0.0, 0.0, 0.15702595755893223, 0.6501361672998, 0.0] as double[]","[0.0, 0.0, 0.0, 0.15702595174312592, 0.650136142373085, 0.0] as float[]","[0, 0, 245699240, 0, 910112423, 0] as int[]","[-1.0, -1.0, -1.0, -0.01, -0.01, -1.0] as double[]","[-1.0, -1.0, -1.0, -0.01, -0.01, -1.0] as float[]","[-1, -1, 0, -1, 0, -1] as int[]","[-0.857889221601257, -2.540599021320214, -0.5115487141104245, 0.42425011424364373, 1.9135425658852545, -0.5667989886456676] as double[]","[-0.85788924, -2.540599, -0.51154876, 0.4242501, 1.9135424, -0.566799] as float[]","[-1431277217, 595824021, 742061342, 1568121735, -1546243917, 1260973055] as int[]","[2.7019706408068793, 2.3297626131110825, 1.5436411843158009, 1.5861207834235578, 0.5670319929272729, 1.7284247111895241] as double[]","[2.7019706, 2.3297625, 1.5436412, 1.5861207, 0.56703186, 1.7284248] as float[]","[2, 0, 1, 1, 2, 1] as int[]","[0.011693048642643422, 0.8192993419907051, 0.08721424132459399, 0.1277867634692304, 0.6121424058303924, 0.09210498892686086] as double[]","[0.011690378, 0.81929654, 0.087213635, 0.12778962, 0.6121441, 0.09210494] as float[]","[0, 0, 0, 0, 0, 0] as int[]","['r2':[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.242396559265774, 0.23980663860290638, 0.4667980401594514, 0.0, 0.0, -1.0840395336123059, 0.43090823203242123, 1.0381081218392283, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as double[], 
'r1':[2.242396559265774, 0.23980663860290638, 0.4667980401594514, -1.0840395336123059, 0.43090823203242123, 1.0381081218392283] as double[]]","['r2':[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.2423966, 0.23980664, 0.46679804, 0.0, 0.0, -1.0840396, 0.43090823, 1.0381081, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as float[], 'r1':[2.2423966, 0.23980664, 0.46679804, -1.0840396, 0.43090823, 1.0381081] as float[]]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation functions work across types on slices and non sliced tensors. [19]", + "result":"PASS", + "duration":"0.053 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a function based on the provided expression.","code":["var func = Function.of(funExpression)"]}, + + {"kind":"and","text":"We create 2 tensors storing the same values, one sliced and the other a normal tensor.","code":["var t1 = Tensor.of(type).withShape(2, 3).andSeed(\"Tempeh\")","var t2 = Tensor.of(type).withShape(4, 5).all(0)[1..2, 1..3]","t2.mut[0..1, 0..2] = t1"]}, + + {"kind":"expect","text":"The types of both tensors should match what was provided during instantiation.","code":["t1.dataType == DataType.of(type)","t1.itemType == type","t2.dataType == DataType.of(type)","t2.itemType == type"]}, + + {"kind":"when","text":"We apply the function to both tensors...","code":["var result1 = func(t1)","var result2 = func(t2)"]}, + + {"kind":"then","text":"","code":["result1.itemType == type","result2.itemType == type"]}, + + {"kind":"and","text":"The data of the first (non slice) tensor should be as expected.","code":["result1.mut.data.get() == expected instanceof Map ? expected['r1'] : expected"]}, + + {"kind":"and","text":"As well the value of the slice tensor (Its data would be a sparse array).","code":["result2.items == expected instanceof Map ? 
expected['r2'] : expected"]}, + + {"kind":"where","text":"","code":{"type":["Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float"],"funExpression":["'tanh(i0)'","'tanh(i0)'","'tanh(i0)'","'relu(i0)'","'relu(i0)'","'relu(i0)'","'relu(i0*i0)'","'relu(i0*i0)'","'relu(i0*i0)'","'relu(i0-i0)'","'relu(i0-i0)'","'relu(i0-i0)'","'relu(i0)-i0'","'relu(i0)-i0'","'relu(i0)-i0'","'relu(-i0)+i0'","'relu(-i0)+i0'","'relu(-i0)+i0'","'relu(-i0)/i0'","'relu(-i0)/i0'","'relu(-i0)/i0'","'relu(-i0-5)+i0*3'","'relu(-i0-5)+i0*3'","'relu(-i0-5)+i0*3'","'abs(i0*10)%3'","'abs(i0*10)%3'","'abs(i0*10)%3'","'gaus(i0)*100%i0'","'gaus(i0)*100%i0'","'gaus(i0)*100%i0'","'random(i0)'","'random(i0)'"],"expected":["[-0.2608431635405718, -0.6400224689534015, -0.15255723053856546, 0.1566537867655921, 0.5489211983894932, -0.17031712209680225] as double[]","[-0.26084316, -0.64002246, -0.15255724, 0.15665378, 0.54892117, -0.17031713] as float[]","[-1, -1, 1, -1, 1, -1] as int[]","[-0.0027019706408068795, -0.008329762613111082, -0.001543641184315801, 0.15861207834235577, 0.6567031992927272, -0.001728424711189524] as double[]","[-0.0027019705, -0.008329763, -0.0015436412, 0.15861207, 0.6567032, -0.0017284247] as float[]","[-7156386, -18495716, 248181051, -13634228, 919305478, -15169971] as int[]","[0.07300645343782339, 0.6938494519078316, 0.023828281059158886, 0.025157791396081604, 0.43125909196130346, 0.029874519822505895] as double[]","[0.07300645, 0.6938495, 0.023828283, 0.025157789, 0.43125907, 0.02987452] as float[]","[988699588, -17870520, 141304729, 1260971300, 210951204, 1018550276] as int[]","[0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as double[]","[0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as float[]","[0, 0, 0, 0, 0, 0] as int[]","[0.26749509343988104, 0.8246464986979971, 0.1528204772472643, 0.0, 0.0, 0.17111404640776287] as double[]","[0.2674951, 0.82464653, 0.15282048, 0.0, 0.0, 0.17111404] as float[]","[708482220, 1831075919, 0, 1349788550, 0, 1501827147] as int[]","[0.0, 0.0, 0.0, 0.15702595755893223, 0.6501361672998, 0.0] as double[]","[0.0, 0.0, 0.0, 0.15702595174312592, 0.650136142373085, 0.0] as float[]","[0, 0, 245699240, 0, 910112423, 0] as int[]","[-1.0, -1.0, -1.0, -0.01, -0.01, -1.0] as double[]","[-1.0, -1.0, -1.0, -0.01, -0.01, -1.0] as float[]","[-1, -1, 0, -1, 0, -1] as int[]","[-0.857889221601257, -2.540599021320214, -0.5115487141104245, 0.42425011424364373, 1.9135425658852545, -0.5667989886456676] as double[]","[-0.85788924, -2.540599, -0.51154876, 0.4242501, 1.9135424, -0.566799] as float[]","[-1431277217, 595824021, 742061342, 1568121735, -1546243917, 1260973055] as int[]","[2.7019706408068793, 2.3297626131110825, 1.5436411843158009, 1.5861207834235578, 0.5670319929272729, 1.7284247111895241] as double[]","[2.7019706, 2.3297625, 1.5436412, 1.5861207, 0.56703186, 1.7284248] as float[]","[2, 0, 1, 1, 2, 1] as int[]","[0.011693048642643422, 0.8192993419907051, 0.08721424132459399, 0.1277867634692304, 0.6121424058303924, 0.09210498892686086] as double[]","[0.011690378, 0.81929654, 0.087213635, 0.12778962, 0.6121441, 0.09210494] as float[]","[0, 0, 0, 0, 0, 0] as int[]","['r2':[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.242396559265774, 0.23980663860290638, 0.4667980401594514, 0.0, 0.0, -1.0840395336123059, 0.43090823203242123, 1.0381081218392283, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as double[], 
'r1':[2.242396559265774, 0.23980663860290638, 0.4667980401594514, -1.0840395336123059, 0.43090823203242123, 1.0381081218392283] as double[]]","['r2':[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.2423966, 0.23980664, 0.46679804, 0.0, 0.0, -1.0840396, 0.43090823, 1.0381081, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as float[], 'r1':[2.2423966, 0.23980664, 0.46679804, -1.0840396, 0.43090823, 1.0381081] as float[]]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation functions work across types on slices and non sliced tensors. [20]", + "result":"PASS", + "duration":"0.057 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a function based on the provided expression.","code":["var func = Function.of(funExpression)"]}, + + {"kind":"and","text":"We create 2 tensors storing the same values, one sliced and the other a normal tensor.","code":["var t1 = Tensor.of(type).withShape(2, 3).andSeed(\"Tempeh\")","var t2 = Tensor.of(type).withShape(4, 5).all(0)[1..2, 1..3]","t2.mut[0..1, 0..2] = t1"]}, + + {"kind":"expect","text":"The types of both tensors should match what was provided during instantiation.","code":["t1.dataType == DataType.of(type)","t1.itemType == type","t2.dataType == DataType.of(type)","t2.itemType == type"]}, + + {"kind":"when","text":"We apply the function to both tensors...","code":["var result1 = func(t1)","var result2 = func(t2)"]}, + + {"kind":"then","text":"","code":["result1.itemType == type","result2.itemType == type"]}, + + {"kind":"and","text":"The data of the first (non slice) tensor should be as expected.","code":["result1.mut.data.get() == expected instanceof Map ? expected['r1'] : expected"]}, + + {"kind":"and","text":"As well the value of the slice tensor (Its data would be a sparse array).","code":["result2.items == expected instanceof Map ? 
expected['r2'] : expected"]}, - {"kind":"then","text":"","code":["thrown(IllegalArgumentException)"]} + {"kind":"where","text":"","code":{"type":["Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float"],"funExpression":["'tanh(i0)'","'tanh(i0)'","'tanh(i0)'","'relu(i0)'","'relu(i0)'","'relu(i0)'","'relu(i0*i0)'","'relu(i0*i0)'","'relu(i0*i0)'","'relu(i0-i0)'","'relu(i0-i0)'","'relu(i0-i0)'","'relu(i0)-i0'","'relu(i0)-i0'","'relu(i0)-i0'","'relu(-i0)+i0'","'relu(-i0)+i0'","'relu(-i0)+i0'","'relu(-i0)/i0'","'relu(-i0)/i0'","'relu(-i0)/i0'","'relu(-i0-5)+i0*3'","'relu(-i0-5)+i0*3'","'relu(-i0-5)+i0*3'","'abs(i0*10)%3'","'abs(i0*10)%3'","'abs(i0*10)%3'","'gaus(i0)*100%i0'","'gaus(i0)*100%i0'","'gaus(i0)*100%i0'","'random(i0)'","'random(i0)'"],"expected":["[-0.2608431635405718, -0.6400224689534015, -0.15255723053856546, 0.1566537867655921, 0.5489211983894932, -0.17031712209680225] as double[]","[-0.26084316, -0.64002246, -0.15255724, 0.15665378, 0.54892117, -0.17031713] as float[]","[-1, -1, 1, -1, 1, -1] as int[]","[-0.0027019706408068795, -0.008329762613111082, -0.001543641184315801, 0.15861207834235577, 0.6567031992927272, -0.001728424711189524] as double[]","[-0.0027019705, -0.008329763, -0.0015436412, 0.15861207, 0.6567032, -0.0017284247] as float[]","[-7156386, -18495716, 248181051, -13634228, 919305478, -15169971] as int[]","[0.07300645343782339, 0.6938494519078316, 0.023828281059158886, 0.025157791396081604, 0.43125909196130346, 0.029874519822505895] as double[]","[0.07300645, 0.6938495, 0.023828283, 0.025157789, 0.43125907, 0.02987452] as float[]","[988699588, -17870520, 141304729, 1260971300, 210951204, 1018550276] as int[]","[0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as double[]","[0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as float[]","[0, 0, 0, 0, 0, 0] as int[]","[0.26749509343988104, 0.8246464986979971, 0.1528204772472643, 0.0, 0.0, 0.17111404640776287] as double[]","[0.2674951, 0.82464653, 0.15282048, 0.0, 0.0, 0.17111404] as float[]","[708482220, 1831075919, 0, 1349788550, 0, 1501827147] as int[]","[0.0, 0.0, 0.0, 0.15702595755893223, 0.6501361672998, 0.0] as double[]","[0.0, 0.0, 0.0, 0.15702595174312592, 0.650136142373085, 0.0] as float[]","[0, 0, 245699240, 0, 910112423, 0] as int[]","[-1.0, -1.0, -1.0, -0.01, -0.01, -1.0] as double[]","[-1.0, -1.0, -1.0, -0.01, -0.01, -1.0] as float[]","[-1, -1, 0, -1, 0, -1] as int[]","[-0.857889221601257, -2.540599021320214, -0.5115487141104245, 0.42425011424364373, 1.9135425658852545, -0.5667989886456676] as double[]","[-0.85788924, -2.540599, -0.51154876, 0.4242501, 1.9135424, -0.566799] as float[]","[-1431277217, 595824021, 742061342, 1568121735, -1546243917, 1260973055] as int[]","[2.7019706408068793, 2.3297626131110825, 1.5436411843158009, 1.5861207834235578, 0.5670319929272729, 1.7284247111895241] as double[]","[2.7019706, 2.3297625, 1.5436412, 1.5861207, 0.56703186, 1.7284248] as float[]","[2, 0, 1, 1, 2, 1] as int[]","[0.011693048642643422, 0.8192993419907051, 0.08721424132459399, 0.1277867634692304, 0.6121424058303924, 0.09210498892686086] as double[]","[0.011690378, 0.81929654, 0.087213635, 0.12778962, 0.6121441, 0.09210494] as float[]","[0, 0, 0, 0, 0, 0] as int[]","['r2':[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.242396559265774, 0.23980663860290638, 0.4667980401594514, 0.0, 0.0, -1.0840395336123059, 0.43090823203242123, 
1.0381081218392283, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as double[], 'r1':[2.242396559265774, 0.23980663860290638, 0.4667980401594514, -1.0840395336123059, 0.43090823203242123, 1.0381081218392283] as double[]]","['r2':[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.2423966, 0.23980664, 0.46679804, 0.0, 0.0, -1.0840396, 0.43090823, 1.0381081, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as float[], 'r1':[2.2423966, 0.23980664, 0.46679804, -1.0840396, 0.43090823, 1.0381081] as float[]]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation functions work across types on slices and non sliced tensors. [21]", + "result":"PASS", + "duration":"0.053 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a function based on the provided expression.","code":["var func = Function.of(funExpression)"]}, + + {"kind":"and","text":"We create 2 tensors storing the same values, one sliced and the other a normal tensor.","code":["var t1 = Tensor.of(type).withShape(2, 3).andSeed(\"Tempeh\")","var t2 = Tensor.of(type).withShape(4, 5).all(0)[1..2, 1..3]","t2.mut[0..1, 0..2] = t1"]}, + + {"kind":"expect","text":"The types of both tensors should match what was provided during instantiation.","code":["t1.dataType == DataType.of(type)","t1.itemType == type","t2.dataType == DataType.of(type)","t2.itemType == type"]}, + + {"kind":"when","text":"We apply the function to both tensors...","code":["var result1 = func(t1)","var result2 = func(t2)"]}, + + {"kind":"then","text":"","code":["result1.itemType == type","result2.itemType == type"]}, + + {"kind":"and","text":"The data of the first (non slice) tensor should be as expected.","code":["result1.mut.data.get() == expected instanceof Map ? expected['r1'] : expected"]}, + + {"kind":"and","text":"As well the value of the slice tensor (Its data would be a sparse array).","code":["result2.items == expected instanceof Map ? 
expected['r2'] : expected"]}, + + {"kind":"where","text":"","code":{"type":["Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float"],"funExpression":["'tanh(i0)'","'tanh(i0)'","'tanh(i0)'","'relu(i0)'","'relu(i0)'","'relu(i0)'","'relu(i0*i0)'","'relu(i0*i0)'","'relu(i0*i0)'","'relu(i0-i0)'","'relu(i0-i0)'","'relu(i0-i0)'","'relu(i0)-i0'","'relu(i0)-i0'","'relu(i0)-i0'","'relu(-i0)+i0'","'relu(-i0)+i0'","'relu(-i0)+i0'","'relu(-i0)/i0'","'relu(-i0)/i0'","'relu(-i0)/i0'","'relu(-i0-5)+i0*3'","'relu(-i0-5)+i0*3'","'relu(-i0-5)+i0*3'","'abs(i0*10)%3'","'abs(i0*10)%3'","'abs(i0*10)%3'","'gaus(i0)*100%i0'","'gaus(i0)*100%i0'","'gaus(i0)*100%i0'","'random(i0)'","'random(i0)'"],"expected":["[-0.2608431635405718, -0.6400224689534015, -0.15255723053856546, 0.1566537867655921, 0.5489211983894932, -0.17031712209680225] as double[]","[-0.26084316, -0.64002246, -0.15255724, 0.15665378, 0.54892117, -0.17031713] as float[]","[-1, -1, 1, -1, 1, -1] as int[]","[-0.0027019706408068795, -0.008329762613111082, -0.001543641184315801, 0.15861207834235577, 0.6567031992927272, -0.001728424711189524] as double[]","[-0.0027019705, -0.008329763, -0.0015436412, 0.15861207, 0.6567032, -0.0017284247] as float[]","[-7156386, -18495716, 248181051, -13634228, 919305478, -15169971] as int[]","[0.07300645343782339, 0.6938494519078316, 0.023828281059158886, 0.025157791396081604, 0.43125909196130346, 0.029874519822505895] as double[]","[0.07300645, 0.6938495, 0.023828283, 0.025157789, 0.43125907, 0.02987452] as float[]","[988699588, -17870520, 141304729, 1260971300, 210951204, 1018550276] as int[]","[0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as double[]","[0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as float[]","[0, 0, 0, 0, 0, 0] as int[]","[0.26749509343988104, 0.8246464986979971, 0.1528204772472643, 0.0, 0.0, 0.17111404640776287] as double[]","[0.2674951, 0.82464653, 0.15282048, 0.0, 0.0, 0.17111404] as float[]","[708482220, 1831075919, 0, 1349788550, 0, 1501827147] as int[]","[0.0, 0.0, 0.0, 0.15702595755893223, 0.6501361672998, 0.0] as double[]","[0.0, 0.0, 0.0, 0.15702595174312592, 0.650136142373085, 0.0] as float[]","[0, 0, 245699240, 0, 910112423, 0] as int[]","[-1.0, -1.0, -1.0, -0.01, -0.01, -1.0] as double[]","[-1.0, -1.0, -1.0, -0.01, -0.01, -1.0] as float[]","[-1, -1, 0, -1, 0, -1] as int[]","[-0.857889221601257, -2.540599021320214, -0.5115487141104245, 0.42425011424364373, 1.9135425658852545, -0.5667989886456676] as double[]","[-0.85788924, -2.540599, -0.51154876, 0.4242501, 1.9135424, -0.566799] as float[]","[-1431277217, 595824021, 742061342, 1568121735, -1546243917, 1260973055] as int[]","[2.7019706408068793, 2.3297626131110825, 1.5436411843158009, 1.5861207834235578, 0.5670319929272729, 1.7284247111895241] as double[]","[2.7019706, 2.3297625, 1.5436412, 1.5861207, 0.56703186, 1.7284248] as float[]","[2, 0, 1, 1, 2, 1] as int[]","[0.011693048642643422, 0.8192993419907051, 0.08721424132459399, 0.1277867634692304, 0.6121424058303924, 0.09210498892686086] as double[]","[0.011690378, 0.81929654, 0.087213635, 0.12778962, 0.6121441, 0.09210494] as float[]","[0, 0, 0, 0, 0, 0] as int[]","['r2':[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.242396559265774, 0.23980663860290638, 0.4667980401594514, 0.0, 0.0, -1.0840395336123059, 0.43090823203242123, 1.0381081218392283, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as double[], 
'r1':[2.242396559265774, 0.23980663860290638, 0.4667980401594514, -1.0840395336123059, 0.43090823203242123, 1.0381081218392283] as double[]]","['r2':[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.2423966, 0.23980664, 0.46679804, 0.0, 0.0, -1.0840396, 0.43090823, 1.0381081, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as float[], 'r1':[2.2423966, 0.23980664, 0.46679804, -1.0840396, 0.43090823, 1.0381081] as float[]]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation functions work across types on slices and non sliced tensors. [22]", + "result":"PASS", + "duration":"0.051 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a function based on the provided expression.","code":["var func = Function.of(funExpression)"]}, + + {"kind":"and","text":"We create 2 tensors storing the same values, one sliced and the other a normal tensor.","code":["var t1 = Tensor.of(type).withShape(2, 3).andSeed(\"Tempeh\")","var t2 = Tensor.of(type).withShape(4, 5).all(0)[1..2, 1..3]","t2.mut[0..1, 0..2] = t1"]}, + + {"kind":"expect","text":"The types of both tensors should match what was provided during instantiation.","code":["t1.dataType == DataType.of(type)","t1.itemType == type","t2.dataType == DataType.of(type)","t2.itemType == type"]}, + + {"kind":"when","text":"We apply the function to both tensors...","code":["var result1 = func(t1)","var result2 = func(t2)"]}, + + {"kind":"then","text":"","code":["result1.itemType == type","result2.itemType == type"]}, + + {"kind":"and","text":"The data of the first (non slice) tensor should be as expected.","code":["result1.mut.data.get() == expected instanceof Map ? expected['r1'] : expected"]}, + + {"kind":"and","text":"As well the value of the slice tensor (Its data would be a sparse array).","code":["result2.items == expected instanceof Map ? 
expected['r2'] : expected"]}, + + {"kind":"where","text":"","code":{"type":["Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float"],"funExpression":["'tanh(i0)'","'tanh(i0)'","'tanh(i0)'","'relu(i0)'","'relu(i0)'","'relu(i0)'","'relu(i0*i0)'","'relu(i0*i0)'","'relu(i0*i0)'","'relu(i0-i0)'","'relu(i0-i0)'","'relu(i0-i0)'","'relu(i0)-i0'","'relu(i0)-i0'","'relu(i0)-i0'","'relu(-i0)+i0'","'relu(-i0)+i0'","'relu(-i0)+i0'","'relu(-i0)/i0'","'relu(-i0)/i0'","'relu(-i0)/i0'","'relu(-i0-5)+i0*3'","'relu(-i0-5)+i0*3'","'relu(-i0-5)+i0*3'","'abs(i0*10)%3'","'abs(i0*10)%3'","'abs(i0*10)%3'","'gaus(i0)*100%i0'","'gaus(i0)*100%i0'","'gaus(i0)*100%i0'","'random(i0)'","'random(i0)'"],"expected":["[-0.2608431635405718, -0.6400224689534015, -0.15255723053856546, 0.1566537867655921, 0.5489211983894932, -0.17031712209680225] as double[]","[-0.26084316, -0.64002246, -0.15255724, 0.15665378, 0.54892117, -0.17031713] as float[]","[-1, -1, 1, -1, 1, -1] as int[]","[-0.0027019706408068795, -0.008329762613111082, -0.001543641184315801, 0.15861207834235577, 0.6567031992927272, -0.001728424711189524] as double[]","[-0.0027019705, -0.008329763, -0.0015436412, 0.15861207, 0.6567032, -0.0017284247] as float[]","[-7156386, -18495716, 248181051, -13634228, 919305478, -15169971] as int[]","[0.07300645343782339, 0.6938494519078316, 0.023828281059158886, 0.025157791396081604, 0.43125909196130346, 0.029874519822505895] as double[]","[0.07300645, 0.6938495, 0.023828283, 0.025157789, 0.43125907, 0.02987452] as float[]","[988699588, -17870520, 141304729, 1260971300, 210951204, 1018550276] as int[]","[0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as double[]","[0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as float[]","[0, 0, 0, 0, 0, 0] as int[]","[0.26749509343988104, 0.8246464986979971, 0.1528204772472643, 0.0, 0.0, 0.17111404640776287] as double[]","[0.2674951, 0.82464653, 0.15282048, 0.0, 0.0, 0.17111404] as float[]","[708482220, 1831075919, 0, 1349788550, 0, 1501827147] as int[]","[0.0, 0.0, 0.0, 0.15702595755893223, 0.6501361672998, 0.0] as double[]","[0.0, 0.0, 0.0, 0.15702595174312592, 0.650136142373085, 0.0] as float[]","[0, 0, 245699240, 0, 910112423, 0] as int[]","[-1.0, -1.0, -1.0, -0.01, -0.01, -1.0] as double[]","[-1.0, -1.0, -1.0, -0.01, -0.01, -1.0] as float[]","[-1, -1, 0, -1, 0, -1] as int[]","[-0.857889221601257, -2.540599021320214, -0.5115487141104245, 0.42425011424364373, 1.9135425658852545, -0.5667989886456676] as double[]","[-0.85788924, -2.540599, -0.51154876, 0.4242501, 1.9135424, -0.566799] as float[]","[-1431277217, 595824021, 742061342, 1568121735, -1546243917, 1260973055] as int[]","[2.7019706408068793, 2.3297626131110825, 1.5436411843158009, 1.5861207834235578, 0.5670319929272729, 1.7284247111895241] as double[]","[2.7019706, 2.3297625, 1.5436412, 1.5861207, 0.56703186, 1.7284248] as float[]","[2, 0, 1, 1, 2, 1] as int[]","[0.011693048642643422, 0.8192993419907051, 0.08721424132459399, 0.1277867634692304, 0.6121424058303924, 0.09210498892686086] as double[]","[0.011690378, 0.81929654, 0.087213635, 0.12778962, 0.6121441, 0.09210494] as float[]","[0, 0, 0, 0, 0, 0] as int[]","['r2':[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.242396559265774, 0.23980663860290638, 0.4667980401594514, 0.0, 0.0, -1.0840395336123059, 0.43090823203242123, 1.0381081218392283, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as double[], 
'r1':[2.242396559265774, 0.23980663860290638, 0.4667980401594514, -1.0840395336123059, 0.43090823203242123, 1.0381081218392283] as double[]]","['r2':[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.2423966, 0.23980664, 0.46679804, 0.0, 0.0, -1.0840396, 0.43090823, 1.0381081, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as float[], 'r1':[2.2423966, 0.23980664, 0.46679804, -1.0840396, 0.43090823, 1.0381081] as float[]]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation functions work across types on slices and non sliced tensors. [23]", + "result":"PASS", + "duration":"0.051 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a function based on the provided expression.","code":["var func = Function.of(funExpression)"]}, + + {"kind":"and","text":"We create 2 tensors storing the same values, one sliced and the other a normal tensor.","code":["var t1 = Tensor.of(type).withShape(2, 3).andSeed(\"Tempeh\")","var t2 = Tensor.of(type).withShape(4, 5).all(0)[1..2, 1..3]","t2.mut[0..1, 0..2] = t1"]}, + + {"kind":"expect","text":"The types of both tensors should match what was provided during instantiation.","code":["t1.dataType == DataType.of(type)","t1.itemType == type","t2.dataType == DataType.of(type)","t2.itemType == type"]}, + + {"kind":"when","text":"We apply the function to both tensors...","code":["var result1 = func(t1)","var result2 = func(t2)"]}, + + {"kind":"then","text":"","code":["result1.itemType == type","result2.itemType == type"]}, + + {"kind":"and","text":"The data of the first (non slice) tensor should be as expected.","code":["result1.mut.data.get() == expected instanceof Map ? expected['r1'] : expected"]}, + + {"kind":"and","text":"As well the value of the slice tensor (Its data would be a sparse array).","code":["result2.items == expected instanceof Map ? 
expected['r2'] : expected"]}, + + {"kind":"where","text":"","code":{"type":["Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float"],"funExpression":["'tanh(i0)'","'tanh(i0)'","'tanh(i0)'","'relu(i0)'","'relu(i0)'","'relu(i0)'","'relu(i0*i0)'","'relu(i0*i0)'","'relu(i0*i0)'","'relu(i0-i0)'","'relu(i0-i0)'","'relu(i0-i0)'","'relu(i0)-i0'","'relu(i0)-i0'","'relu(i0)-i0'","'relu(-i0)+i0'","'relu(-i0)+i0'","'relu(-i0)+i0'","'relu(-i0)/i0'","'relu(-i0)/i0'","'relu(-i0)/i0'","'relu(-i0-5)+i0*3'","'relu(-i0-5)+i0*3'","'relu(-i0-5)+i0*3'","'abs(i0*10)%3'","'abs(i0*10)%3'","'abs(i0*10)%3'","'gaus(i0)*100%i0'","'gaus(i0)*100%i0'","'gaus(i0)*100%i0'","'random(i0)'","'random(i0)'"],"expected":["[-0.2608431635405718, -0.6400224689534015, -0.15255723053856546, 0.1566537867655921, 0.5489211983894932, -0.17031712209680225] as double[]","[-0.26084316, -0.64002246, -0.15255724, 0.15665378, 0.54892117, -0.17031713] as float[]","[-1, -1, 1, -1, 1, -1] as int[]","[-0.0027019706408068795, -0.008329762613111082, -0.001543641184315801, 0.15861207834235577, 0.6567031992927272, -0.001728424711189524] as double[]","[-0.0027019705, -0.008329763, -0.0015436412, 0.15861207, 0.6567032, -0.0017284247] as float[]","[-7156386, -18495716, 248181051, -13634228, 919305478, -15169971] as int[]","[0.07300645343782339, 0.6938494519078316, 0.023828281059158886, 0.025157791396081604, 0.43125909196130346, 0.029874519822505895] as double[]","[0.07300645, 0.6938495, 0.023828283, 0.025157789, 0.43125907, 0.02987452] as float[]","[988699588, -17870520, 141304729, 1260971300, 210951204, 1018550276] as int[]","[0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as double[]","[0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as float[]","[0, 0, 0, 0, 0, 0] as int[]","[0.26749509343988104, 0.8246464986979971, 0.1528204772472643, 0.0, 0.0, 0.17111404640776287] as double[]","[0.2674951, 0.82464653, 0.15282048, 0.0, 0.0, 0.17111404] as float[]","[708482220, 1831075919, 0, 1349788550, 0, 1501827147] as int[]","[0.0, 0.0, 0.0, 0.15702595755893223, 0.6501361672998, 0.0] as double[]","[0.0, 0.0, 0.0, 0.15702595174312592, 0.650136142373085, 0.0] as float[]","[0, 0, 245699240, 0, 910112423, 0] as int[]","[-1.0, -1.0, -1.0, -0.01, -0.01, -1.0] as double[]","[-1.0, -1.0, -1.0, -0.01, -0.01, -1.0] as float[]","[-1, -1, 0, -1, 0, -1] as int[]","[-0.857889221601257, -2.540599021320214, -0.5115487141104245, 0.42425011424364373, 1.9135425658852545, -0.5667989886456676] as double[]","[-0.85788924, -2.540599, -0.51154876, 0.4242501, 1.9135424, -0.566799] as float[]","[-1431277217, 595824021, 742061342, 1568121735, -1546243917, 1260973055] as int[]","[2.7019706408068793, 2.3297626131110825, 1.5436411843158009, 1.5861207834235578, 0.5670319929272729, 1.7284247111895241] as double[]","[2.7019706, 2.3297625, 1.5436412, 1.5861207, 0.56703186, 1.7284248] as float[]","[2, 0, 1, 1, 2, 1] as int[]","[0.011693048642643422, 0.8192993419907051, 0.08721424132459399, 0.1277867634692304, 0.6121424058303924, 0.09210498892686086] as double[]","[0.011690378, 0.81929654, 0.087213635, 0.12778962, 0.6121441, 0.09210494] as float[]","[0, 0, 0, 0, 0, 0] as int[]","['r2':[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.242396559265774, 0.23980663860290638, 0.4667980401594514, 0.0, 0.0, -1.0840395336123059, 0.43090823203242123, 1.0381081218392283, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as double[], 
'r1':[2.242396559265774, 0.23980663860290638, 0.4667980401594514, -1.0840395336123059, 0.43090823203242123, 1.0381081218392283] as double[]]","['r2':[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.2423966, 0.23980664, 0.46679804, 0.0, 0.0, -1.0840396, 0.43090823, 1.0381081, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as float[], 'r1':[2.2423966, 0.23980664, 0.46679804, -1.0840396, 0.43090823, 1.0381081] as float[]]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation functions work across types on slices and non sliced tensors. [24]", + "result":"PASS", + "duration":"0.052 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a function based on the provided expression.","code":["var func = Function.of(funExpression)"]}, + + {"kind":"and","text":"We create 2 tensors storing the same values, one sliced and the other a normal tensor.","code":["var t1 = Tensor.of(type).withShape(2, 3).andSeed(\"Tempeh\")","var t2 = Tensor.of(type).withShape(4, 5).all(0)[1..2, 1..3]","t2.mut[0..1, 0..2] = t1"]}, + + {"kind":"expect","text":"The types of both tensors should match what was provided during instantiation.","code":["t1.dataType == DataType.of(type)","t1.itemType == type","t2.dataType == DataType.of(type)","t2.itemType == type"]}, + + {"kind":"when","text":"We apply the function to both tensors...","code":["var result1 = func(t1)","var result2 = func(t2)"]}, + + {"kind":"then","text":"","code":["result1.itemType == type","result2.itemType == type"]}, + + {"kind":"and","text":"The data of the first (non slice) tensor should be as expected.","code":["result1.mut.data.get() == expected instanceof Map ? expected['r1'] : expected"]}, + + {"kind":"and","text":"As well the value of the slice tensor (Its data would be a sparse array).","code":["result2.items == expected instanceof Map ? 
expected['r2'] : expected"]}, + + {"kind":"where","text":"","code":{"type":["Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float"],"funExpression":["'tanh(i0)'","'tanh(i0)'","'tanh(i0)'","'relu(i0)'","'relu(i0)'","'relu(i0)'","'relu(i0*i0)'","'relu(i0*i0)'","'relu(i0*i0)'","'relu(i0-i0)'","'relu(i0-i0)'","'relu(i0-i0)'","'relu(i0)-i0'","'relu(i0)-i0'","'relu(i0)-i0'","'relu(-i0)+i0'","'relu(-i0)+i0'","'relu(-i0)+i0'","'relu(-i0)/i0'","'relu(-i0)/i0'","'relu(-i0)/i0'","'relu(-i0-5)+i0*3'","'relu(-i0-5)+i0*3'","'relu(-i0-5)+i0*3'","'abs(i0*10)%3'","'abs(i0*10)%3'","'abs(i0*10)%3'","'gaus(i0)*100%i0'","'gaus(i0)*100%i0'","'gaus(i0)*100%i0'","'random(i0)'","'random(i0)'"],"expected":["[-0.2608431635405718, -0.6400224689534015, -0.15255723053856546, 0.1566537867655921, 0.5489211983894932, -0.17031712209680225] as double[]","[-0.26084316, -0.64002246, -0.15255724, 0.15665378, 0.54892117, -0.17031713] as float[]","[-1, -1, 1, -1, 1, -1] as int[]","[-0.0027019706408068795, -0.008329762613111082, -0.001543641184315801, 0.15861207834235577, 0.6567031992927272, -0.001728424711189524] as double[]","[-0.0027019705, -0.008329763, -0.0015436412, 0.15861207, 0.6567032, -0.0017284247] as float[]","[-7156386, -18495716, 248181051, -13634228, 919305478, -15169971] as int[]","[0.07300645343782339, 0.6938494519078316, 0.023828281059158886, 0.025157791396081604, 0.43125909196130346, 0.029874519822505895] as double[]","[0.07300645, 0.6938495, 0.023828283, 0.025157789, 0.43125907, 0.02987452] as float[]","[988699588, -17870520, 141304729, 1260971300, 210951204, 1018550276] as int[]","[0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as double[]","[0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as float[]","[0, 0, 0, 0, 0, 0] as int[]","[0.26749509343988104, 0.8246464986979971, 0.1528204772472643, 0.0, 0.0, 0.17111404640776287] as double[]","[0.2674951, 0.82464653, 0.15282048, 0.0, 0.0, 0.17111404] as float[]","[708482220, 1831075919, 0, 1349788550, 0, 1501827147] as int[]","[0.0, 0.0, 0.0, 0.15702595755893223, 0.6501361672998, 0.0] as double[]","[0.0, 0.0, 0.0, 0.15702595174312592, 0.650136142373085, 0.0] as float[]","[0, 0, 245699240, 0, 910112423, 0] as int[]","[-1.0, -1.0, -1.0, -0.01, -0.01, -1.0] as double[]","[-1.0, -1.0, -1.0, -0.01, -0.01, -1.0] as float[]","[-1, -1, 0, -1, 0, -1] as int[]","[-0.857889221601257, -2.540599021320214, -0.5115487141104245, 0.42425011424364373, 1.9135425658852545, -0.5667989886456676] as double[]","[-0.85788924, -2.540599, -0.51154876, 0.4242501, 1.9135424, -0.566799] as float[]","[-1431277217, 595824021, 742061342, 1568121735, -1546243917, 1260973055] as int[]","[2.7019706408068793, 2.3297626131110825, 1.5436411843158009, 1.5861207834235578, 0.5670319929272729, 1.7284247111895241] as double[]","[2.7019706, 2.3297625, 1.5436412, 1.5861207, 0.56703186, 1.7284248] as float[]","[2, 0, 1, 1, 2, 1] as int[]","[0.011693048642643422, 0.8192993419907051, 0.08721424132459399, 0.1277867634692304, 0.6121424058303924, 0.09210498892686086] as double[]","[0.011690378, 0.81929654, 0.087213635, 0.12778962, 0.6121441, 0.09210494] as float[]","[0, 0, 0, 0, 0, 0] as int[]","['r2':[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.242396559265774, 0.23980663860290638, 0.4667980401594514, 0.0, 0.0, -1.0840395336123059, 0.43090823203242123, 1.0381081218392283, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as double[], 
'r1':[2.242396559265774, 0.23980663860290638, 0.4667980401594514, -1.0840395336123059, 0.43090823203242123, 1.0381081218392283] as double[]]","['r2':[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.2423966, 0.23980664, 0.46679804, 0.0, 0.0, -1.0840396, 0.43090823, 1.0381081, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as float[], 'r1':[2.2423966, 0.23980664, 0.46679804, -1.0840396, 0.43090823, 1.0381081] as float[]]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation functions work across types on slices and non sliced tensors. [25]", + "result":"PASS", + "duration":"0.051 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a function based on the provided expression.","code":["var func = Function.of(funExpression)"]}, + + {"kind":"and","text":"We create 2 tensors storing the same values, one sliced and the other a normal tensor.","code":["var t1 = Tensor.of(type).withShape(2, 3).andSeed(\"Tempeh\")","var t2 = Tensor.of(type).withShape(4, 5).all(0)[1..2, 1..3]","t2.mut[0..1, 0..2] = t1"]}, + + {"kind":"expect","text":"The types of both tensors should match what was provided during instantiation.","code":["t1.dataType == DataType.of(type)","t1.itemType == type","t2.dataType == DataType.of(type)","t2.itemType == type"]}, + + {"kind":"when","text":"We apply the function to both tensors...","code":["var result1 = func(t1)","var result2 = func(t2)"]}, + + {"kind":"then","text":"","code":["result1.itemType == type","result2.itemType == type"]}, + + {"kind":"and","text":"The data of the first (non slice) tensor should be as expected.","code":["result1.mut.data.get() == expected instanceof Map ? expected['r1'] : expected"]}, + + {"kind":"and","text":"As well the value of the slice tensor (Its data would be a sparse array).","code":["result2.items == expected instanceof Map ? 
expected['r2'] : expected"]}, + + {"kind":"where","text":"","code":{"type":["Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float"],"funExpression":["'tanh(i0)'","'tanh(i0)'","'tanh(i0)'","'relu(i0)'","'relu(i0)'","'relu(i0)'","'relu(i0*i0)'","'relu(i0*i0)'","'relu(i0*i0)'","'relu(i0-i0)'","'relu(i0-i0)'","'relu(i0-i0)'","'relu(i0)-i0'","'relu(i0)-i0'","'relu(i0)-i0'","'relu(-i0)+i0'","'relu(-i0)+i0'","'relu(-i0)+i0'","'relu(-i0)/i0'","'relu(-i0)/i0'","'relu(-i0)/i0'","'relu(-i0-5)+i0*3'","'relu(-i0-5)+i0*3'","'relu(-i0-5)+i0*3'","'abs(i0*10)%3'","'abs(i0*10)%3'","'abs(i0*10)%3'","'gaus(i0)*100%i0'","'gaus(i0)*100%i0'","'gaus(i0)*100%i0'","'random(i0)'","'random(i0)'"],"expected":["[-0.2608431635405718, -0.6400224689534015, -0.15255723053856546, 0.1566537867655921, 0.5489211983894932, -0.17031712209680225] as double[]","[-0.26084316, -0.64002246, -0.15255724, 0.15665378, 0.54892117, -0.17031713] as float[]","[-1, -1, 1, -1, 1, -1] as int[]","[-0.0027019706408068795, -0.008329762613111082, -0.001543641184315801, 0.15861207834235577, 0.6567031992927272, -0.001728424711189524] as double[]","[-0.0027019705, -0.008329763, -0.0015436412, 0.15861207, 0.6567032, -0.0017284247] as float[]","[-7156386, -18495716, 248181051, -13634228, 919305478, -15169971] as int[]","[0.07300645343782339, 0.6938494519078316, 0.023828281059158886, 0.025157791396081604, 0.43125909196130346, 0.029874519822505895] as double[]","[0.07300645, 0.6938495, 0.023828283, 0.025157789, 0.43125907, 0.02987452] as float[]","[988699588, -17870520, 141304729, 1260971300, 210951204, 1018550276] as int[]","[0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as double[]","[0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as float[]","[0, 0, 0, 0, 0, 0] as int[]","[0.26749509343988104, 0.8246464986979971, 0.1528204772472643, 0.0, 0.0, 0.17111404640776287] as double[]","[0.2674951, 0.82464653, 0.15282048, 0.0, 0.0, 0.17111404] as float[]","[708482220, 1831075919, 0, 1349788550, 0, 1501827147] as int[]","[0.0, 0.0, 0.0, 0.15702595755893223, 0.6501361672998, 0.0] as double[]","[0.0, 0.0, 0.0, 0.15702595174312592, 0.650136142373085, 0.0] as float[]","[0, 0, 245699240, 0, 910112423, 0] as int[]","[-1.0, -1.0, -1.0, -0.01, -0.01, -1.0] as double[]","[-1.0, -1.0, -1.0, -0.01, -0.01, -1.0] as float[]","[-1, -1, 0, -1, 0, -1] as int[]","[-0.857889221601257, -2.540599021320214, -0.5115487141104245, 0.42425011424364373, 1.9135425658852545, -0.5667989886456676] as double[]","[-0.85788924, -2.540599, -0.51154876, 0.4242501, 1.9135424, -0.566799] as float[]","[-1431277217, 595824021, 742061342, 1568121735, -1546243917, 1260973055] as int[]","[2.7019706408068793, 2.3297626131110825, 1.5436411843158009, 1.5861207834235578, 0.5670319929272729, 1.7284247111895241] as double[]","[2.7019706, 2.3297625, 1.5436412, 1.5861207, 0.56703186, 1.7284248] as float[]","[2, 0, 1, 1, 2, 1] as int[]","[0.011693048642643422, 0.8192993419907051, 0.08721424132459399, 0.1277867634692304, 0.6121424058303924, 0.09210498892686086] as double[]","[0.011690378, 0.81929654, 0.087213635, 0.12778962, 0.6121441, 0.09210494] as float[]","[0, 0, 0, 0, 0, 0] as int[]","['r2':[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.242396559265774, 0.23980663860290638, 0.4667980401594514, 0.0, 0.0, -1.0840395336123059, 0.43090823203242123, 1.0381081218392283, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as double[], 
'r1':[2.242396559265774, 0.23980663860290638, 0.4667980401594514, -1.0840395336123059, 0.43090823203242123, 1.0381081218392283] as double[]]","['r2':[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.2423966, 0.23980664, 0.46679804, 0.0, 0.0, -1.0840396, 0.43090823, 1.0381081, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as float[], 'r1':[2.2423966, 0.23980664, 0.46679804, -1.0840396, 0.43090823, 1.0381081] as float[]]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation functions work across types on slices and non sliced tensors. [26]", + "result":"PASS", + "duration":"0.052 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a function based on the provided expression.","code":["var func = Function.of(funExpression)"]}, + + {"kind":"and","text":"We create 2 tensors storing the same values, one sliced and the other a normal tensor.","code":["var t1 = Tensor.of(type).withShape(2, 3).andSeed(\"Tempeh\")","var t2 = Tensor.of(type).withShape(4, 5).all(0)[1..2, 1..3]","t2.mut[0..1, 0..2] = t1"]}, + + {"kind":"expect","text":"The types of both tensors should match what was provided during instantiation.","code":["t1.dataType == DataType.of(type)","t1.itemType == type","t2.dataType == DataType.of(type)","t2.itemType == type"]}, + + {"kind":"when","text":"We apply the function to both tensors...","code":["var result1 = func(t1)","var result2 = func(t2)"]}, + + {"kind":"then","text":"","code":["result1.itemType == type","result2.itemType == type"]}, + + {"kind":"and","text":"The data of the first (non slice) tensor should be as expected.","code":["result1.mut.data.get() == expected instanceof Map ? expected['r1'] : expected"]}, + + {"kind":"and","text":"As well the value of the slice tensor (Its data would be a sparse array).","code":["result2.items == expected instanceof Map ? 
expected['r2'] : expected"]}, + + {"kind":"where","text":"","code":{"type":["Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float"],"funExpression":["'tanh(i0)'","'tanh(i0)'","'tanh(i0)'","'relu(i0)'","'relu(i0)'","'relu(i0)'","'relu(i0*i0)'","'relu(i0*i0)'","'relu(i0*i0)'","'relu(i0-i0)'","'relu(i0-i0)'","'relu(i0-i0)'","'relu(i0)-i0'","'relu(i0)-i0'","'relu(i0)-i0'","'relu(-i0)+i0'","'relu(-i0)+i0'","'relu(-i0)+i0'","'relu(-i0)/i0'","'relu(-i0)/i0'","'relu(-i0)/i0'","'relu(-i0-5)+i0*3'","'relu(-i0-5)+i0*3'","'relu(-i0-5)+i0*3'","'abs(i0*10)%3'","'abs(i0*10)%3'","'abs(i0*10)%3'","'gaus(i0)*100%i0'","'gaus(i0)*100%i0'","'gaus(i0)*100%i0'","'random(i0)'","'random(i0)'"],"expected":["[-0.2608431635405718, -0.6400224689534015, -0.15255723053856546, 0.1566537867655921, 0.5489211983894932, -0.17031712209680225] as double[]","[-0.26084316, -0.64002246, -0.15255724, 0.15665378, 0.54892117, -0.17031713] as float[]","[-1, -1, 1, -1, 1, -1] as int[]","[-0.0027019706408068795, -0.008329762613111082, -0.001543641184315801, 0.15861207834235577, 0.6567031992927272, -0.001728424711189524] as double[]","[-0.0027019705, -0.008329763, -0.0015436412, 0.15861207, 0.6567032, -0.0017284247] as float[]","[-7156386, -18495716, 248181051, -13634228, 919305478, -15169971] as int[]","[0.07300645343782339, 0.6938494519078316, 0.023828281059158886, 0.025157791396081604, 0.43125909196130346, 0.029874519822505895] as double[]","[0.07300645, 0.6938495, 0.023828283, 0.025157789, 0.43125907, 0.02987452] as float[]","[988699588, -17870520, 141304729, 1260971300, 210951204, 1018550276] as int[]","[0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as double[]","[0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as float[]","[0, 0, 0, 0, 0, 0] as int[]","[0.26749509343988104, 0.8246464986979971, 0.1528204772472643, 0.0, 0.0, 0.17111404640776287] as double[]","[0.2674951, 0.82464653, 0.15282048, 0.0, 0.0, 0.17111404] as float[]","[708482220, 1831075919, 0, 1349788550, 0, 1501827147] as int[]","[0.0, 0.0, 0.0, 0.15702595755893223, 0.6501361672998, 0.0] as double[]","[0.0, 0.0, 0.0, 0.15702595174312592, 0.650136142373085, 0.0] as float[]","[0, 0, 245699240, 0, 910112423, 0] as int[]","[-1.0, -1.0, -1.0, -0.01, -0.01, -1.0] as double[]","[-1.0, -1.0, -1.0, -0.01, -0.01, -1.0] as float[]","[-1, -1, 0, -1, 0, -1] as int[]","[-0.857889221601257, -2.540599021320214, -0.5115487141104245, 0.42425011424364373, 1.9135425658852545, -0.5667989886456676] as double[]","[-0.85788924, -2.540599, -0.51154876, 0.4242501, 1.9135424, -0.566799] as float[]","[-1431277217, 595824021, 742061342, 1568121735, -1546243917, 1260973055] as int[]","[2.7019706408068793, 2.3297626131110825, 1.5436411843158009, 1.5861207834235578, 0.5670319929272729, 1.7284247111895241] as double[]","[2.7019706, 2.3297625, 1.5436412, 1.5861207, 0.56703186, 1.7284248] as float[]","[2, 0, 1, 1, 2, 1] as int[]","[0.011693048642643422, 0.8192993419907051, 0.08721424132459399, 0.1277867634692304, 0.6121424058303924, 0.09210498892686086] as double[]","[0.011690378, 0.81929654, 0.087213635, 0.12778962, 0.6121441, 0.09210494] as float[]","[0, 0, 0, 0, 0, 0] as int[]","['r2':[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.242396559265774, 0.23980663860290638, 0.4667980401594514, 0.0, 0.0, -1.0840395336123059, 0.43090823203242123, 1.0381081218392283, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as double[], 
'r1':[2.242396559265774, 0.23980663860290638, 0.4667980401594514, -1.0840395336123059, 0.43090823203242123, 1.0381081218392283] as double[]]","['r2':[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.2423966, 0.23980664, 0.46679804, 0.0, 0.0, -1.0840396, 0.43090823, 1.0381081, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as float[], 'r1':[2.2423966, 0.23980664, 0.46679804, -1.0840396, 0.43090823, 1.0381081] as float[]]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation functions work across types on slices and non sliced tensors. [27]", + "result":"PASS", + "duration":"0.052 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a function based on the provided expression.","code":["var func = Function.of(funExpression)"]}, + + {"kind":"and","text":"We create 2 tensors storing the same values, one sliced and the other a normal tensor.","code":["var t1 = Tensor.of(type).withShape(2, 3).andSeed(\"Tempeh\")","var t2 = Tensor.of(type).withShape(4, 5).all(0)[1..2, 1..3]","t2.mut[0..1, 0..2] = t1"]}, + + {"kind":"expect","text":"The types of both tensors should match what was provided during instantiation.","code":["t1.dataType == DataType.of(type)","t1.itemType == type","t2.dataType == DataType.of(type)","t2.itemType == type"]}, + + {"kind":"when","text":"We apply the function to both tensors...","code":["var result1 = func(t1)","var result2 = func(t2)"]}, + + {"kind":"then","text":"","code":["result1.itemType == type","result2.itemType == type"]}, + + {"kind":"and","text":"The data of the first (non slice) tensor should be as expected.","code":["result1.mut.data.get() == expected instanceof Map ? expected['r1'] : expected"]}, + + {"kind":"and","text":"As well the value of the slice tensor (Its data would be a sparse array).","code":["result2.items == expected instanceof Map ? 
expected['r2'] : expected"]}, + + {"kind":"where","text":"","code":{"type":["Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float"],"funExpression":["'tanh(i0)'","'tanh(i0)'","'tanh(i0)'","'relu(i0)'","'relu(i0)'","'relu(i0)'","'relu(i0*i0)'","'relu(i0*i0)'","'relu(i0*i0)'","'relu(i0-i0)'","'relu(i0-i0)'","'relu(i0-i0)'","'relu(i0)-i0'","'relu(i0)-i0'","'relu(i0)-i0'","'relu(-i0)+i0'","'relu(-i0)+i0'","'relu(-i0)+i0'","'relu(-i0)/i0'","'relu(-i0)/i0'","'relu(-i0)/i0'","'relu(-i0-5)+i0*3'","'relu(-i0-5)+i0*3'","'relu(-i0-5)+i0*3'","'abs(i0*10)%3'","'abs(i0*10)%3'","'abs(i0*10)%3'","'gaus(i0)*100%i0'","'gaus(i0)*100%i0'","'gaus(i0)*100%i0'","'random(i0)'","'random(i0)'"],"expected":["[-0.2608431635405718, -0.6400224689534015, -0.15255723053856546, 0.1566537867655921, 0.5489211983894932, -0.17031712209680225] as double[]","[-0.26084316, -0.64002246, -0.15255724, 0.15665378, 0.54892117, -0.17031713] as float[]","[-1, -1, 1, -1, 1, -1] as int[]","[-0.0027019706408068795, -0.008329762613111082, -0.001543641184315801, 0.15861207834235577, 0.6567031992927272, -0.001728424711189524] as double[]","[-0.0027019705, -0.008329763, -0.0015436412, 0.15861207, 0.6567032, -0.0017284247] as float[]","[-7156386, -18495716, 248181051, -13634228, 919305478, -15169971] as int[]","[0.07300645343782339, 0.6938494519078316, 0.023828281059158886, 0.025157791396081604, 0.43125909196130346, 0.029874519822505895] as double[]","[0.07300645, 0.6938495, 0.023828283, 0.025157789, 0.43125907, 0.02987452] as float[]","[988699588, -17870520, 141304729, 1260971300, 210951204, 1018550276] as int[]","[0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as double[]","[0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as float[]","[0, 0, 0, 0, 0, 0] as int[]","[0.26749509343988104, 0.8246464986979971, 0.1528204772472643, 0.0, 0.0, 0.17111404640776287] as double[]","[0.2674951, 0.82464653, 0.15282048, 0.0, 0.0, 0.17111404] as float[]","[708482220, 1831075919, 0, 1349788550, 0, 1501827147] as int[]","[0.0, 0.0, 0.0, 0.15702595755893223, 0.6501361672998, 0.0] as double[]","[0.0, 0.0, 0.0, 0.15702595174312592, 0.650136142373085, 0.0] as float[]","[0, 0, 245699240, 0, 910112423, 0] as int[]","[-1.0, -1.0, -1.0, -0.01, -0.01, -1.0] as double[]","[-1.0, -1.0, -1.0, -0.01, -0.01, -1.0] as float[]","[-1, -1, 0, -1, 0, -1] as int[]","[-0.857889221601257, -2.540599021320214, -0.5115487141104245, 0.42425011424364373, 1.9135425658852545, -0.5667989886456676] as double[]","[-0.85788924, -2.540599, -0.51154876, 0.4242501, 1.9135424, -0.566799] as float[]","[-1431277217, 595824021, 742061342, 1568121735, -1546243917, 1260973055] as int[]","[2.7019706408068793, 2.3297626131110825, 1.5436411843158009, 1.5861207834235578, 0.5670319929272729, 1.7284247111895241] as double[]","[2.7019706, 2.3297625, 1.5436412, 1.5861207, 0.56703186, 1.7284248] as float[]","[2, 0, 1, 1, 2, 1] as int[]","[0.011693048642643422, 0.8192993419907051, 0.08721424132459399, 0.1277867634692304, 0.6121424058303924, 0.09210498892686086] as double[]","[0.011690378, 0.81929654, 0.087213635, 0.12778962, 0.6121441, 0.09210494] as float[]","[0, 0, 0, 0, 0, 0] as int[]","['r2':[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.242396559265774, 0.23980663860290638, 0.4667980401594514, 0.0, 0.0, -1.0840395336123059, 0.43090823203242123, 1.0381081218392283, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as double[], 
'r1':[2.242396559265774, 0.23980663860290638, 0.4667980401594514, -1.0840395336123059, 0.43090823203242123, 1.0381081218392283] as double[]]","['r2':[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.2423966, 0.23980664, 0.46679804, 0.0, 0.0, -1.0840396, 0.43090823, 1.0381081, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as float[], 'r1':[2.2423966, 0.23980664, 0.46679804, -1.0840396, 0.43090823, 1.0381081] as float[]]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation functions work across types on slices and non sliced tensors. [28]", + "result":"PASS", + "duration":"0.088 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a function based on the provided expression.","code":["var func = Function.of(funExpression)"]}, + + {"kind":"and","text":"We create 2 tensors storing the same values, one sliced and the other a normal tensor.","code":["var t1 = Tensor.of(type).withShape(2, 3).andSeed(\"Tempeh\")","var t2 = Tensor.of(type).withShape(4, 5).all(0)[1..2, 1..3]","t2.mut[0..1, 0..2] = t1"]}, + + {"kind":"expect","text":"The types of both tensors should match what was provided during instantiation.","code":["t1.dataType == DataType.of(type)","t1.itemType == type","t2.dataType == DataType.of(type)","t2.itemType == type"]}, + + {"kind":"when","text":"We apply the function to both tensors...","code":["var result1 = func(t1)","var result2 = func(t2)"]}, + + {"kind":"then","text":"","code":["result1.itemType == type","result2.itemType == type"]}, + + {"kind":"and","text":"The data of the first (non slice) tensor should be as expected.","code":["result1.mut.data.get() == expected instanceof Map ? expected['r1'] : expected"]}, + + {"kind":"and","text":"As well the value of the slice tensor (Its data would be a sparse array).","code":["result2.items == expected instanceof Map ? 
expected['r2'] : expected"]}, + + {"kind":"where","text":"","code":{"type":["Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float"],"funExpression":["'tanh(i0)'","'tanh(i0)'","'tanh(i0)'","'relu(i0)'","'relu(i0)'","'relu(i0)'","'relu(i0*i0)'","'relu(i0*i0)'","'relu(i0*i0)'","'relu(i0-i0)'","'relu(i0-i0)'","'relu(i0-i0)'","'relu(i0)-i0'","'relu(i0)-i0'","'relu(i0)-i0'","'relu(-i0)+i0'","'relu(-i0)+i0'","'relu(-i0)+i0'","'relu(-i0)/i0'","'relu(-i0)/i0'","'relu(-i0)/i0'","'relu(-i0-5)+i0*3'","'relu(-i0-5)+i0*3'","'relu(-i0-5)+i0*3'","'abs(i0*10)%3'","'abs(i0*10)%3'","'abs(i0*10)%3'","'gaus(i0)*100%i0'","'gaus(i0)*100%i0'","'gaus(i0)*100%i0'","'random(i0)'","'random(i0)'"],"expected":["[-0.2608431635405718, -0.6400224689534015, -0.15255723053856546, 0.1566537867655921, 0.5489211983894932, -0.17031712209680225] as double[]","[-0.26084316, -0.64002246, -0.15255724, 0.15665378, 0.54892117, -0.17031713] as float[]","[-1, -1, 1, -1, 1, -1] as int[]","[-0.0027019706408068795, -0.008329762613111082, -0.001543641184315801, 0.15861207834235577, 0.6567031992927272, -0.001728424711189524] as double[]","[-0.0027019705, -0.008329763, -0.0015436412, 0.15861207, 0.6567032, -0.0017284247] as float[]","[-7156386, -18495716, 248181051, -13634228, 919305478, -15169971] as int[]","[0.07300645343782339, 0.6938494519078316, 0.023828281059158886, 0.025157791396081604, 0.43125909196130346, 0.029874519822505895] as double[]","[0.07300645, 0.6938495, 0.023828283, 0.025157789, 0.43125907, 0.02987452] as float[]","[988699588, -17870520, 141304729, 1260971300, 210951204, 1018550276] as int[]","[0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as double[]","[0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as float[]","[0, 0, 0, 0, 0, 0] as int[]","[0.26749509343988104, 0.8246464986979971, 0.1528204772472643, 0.0, 0.0, 0.17111404640776287] as double[]","[0.2674951, 0.82464653, 0.15282048, 0.0, 0.0, 0.17111404] as float[]","[708482220, 1831075919, 0, 1349788550, 0, 1501827147] as int[]","[0.0, 0.0, 0.0, 0.15702595755893223, 0.6501361672998, 0.0] as double[]","[0.0, 0.0, 0.0, 0.15702595174312592, 0.650136142373085, 0.0] as float[]","[0, 0, 245699240, 0, 910112423, 0] as int[]","[-1.0, -1.0, -1.0, -0.01, -0.01, -1.0] as double[]","[-1.0, -1.0, -1.0, -0.01, -0.01, -1.0] as float[]","[-1, -1, 0, -1, 0, -1] as int[]","[-0.857889221601257, -2.540599021320214, -0.5115487141104245, 0.42425011424364373, 1.9135425658852545, -0.5667989886456676] as double[]","[-0.85788924, -2.540599, -0.51154876, 0.4242501, 1.9135424, -0.566799] as float[]","[-1431277217, 595824021, 742061342, 1568121735, -1546243917, 1260973055] as int[]","[2.7019706408068793, 2.3297626131110825, 1.5436411843158009, 1.5861207834235578, 0.5670319929272729, 1.7284247111895241] as double[]","[2.7019706, 2.3297625, 1.5436412, 1.5861207, 0.56703186, 1.7284248] as float[]","[2, 0, 1, 1, 2, 1] as int[]","[0.011693048642643422, 0.8192993419907051, 0.08721424132459399, 0.1277867634692304, 0.6121424058303924, 0.09210498892686086] as double[]","[0.011690378, 0.81929654, 0.087213635, 0.12778962, 0.6121441, 0.09210494] as float[]","[0, 0, 0, 0, 0, 0] as int[]","['r2':[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.242396559265774, 0.23980663860290638, 0.4667980401594514, 0.0, 0.0, -1.0840395336123059, 0.43090823203242123, 1.0381081218392283, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as double[], 
'r1':[2.242396559265774, 0.23980663860290638, 0.4667980401594514, -1.0840395336123059, 0.43090823203242123, 1.0381081218392283] as double[]]","['r2':[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.2423966, 0.23980664, 0.46679804, 0.0, 0.0, -1.0840396, 0.43090823, 1.0381081, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as float[], 'r1':[2.2423966, 0.23980664, 0.46679804, -1.0840396, 0.43090823, 1.0381081] as float[]]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation functions work across types on slices and non sliced tensors. [29]", + "result":"PASS", + "duration":"0.051 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a function based on the provided expression.","code":["var func = Function.of(funExpression)"]}, + + {"kind":"and","text":"We create 2 tensors storing the same values, one sliced and the other a normal tensor.","code":["var t1 = Tensor.of(type).withShape(2, 3).andSeed(\"Tempeh\")","var t2 = Tensor.of(type).withShape(4, 5).all(0)[1..2, 1..3]","t2.mut[0..1, 0..2] = t1"]}, + + {"kind":"expect","text":"The types of both tensors should match what was provided during instantiation.","code":["t1.dataType == DataType.of(type)","t1.itemType == type","t2.dataType == DataType.of(type)","t2.itemType == type"]}, + + {"kind":"when","text":"We apply the function to both tensors...","code":["var result1 = func(t1)","var result2 = func(t2)"]}, + + {"kind":"then","text":"","code":["result1.itemType == type","result2.itemType == type"]}, + + {"kind":"and","text":"The data of the first (non slice) tensor should be as expected.","code":["result1.mut.data.get() == expected instanceof Map ? expected['r1'] : expected"]}, + + {"kind":"and","text":"As well the value of the slice tensor (Its data would be a sparse array).","code":["result2.items == expected instanceof Map ? 
expected['r2'] : expected"]}, + + {"kind":"where","text":"","code":{"type":["Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float"],"funExpression":["'tanh(i0)'","'tanh(i0)'","'tanh(i0)'","'relu(i0)'","'relu(i0)'","'relu(i0)'","'relu(i0*i0)'","'relu(i0*i0)'","'relu(i0*i0)'","'relu(i0-i0)'","'relu(i0-i0)'","'relu(i0-i0)'","'relu(i0)-i0'","'relu(i0)-i0'","'relu(i0)-i0'","'relu(-i0)+i0'","'relu(-i0)+i0'","'relu(-i0)+i0'","'relu(-i0)/i0'","'relu(-i0)/i0'","'relu(-i0)/i0'","'relu(-i0-5)+i0*3'","'relu(-i0-5)+i0*3'","'relu(-i0-5)+i0*3'","'abs(i0*10)%3'","'abs(i0*10)%3'","'abs(i0*10)%3'","'gaus(i0)*100%i0'","'gaus(i0)*100%i0'","'gaus(i0)*100%i0'","'random(i0)'","'random(i0)'"],"expected":["[-0.2608431635405718, -0.6400224689534015, -0.15255723053856546, 0.1566537867655921, 0.5489211983894932, -0.17031712209680225] as double[]","[-0.26084316, -0.64002246, -0.15255724, 0.15665378, 0.54892117, -0.17031713] as float[]","[-1, -1, 1, -1, 1, -1] as int[]","[-0.0027019706408068795, -0.008329762613111082, -0.001543641184315801, 0.15861207834235577, 0.6567031992927272, -0.001728424711189524] as double[]","[-0.0027019705, -0.008329763, -0.0015436412, 0.15861207, 0.6567032, -0.0017284247] as float[]","[-7156386, -18495716, 248181051, -13634228, 919305478, -15169971] as int[]","[0.07300645343782339, 0.6938494519078316, 0.023828281059158886, 0.025157791396081604, 0.43125909196130346, 0.029874519822505895] as double[]","[0.07300645, 0.6938495, 0.023828283, 0.025157789, 0.43125907, 0.02987452] as float[]","[988699588, -17870520, 141304729, 1260971300, 210951204, 1018550276] as int[]","[0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as double[]","[0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as float[]","[0, 0, 0, 0, 0, 0] as int[]","[0.26749509343988104, 0.8246464986979971, 0.1528204772472643, 0.0, 0.0, 0.17111404640776287] as double[]","[0.2674951, 0.82464653, 0.15282048, 0.0, 0.0, 0.17111404] as float[]","[708482220, 1831075919, 0, 1349788550, 0, 1501827147] as int[]","[0.0, 0.0, 0.0, 0.15702595755893223, 0.6501361672998, 0.0] as double[]","[0.0, 0.0, 0.0, 0.15702595174312592, 0.650136142373085, 0.0] as float[]","[0, 0, 245699240, 0, 910112423, 0] as int[]","[-1.0, -1.0, -1.0, -0.01, -0.01, -1.0] as double[]","[-1.0, -1.0, -1.0, -0.01, -0.01, -1.0] as float[]","[-1, -1, 0, -1, 0, -1] as int[]","[-0.857889221601257, -2.540599021320214, -0.5115487141104245, 0.42425011424364373, 1.9135425658852545, -0.5667989886456676] as double[]","[-0.85788924, -2.540599, -0.51154876, 0.4242501, 1.9135424, -0.566799] as float[]","[-1431277217, 595824021, 742061342, 1568121735, -1546243917, 1260973055] as int[]","[2.7019706408068793, 2.3297626131110825, 1.5436411843158009, 1.5861207834235578, 0.5670319929272729, 1.7284247111895241] as double[]","[2.7019706, 2.3297625, 1.5436412, 1.5861207, 0.56703186, 1.7284248] as float[]","[2, 0, 1, 1, 2, 1] as int[]","[0.011693048642643422, 0.8192993419907051, 0.08721424132459399, 0.1277867634692304, 0.6121424058303924, 0.09210498892686086] as double[]","[0.011690378, 0.81929654, 0.087213635, 0.12778962, 0.6121441, 0.09210494] as float[]","[0, 0, 0, 0, 0, 0] as int[]","['r2':[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.242396559265774, 0.23980663860290638, 0.4667980401594514, 0.0, 0.0, -1.0840395336123059, 0.43090823203242123, 1.0381081218392283, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as double[], 
'r1':[2.242396559265774, 0.23980663860290638, 0.4667980401594514, -1.0840395336123059, 0.43090823203242123, 1.0381081218392283] as double[]]","['r2':[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.2423966, 0.23980664, 0.46679804, 0.0, 0.0, -1.0840396, 0.43090823, 1.0381081, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as float[], 'r1':[2.2423966, 0.23980664, 0.46679804, -1.0840396, 0.43090823, 1.0381081] as float[]]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Activation functions work across types on slices and non sliced tensors. [30]", + "result":"PASS", + "duration":"0.050 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a function based on the provided expression.","code":["var func = Function.of(funExpression)"]}, + + {"kind":"and","text":"We create 2 tensors storing the same values, one sliced and the other a normal tensor.","code":["var t1 = Tensor.of(type).withShape(2, 3).andSeed(\"Tempeh\")","var t2 = Tensor.of(type).withShape(4, 5).all(0)[1..2, 1..3]","t2.mut[0..1, 0..2] = t1"]}, + + {"kind":"expect","text":"The types of both tensors should match what was provided during instantiation.","code":["t1.dataType == DataType.of(type)","t1.itemType == type","t2.dataType == DataType.of(type)","t2.itemType == type"]}, + + {"kind":"when","text":"We apply the function to both tensors...","code":["var result1 = func(t1)","var result2 = func(t2)"]}, + + {"kind":"then","text":"","code":["result1.itemType == type","result2.itemType == type"]}, + + {"kind":"and","text":"The data of the first (non slice) tensor should be as expected.","code":["result1.mut.data.get() == expected instanceof Map ? expected['r1'] : expected"]}, + + {"kind":"and","text":"As well the value of the slice tensor (Its data would be a sparse array).","code":["result2.items == expected instanceof Map ? 
expected['r2'] : expected"]}, + + {"kind":"where","text":"","code":{"type":["Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float","Integer","Double","Float"],"funExpression":["'tanh(i0)'","'tanh(i0)'","'tanh(i0)'","'relu(i0)'","'relu(i0)'","'relu(i0)'","'relu(i0*i0)'","'relu(i0*i0)'","'relu(i0*i0)'","'relu(i0-i0)'","'relu(i0-i0)'","'relu(i0-i0)'","'relu(i0)-i0'","'relu(i0)-i0'","'relu(i0)-i0'","'relu(-i0)+i0'","'relu(-i0)+i0'","'relu(-i0)+i0'","'relu(-i0)/i0'","'relu(-i0)/i0'","'relu(-i0)/i0'","'relu(-i0-5)+i0*3'","'relu(-i0-5)+i0*3'","'relu(-i0-5)+i0*3'","'abs(i0*10)%3'","'abs(i0*10)%3'","'abs(i0*10)%3'","'gaus(i0)*100%i0'","'gaus(i0)*100%i0'","'gaus(i0)*100%i0'","'random(i0)'","'random(i0)'"],"expected":["[-0.2608431635405718, -0.6400224689534015, -0.15255723053856546, 0.1566537867655921, 0.5489211983894932, -0.17031712209680225] as double[]","[-0.26084316, -0.64002246, -0.15255724, 0.15665378, 0.54892117, -0.17031713] as float[]","[-1, -1, 1, -1, 1, -1] as int[]","[-0.0027019706408068795, -0.008329762613111082, -0.001543641184315801, 0.15861207834235577, 0.6567031992927272, -0.001728424711189524] as double[]","[-0.0027019705, -0.008329763, -0.0015436412, 0.15861207, 0.6567032, -0.0017284247] as float[]","[-7156386, -18495716, 248181051, -13634228, 919305478, -15169971] as int[]","[0.07300645343782339, 0.6938494519078316, 0.023828281059158886, 0.025157791396081604, 0.43125909196130346, 0.029874519822505895] as double[]","[0.07300645, 0.6938495, 0.023828283, 0.025157789, 0.43125907, 0.02987452] as float[]","[988699588, -17870520, 141304729, 1260971300, 210951204, 1018550276] as int[]","[0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as double[]","[0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as float[]","[0, 0, 0, 0, 0, 0] as int[]","[0.26749509343988104, 0.8246464986979971, 0.1528204772472643, 0.0, 0.0, 0.17111404640776287] as double[]","[0.2674951, 0.82464653, 0.15282048, 0.0, 0.0, 0.17111404] as float[]","[708482220, 1831075919, 0, 1349788550, 0, 1501827147] as int[]","[0.0, 0.0, 0.0, 0.15702595755893223, 0.6501361672998, 0.0] as double[]","[0.0, 0.0, 0.0, 0.15702595174312592, 0.650136142373085, 0.0] as float[]","[0, 0, 245699240, 0, 910112423, 0] as int[]","[-1.0, -1.0, -1.0, -0.01, -0.01, -1.0] as double[]","[-1.0, -1.0, -1.0, -0.01, -0.01, -1.0] as float[]","[-1, -1, 0, -1, 0, -1] as int[]","[-0.857889221601257, -2.540599021320214, -0.5115487141104245, 0.42425011424364373, 1.9135425658852545, -0.5667989886456676] as double[]","[-0.85788924, -2.540599, -0.51154876, 0.4242501, 1.9135424, -0.566799] as float[]","[-1431277217, 595824021, 742061342, 1568121735, -1546243917, 1260973055] as int[]","[2.7019706408068793, 2.3297626131110825, 1.5436411843158009, 1.5861207834235578, 0.5670319929272729, 1.7284247111895241] as double[]","[2.7019706, 2.3297625, 1.5436412, 1.5861207, 0.56703186, 1.7284248] as float[]","[2, 0, 1, 1, 2, 1] as int[]","[0.011693048642643422, 0.8192993419907051, 0.08721424132459399, 0.1277867634692304, 0.6121424058303924, 0.09210498892686086] as double[]","[0.011690378, 0.81929654, 0.087213635, 0.12778962, 0.6121441, 0.09210494] as float[]","[0, 0, 0, 0, 0, 0] as int[]","['r2':[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.242396559265774, 0.23980663860290638, 0.4667980401594514, 0.0, 0.0, -1.0840395336123059, 0.43090823203242123, 1.0381081218392283, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as double[], 
'r1':[2.242396559265774, 0.23980663860290638, 0.4667980401594514, -1.0840395336123059, 0.43090823203242123, 1.0381081218392283] as double[]]","['r2':[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.2423966, 0.23980664, 0.46679804, 0.0, 0.0, -1.0840396, 0.43090823, 1.0381081, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] as float[], 'r1':[2.2423966, 0.23980664, 0.46679804, -1.0840396, 0.43090823, 1.0381081] as float[]]"]}} ], "problems":{"dataValues":[], "errors":[]} }, { - "id":"Activation functions work across types on slices and non sliced tensors.", + "id":"Activation functions work across types on slices and non sliced tensors. [31]", "result":"PASS", - "duration":"0.946 seconds", + "duration":"0.052 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -393,7 +4429,7 @@ { "id":"The transpose operation exposed by the \"T()\" method, supports autograd.", "result":"PASS", - "duration":"0.034 seconds", + "duration":"0.052 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, diff --git a/docs/spock/reports/ut.tensors.Tensor_Slicing_Spec.json b/docs/spock/reports/ut.tensors.Tensor_Slicing_Spec.json index 142179be8..7f8f6bca7 100644 --- a/docs/spock/reports/ut.tensors.Tensor_Slicing_Spec.json +++ b/docs/spock/reports/ut.tensors.Tensor_Slicing_Spec.json @@ -4,17 +4,17 @@ "narrative":"ND-Array data structures can be \"sliced\" in the sense\n that one can create a subset view of the underlying data inside a tensor\n through a new tensor instance...\n This can be a tedious and complicated procedure.\n Therefore a tensor should expose a various user friendly API for slicing which\n are also fit for various languages.\n This specification covers these APIs for tensor slicing.", "subjects":["neureka.Tensor"], "statistics":{ - "runs":"9", + "runs":"14", "successRate":"100.0%", "failures":"0", "errors":"0", "skipped":"0", - "duration":"0.013 seconds" + "duration":"0.036 seconds" }, "headers":["\n This specification covers the behavior of tensors when being sliced\n on multiple different device types using the slice builder API. \n "],"tags":{},"see":[], "features":[ { - "id":"When Slicing only one axis using the SliceBuilder API, the other axes will be sliced implicitly.", + "id":"When Slicing only one axis using the SliceBuilder API, the other axes will be sliced implicitly. [0]", "result":"PASS", "duration":"0.001 seconds", "iterations":{ @@ -43,7 +43,36 @@ }, { - "id":"The \"at\" method and the \"from\" / \"to\" methods can be mixed when slicing a tensor.", + "id":"When Slicing only one axis using the SliceBuilder API, the other axes will be sliced implicitly. 
[1]", + "result":"PASS", + "duration":"0.003 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A device could be found.","code":["if ( device == null ) return"]}, + + {"kind":"and","text":"The found device is also supported (Which might not always be the case for the OpenCLDevice).","code":["if ( device instanceof OpenCLDevice && !Neureka.get().canAccessOpenCLDevice() ) return"]}, + + {"kind":"and","text":"A 3 dimensional tensor which will be sliced.","code":["Tensor t = Tensor.of([2, 4, 3], -3d..7d)"]}, + + {"kind":"and","text":"Which will be placed on a given device:","code":["t.to(device)"]}, + + {"kind":"when","text":"Slicing axis 1 of the tensor using the \"from\" & \"to\" methods...","code":["Tensor s = t.slice()"," .axis(1).from(1).to(2)"," .get() // Note: Axis 0 and 2 will be sliced implicitly if not specified!"]}, + + {"kind":"then","text":"This will result in a slice which has 4 axis entries less than the original tensor.","code":["s.shape().sum() == t.shape().sum() - 2"]}, + + {"kind":"and","text":"This new slice will be displayed as follows when printed (with adjusted indent):","code":["s.toString().replace('\\n', '\\n'+\" \"*20) =="," \"\"\"(2x2x3):["," ["," [ 0.0 , 1.0 , 2.0 ],"," [ 3.0 , 4.0 , 5.0 ]"," ],"," ["," [ 1.0 , 2.0 , 3.0 ],"," [ 4.0 , 5.0 , 6.0 ]"," ]"," ]\"\"\""]}, + + {"kind":"and","text":"As already shown by the printed view, the tensor as the expected shape:","code":["s.shape() == [2, 2, 3]"]}, + + {"kind":"where","text":"This works both on the GPU as well as CPU of course.","code":{"device":["Device.get('gpu')","CPU.get()"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The \"at\" method and the \"from\" / \"to\" methods can be mixed when slicing a tensor. [0]", "result":"PASS", "duration":"0", "iterations":{ @@ -72,7 +101,36 @@ }, { - "id":"A tensor can be sliced by passing ranges in the form of primitive arrays.", + "id":"The \"at\" method and the \"from\" / \"to\" methods can be mixed when slicing a tensor. 
[1]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A device could be found.","code":["if ( device == null ) return"]}, + + {"kind":"and","text":"The found device is also supported (Which might not always be the case for the OpenCLDevice).","code":["if ( device instanceof OpenCLDevice && !Neureka.get().canAccessOpenCLDevice() ) return"]}, + + {"kind":"and","text":"A 3 dimensional tensor which will be sliced.","code":["Tensor t = Tensor.of([3, 3, 4], -11d..3d)"]}, + + {"kind":"and","text":"Which will be placed on a given device:","code":["t.to(device)"]}, + + {"kind":"when","text":"Slicing the tensor using both \"at\", \"from\"/\"to\" and an implicit full ranged slice for axis 1...","code":["Tensor s = t.slice()"," .axis(0).at(1)"," // Note: Axis 1 will be sliced implicitly if not specified!"," .axis(2).from(1).to(2)"," .get()"]}, + + {"kind":"then","text":"This will result in a slice which has 4 axis entries less than the original tensor.","code":["s.shape().sum() == t.shape().sum() - 4"]}, + + {"kind":"and","text":"This new slice will be displayed as follows when printed (with adjusted indent):","code":["s.toString().replace('\\n', '\\n'+\" \"*20) =="," \"\"\"(1x3x2):["," ["," [ 2.0 , 3.0 ],"," [ -9.0 , -8.0 ],"," [ -5.0 , -4.0 ]"," ]"," ]\"\"\""]}, + + {"kind":"and","text":"The \"at\" method sliced a single axis point whereas the \"from\" & \"to\" sliced from 1 to 2.","code":["s.shape() == [1, 3, 2]"]}, + + {"kind":"where","text":"This works both on the GPU as well as CPU of course.","code":{"device":["Device.get('gpu')","CPU.get()"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"A tensor can be sliced by passing ranges in the form of primitive arrays. [0]", "result":"PASS", "duration":"0", "iterations":{ @@ -101,7 +159,36 @@ }, { - "id":"A tensor can be sliced by passing ranges in the form of lists (Groovy ranges).", + "id":"A tensor can be sliced by passing ranges in the form of primitive arrays. 
[1]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A device could be found.","code":["if ( device == null ) return"]}, + + {"kind":"and","text":"The found device is also supported (Which might not always be the case for the OpenCLDevice).","code":["if ( device instanceof OpenCLDevice && !Neureka.get().canAccessOpenCLDevice() ) return"]}, + + {"kind":"and","text":"A 3 dimensional tensor which will be sliced.","code":["Tensor t = Tensor.of([3, 3, 4], -11..3)"]}, + + {"kind":"and","text":"Which will be placed on a given device:","code":["t.to(device)"]}, + + {"kind":"when","text":"Slicing the tensor using primitive int arrays...","code":["var s = t.getAt("," new int[]{1}, // Axis 0"," new int[]{0, 2}, // Axis 1"," new int[]{1, 2} // Axis 2"," )"]}, + + {"kind":"then","text":"This will result in a slice which has 4 axis entries less than the original tensor.","code":["s.shape().sum() == t.shape().sum() - 4"]}, + + {"kind":"and","text":"This new slice will have the expected shape and items:","code":["s.shape == [1, 3, 2]","s.items == [2, 3, -9, -8, -5, -4]"]}, + + {"kind":"and","text":"The the slice will have the following shape","code":["s.shape() == [1, 3, 2]"]}, + + {"kind":"where","text":"This works both on the GPU as well as CPU of course.","code":{"device":["Device.get('gpu')","CPU.get()"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"A tensor can be sliced by passing ranges in the form of lists (Groovy ranges). [0]", "result":"PASS", "duration":"0", "iterations":{ @@ -128,9 +215,36 @@ }, { - "id":"The slice builder also supports slicing with custom step sizes.", + "id":"A tensor can be sliced by passing ranges in the form of lists (Groovy ranges). [1]", "result":"PASS", - "duration":"0.001 seconds", + "duration":"0.002 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A device could be found.","code":["if ( device == null ) return"]}, + + {"kind":"and","text":"The found device is also supported (Which might not always be the case for the OpenCLDevice).","code":["if ( device instanceof OpenCLDevice && !Neureka.get().canAccessOpenCLDevice() ) return"]}, + + {"kind":"and","text":"A 3 dimensional tensor which will be sliced.","code":["var t = Tensor.of([3, 3, 4], -11..3)"]}, + + {"kind":"and","text":"Which will be placed on a given device:","code":["t.to(device)"]}, + + {"kind":"when","text":"Slicing the tensor using lists of integers generated by the Groovy range operator..","code":["var s = t[1, 0..2, 1..2]"]}, + + {"kind":"then","text":"This will result in a slice which has 4 axis entries less than the original tensor.","code":["s.shape().sum() == t.shape().sum() - 4"]}, + + {"kind":"and","text":"This new slice will have the expected shape and items:","code":["s.shape == [1, 3, 2]","s.items == [2, 3, -9, -8, -5, -4]"]}, + + {"kind":"where","text":"This works both on the GPU as well as CPU of course.","code":{"device":["Device.get('gpu')","CPU.get()"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The slice builder also supports slicing with custom step sizes. [0]", + "result":"PASS", + "duration":"0", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -157,12 +271,41 @@ }, { - "id":"Slicing is also a Function with autograd support!", + "id":"The slice builder also supports slicing with custom step sizes. 
[1]", "result":"PASS", "duration":"0.002 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, + "blocks":[ + {"kind":"given","text":"A device could be found.","code":["if ( device == null ) return"]}, + + {"kind":"and","text":"The found device is also supported (Which might not always be the case for the OpenCLDevice).","code":["if ( device instanceof OpenCLDevice && !Neureka.get().canAccessOpenCLDevice() ) return"]}, + + {"kind":"and","text":"A 3 dimensional tensor which will be sliced.","code":["Tensor t = Tensor.of([3, 3, 4], -11d..3d)"]}, + + {"kind":"and","text":"Which will be placed on a given device:","code":["t.to(device)"]}, + + {"kind":"when","text":"Slicing the tensor using lists of integers generated by the Groovy range operator..","code":["Tensor s = t.slice()"," .axis(0).at(0)"," .axis(1).at(0)"," .axis(2).from(0).to(3).step(2)"," .get()"]}, + + {"kind":"then","text":"This will result in a slice which has 4 axis entries less than the original tensor.","code":["s.shape().sum() == t.shape().sum() - 6"]}, + + {"kind":"and","text":"This new slice will be displayed as follows when printed:","code":["s.toString() == \"(1x1x2):[\\n\" +"," \" [\\n\" +"," \" [ -11.0, -9.0 ]\\n\" +"," \" ]\\n\" +"," \"]\""]}, + + {"kind":"and","text":"The the slice will have the following shape","code":["s.shape() == [1, 1, 2]"]}, + + {"kind":"where","text":"This works both on the GPU as well as CPU of course.","code":{"device":["Device.get('gpu')","CPU.get()"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Slicing is also a Function with autograd support!", + "result":"PASS", + "duration":"0.006 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, "blocks":[ {"kind":"given","text":"A 2 dimensional tensor requiring gradients.","code":["var t = Tensor.ofBytes().withShape(4, 4).andFill(-1, 7, 3).setRqsGradient(true)"]}, @@ -182,7 +325,7 @@ { "id":"Normal slicing will try to do autograd.", "result":"PASS", - "duration":"0", + "duration":"0.001 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -201,7 +344,7 @@ { "id":"We can avoid autograd when slicing by using the \"detached\" instead of the \"get\" method.", "result":"PASS", - "duration":"0", + "duration":"0.002 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -222,7 +365,7 @@ { "id":"We can slice a scalar tensor from a larger tensor of rank 4.", "result":"PASS", - "duration":"0", + "duration":"0.002 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, diff --git a/docs/spock/reports/ut.tensors.Tensor_State_Spec.json b/docs/spock/reports/ut.tensors.Tensor_State_Spec.json index da37ab732..271ccb116 100644 --- a/docs/spock/reports/ut.tensors.Tensor_State_Spec.json +++ b/docs/spock/reports/ut.tensors.Tensor_State_Spec.json @@ -1,20 +1,88 @@ { "className":"ut.tensors.Tensor_State_Spec", "title":"The Tensor Initialization and State Specification", - "narrative":"This specification defines the expected states of freshly instantiated\n and initialized tensors.\n After a tensor was created successfully we expect it\n to have certain properties like a shape, rank, type and data array\n among other things.", + "narrative":"This specification defines the expected states of freshly instantiated\n and initialized tensors.\n After a tensor was created successfully we expect it \n to have certain properties like a shape, rank, type and data array\n among other things.", "subjects":[], "statistics":{ - "runs":"7", + "runs":"19", "successRate":"100.0%", "failures":"0", "errors":"0", 
"skipped":"0", - "duration":"0.016 seconds" + "duration":"0.052 seconds" }, "headers":[" \n Note: This specification is a little older, meaning initially it was not written with \n the intend to be read as living documentation, but rather as a unit test...\n "],"tags":{},"see":[], "features":[ { - "id":"A tensor can be instantiated from a item type class and nested lists.", + "id":"A tensor can be instantiated from a item type class and nested lists. [0]", + "result":"PASS", + "duration":"0.002 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We instantiate a tensor using a type and a list of things (or list of list of things, or..).","code":["var t = Tensor.of(type, list)"]}, + + {"kind":"expect","text":"The tensor has the item type, shape and data array!","code":["t.itemType == type","t.shape() == shape","t.mut.data.get() == expected"]}, + + {"kind":"where","text":"","code":{"type":["Double","Float","Byte","Byte","String"],"list":["[1,2,1]","[5, -4]","[3, 4]","[[3], [4]]","[['3', '4']]"],"shape":["[3]","[2]","[2]","[2, 1]","[1, 2]"],"expected":["[1, 2, 1] as double[]","[5, -4] as float[]","[3, 4] as byte[]","[3, 4] as byte[]","['3', '4'] as String[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"A tensor can be instantiated from a item type class and nested lists. [1]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We instantiate a tensor using a type and a list of things (or list of list of things, or..).","code":["var t = Tensor.of(type, list)"]}, + + {"kind":"expect","text":"The tensor has the item type, shape and data array!","code":["t.itemType == type","t.shape() == shape","t.mut.data.get() == expected"]}, + + {"kind":"where","text":"","code":{"type":["Double","Float","Byte","Byte","String"],"list":["[1,2,1]","[5, -4]","[3, 4]","[[3], [4]]","[['3', '4']]"],"shape":["[3]","[2]","[2]","[2, 1]","[1, 2]"],"expected":["[1, 2, 1] as double[]","[5, -4] as float[]","[3, 4] as byte[]","[3, 4] as byte[]","['3', '4'] as String[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"A tensor can be instantiated from a item type class and nested lists. [2]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We instantiate a tensor using a type and a list of things (or list of list of things, or..).","code":["var t = Tensor.of(type, list)"]}, + + {"kind":"expect","text":"The tensor has the item type, shape and data array!","code":["t.itemType == type","t.shape() == shape","t.mut.data.get() == expected"]}, + + {"kind":"where","text":"","code":{"type":["Double","Float","Byte","Byte","String"],"list":["[1,2,1]","[5, -4]","[3, 4]","[[3], [4]]","[['3', '4']]"],"shape":["[3]","[2]","[2]","[2, 1]","[1, 2]"],"expected":["[1, 2, 1] as double[]","[5, -4] as float[]","[3, 4] as byte[]","[3, 4] as byte[]","['3', '4'] as String[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"A tensor can be instantiated from a item type class and nested lists. 
[3]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We instantiate a tensor using a type and a list of things (or list of list of things, or..).","code":["var t = Tensor.of(type, list)"]}, + + {"kind":"expect","text":"The tensor has the item type, shape and data array!","code":["t.itemType == type","t.shape() == shape","t.mut.data.get() == expected"]}, + + {"kind":"where","text":"","code":{"type":["Double","Float","Byte","Byte","String"],"list":["[1,2,1]","[5, -4]","[3, 4]","[[3], [4]]","[['3', '4']]"],"shape":["[3]","[2]","[2]","[2, 1]","[1, 2]"],"expected":["[1, 2, 1] as double[]","[5, -4] as float[]","[3, 4] as byte[]","[3, 4] as byte[]","['3', '4'] as String[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"A tensor can be instantiated from a item type class and nested lists. [4]", "result":"PASS", "duration":"0", "iterations":{ @@ -33,7 +101,7 @@ { "id":"Tensors as String can be formatted on an entry based level.", "result":"PASS", - "duration":"0.001 seconds", + "duration":"0.005 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -54,7 +122,7 @@ { "id":"Numeric tensors as String can be formatted on an entry based level.", "result":"PASS", - "duration":"0", + "duration":"0.002 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -69,7 +137,88 @@ }, { - "id":"Tensors as String can be formatted depending on shape.", + "id":"Tensors as String can be formatted depending on shape. [0]", + "result":"PASS", + "duration":"0.010 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We configure a NDPrintSettings object.","code":["def settings ="," Neureka.get()"," .settings()"," .view()"," .getNDPrintSettings()"," .clone()"," .setRowLimit( mode.contains( \"s\" ) ? 3 : 32 )"," .setIsScientific( mode.contains( \"c\" ) )"," .setIsMultiline( mode.contains( \"f\" ) )"," .setHasGradient( mode.contains( \"g\" ) )"," .setCellSize( mode.contains( \"p\" ) ? 6 : mode.contains( \"f\" ) ? 
2 : 1 )"," .setHasValue( !(mode.contains( \"shp\" ) || mode.contains(\"shape\")) )"," .setHasRecursiveGraph( mode.contains( \"r\" ) )"," .setHasDerivatives( mode.contains( \"d\" ) )"," .setHasShape( !mode.contains( \"v\" ) )"," .setIsCellBound( mode .contains( \"b\" ) )"," .setPostfix( \"\" )"," .setPrefix( \"\" )"," .setHasSlimNumbers( false )"]}, + + {"kind":"and","text":"Four tensors of various data types:","code":["Tensor t1 = Tensor.of( Float.class, shape, -4f..5f ).set( Tensor.of( shape, -7f..3f ) )","Tensor t2 = Tensor.of( Double.class, shape, -4d..5d ).set( Tensor.of( shape, -7d..3d ) )","Tensor t3 = Tensor.of( Integer.class, shape, -4..5 ).set( Tensor.of( shape, -7..3 ) )","Tensor t4 = Tensor.of( Short.class, shape, (-4 as short)..(5 as short) ).set( Tensor.of( shape, (-7 as short)..(3 as short) ) )","Tensor t5 = Tensor.of( Byte.class, shape, (-4 as byte )..(5 as byte ) ).set( Tensor.of( shape, (-7 as byte)..(3 as byte) ) )"]}, + + {"kind":"expect","text":"The first tensor has the expected internals and produces the correct String representation.","code":["t1.toString(settings) == expected","t1.dataType == DataType.of( Float.class )","t1.mut.data.get() instanceof float[]"]}, + + {"kind":"and","text":"The second tensor has the expected internals and produces the correct String representation.","code":["t2.toString(settings) == expected","t2.dataType == DataType.of( Double.class )","t2.mut.data.get() instanceof double[]"]}, + + {"kind":"and","text":"The third tensor has the expected internals and produces the correct String representation.","code":["t3.toString(settings).replace(' ','') == expected.replace('.0',' ').replace(' ','')","t3.dataType == DataType.of( Integer.class )","t3.mut.data.get() instanceof int[]"]}, + + {"kind":"and","text":"The fourth tensor has the expected internals and produces the correct String representation.","code":["t4.toString(settings).replace(' ','') == expected.replace('.0',' ').replace(' ','')","t4.dataType == DataType.of( Short.class )","t4.mut.data.get() instanceof short[]"]}, + + {"kind":"and","text":"The fifth tensor has the expected internals and produces the correct String representation.","code":["t5.toString(settings).replace(' ','') == expected.replace('.0',' ').replace(' ','')","t5.dataType == DataType.of( Byte.class )","t5.mut.data.get() instanceof byte[]"]}, + + {"kind":"where","text":"The print configurations codes \"mode\", a common shape and expected String representation will be supplied:","code":{"shape":["[2,3]","[2,3]","[3,2]","[2,3,4]","[2,2,3,4]","[2, 70]","[2, 100]","[70, 2]"],"mode":["\"fap\"","\"fa\"","\"fp\"","\"fp\"","\"fp\"","\"f\"","\"f\"","\"f\""],"expected":["\"(2x3):[\\n [ -4.0 , -3.0 , -2.0 ],\\n [ -1.0 , 0.0 , 1.0 ]\\n]\"","\"(2x3):[\\n [ -4.0, -3.0, -2.0 ],\\n [ -1.0, 0.0, 1.0 ]\\n]\"","\"(3x2):[\\n [ -4.0 , -3.0 ],\\n [ -2.0 , -1.0 ],\\n [ 0.0 , 1.0 ]\\n]\"","\"(2x3x4):[\\n [\\n [ -4.0 , -3.0 , -2.0 , -1.0 ],\\n [ 0.0 , 1.0 , 2.0 , 3.0 ],\\n [ 4.0 , 5.0 , -4.0 , -3.0 ]\\n ],\\n [\\n [ -2.0 , -1.0 , 0.0 , 1.0 ],\\n [ 2.0 , 3.0 , 4.0 , 5.0 ],\\n [ -4.0 , -3.0 , -2.0 , -1.0 ]\\n ]\\n]\"","\"(2x2x3x4):[\\n [\\n [\\n [ -4.0 , -3.0 , -2.0 , -1.0 ],\\n [ 0.0 , 1.0 , 2.0 , 3.0 ],\\n [ 4.0 , 5.0 , -4.0 , -3.0 ]\\n ],\\n [\\n [ -2.0 , -1.0 , 0.0 , 1.0 ],\\n [ 2.0 , 3.0 , 4.0 , 5.0 ],\\n [ -4.0 , -3.0 , -2.0 , -1.0 ]\\n ]\\n ],\\n [\\n [\\n [ 0.0 , 1.0 , 2.0 , 3.0 ],\\n [ 4.0 , 5.0 , -4.0 , -3.0 ],\\n [ -2.0 , -1.0 , 0.0 , 1.0 ]\\n ],\\n [\\n [ 2.0 , 3.0 , 4.0 , 5.0 ],\\n [ -4.0 , -3.0 , -2.0 , -1.0 ],\\n [ 0.0 , 1.0 , 2.0 , 
3.0 ]\\n ]\\n ]\\n]\"","\"(2x70):[\\n [ -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, ..38 more.., 1.0, 2.0, 3.0, 4.0, 5.0, -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0 ],\\n [ -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, ..38 more.., 1.0, 2.0, 3.0, 4.0, 5.0, -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0 ]\\n]\"","\"(2x100):[\\n [ -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, ..68 more.., 1.0, 2.0, 3.0, 4.0, 5.0, -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0 ],\\n [ -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, ..68 more.., 1.0, 2.0, 3.0, 4.0, 5.0, -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0 ]\\n]\"","\"(70x2):[\\n [ -4.0, -3.0 ],\\n [ -2.0, -1.0 ],\\n [ 0.0, 1.0 ],\\n [ 2.0, 3.0 ],\\n [ 4.0, 5.0 ],\\n [ -4.0, -3.0 ],\\n [ -2.0, -1.0 ],\\n [ 0.0, 1.0 ],\\n [ 2.0, 3.0 ],\\n [ 4.0, 5.0 ],\\n [ -4.0, -3.0 ],\\n [ -2.0, -1.0 ],\\n [ 0.0, 1.0 ],\\n [ 2.0, 3.0 ],\\n [ 4.0, 5.0 ],\\n [ -4.0, -3.0 ],\\n ... 38 more ...\\n [ -4.0, -3.0 ],\\n [ -2.0, -1.0 ],\\n [ 0.0, 1.0 ],\\n [ 2.0, 3.0 ],\\n [ 4.0, 5.0 ],\\n [ -4.0, -3.0 ],\\n [ -2.0, -1.0 ],\\n [ 0.0, 1.0 ],\\n [ 2.0, 3.0 ],\\n [ 4.0, 5.0 ],\\n [ -4.0, -3.0 ],\\n [ -2.0, -1.0 ],\\n [ 0.0, 1.0 ],\\n [ 2.0, 3.0 ],\\n [ 4.0, 5.0 ]\\n]\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Tensors as String can be formatted depending on shape. [1]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We configure a NDPrintSettings object.","code":["def settings ="," Neureka.get()"," .settings()"," .view()"," .getNDPrintSettings()"," .clone()"," .setRowLimit( mode.contains( \"s\" ) ? 3 : 32 )"," .setIsScientific( mode.contains( \"c\" ) )"," .setIsMultiline( mode.contains( \"f\" ) )"," .setHasGradient( mode.contains( \"g\" ) )"," .setCellSize( mode.contains( \"p\" ) ? 6 : mode.contains( \"f\" ) ? 
2 : 1 )"," .setHasValue( !(mode.contains( \"shp\" ) || mode.contains(\"shape\")) )"," .setHasRecursiveGraph( mode.contains( \"r\" ) )"," .setHasDerivatives( mode.contains( \"d\" ) )"," .setHasShape( !mode.contains( \"v\" ) )"," .setIsCellBound( mode .contains( \"b\" ) )"," .setPostfix( \"\" )"," .setPrefix( \"\" )"," .setHasSlimNumbers( false )"]}, + + {"kind":"and","text":"Four tensors of various data types:","code":["Tensor t1 = Tensor.of( Float.class, shape, -4f..5f ).set( Tensor.of( shape, -7f..3f ) )","Tensor t2 = Tensor.of( Double.class, shape, -4d..5d ).set( Tensor.of( shape, -7d..3d ) )","Tensor t3 = Tensor.of( Integer.class, shape, -4..5 ).set( Tensor.of( shape, -7..3 ) )","Tensor t4 = Tensor.of( Short.class, shape, (-4 as short)..(5 as short) ).set( Tensor.of( shape, (-7 as short)..(3 as short) ) )","Tensor t5 = Tensor.of( Byte.class, shape, (-4 as byte )..(5 as byte ) ).set( Tensor.of( shape, (-7 as byte)..(3 as byte) ) )"]}, + + {"kind":"expect","text":"The first tensor has the expected internals and produces the correct String representation.","code":["t1.toString(settings) == expected","t1.dataType == DataType.of( Float.class )","t1.mut.data.get() instanceof float[]"]}, + + {"kind":"and","text":"The second tensor has the expected internals and produces the correct String representation.","code":["t2.toString(settings) == expected","t2.dataType == DataType.of( Double.class )","t2.mut.data.get() instanceof double[]"]}, + + {"kind":"and","text":"The third tensor has the expected internals and produces the correct String representation.","code":["t3.toString(settings).replace(' ','') == expected.replace('.0',' ').replace(' ','')","t3.dataType == DataType.of( Integer.class )","t3.mut.data.get() instanceof int[]"]}, + + {"kind":"and","text":"The fourth tensor has the expected internals and produces the correct String representation.","code":["t4.toString(settings).replace(' ','') == expected.replace('.0',' ').replace(' ','')","t4.dataType == DataType.of( Short.class )","t4.mut.data.get() instanceof short[]"]}, + + {"kind":"and","text":"The fifth tensor has the expected internals and produces the correct String representation.","code":["t5.toString(settings).replace(' ','') == expected.replace('.0',' ').replace(' ','')","t5.dataType == DataType.of( Byte.class )","t5.mut.data.get() instanceof byte[]"]}, + + {"kind":"where","text":"The print configurations codes \"mode\", a common shape and expected String representation will be supplied:","code":{"shape":["[2,3]","[2,3]","[3,2]","[2,3,4]","[2,2,3,4]","[2, 70]","[2, 100]","[70, 2]"],"mode":["\"fap\"","\"fa\"","\"fp\"","\"fp\"","\"fp\"","\"f\"","\"f\"","\"f\""],"expected":["\"(2x3):[\\n [ -4.0 , -3.0 , -2.0 ],\\n [ -1.0 , 0.0 , 1.0 ]\\n]\"","\"(2x3):[\\n [ -4.0, -3.0, -2.0 ],\\n [ -1.0, 0.0, 1.0 ]\\n]\"","\"(3x2):[\\n [ -4.0 , -3.0 ],\\n [ -2.0 , -1.0 ],\\n [ 0.0 , 1.0 ]\\n]\"","\"(2x3x4):[\\n [\\n [ -4.0 , -3.0 , -2.0 , -1.0 ],\\n [ 0.0 , 1.0 , 2.0 , 3.0 ],\\n [ 4.0 , 5.0 , -4.0 , -3.0 ]\\n ],\\n [\\n [ -2.0 , -1.0 , 0.0 , 1.0 ],\\n [ 2.0 , 3.0 , 4.0 , 5.0 ],\\n [ -4.0 , -3.0 , -2.0 , -1.0 ]\\n ]\\n]\"","\"(2x2x3x4):[\\n [\\n [\\n [ -4.0 , -3.0 , -2.0 , -1.0 ],\\n [ 0.0 , 1.0 , 2.0 , 3.0 ],\\n [ 4.0 , 5.0 , -4.0 , -3.0 ]\\n ],\\n [\\n [ -2.0 , -1.0 , 0.0 , 1.0 ],\\n [ 2.0 , 3.0 , 4.0 , 5.0 ],\\n [ -4.0 , -3.0 , -2.0 , -1.0 ]\\n ]\\n ],\\n [\\n [\\n [ 0.0 , 1.0 , 2.0 , 3.0 ],\\n [ 4.0 , 5.0 , -4.0 , -3.0 ],\\n [ -2.0 , -1.0 , 0.0 , 1.0 ]\\n ],\\n [\\n [ 2.0 , 3.0 , 4.0 , 5.0 ],\\n [ -4.0 , -3.0 , -2.0 , -1.0 ],\\n [ 0.0 , 1.0 , 2.0 , 
3.0 ]\\n ]\\n ]\\n]\"","\"(2x70):[\\n [ -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, ..38 more.., 1.0, 2.0, 3.0, 4.0, 5.0, -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0 ],\\n [ -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, ..38 more.., 1.0, 2.0, 3.0, 4.0, 5.0, -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0 ]\\n]\"","\"(2x100):[\\n [ -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, ..68 more.., 1.0, 2.0, 3.0, 4.0, 5.0, -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0 ],\\n [ -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, ..68 more.., 1.0, 2.0, 3.0, 4.0, 5.0, -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0 ]\\n]\"","\"(70x2):[\\n [ -4.0, -3.0 ],\\n [ -2.0, -1.0 ],\\n [ 0.0, 1.0 ],\\n [ 2.0, 3.0 ],\\n [ 4.0, 5.0 ],\\n [ -4.0, -3.0 ],\\n [ -2.0, -1.0 ],\\n [ 0.0, 1.0 ],\\n [ 2.0, 3.0 ],\\n [ 4.0, 5.0 ],\\n [ -4.0, -3.0 ],\\n [ -2.0, -1.0 ],\\n [ 0.0, 1.0 ],\\n [ 2.0, 3.0 ],\\n [ 4.0, 5.0 ],\\n [ -4.0, -3.0 ],\\n ... 38 more ...\\n [ -4.0, -3.0 ],\\n [ -2.0, -1.0 ],\\n [ 0.0, 1.0 ],\\n [ 2.0, 3.0 ],\\n [ 4.0, 5.0 ],\\n [ -4.0, -3.0 ],\\n [ -2.0, -1.0 ],\\n [ 0.0, 1.0 ],\\n [ 2.0, 3.0 ],\\n [ 4.0, 5.0 ],\\n [ -4.0, -3.0 ],\\n [ -2.0, -1.0 ],\\n [ 0.0, 1.0 ],\\n [ 2.0, 3.0 ],\\n [ 4.0, 5.0 ]\\n]\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Tensors as String can be formatted depending on shape. [2]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We configure a NDPrintSettings object.","code":["def settings ="," Neureka.get()"," .settings()"," .view()"," .getNDPrintSettings()"," .clone()"," .setRowLimit( mode.contains( \"s\" ) ? 3 : 32 )"," .setIsScientific( mode.contains( \"c\" ) )"," .setIsMultiline( mode.contains( \"f\" ) )"," .setHasGradient( mode.contains( \"g\" ) )"," .setCellSize( mode.contains( \"p\" ) ? 6 : mode.contains( \"f\" ) ? 
2 : 1 )"," .setHasValue( !(mode.contains( \"shp\" ) || mode.contains(\"shape\")) )"," .setHasRecursiveGraph( mode.contains( \"r\" ) )"," .setHasDerivatives( mode.contains( \"d\" ) )"," .setHasShape( !mode.contains( \"v\" ) )"," .setIsCellBound( mode .contains( \"b\" ) )"," .setPostfix( \"\" )"," .setPrefix( \"\" )"," .setHasSlimNumbers( false )"]}, + + {"kind":"and","text":"Four tensors of various data types:","code":["Tensor t1 = Tensor.of( Float.class, shape, -4f..5f ).set( Tensor.of( shape, -7f..3f ) )","Tensor t2 = Tensor.of( Double.class, shape, -4d..5d ).set( Tensor.of( shape, -7d..3d ) )","Tensor t3 = Tensor.of( Integer.class, shape, -4..5 ).set( Tensor.of( shape, -7..3 ) )","Tensor t4 = Tensor.of( Short.class, shape, (-4 as short)..(5 as short) ).set( Tensor.of( shape, (-7 as short)..(3 as short) ) )","Tensor t5 = Tensor.of( Byte.class, shape, (-4 as byte )..(5 as byte ) ).set( Tensor.of( shape, (-7 as byte)..(3 as byte) ) )"]}, + + {"kind":"expect","text":"The first tensor has the expected internals and produces the correct String representation.","code":["t1.toString(settings) == expected","t1.dataType == DataType.of( Float.class )","t1.mut.data.get() instanceof float[]"]}, + + {"kind":"and","text":"The second tensor has the expected internals and produces the correct String representation.","code":["t2.toString(settings) == expected","t2.dataType == DataType.of( Double.class )","t2.mut.data.get() instanceof double[]"]}, + + {"kind":"and","text":"The third tensor has the expected internals and produces the correct String representation.","code":["t3.toString(settings).replace(' ','') == expected.replace('.0',' ').replace(' ','')","t3.dataType == DataType.of( Integer.class )","t3.mut.data.get() instanceof int[]"]}, + + {"kind":"and","text":"The fourth tensor has the expected internals and produces the correct String representation.","code":["t4.toString(settings).replace(' ','') == expected.replace('.0',' ').replace(' ','')","t4.dataType == DataType.of( Short.class )","t4.mut.data.get() instanceof short[]"]}, + + {"kind":"and","text":"The fifth tensor has the expected internals and produces the correct String representation.","code":["t5.toString(settings).replace(' ','') == expected.replace('.0',' ').replace(' ','')","t5.dataType == DataType.of( Byte.class )","t5.mut.data.get() instanceof byte[]"]}, + + {"kind":"where","text":"The print configurations codes \"mode\", a common shape and expected String representation will be supplied:","code":{"shape":["[2,3]","[2,3]","[3,2]","[2,3,4]","[2,2,3,4]","[2, 70]","[2, 100]","[70, 2]"],"mode":["\"fap\"","\"fa\"","\"fp\"","\"fp\"","\"fp\"","\"f\"","\"f\"","\"f\""],"expected":["\"(2x3):[\\n [ -4.0 , -3.0 , -2.0 ],\\n [ -1.0 , 0.0 , 1.0 ]\\n]\"","\"(2x3):[\\n [ -4.0, -3.0, -2.0 ],\\n [ -1.0, 0.0, 1.0 ]\\n]\"","\"(3x2):[\\n [ -4.0 , -3.0 ],\\n [ -2.0 , -1.0 ],\\n [ 0.0 , 1.0 ]\\n]\"","\"(2x3x4):[\\n [\\n [ -4.0 , -3.0 , -2.0 , -1.0 ],\\n [ 0.0 , 1.0 , 2.0 , 3.0 ],\\n [ 4.0 , 5.0 , -4.0 , -3.0 ]\\n ],\\n [\\n [ -2.0 , -1.0 , 0.0 , 1.0 ],\\n [ 2.0 , 3.0 , 4.0 , 5.0 ],\\n [ -4.0 , -3.0 , -2.0 , -1.0 ]\\n ]\\n]\"","\"(2x2x3x4):[\\n [\\n [\\n [ -4.0 , -3.0 , -2.0 , -1.0 ],\\n [ 0.0 , 1.0 , 2.0 , 3.0 ],\\n [ 4.0 , 5.0 , -4.0 , -3.0 ]\\n ],\\n [\\n [ -2.0 , -1.0 , 0.0 , 1.0 ],\\n [ 2.0 , 3.0 , 4.0 , 5.0 ],\\n [ -4.0 , -3.0 , -2.0 , -1.0 ]\\n ]\\n ],\\n [\\n [\\n [ 0.0 , 1.0 , 2.0 , 3.0 ],\\n [ 4.0 , 5.0 , -4.0 , -3.0 ],\\n [ -2.0 , -1.0 , 0.0 , 1.0 ]\\n ],\\n [\\n [ 2.0 , 3.0 , 4.0 , 5.0 ],\\n [ -4.0 , -3.0 , -2.0 , -1.0 ],\\n [ 0.0 , 1.0 , 2.0 , 
3.0 ]\\n ]\\n ]\\n]\"","\"(2x70):[\\n [ -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, ..38 more.., 1.0, 2.0, 3.0, 4.0, 5.0, -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0 ],\\n [ -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, ..38 more.., 1.0, 2.0, 3.0, 4.0, 5.0, -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0 ]\\n]\"","\"(2x100):[\\n [ -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, ..68 more.., 1.0, 2.0, 3.0, 4.0, 5.0, -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0 ],\\n [ -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, ..68 more.., 1.0, 2.0, 3.0, 4.0, 5.0, -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0 ]\\n]\"","\"(70x2):[\\n [ -4.0, -3.0 ],\\n [ -2.0, -1.0 ],\\n [ 0.0, 1.0 ],\\n [ 2.0, 3.0 ],\\n [ 4.0, 5.0 ],\\n [ -4.0, -3.0 ],\\n [ -2.0, -1.0 ],\\n [ 0.0, 1.0 ],\\n [ 2.0, 3.0 ],\\n [ 4.0, 5.0 ],\\n [ -4.0, -3.0 ],\\n [ -2.0, -1.0 ],\\n [ 0.0, 1.0 ],\\n [ 2.0, 3.0 ],\\n [ 4.0, 5.0 ],\\n [ -4.0, -3.0 ],\\n ... 38 more ...\\n [ -4.0, -3.0 ],\\n [ -2.0, -1.0 ],\\n [ 0.0, 1.0 ],\\n [ 2.0, 3.0 ],\\n [ 4.0, 5.0 ],\\n [ -4.0, -3.0 ],\\n [ -2.0, -1.0 ],\\n [ 0.0, 1.0 ],\\n [ 2.0, 3.0 ],\\n [ 4.0, 5.0 ],\\n [ -4.0, -3.0 ],\\n [ -2.0, -1.0 ],\\n [ 0.0, 1.0 ],\\n [ 2.0, 3.0 ],\\n [ 4.0, 5.0 ]\\n]\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Tensors as String can be formatted depending on shape. [3]", "result":"PASS", "duration":"0.002 seconds", "iterations":{ @@ -95,10 +244,118 @@ "problems":{"dataValues":[], "errors":[]} }, + { + "id":"Tensors as String can be formatted depending on shape. [4]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We configure a NDPrintSettings object.","code":["def settings ="," Neureka.get()"," .settings()"," .view()"," .getNDPrintSettings()"," .clone()"," .setRowLimit( mode.contains( \"s\" ) ? 3 : 32 )"," .setIsScientific( mode.contains( \"c\" ) )"," .setIsMultiline( mode.contains( \"f\" ) )"," .setHasGradient( mode.contains( \"g\" ) )"," .setCellSize( mode.contains( \"p\" ) ? 6 : mode.contains( \"f\" ) ? 
2 : 1 )"," .setHasValue( !(mode.contains( \"shp\" ) || mode.contains(\"shape\")) )"," .setHasRecursiveGraph( mode.contains( \"r\" ) )"," .setHasDerivatives( mode.contains( \"d\" ) )"," .setHasShape( !mode.contains( \"v\" ) )"," .setIsCellBound( mode .contains( \"b\" ) )"," .setPostfix( \"\" )"," .setPrefix( \"\" )"," .setHasSlimNumbers( false )"]}, + + {"kind":"and","text":"Four tensors of various data types:","code":["Tensor t1 = Tensor.of( Float.class, shape, -4f..5f ).set( Tensor.of( shape, -7f..3f ) )","Tensor t2 = Tensor.of( Double.class, shape, -4d..5d ).set( Tensor.of( shape, -7d..3d ) )","Tensor t3 = Tensor.of( Integer.class, shape, -4..5 ).set( Tensor.of( shape, -7..3 ) )","Tensor t4 = Tensor.of( Short.class, shape, (-4 as short)..(5 as short) ).set( Tensor.of( shape, (-7 as short)..(3 as short) ) )","Tensor t5 = Tensor.of( Byte.class, shape, (-4 as byte )..(5 as byte ) ).set( Tensor.of( shape, (-7 as byte)..(3 as byte) ) )"]}, + + {"kind":"expect","text":"The first tensor has the expected internals and produces the correct String representation.","code":["t1.toString(settings) == expected","t1.dataType == DataType.of( Float.class )","t1.mut.data.get() instanceof float[]"]}, + + {"kind":"and","text":"The second tensor has the expected internals and produces the correct String representation.","code":["t2.toString(settings) == expected","t2.dataType == DataType.of( Double.class )","t2.mut.data.get() instanceof double[]"]}, + + {"kind":"and","text":"The third tensor has the expected internals and produces the correct String representation.","code":["t3.toString(settings).replace(' ','') == expected.replace('.0',' ').replace(' ','')","t3.dataType == DataType.of( Integer.class )","t3.mut.data.get() instanceof int[]"]}, + + {"kind":"and","text":"The fourth tensor has the expected internals and produces the correct String representation.","code":["t4.toString(settings).replace(' ','') == expected.replace('.0',' ').replace(' ','')","t4.dataType == DataType.of( Short.class )","t4.mut.data.get() instanceof short[]"]}, + + {"kind":"and","text":"The fifth tensor has the expected internals and produces the correct String representation.","code":["t5.toString(settings).replace(' ','') == expected.replace('.0',' ').replace(' ','')","t5.dataType == DataType.of( Byte.class )","t5.mut.data.get() instanceof byte[]"]}, + + {"kind":"where","text":"The print configurations codes \"mode\", a common shape and expected String representation will be supplied:","code":{"shape":["[2,3]","[2,3]","[3,2]","[2,3,4]","[2,2,3,4]","[2, 70]","[2, 100]","[70, 2]"],"mode":["\"fap\"","\"fa\"","\"fp\"","\"fp\"","\"fp\"","\"f\"","\"f\"","\"f\""],"expected":["\"(2x3):[\\n [ -4.0 , -3.0 , -2.0 ],\\n [ -1.0 , 0.0 , 1.0 ]\\n]\"","\"(2x3):[\\n [ -4.0, -3.0, -2.0 ],\\n [ -1.0, 0.0, 1.0 ]\\n]\"","\"(3x2):[\\n [ -4.0 , -3.0 ],\\n [ -2.0 , -1.0 ],\\n [ 0.0 , 1.0 ]\\n]\"","\"(2x3x4):[\\n [\\n [ -4.0 , -3.0 , -2.0 , -1.0 ],\\n [ 0.0 , 1.0 , 2.0 , 3.0 ],\\n [ 4.0 , 5.0 , -4.0 , -3.0 ]\\n ],\\n [\\n [ -2.0 , -1.0 , 0.0 , 1.0 ],\\n [ 2.0 , 3.0 , 4.0 , 5.0 ],\\n [ -4.0 , -3.0 , -2.0 , -1.0 ]\\n ]\\n]\"","\"(2x2x3x4):[\\n [\\n [\\n [ -4.0 , -3.0 , -2.0 , -1.0 ],\\n [ 0.0 , 1.0 , 2.0 , 3.0 ],\\n [ 4.0 , 5.0 , -4.0 , -3.0 ]\\n ],\\n [\\n [ -2.0 , -1.0 , 0.0 , 1.0 ],\\n [ 2.0 , 3.0 , 4.0 , 5.0 ],\\n [ -4.0 , -3.0 , -2.0 , -1.0 ]\\n ]\\n ],\\n [\\n [\\n [ 0.0 , 1.0 , 2.0 , 3.0 ],\\n [ 4.0 , 5.0 , -4.0 , -3.0 ],\\n [ -2.0 , -1.0 , 0.0 , 1.0 ]\\n ],\\n [\\n [ 2.0 , 3.0 , 4.0 , 5.0 ],\\n [ -4.0 , -3.0 , -2.0 , -1.0 ],\\n [ 0.0 , 1.0 , 2.0 , 
3.0 ]\\n ]\\n ]\\n]\"","\"(2x70):[\\n [ -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, ..38 more.., 1.0, 2.0, 3.0, 4.0, 5.0, -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0 ],\\n [ -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, ..38 more.., 1.0, 2.0, 3.0, 4.0, 5.0, -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0 ]\\n]\"","\"(2x100):[\\n [ -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, ..68 more.., 1.0, 2.0, 3.0, 4.0, 5.0, -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0 ],\\n [ -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, ..68 more.., 1.0, 2.0, 3.0, 4.0, 5.0, -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0 ]\\n]\"","\"(70x2):[\\n [ -4.0, -3.0 ],\\n [ -2.0, -1.0 ],\\n [ 0.0, 1.0 ],\\n [ 2.0, 3.0 ],\\n [ 4.0, 5.0 ],\\n [ -4.0, -3.0 ],\\n [ -2.0, -1.0 ],\\n [ 0.0, 1.0 ],\\n [ 2.0, 3.0 ],\\n [ 4.0, 5.0 ],\\n [ -4.0, -3.0 ],\\n [ -2.0, -1.0 ],\\n [ 0.0, 1.0 ],\\n [ 2.0, 3.0 ],\\n [ 4.0, 5.0 ],\\n [ -4.0, -3.0 ],\\n ... 38 more ...\\n [ -4.0, -3.0 ],\\n [ -2.0, -1.0 ],\\n [ 0.0, 1.0 ],\\n [ 2.0, 3.0 ],\\n [ 4.0, 5.0 ],\\n [ -4.0, -3.0 ],\\n [ -2.0, -1.0 ],\\n [ 0.0, 1.0 ],\\n [ 2.0, 3.0 ],\\n [ 4.0, 5.0 ],\\n [ -4.0, -3.0 ],\\n [ -2.0, -1.0 ],\\n [ 0.0, 1.0 ],\\n [ 2.0, 3.0 ],\\n [ 4.0, 5.0 ]\\n]\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Tensors as String can be formatted depending on shape. [5]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We configure a NDPrintSettings object.","code":["def settings ="," Neureka.get()"," .settings()"," .view()"," .getNDPrintSettings()"," .clone()"," .setRowLimit( mode.contains( \"s\" ) ? 3 : 32 )"," .setIsScientific( mode.contains( \"c\" ) )"," .setIsMultiline( mode.contains( \"f\" ) )"," .setHasGradient( mode.contains( \"g\" ) )"," .setCellSize( mode.contains( \"p\" ) ? 6 : mode.contains( \"f\" ) ? 
2 : 1 )"," .setHasValue( !(mode.contains( \"shp\" ) || mode.contains(\"shape\")) )"," .setHasRecursiveGraph( mode.contains( \"r\" ) )"," .setHasDerivatives( mode.contains( \"d\" ) )"," .setHasShape( !mode.contains( \"v\" ) )"," .setIsCellBound( mode .contains( \"b\" ) )"," .setPostfix( \"\" )"," .setPrefix( \"\" )"," .setHasSlimNumbers( false )"]}, + + {"kind":"and","text":"Four tensors of various data types:","code":["Tensor t1 = Tensor.of( Float.class, shape, -4f..5f ).set( Tensor.of( shape, -7f..3f ) )","Tensor t2 = Tensor.of( Double.class, shape, -4d..5d ).set( Tensor.of( shape, -7d..3d ) )","Tensor t3 = Tensor.of( Integer.class, shape, -4..5 ).set( Tensor.of( shape, -7..3 ) )","Tensor t4 = Tensor.of( Short.class, shape, (-4 as short)..(5 as short) ).set( Tensor.of( shape, (-7 as short)..(3 as short) ) )","Tensor t5 = Tensor.of( Byte.class, shape, (-4 as byte )..(5 as byte ) ).set( Tensor.of( shape, (-7 as byte)..(3 as byte) ) )"]}, + + {"kind":"expect","text":"The first tensor has the expected internals and produces the correct String representation.","code":["t1.toString(settings) == expected","t1.dataType == DataType.of( Float.class )","t1.mut.data.get() instanceof float[]"]}, + + {"kind":"and","text":"The second tensor has the expected internals and produces the correct String representation.","code":["t2.toString(settings) == expected","t2.dataType == DataType.of( Double.class )","t2.mut.data.get() instanceof double[]"]}, + + {"kind":"and","text":"The third tensor has the expected internals and produces the correct String representation.","code":["t3.toString(settings).replace(' ','') == expected.replace('.0',' ').replace(' ','')","t3.dataType == DataType.of( Integer.class )","t3.mut.data.get() instanceof int[]"]}, + + {"kind":"and","text":"The fourth tensor has the expected internals and produces the correct String representation.","code":["t4.toString(settings).replace(' ','') == expected.replace('.0',' ').replace(' ','')","t4.dataType == DataType.of( Short.class )","t4.mut.data.get() instanceof short[]"]}, + + {"kind":"and","text":"The fifth tensor has the expected internals and produces the correct String representation.","code":["t5.toString(settings).replace(' ','') == expected.replace('.0',' ').replace(' ','')","t5.dataType == DataType.of( Byte.class )","t5.mut.data.get() instanceof byte[]"]}, + + {"kind":"where","text":"The print configurations codes \"mode\", a common shape and expected String representation will be supplied:","code":{"shape":["[2,3]","[2,3]","[3,2]","[2,3,4]","[2,2,3,4]","[2, 70]","[2, 100]","[70, 2]"],"mode":["\"fap\"","\"fa\"","\"fp\"","\"fp\"","\"fp\"","\"f\"","\"f\"","\"f\""],"expected":["\"(2x3):[\\n [ -4.0 , -3.0 , -2.0 ],\\n [ -1.0 , 0.0 , 1.0 ]\\n]\"","\"(2x3):[\\n [ -4.0, -3.0, -2.0 ],\\n [ -1.0, 0.0, 1.0 ]\\n]\"","\"(3x2):[\\n [ -4.0 , -3.0 ],\\n [ -2.0 , -1.0 ],\\n [ 0.0 , 1.0 ]\\n]\"","\"(2x3x4):[\\n [\\n [ -4.0 , -3.0 , -2.0 , -1.0 ],\\n [ 0.0 , 1.0 , 2.0 , 3.0 ],\\n [ 4.0 , 5.0 , -4.0 , -3.0 ]\\n ],\\n [\\n [ -2.0 , -1.0 , 0.0 , 1.0 ],\\n [ 2.0 , 3.0 , 4.0 , 5.0 ],\\n [ -4.0 , -3.0 , -2.0 , -1.0 ]\\n ]\\n]\"","\"(2x2x3x4):[\\n [\\n [\\n [ -4.0 , -3.0 , -2.0 , -1.0 ],\\n [ 0.0 , 1.0 , 2.0 , 3.0 ],\\n [ 4.0 , 5.0 , -4.0 , -3.0 ]\\n ],\\n [\\n [ -2.0 , -1.0 , 0.0 , 1.0 ],\\n [ 2.0 , 3.0 , 4.0 , 5.0 ],\\n [ -4.0 , -3.0 , -2.0 , -1.0 ]\\n ]\\n ],\\n [\\n [\\n [ 0.0 , 1.0 , 2.0 , 3.0 ],\\n [ 4.0 , 5.0 , -4.0 , -3.0 ],\\n [ -2.0 , -1.0 , 0.0 , 1.0 ]\\n ],\\n [\\n [ 2.0 , 3.0 , 4.0 , 5.0 ],\\n [ -4.0 , -3.0 , -2.0 , -1.0 ],\\n [ 0.0 , 1.0 , 2.0 , 
3.0 ]\\n ]\\n ]\\n]\"","\"(2x70):[\\n [ -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, ..38 more.., 1.0, 2.0, 3.0, 4.0, 5.0, -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0 ],\\n [ -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, ..38 more.., 1.0, 2.0, 3.0, 4.0, 5.0, -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0 ]\\n]\"","\"(2x100):[\\n [ -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, ..68 more.., 1.0, 2.0, 3.0, 4.0, 5.0, -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0 ],\\n [ -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, ..68 more.., 1.0, 2.0, 3.0, 4.0, 5.0, -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0 ]\\n]\"","\"(70x2):[\\n [ -4.0, -3.0 ],\\n [ -2.0, -1.0 ],\\n [ 0.0, 1.0 ],\\n [ 2.0, 3.0 ],\\n [ 4.0, 5.0 ],\\n [ -4.0, -3.0 ],\\n [ -2.0, -1.0 ],\\n [ 0.0, 1.0 ],\\n [ 2.0, 3.0 ],\\n [ 4.0, 5.0 ],\\n [ -4.0, -3.0 ],\\n [ -2.0, -1.0 ],\\n [ 0.0, 1.0 ],\\n [ 2.0, 3.0 ],\\n [ 4.0, 5.0 ],\\n [ -4.0, -3.0 ],\\n ... 38 more ...\\n [ -4.0, -3.0 ],\\n [ -2.0, -1.0 ],\\n [ 0.0, 1.0 ],\\n [ 2.0, 3.0 ],\\n [ 4.0, 5.0 ],\\n [ -4.0, -3.0 ],\\n [ -2.0, -1.0 ],\\n [ 0.0, 1.0 ],\\n [ 2.0, 3.0 ],\\n [ 4.0, 5.0 ],\\n [ -4.0, -3.0 ],\\n [ -2.0, -1.0 ],\\n [ 0.0, 1.0 ],\\n [ 2.0, 3.0 ],\\n [ 4.0, 5.0 ]\\n]\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Tensors as String can be formatted depending on shape. [6]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We configure a NDPrintSettings object.","code":["def settings ="," Neureka.get()"," .settings()"," .view()"," .getNDPrintSettings()"," .clone()"," .setRowLimit( mode.contains( \"s\" ) ? 3 : 32 )"," .setIsScientific( mode.contains( \"c\" ) )"," .setIsMultiline( mode.contains( \"f\" ) )"," .setHasGradient( mode.contains( \"g\" ) )"," .setCellSize( mode.contains( \"p\" ) ? 6 : mode.contains( \"f\" ) ? 
2 : 1 )"," .setHasValue( !(mode.contains( \"shp\" ) || mode.contains(\"shape\")) )"," .setHasRecursiveGraph( mode.contains( \"r\" ) )"," .setHasDerivatives( mode.contains( \"d\" ) )"," .setHasShape( !mode.contains( \"v\" ) )"," .setIsCellBound( mode .contains( \"b\" ) )"," .setPostfix( \"\" )"," .setPrefix( \"\" )"," .setHasSlimNumbers( false )"]}, + + {"kind":"and","text":"Four tensors of various data types:","code":["Tensor t1 = Tensor.of( Float.class, shape, -4f..5f ).set( Tensor.of( shape, -7f..3f ) )","Tensor t2 = Tensor.of( Double.class, shape, -4d..5d ).set( Tensor.of( shape, -7d..3d ) )","Tensor t3 = Tensor.of( Integer.class, shape, -4..5 ).set( Tensor.of( shape, -7..3 ) )","Tensor t4 = Tensor.of( Short.class, shape, (-4 as short)..(5 as short) ).set( Tensor.of( shape, (-7 as short)..(3 as short) ) )","Tensor t5 = Tensor.of( Byte.class, shape, (-4 as byte )..(5 as byte ) ).set( Tensor.of( shape, (-7 as byte)..(3 as byte) ) )"]}, + + {"kind":"expect","text":"The first tensor has the expected internals and produces the correct String representation.","code":["t1.toString(settings) == expected","t1.dataType == DataType.of( Float.class )","t1.mut.data.get() instanceof float[]"]}, + + {"kind":"and","text":"The second tensor has the expected internals and produces the correct String representation.","code":["t2.toString(settings) == expected","t2.dataType == DataType.of( Double.class )","t2.mut.data.get() instanceof double[]"]}, + + {"kind":"and","text":"The third tensor has the expected internals and produces the correct String representation.","code":["t3.toString(settings).replace(' ','') == expected.replace('.0',' ').replace(' ','')","t3.dataType == DataType.of( Integer.class )","t3.mut.data.get() instanceof int[]"]}, + + {"kind":"and","text":"The fourth tensor has the expected internals and produces the correct String representation.","code":["t4.toString(settings).replace(' ','') == expected.replace('.0',' ').replace(' ','')","t4.dataType == DataType.of( Short.class )","t4.mut.data.get() instanceof short[]"]}, + + {"kind":"and","text":"The fifth tensor has the expected internals and produces the correct String representation.","code":["t5.toString(settings).replace(' ','') == expected.replace('.0',' ').replace(' ','')","t5.dataType == DataType.of( Byte.class )","t5.mut.data.get() instanceof byte[]"]}, + + {"kind":"where","text":"The print configurations codes \"mode\", a common shape and expected String representation will be supplied:","code":{"shape":["[2,3]","[2,3]","[3,2]","[2,3,4]","[2,2,3,4]","[2, 70]","[2, 100]","[70, 2]"],"mode":["\"fap\"","\"fa\"","\"fp\"","\"fp\"","\"fp\"","\"f\"","\"f\"","\"f\""],"expected":["\"(2x3):[\\n [ -4.0 , -3.0 , -2.0 ],\\n [ -1.0 , 0.0 , 1.0 ]\\n]\"","\"(2x3):[\\n [ -4.0, -3.0, -2.0 ],\\n [ -1.0, 0.0, 1.0 ]\\n]\"","\"(3x2):[\\n [ -4.0 , -3.0 ],\\n [ -2.0 , -1.0 ],\\n [ 0.0 , 1.0 ]\\n]\"","\"(2x3x4):[\\n [\\n [ -4.0 , -3.0 , -2.0 , -1.0 ],\\n [ 0.0 , 1.0 , 2.0 , 3.0 ],\\n [ 4.0 , 5.0 , -4.0 , -3.0 ]\\n ],\\n [\\n [ -2.0 , -1.0 , 0.0 , 1.0 ],\\n [ 2.0 , 3.0 , 4.0 , 5.0 ],\\n [ -4.0 , -3.0 , -2.0 , -1.0 ]\\n ]\\n]\"","\"(2x2x3x4):[\\n [\\n [\\n [ -4.0 , -3.0 , -2.0 , -1.0 ],\\n [ 0.0 , 1.0 , 2.0 , 3.0 ],\\n [ 4.0 , 5.0 , -4.0 , -3.0 ]\\n ],\\n [\\n [ -2.0 , -1.0 , 0.0 , 1.0 ],\\n [ 2.0 , 3.0 , 4.0 , 5.0 ],\\n [ -4.0 , -3.0 , -2.0 , -1.0 ]\\n ]\\n ],\\n [\\n [\\n [ 0.0 , 1.0 , 2.0 , 3.0 ],\\n [ 4.0 , 5.0 , -4.0 , -3.0 ],\\n [ -2.0 , -1.0 , 0.0 , 1.0 ]\\n ],\\n [\\n [ 2.0 , 3.0 , 4.0 , 5.0 ],\\n [ -4.0 , -3.0 , -2.0 , -1.0 ],\\n [ 0.0 , 1.0 , 2.0 , 
3.0 ]\\n ]\\n ]\\n]\"","\"(2x70):[\\n [ -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, ..38 more.., 1.0, 2.0, 3.0, 4.0, 5.0, -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0 ],\\n [ -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, ..38 more.., 1.0, 2.0, 3.0, 4.0, 5.0, -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0 ]\\n]\"","\"(2x100):[\\n [ -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, ..68 more.., 1.0, 2.0, 3.0, 4.0, 5.0, -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0 ],\\n [ -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, ..68 more.., 1.0, 2.0, 3.0, 4.0, 5.0, -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0 ]\\n]\"","\"(70x2):[\\n [ -4.0, -3.0 ],\\n [ -2.0, -1.0 ],\\n [ 0.0, 1.0 ],\\n [ 2.0, 3.0 ],\\n [ 4.0, 5.0 ],\\n [ -4.0, -3.0 ],\\n [ -2.0, -1.0 ],\\n [ 0.0, 1.0 ],\\n [ 2.0, 3.0 ],\\n [ 4.0, 5.0 ],\\n [ -4.0, -3.0 ],\\n [ -2.0, -1.0 ],\\n [ 0.0, 1.0 ],\\n [ 2.0, 3.0 ],\\n [ 4.0, 5.0 ],\\n [ -4.0, -3.0 ],\\n ... 38 more ...\\n [ -4.0, -3.0 ],\\n [ -2.0, -1.0 ],\\n [ 0.0, 1.0 ],\\n [ 2.0, 3.0 ],\\n [ 4.0, 5.0 ],\\n [ -4.0, -3.0 ],\\n [ -2.0, -1.0 ],\\n [ 0.0, 1.0 ],\\n [ 2.0, 3.0 ],\\n [ 4.0, 5.0 ],\\n [ -4.0, -3.0 ],\\n [ -2.0, -1.0 ],\\n [ 0.0, 1.0 ],\\n [ 2.0, 3.0 ],\\n [ 4.0, 5.0 ]\\n]\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Tensors as String can be formatted depending on shape. [7]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We configure a NDPrintSettings object.","code":["def settings ="," Neureka.get()"," .settings()"," .view()"," .getNDPrintSettings()"," .clone()"," .setRowLimit( mode.contains( \"s\" ) ? 3 : 32 )"," .setIsScientific( mode.contains( \"c\" ) )"," .setIsMultiline( mode.contains( \"f\" ) )"," .setHasGradient( mode.contains( \"g\" ) )"," .setCellSize( mode.contains( \"p\" ) ? 6 : mode.contains( \"f\" ) ? 
2 : 1 )"," .setHasValue( !(mode.contains( \"shp\" ) || mode.contains(\"shape\")) )"," .setHasRecursiveGraph( mode.contains( \"r\" ) )"," .setHasDerivatives( mode.contains( \"d\" ) )"," .setHasShape( !mode.contains( \"v\" ) )"," .setIsCellBound( mode .contains( \"b\" ) )"," .setPostfix( \"\" )"," .setPrefix( \"\" )"," .setHasSlimNumbers( false )"]}, + + {"kind":"and","text":"Four tensors of various data types:","code":["Tensor t1 = Tensor.of( Float.class, shape, -4f..5f ).set( Tensor.of( shape, -7f..3f ) )","Tensor t2 = Tensor.of( Double.class, shape, -4d..5d ).set( Tensor.of( shape, -7d..3d ) )","Tensor t3 = Tensor.of( Integer.class, shape, -4..5 ).set( Tensor.of( shape, -7..3 ) )","Tensor t4 = Tensor.of( Short.class, shape, (-4 as short)..(5 as short) ).set( Tensor.of( shape, (-7 as short)..(3 as short) ) )","Tensor t5 = Tensor.of( Byte.class, shape, (-4 as byte )..(5 as byte ) ).set( Tensor.of( shape, (-7 as byte)..(3 as byte) ) )"]}, + + {"kind":"expect","text":"The first tensor has the expected internals and produces the correct String representation.","code":["t1.toString(settings) == expected","t1.dataType == DataType.of( Float.class )","t1.mut.data.get() instanceof float[]"]}, + + {"kind":"and","text":"The second tensor has the expected internals and produces the correct String representation.","code":["t2.toString(settings) == expected","t2.dataType == DataType.of( Double.class )","t2.mut.data.get() instanceof double[]"]}, + + {"kind":"and","text":"The third tensor has the expected internals and produces the correct String representation.","code":["t3.toString(settings).replace(' ','') == expected.replace('.0',' ').replace(' ','')","t3.dataType == DataType.of( Integer.class )","t3.mut.data.get() instanceof int[]"]}, + + {"kind":"and","text":"The fourth tensor has the expected internals and produces the correct String representation.","code":["t4.toString(settings).replace(' ','') == expected.replace('.0',' ').replace(' ','')","t4.dataType == DataType.of( Short.class )","t4.mut.data.get() instanceof short[]"]}, + + {"kind":"and","text":"The fifth tensor has the expected internals and produces the correct String representation.","code":["t5.toString(settings).replace(' ','') == expected.replace('.0',' ').replace(' ','')","t5.dataType == DataType.of( Byte.class )","t5.mut.data.get() instanceof byte[]"]}, + + {"kind":"where","text":"The print configurations codes \"mode\", a common shape and expected String representation will be supplied:","code":{"shape":["[2,3]","[2,3]","[3,2]","[2,3,4]","[2,2,3,4]","[2, 70]","[2, 100]","[70, 2]"],"mode":["\"fap\"","\"fa\"","\"fp\"","\"fp\"","\"fp\"","\"f\"","\"f\"","\"f\""],"expected":["\"(2x3):[\\n [ -4.0 , -3.0 , -2.0 ],\\n [ -1.0 , 0.0 , 1.0 ]\\n]\"","\"(2x3):[\\n [ -4.0, -3.0, -2.0 ],\\n [ -1.0, 0.0, 1.0 ]\\n]\"","\"(3x2):[\\n [ -4.0 , -3.0 ],\\n [ -2.0 , -1.0 ],\\n [ 0.0 , 1.0 ]\\n]\"","\"(2x3x4):[\\n [\\n [ -4.0 , -3.0 , -2.0 , -1.0 ],\\n [ 0.0 , 1.0 , 2.0 , 3.0 ],\\n [ 4.0 , 5.0 , -4.0 , -3.0 ]\\n ],\\n [\\n [ -2.0 , -1.0 , 0.0 , 1.0 ],\\n [ 2.0 , 3.0 , 4.0 , 5.0 ],\\n [ -4.0 , -3.0 , -2.0 , -1.0 ]\\n ]\\n]\"","\"(2x2x3x4):[\\n [\\n [\\n [ -4.0 , -3.0 , -2.0 , -1.0 ],\\n [ 0.0 , 1.0 , 2.0 , 3.0 ],\\n [ 4.0 , 5.0 , -4.0 , -3.0 ]\\n ],\\n [\\n [ -2.0 , -1.0 , 0.0 , 1.0 ],\\n [ 2.0 , 3.0 , 4.0 , 5.0 ],\\n [ -4.0 , -3.0 , -2.0 , -1.0 ]\\n ]\\n ],\\n [\\n [\\n [ 0.0 , 1.0 , 2.0 , 3.0 ],\\n [ 4.0 , 5.0 , -4.0 , -3.0 ],\\n [ -2.0 , -1.0 , 0.0 , 1.0 ]\\n ],\\n [\\n [ 2.0 , 3.0 , 4.0 , 5.0 ],\\n [ -4.0 , -3.0 , -2.0 , -1.0 ],\\n [ 0.0 , 1.0 , 2.0 , 
3.0 ]\\n ]\\n ]\\n]\"","\"(2x70):[\\n [ -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, ..38 more.., 1.0, 2.0, 3.0, 4.0, 5.0, -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0 ],\\n [ -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, ..38 more.., 1.0, 2.0, 3.0, 4.0, 5.0, -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0 ]\\n]\"","\"(2x100):[\\n [ -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, ..68 more.., 1.0, 2.0, 3.0, 4.0, 5.0, -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0 ],\\n [ -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, ..68 more.., 1.0, 2.0, 3.0, 4.0, 5.0, -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0 ]\\n]\"","\"(70x2):[\\n [ -4.0, -3.0 ],\\n [ -2.0, -1.0 ],\\n [ 0.0, 1.0 ],\\n [ 2.0, 3.0 ],\\n [ 4.0, 5.0 ],\\n [ -4.0, -3.0 ],\\n [ -2.0, -1.0 ],\\n [ 0.0, 1.0 ],\\n [ 2.0, 3.0 ],\\n [ 4.0, 5.0 ],\\n [ -4.0, -3.0 ],\\n [ -2.0, -1.0 ],\\n [ 0.0, 1.0 ],\\n [ 2.0, 3.0 ],\\n [ 4.0, 5.0 ],\\n [ -4.0, -3.0 ],\\n ... 38 more ...\\n [ -4.0, -3.0 ],\\n [ -2.0, -1.0 ],\\n [ 0.0, 1.0 ],\\n [ 2.0, 3.0 ],\\n [ 4.0, 5.0 ],\\n [ -4.0, -3.0 ],\\n [ -2.0, -1.0 ],\\n [ 0.0, 1.0 ],\\n [ 2.0, 3.0 ],\\n [ 4.0, 5.0 ],\\n [ -4.0, -3.0 ],\\n [ -2.0, -1.0 ],\\n [ 0.0, 1.0 ],\\n [ 2.0, 3.0 ],\\n [ 4.0, 5.0 ]\\n]\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + { "id":"We can create scalar tensors.", "result":"PASS", - "duration":"0", + "duration":"0.003 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -117,7 +374,7 @@ { "id":"Tensor created from shape and datatype has expected state.", "result":"PASS", - "duration":"0", + "duration":"0.002 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -134,7 +391,34 @@ }, { - "id":"The data and the value of a tensor a 2 different things!", + "id":"The data and the value of a tensor a 2 different things! [0]", + "result":"PASS", + "duration":"0.002 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a simple vector:","code":["var v = Tensor.ofFloats().withShape(3).andFill(-2, 4, 8)"]}, + + {"kind":"and","text":"And then we store it on the device we want to test.","code":["v.to(device)"]}, + + {"kind":"when","text":"We create a slice of the above vector, a scalar...","code":["var s = v.slice().axis(0).at(1).get()"]}, + + {"kind":"then","text":"The slice contains the expected value with respect to the slice parent...","code":["v.at(1).get() == s.at(0).get()"]}, + + {"kind":"and","text":"They both do not share the same value array.","code":["v.items != s.items"]}, + + {"kind":"and","text":"They do however share the same underlying data.","code":["v.mut.data.get() == s.mut.data.get()"]}, + + {"kind":"and","text":"The tensor simply stores the number 4.","code":["s.items == [4f]"]}, + + {"kind":"where","text":"We test the following devices:","code":{"device":["'CPU'","'GPU'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The data and the value of a tensor a 2 different things! 
[1]", "result":"PASS", "duration":"0", "iterations":{ diff --git a/docs/spock/reports/ut.tensors.Tensor_Stats_Spec.json b/docs/spock/reports/ut.tensors.Tensor_Stats_Spec.json index ca394ef51..dc919ea8c 100644 --- a/docs/spock/reports/ut.tensors.Tensor_Stats_Spec.json +++ b/docs/spock/reports/ut.tensors.Tensor_Stats_Spec.json @@ -1,22 +1,22 @@ { "className":"ut.tensors.Tensor_Stats_Spec", "title":"Reducing Tensors", - "narrative":"Various kinds of operations reduce tensors to scalars,\n the most common ones being the min and max operations\n which find the smallest as well as largest number among all\n items of a tensor.\n Neureka exposes various different ways to achieve this,\n all of which are also differential (autograd support).", + "narrative":"Various kinds of operations reduce tensors to scalars,\n the most common ones being the min and max operations \n which find the smallest as well as largest number among all \n items of a tensor.\n Neureka exposes various different ways to achieve this,\n all of which are also differential (autograd support).", "subjects":["neureka.Tensor"], "statistics":{ - "runs":"8", + "runs":"22", "successRate":"100.0%", "failures":"0", "errors":"0", "skipped":"0", - "duration":"0.230 seconds" + "duration":"0.225 seconds" }, "headers":[],"tags":{},"see":[], "features":[ { - "id":"We can use the max operation as a function", + "id":"We can use the max operation as a function [0]", "result":"PASS", - "duration":"0.081 seconds", + "duration":"0.007 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -39,12 +39,337 @@ }, { - "id":"We can get pre-instantiated min and max functions from the library context.", + "id":"We can use the max operation as a function [1]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a min/max function:","code":["var fun = Function.of(reduceType.toLowerCase() + \"(I[0])\")"]}, + + {"kind":"and","text":"A seed, for some variability:","code":["var seed = dataType.getSimpleName().hashCode() + reduceType.hashCode()"]}, + + {"kind":"and","text":"","code":["var a = Tensor.of(dataType)"," .withShape(19, 7)"," .andWhere({ i, _ -> ((seed+31**(i+13))%301)-151})"]}, + + {"kind":"and","text":"Before applying the function, we copy the tensor to the device:","code":["a.to(device)"]}, + + {"kind":"when","text":"We apply the function to the tensor:","code":["var result = fun(a)"]}, + + {"kind":"then","text":"The result is correct:","code":["result.items[0] == expected"]}, + + {"kind":"where","text":"","code":{"reduceType":["'MIN'","'MAX'","'MIN'","'MAX'","'MIN'","'MAX'","'MIN'","'MAX'","'MIN'","'MAX'","'MIN'","'MAX'","'MIN'","'MAX'"],"dataType":["Float","Float","Double","Double","Integer","Integer","Long","Long","Short","Short","Byte","Byte","Float","Float"],"device":["'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'GPU'","'GPU'"],"expected":["-148.0","141.0","-143.0","149.0","-121","148","-146","147","-148","146","-127","124","-148.0","141.0"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can use the max operation as a function [2]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a min/max function:","code":["var fun = Function.of(reduceType.toLowerCase() + \"(I[0])\")"]}, + + {"kind":"and","text":"A seed, for some variability:","code":["var seed = 
dataType.getSimpleName().hashCode() + reduceType.hashCode()"]}, + + {"kind":"and","text":"","code":["var a = Tensor.of(dataType)"," .withShape(19, 7)"," .andWhere({ i, _ -> ((seed+31**(i+13))%301)-151})"]}, + + {"kind":"and","text":"Before applying the function, we copy the tensor to the device:","code":["a.to(device)"]}, + + {"kind":"when","text":"We apply the function to the tensor:","code":["var result = fun(a)"]}, + + {"kind":"then","text":"The result is correct:","code":["result.items[0] == expected"]}, + + {"kind":"where","text":"","code":{"reduceType":["'MIN'","'MAX'","'MIN'","'MAX'","'MIN'","'MAX'","'MIN'","'MAX'","'MIN'","'MAX'","'MIN'","'MAX'","'MIN'","'MAX'"],"dataType":["Float","Float","Double","Double","Integer","Integer","Long","Long","Short","Short","Byte","Byte","Float","Float"],"device":["'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'GPU'","'GPU'"],"expected":["-148.0","141.0","-143.0","149.0","-121","148","-146","147","-148","146","-127","124","-148.0","141.0"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can use the max operation as a function [3]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a min/max function:","code":["var fun = Function.of(reduceType.toLowerCase() + \"(I[0])\")"]}, + + {"kind":"and","text":"A seed, for some variability:","code":["var seed = dataType.getSimpleName().hashCode() + reduceType.hashCode()"]}, + + {"kind":"and","text":"","code":["var a = Tensor.of(dataType)"," .withShape(19, 7)"," .andWhere({ i, _ -> ((seed+31**(i+13))%301)-151})"]}, + + {"kind":"and","text":"Before applying the function, we copy the tensor to the device:","code":["a.to(device)"]}, + + {"kind":"when","text":"We apply the function to the tensor:","code":["var result = fun(a)"]}, + + {"kind":"then","text":"The result is correct:","code":["result.items[0] == expected"]}, + + {"kind":"where","text":"","code":{"reduceType":["'MIN'","'MAX'","'MIN'","'MAX'","'MIN'","'MAX'","'MIN'","'MAX'","'MIN'","'MAX'","'MIN'","'MAX'","'MIN'","'MAX'"],"dataType":["Float","Float","Double","Double","Integer","Integer","Long","Long","Short","Short","Byte","Byte","Float","Float"],"device":["'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'GPU'","'GPU'"],"expected":["-148.0","141.0","-143.0","149.0","-121","148","-146","147","-148","146","-127","124","-148.0","141.0"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can use the max operation as a function [4]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a min/max function:","code":["var fun = Function.of(reduceType.toLowerCase() + \"(I[0])\")"]}, + + {"kind":"and","text":"A seed, for some variability:","code":["var seed = dataType.getSimpleName().hashCode() + reduceType.hashCode()"]}, + + {"kind":"and","text":"","code":["var a = Tensor.of(dataType)"," .withShape(19, 7)"," .andWhere({ i, _ -> ((seed+31**(i+13))%301)-151})"]}, + + {"kind":"and","text":"Before applying the function, we copy the tensor to the device:","code":["a.to(device)"]}, + + {"kind":"when","text":"We apply the function to the tensor:","code":["var result = fun(a)"]}, + + {"kind":"then","text":"The result is correct:","code":["result.items[0] == expected"]}, + + 
{"kind":"where","text":"","code":{"reduceType":["'MIN'","'MAX'","'MIN'","'MAX'","'MIN'","'MAX'","'MIN'","'MAX'","'MIN'","'MAX'","'MIN'","'MAX'","'MIN'","'MAX'"],"dataType":["Float","Float","Double","Double","Integer","Integer","Long","Long","Short","Short","Byte","Byte","Float","Float"],"device":["'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'GPU'","'GPU'"],"expected":["-148.0","141.0","-143.0","149.0","-121","148","-146","147","-148","146","-127","124","-148.0","141.0"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can use the max operation as a function [5]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a min/max function:","code":["var fun = Function.of(reduceType.toLowerCase() + \"(I[0])\")"]}, + + {"kind":"and","text":"A seed, for some variability:","code":["var seed = dataType.getSimpleName().hashCode() + reduceType.hashCode()"]}, + + {"kind":"and","text":"","code":["var a = Tensor.of(dataType)"," .withShape(19, 7)"," .andWhere({ i, _ -> ((seed+31**(i+13))%301)-151})"]}, + + {"kind":"and","text":"Before applying the function, we copy the tensor to the device:","code":["a.to(device)"]}, + + {"kind":"when","text":"We apply the function to the tensor:","code":["var result = fun(a)"]}, + + {"kind":"then","text":"The result is correct:","code":["result.items[0] == expected"]}, + + {"kind":"where","text":"","code":{"reduceType":["'MIN'","'MAX'","'MIN'","'MAX'","'MIN'","'MAX'","'MIN'","'MAX'","'MIN'","'MAX'","'MIN'","'MAX'","'MIN'","'MAX'"],"dataType":["Float","Float","Double","Double","Integer","Integer","Long","Long","Short","Short","Byte","Byte","Float","Float"],"device":["'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'GPU'","'GPU'"],"expected":["-148.0","141.0","-143.0","149.0","-121","148","-146","147","-148","146","-127","124","-148.0","141.0"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can use the max operation as a function [6]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a min/max function:","code":["var fun = Function.of(reduceType.toLowerCase() + \"(I[0])\")"]}, + + {"kind":"and","text":"A seed, for some variability:","code":["var seed = dataType.getSimpleName().hashCode() + reduceType.hashCode()"]}, + + {"kind":"and","text":"","code":["var a = Tensor.of(dataType)"," .withShape(19, 7)"," .andWhere({ i, _ -> ((seed+31**(i+13))%301)-151})"]}, + + {"kind":"and","text":"Before applying the function, we copy the tensor to the device:","code":["a.to(device)"]}, + + {"kind":"when","text":"We apply the function to the tensor:","code":["var result = fun(a)"]}, + + {"kind":"then","text":"The result is correct:","code":["result.items[0] == expected"]}, + + {"kind":"where","text":"","code":{"reduceType":["'MIN'","'MAX'","'MIN'","'MAX'","'MIN'","'MAX'","'MIN'","'MAX'","'MIN'","'MAX'","'MIN'","'MAX'","'MIN'","'MAX'"],"dataType":["Float","Float","Double","Double","Integer","Integer","Long","Long","Short","Short","Byte","Byte","Float","Float"],"device":["'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'GPU'","'GPU'"],"expected":["-148.0","141.0","-143.0","149.0","-121","148","-146","147","-148","146","-127","124","-148.0","141.0"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + 
"id":"We can use the max operation as a function [7]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a min/max function:","code":["var fun = Function.of(reduceType.toLowerCase() + \"(I[0])\")"]}, + + {"kind":"and","text":"A seed, for some variability:","code":["var seed = dataType.getSimpleName().hashCode() + reduceType.hashCode()"]}, + + {"kind":"and","text":"","code":["var a = Tensor.of(dataType)"," .withShape(19, 7)"," .andWhere({ i, _ -> ((seed+31**(i+13))%301)-151})"]}, + + {"kind":"and","text":"Before applying the function, we copy the tensor to the device:","code":["a.to(device)"]}, + + {"kind":"when","text":"We apply the function to the tensor:","code":["var result = fun(a)"]}, + + {"kind":"then","text":"The result is correct:","code":["result.items[0] == expected"]}, + + {"kind":"where","text":"","code":{"reduceType":["'MIN'","'MAX'","'MIN'","'MAX'","'MIN'","'MAX'","'MIN'","'MAX'","'MIN'","'MAX'","'MIN'","'MAX'","'MIN'","'MAX'"],"dataType":["Float","Float","Double","Double","Integer","Integer","Long","Long","Short","Short","Byte","Byte","Float","Float"],"device":["'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'GPU'","'GPU'"],"expected":["-148.0","141.0","-143.0","149.0","-121","148","-146","147","-148","146","-127","124","-148.0","141.0"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can use the max operation as a function [8]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a min/max function:","code":["var fun = Function.of(reduceType.toLowerCase() + \"(I[0])\")"]}, + + {"kind":"and","text":"A seed, for some variability:","code":["var seed = dataType.getSimpleName().hashCode() + reduceType.hashCode()"]}, + + {"kind":"and","text":"","code":["var a = Tensor.of(dataType)"," .withShape(19, 7)"," .andWhere({ i, _ -> ((seed+31**(i+13))%301)-151})"]}, + + {"kind":"and","text":"Before applying the function, we copy the tensor to the device:","code":["a.to(device)"]}, + + {"kind":"when","text":"We apply the function to the tensor:","code":["var result = fun(a)"]}, + + {"kind":"then","text":"The result is correct:","code":["result.items[0] == expected"]}, + + {"kind":"where","text":"","code":{"reduceType":["'MIN'","'MAX'","'MIN'","'MAX'","'MIN'","'MAX'","'MIN'","'MAX'","'MIN'","'MAX'","'MIN'","'MAX'","'MIN'","'MAX'"],"dataType":["Float","Float","Double","Double","Integer","Integer","Long","Long","Short","Short","Byte","Byte","Float","Float"],"device":["'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'GPU'","'GPU'"],"expected":["-148.0","141.0","-143.0","149.0","-121","148","-146","147","-148","146","-127","124","-148.0","141.0"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can use the max operation as a function [9]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a min/max function:","code":["var fun = Function.of(reduceType.toLowerCase() + \"(I[0])\")"]}, + + {"kind":"and","text":"A seed, for some variability:","code":["var seed = dataType.getSimpleName().hashCode() + reduceType.hashCode()"]}, + + {"kind":"and","text":"","code":["var a = Tensor.of(dataType)"," .withShape(19, 7)"," .andWhere({ i, _ -> ((seed+31**(i+13))%301)-151})"]}, + + 
{"kind":"and","text":"Before applying the function, we copy the tensor to the device:","code":["a.to(device)"]}, + + {"kind":"when","text":"We apply the function to the tensor:","code":["var result = fun(a)"]}, + + {"kind":"then","text":"The result is correct:","code":["result.items[0] == expected"]}, + + {"kind":"where","text":"","code":{"reduceType":["'MIN'","'MAX'","'MIN'","'MAX'","'MIN'","'MAX'","'MIN'","'MAX'","'MIN'","'MAX'","'MIN'","'MAX'","'MIN'","'MAX'"],"dataType":["Float","Float","Double","Double","Integer","Integer","Long","Long","Short","Short","Byte","Byte","Float","Float"],"device":["'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'GPU'","'GPU'"],"expected":["-148.0","141.0","-143.0","149.0","-121","148","-146","147","-148","146","-127","124","-148.0","141.0"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can use the max operation as a function [10]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a min/max function:","code":["var fun = Function.of(reduceType.toLowerCase() + \"(I[0])\")"]}, + + {"kind":"and","text":"A seed, for some variability:","code":["var seed = dataType.getSimpleName().hashCode() + reduceType.hashCode()"]}, + + {"kind":"and","text":"","code":["var a = Tensor.of(dataType)"," .withShape(19, 7)"," .andWhere({ i, _ -> ((seed+31**(i+13))%301)-151})"]}, + + {"kind":"and","text":"Before applying the function, we copy the tensor to the device:","code":["a.to(device)"]}, + + {"kind":"when","text":"We apply the function to the tensor:","code":["var result = fun(a)"]}, + + {"kind":"then","text":"The result is correct:","code":["result.items[0] == expected"]}, + + {"kind":"where","text":"","code":{"reduceType":["'MIN'","'MAX'","'MIN'","'MAX'","'MIN'","'MAX'","'MIN'","'MAX'","'MIN'","'MAX'","'MIN'","'MAX'","'MIN'","'MAX'"],"dataType":["Float","Float","Double","Double","Integer","Integer","Long","Long","Short","Short","Byte","Byte","Float","Float"],"device":["'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'GPU'","'GPU'"],"expected":["-148.0","141.0","-143.0","149.0","-121","148","-146","147","-148","146","-127","124","-148.0","141.0"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can use the max operation as a function [11]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a min/max function:","code":["var fun = Function.of(reduceType.toLowerCase() + \"(I[0])\")"]}, + + {"kind":"and","text":"A seed, for some variability:","code":["var seed = dataType.getSimpleName().hashCode() + reduceType.hashCode()"]}, + + {"kind":"and","text":"","code":["var a = Tensor.of(dataType)"," .withShape(19, 7)"," .andWhere({ i, _ -> ((seed+31**(i+13))%301)-151})"]}, + + {"kind":"and","text":"Before applying the function, we copy the tensor to the device:","code":["a.to(device)"]}, + + {"kind":"when","text":"We apply the function to the tensor:","code":["var result = fun(a)"]}, + + {"kind":"then","text":"The result is correct:","code":["result.items[0] == expected"]}, + + 
{"kind":"where","text":"","code":{"reduceType":["'MIN'","'MAX'","'MIN'","'MAX'","'MIN'","'MAX'","'MIN'","'MAX'","'MIN'","'MAX'","'MIN'","'MAX'","'MIN'","'MAX'"],"dataType":["Float","Float","Double","Double","Integer","Integer","Long","Long","Short","Short","Byte","Byte","Float","Float"],"device":["'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'GPU'","'GPU'"],"expected":["-148.0","141.0","-143.0","149.0","-121","148","-146","147","-148","146","-127","124","-148.0","141.0"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can use the max operation as a function [12]", "result":"PASS", "duration":"0", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, + "blocks":[ + {"kind":"given","text":"We create a min/max function:","code":["var fun = Function.of(reduceType.toLowerCase() + \"(I[0])\")"]}, + + {"kind":"and","text":"A seed, for some variability:","code":["var seed = dataType.getSimpleName().hashCode() + reduceType.hashCode()"]}, + + {"kind":"and","text":"","code":["var a = Tensor.of(dataType)"," .withShape(19, 7)"," .andWhere({ i, _ -> ((seed+31**(i+13))%301)-151})"]}, + + {"kind":"and","text":"Before applying the function, we copy the tensor to the device:","code":["a.to(device)"]}, + + {"kind":"when","text":"We apply the function to the tensor:","code":["var result = fun(a)"]}, + + {"kind":"then","text":"The result is correct:","code":["result.items[0] == expected"]}, + + {"kind":"where","text":"","code":{"reduceType":["'MIN'","'MAX'","'MIN'","'MAX'","'MIN'","'MAX'","'MIN'","'MAX'","'MIN'","'MAX'","'MIN'","'MAX'","'MIN'","'MAX'"],"dataType":["Float","Float","Double","Double","Integer","Integer","Long","Long","Short","Short","Byte","Byte","Float","Float"],"device":["'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'GPU'","'GPU'"],"expected":["-148.0","141.0","-143.0","149.0","-121","148","-146","147","-148","146","-127","124","-148.0","141.0"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can use the max operation as a function [13]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a min/max function:","code":["var fun = Function.of(reduceType.toLowerCase() + \"(I[0])\")"]}, + + {"kind":"and","text":"A seed, for some variability:","code":["var seed = dataType.getSimpleName().hashCode() + reduceType.hashCode()"]}, + + {"kind":"and","text":"","code":["var a = Tensor.of(dataType)"," .withShape(19, 7)"," .andWhere({ i, _ -> ((seed+31**(i+13))%301)-151})"]}, + + {"kind":"and","text":"Before applying the function, we copy the tensor to the device:","code":["a.to(device)"]}, + + {"kind":"when","text":"We apply the function to the tensor:","code":["var result = fun(a)"]}, + + {"kind":"then","text":"The result is correct:","code":["result.items[0] == expected"]}, + + {"kind":"where","text":"","code":{"reduceType":["'MIN'","'MAX'","'MIN'","'MAX'","'MIN'","'MAX'","'MIN'","'MAX'","'MIN'","'MAX'","'MIN'","'MAX'","'MIN'","'MAX'"],"dataType":["Float","Float","Double","Double","Integer","Integer","Long","Long","Short","Short","Byte","Byte","Float","Float"],"device":["'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'CPU'","'GPU'","'GPU'"],"expected":["-148.0","141.0","-143.0","149.0","-121","148","-146","147","-148","146","-127","124","-148.0","141.0"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We 
can get pre-instantiated min and max functions from the library context.", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, "blocks":[ {"kind":"given","text":"We access the pre-instantiated max function:","code":["var min = Neureka.get().backend.autogradFunction.min"]}, @@ -56,9 +381,30 @@ }, { - "id":"There is no need to use a function, we can use the min() and max() methods on tensors instead.", + "id":"There is no need to use a function, we can use the min() and max() methods on tensors instead. [0]", "result":"PASS", - "duration":"0.002 seconds", + "duration":"0.007 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"We create a tensor:","code":["var a = Tensor.of(Float)"," .withShape(19, 7)"," .andWhere({ i, _ -> ((31**(i+42))%301)-151})"]}, + + {"kind":"and","text":"Before applying the function, we copy the tensor to the device:","code":["a.to(device)"]}, + + {"kind":"and","text":"We access the min and max methods:","code":["var min = a.min()","var max = a.max()"]}, + + {"kind":"expect","text":"The results are correct:","code":["min.item(0) == -150.0","max.item(0) == 147.0"]}, + + {"kind":"where","text":"","code":{"device":["'CPU'","'GPU'"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"There is no need to use a function, we can use the min() and max() methods on tensors instead. [1]", + "result":"PASS", + "duration":"0", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -79,7 +425,7 @@ { "id":"Both the min and max operation support autograd (back-propagation).", "result":"PASS", - "duration":"0.001 seconds", + "duration":"0.008 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -100,7 +446,7 @@ { "id":"We can use the \"sum\" method to sum the items of a tensor.", "result":"PASS", - "duration":"0.130 seconds", + "duration":"0.172 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -119,7 +465,7 @@ { "id":"The sum operation support autograd (back-propagation).", "result":"PASS", - "duration":"0", + "duration":"0.003 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -140,7 +486,7 @@ { "id":"A tensor can be summed alongside a specific axis.", "result":"PASS", - "duration":"0", + "duration":"0.003 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -157,7 +503,7 @@ { "id":"Multiple dimensions of a tensor can selectively be summed up.", "result":"PASS", - "duration":"0", + "duration":"0.002 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":["\n Given a tensor with a shape of (x, y, z) and the request to sum up axis 1 and 2,\n then the result will be a tensor with a shape of (x, 1, 1) because the\n sum of all values along the axis 1 and 2 is a single value for each of the two\n first dimensions.\n This operation supports autograd.\n "] }, diff --git a/docs/spock/reports/ut.tensors.Tensor_Version_Spec.json b/docs/spock/reports/ut.tensors.Tensor_Version_Spec.json index b7b89f388..ec6b48714 100644 --- a/docs/spock/reports/ut.tensors.Tensor_Version_Spec.json +++ b/docs/spock/reports/ut.tensors.Tensor_Version_Spec.json @@ -1,22 +1,22 @@ { "className":"ut.tensors.Tensor_Version_Spec", "title":"Tensor (Data Array) Version", - "narrative":"There are two fundamental categories of operations\n which can be applied to tensors :\n Inline operations and Non-Inline operations!\n\n Inline operations are often times problematic because they produce\n side effects by changing passed tensors instead of 
producing new ones...\n One such bad side effect can easily occur for tensors involved in the\n autograd system, more specifically: the recorded computation graph.\n Inline operations can break the mathematically pureness of the back-propagation\n procedure by for example changing partial derivatives...
        \n In order to prevent said errors from occurring unnoticed tensors\n have versions which will increment when the underlying data of the tensor changes.\n This version will be tracked by the computation graph as well in order to\n match it with the ones stored inside the tensor.\n A mismatch would then yield an exception!\n\n This specification is responsible for defining the behaviour of this\n version number with respect to their wrapping tensors as well as computation graph nodes.", + "narrative":"There are two fundamental categories of operations\n which can be applied to tensors : \n Inline operations and Non-Inline operations! \n\n Inline operations are often times problematic because they produce\n side effects by changing passed tensors instead of producing new ones... \n One such bad side effect can easily occur for tensors involved in the\n autograd system, more specifically: the recorded computation graph. \n Inline operations can break the mathematically pureness of the back-propagation\n procedure by for example changing partial derivatives...
        \n In order to prevent said errors from occurring unnoticed tensors\n have versions which will increment when the underlying data of the tensor changes. \n This version will be tracked by the computation graph as well in order to\n match it with the ones stored inside the tensor. \n A mismatch would then yield an exception! \n\n This specification is responsible for defining the behaviour of this\n version number with respect to their wrapping tensors as well as computation graph nodes.", "subjects":["neureka.Tensor"], "statistics":{ - "runs":"4", + "runs":"22", "successRate":"100.0%", "failures":"0", "errors":"0", - "skipped":"0", - "duration":"0.050 seconds" + "skipped":"1", + "duration":"0.135 seconds" }, "headers":["\n
        \n Here you can find out how the version number of a tensor is\n set and tracked.\n
        \n "],"tags":{},"see":[], "features":[ { - "id":"Inline operations causes version incrementation.", + "id":"Inline operations causes version incrementation. [0]", "result":"PASS", - "duration":"0.013 seconds", + "duration":"0.010 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -37,9 +37,305 @@ }, { - "id":"Non-inline operations do not cause version incrementation.", + "id":"Inline operations causes version incrementation. [1]", "result":"PASS", - "duration":"0.012 seconds", + "duration":"0.005 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"2 tensors a and b.","code":["Neureka.get().settings().autograd().setIsPreventingInlineOperations( safe_inline )","Tensor a = Tensor.of(4d) + Tensor.of(2d)","Tensor b = Tensor.of(-1d) + Tensor.of(-3d).setRqsGradient(true)","Binding binding = new Binding()","binding.setVariable('a', a)","binding.setVariable('b', b)"]}, + + {"kind":"expect","text":"Initially both tensors have a version number of 0.","code":["a.getVersion() == 0","b.getVersion() == 0"]}, + + {"kind":"when","text":"The groovy code (performing inline operations) is being evaluated.","code":["Tensor c = new GroovyShell(binding).evaluate((code))"]}, + + {"kind":"then","text":"The resulting tensor (toString) will contain the expected sub-string.","code":["c.toString().contains(expected)","c == a"]}, + + {"kind":"and","text":"The three tensors have the expected versions.","code":["a.getVersion() == version_of_a","b.getVersion() == version_of_b","c.getVersion() == version_of_c"]}, + + {"kind":"where","text":"","code":{"code":["' a.mut.plusAssign(b) '","' a.mut.minusAssign(b) '","' a.mut.timesAssign(b) '","' a.mut.divAssign(b) '","' a.mut.plusAssign(b) '","' a.mut.minusAssign(b) '","' a.mut.timesAssign(b) '","' a.mut.divAssign(b) '"],"safe_inline":["true","true","true","true","false","false","false","false"],"version_of_c":["1","1","1","1","0","0","0","0"],"version_of_a":["1","1","1","1","0","0","0","0"],"version_of_b":["0","0","0","0","0","0","0","0"],"expected":["\"(1):[2.0]\"","\"(1):[10.0]\"","\"(1):[-24.0]\"","\"(1):[-1.5]\"","\"(1):[2.0]\"","\"(1):[10.0]\"","\"(1):[-24.0]\"","\"(1):[-1.5]\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Inline operations causes version incrementation. 
[2]", + "result":"PASS", + "duration":"0.005 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"2 tensors a and b.","code":["Neureka.get().settings().autograd().setIsPreventingInlineOperations( safe_inline )","Tensor a = Tensor.of(4d) + Tensor.of(2d)","Tensor b = Tensor.of(-1d) + Tensor.of(-3d).setRqsGradient(true)","Binding binding = new Binding()","binding.setVariable('a', a)","binding.setVariable('b', b)"]}, + + {"kind":"expect","text":"Initially both tensors have a version number of 0.","code":["a.getVersion() == 0","b.getVersion() == 0"]}, + + {"kind":"when","text":"The groovy code (performing inline operations) is being evaluated.","code":["Tensor c = new GroovyShell(binding).evaluate((code))"]}, + + {"kind":"then","text":"The resulting tensor (toString) will contain the expected sub-string.","code":["c.toString().contains(expected)","c == a"]}, + + {"kind":"and","text":"The three tensors have the expected versions.","code":["a.getVersion() == version_of_a","b.getVersion() == version_of_b","c.getVersion() == version_of_c"]}, + + {"kind":"where","text":"","code":{"code":["' a.mut.plusAssign(b) '","' a.mut.minusAssign(b) '","' a.mut.timesAssign(b) '","' a.mut.divAssign(b) '","' a.mut.plusAssign(b) '","' a.mut.minusAssign(b) '","' a.mut.timesAssign(b) '","' a.mut.divAssign(b) '"],"safe_inline":["true","true","true","true","false","false","false","false"],"version_of_c":["1","1","1","1","0","0","0","0"],"version_of_a":["1","1","1","1","0","0","0","0"],"version_of_b":["0","0","0","0","0","0","0","0"],"expected":["\"(1):[2.0]\"","\"(1):[10.0]\"","\"(1):[-24.0]\"","\"(1):[-1.5]\"","\"(1):[2.0]\"","\"(1):[10.0]\"","\"(1):[-24.0]\"","\"(1):[-1.5]\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Inline operations causes version incrementation. 
[3]", + "result":"PASS", + "duration":"0.006 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"2 tensors a and b.","code":["Neureka.get().settings().autograd().setIsPreventingInlineOperations( safe_inline )","Tensor a = Tensor.of(4d) + Tensor.of(2d)","Tensor b = Tensor.of(-1d) + Tensor.of(-3d).setRqsGradient(true)","Binding binding = new Binding()","binding.setVariable('a', a)","binding.setVariable('b', b)"]}, + + {"kind":"expect","text":"Initially both tensors have a version number of 0.","code":["a.getVersion() == 0","b.getVersion() == 0"]}, + + {"kind":"when","text":"The groovy code (performing inline operations) is being evaluated.","code":["Tensor c = new GroovyShell(binding).evaluate((code))"]}, + + {"kind":"then","text":"The resulting tensor (toString) will contain the expected sub-string.","code":["c.toString().contains(expected)","c == a"]}, + + {"kind":"and","text":"The three tensors have the expected versions.","code":["a.getVersion() == version_of_a","b.getVersion() == version_of_b","c.getVersion() == version_of_c"]}, + + {"kind":"where","text":"","code":{"code":["' a.mut.plusAssign(b) '","' a.mut.minusAssign(b) '","' a.mut.timesAssign(b) '","' a.mut.divAssign(b) '","' a.mut.plusAssign(b) '","' a.mut.minusAssign(b) '","' a.mut.timesAssign(b) '","' a.mut.divAssign(b) '"],"safe_inline":["true","true","true","true","false","false","false","false"],"version_of_c":["1","1","1","1","0","0","0","0"],"version_of_a":["1","1","1","1","0","0","0","0"],"version_of_b":["0","0","0","0","0","0","0","0"],"expected":["\"(1):[2.0]\"","\"(1):[10.0]\"","\"(1):[-24.0]\"","\"(1):[-1.5]\"","\"(1):[2.0]\"","\"(1):[10.0]\"","\"(1):[-24.0]\"","\"(1):[-1.5]\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Inline operations causes version incrementation. 
[4]", + "result":"PASS", + "duration":"0.005 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"2 tensors a and b.","code":["Neureka.get().settings().autograd().setIsPreventingInlineOperations( safe_inline )","Tensor a = Tensor.of(4d) + Tensor.of(2d)","Tensor b = Tensor.of(-1d) + Tensor.of(-3d).setRqsGradient(true)","Binding binding = new Binding()","binding.setVariable('a', a)","binding.setVariable('b', b)"]}, + + {"kind":"expect","text":"Initially both tensors have a version number of 0.","code":["a.getVersion() == 0","b.getVersion() == 0"]}, + + {"kind":"when","text":"The groovy code (performing inline operations) is being evaluated.","code":["Tensor c = new GroovyShell(binding).evaluate((code))"]}, + + {"kind":"then","text":"The resulting tensor (toString) will contain the expected sub-string.","code":["c.toString().contains(expected)","c == a"]}, + + {"kind":"and","text":"The three tensors have the expected versions.","code":["a.getVersion() == version_of_a","b.getVersion() == version_of_b","c.getVersion() == version_of_c"]}, + + {"kind":"where","text":"","code":{"code":["' a.mut.plusAssign(b) '","' a.mut.minusAssign(b) '","' a.mut.timesAssign(b) '","' a.mut.divAssign(b) '","' a.mut.plusAssign(b) '","' a.mut.minusAssign(b) '","' a.mut.timesAssign(b) '","' a.mut.divAssign(b) '"],"safe_inline":["true","true","true","true","false","false","false","false"],"version_of_c":["1","1","1","1","0","0","0","0"],"version_of_a":["1","1","1","1","0","0","0","0"],"version_of_b":["0","0","0","0","0","0","0","0"],"expected":["\"(1):[2.0]\"","\"(1):[10.0]\"","\"(1):[-24.0]\"","\"(1):[-1.5]\"","\"(1):[2.0]\"","\"(1):[10.0]\"","\"(1):[-24.0]\"","\"(1):[-1.5]\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Inline operations causes version incrementation. 
[5]", + "result":"PASS", + "duration":"0.005 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"2 tensors a and b.","code":["Neureka.get().settings().autograd().setIsPreventingInlineOperations( safe_inline )","Tensor a = Tensor.of(4d) + Tensor.of(2d)","Tensor b = Tensor.of(-1d) + Tensor.of(-3d).setRqsGradient(true)","Binding binding = new Binding()","binding.setVariable('a', a)","binding.setVariable('b', b)"]}, + + {"kind":"expect","text":"Initially both tensors have a version number of 0.","code":["a.getVersion() == 0","b.getVersion() == 0"]}, + + {"kind":"when","text":"The groovy code (performing inline operations) is being evaluated.","code":["Tensor c = new GroovyShell(binding).evaluate((code))"]}, + + {"kind":"then","text":"The resulting tensor (toString) will contain the expected sub-string.","code":["c.toString().contains(expected)","c == a"]}, + + {"kind":"and","text":"The three tensors have the expected versions.","code":["a.getVersion() == version_of_a","b.getVersion() == version_of_b","c.getVersion() == version_of_c"]}, + + {"kind":"where","text":"","code":{"code":["' a.mut.plusAssign(b) '","' a.mut.minusAssign(b) '","' a.mut.timesAssign(b) '","' a.mut.divAssign(b) '","' a.mut.plusAssign(b) '","' a.mut.minusAssign(b) '","' a.mut.timesAssign(b) '","' a.mut.divAssign(b) '"],"safe_inline":["true","true","true","true","false","false","false","false"],"version_of_c":["1","1","1","1","0","0","0","0"],"version_of_a":["1","1","1","1","0","0","0","0"],"version_of_b":["0","0","0","0","0","0","0","0"],"expected":["\"(1):[2.0]\"","\"(1):[10.0]\"","\"(1):[-24.0]\"","\"(1):[-1.5]\"","\"(1):[2.0]\"","\"(1):[10.0]\"","\"(1):[-24.0]\"","\"(1):[-1.5]\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Inline operations causes version incrementation. 
[6]", + "result":"PASS", + "duration":"0.005 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"2 tensors a and b.","code":["Neureka.get().settings().autograd().setIsPreventingInlineOperations( safe_inline )","Tensor a = Tensor.of(4d) + Tensor.of(2d)","Tensor b = Tensor.of(-1d) + Tensor.of(-3d).setRqsGradient(true)","Binding binding = new Binding()","binding.setVariable('a', a)","binding.setVariable('b', b)"]}, + + {"kind":"expect","text":"Initially both tensors have a version number of 0.","code":["a.getVersion() == 0","b.getVersion() == 0"]}, + + {"kind":"when","text":"The groovy code (performing inline operations) is being evaluated.","code":["Tensor c = new GroovyShell(binding).evaluate((code))"]}, + + {"kind":"then","text":"The resulting tensor (toString) will contain the expected sub-string.","code":["c.toString().contains(expected)","c == a"]}, + + {"kind":"and","text":"The three tensors have the expected versions.","code":["a.getVersion() == version_of_a","b.getVersion() == version_of_b","c.getVersion() == version_of_c"]}, + + {"kind":"where","text":"","code":{"code":["' a.mut.plusAssign(b) '","' a.mut.minusAssign(b) '","' a.mut.timesAssign(b) '","' a.mut.divAssign(b) '","' a.mut.plusAssign(b) '","' a.mut.minusAssign(b) '","' a.mut.timesAssign(b) '","' a.mut.divAssign(b) '"],"safe_inline":["true","true","true","true","false","false","false","false"],"version_of_c":["1","1","1","1","0","0","0","0"],"version_of_a":["1","1","1","1","0","0","0","0"],"version_of_b":["0","0","0","0","0","0","0","0"],"expected":["\"(1):[2.0]\"","\"(1):[10.0]\"","\"(1):[-24.0]\"","\"(1):[-1.5]\"","\"(1):[2.0]\"","\"(1):[10.0]\"","\"(1):[-24.0]\"","\"(1):[-1.5]\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Inline operations causes version incrementation. 
[7]", + "result":"PASS", + "duration":"0.004 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"2 tensors a and b.","code":["Neureka.get().settings().autograd().setIsPreventingInlineOperations( safe_inline )","Tensor a = Tensor.of(4d) + Tensor.of(2d)","Tensor b = Tensor.of(-1d) + Tensor.of(-3d).setRqsGradient(true)","Binding binding = new Binding()","binding.setVariable('a', a)","binding.setVariable('b', b)"]}, + + {"kind":"expect","text":"Initially both tensors have a version number of 0.","code":["a.getVersion() == 0","b.getVersion() == 0"]}, + + {"kind":"when","text":"The groovy code (performing inline operations) is being evaluated.","code":["Tensor c = new GroovyShell(binding).evaluate((code))"]}, + + {"kind":"then","text":"The resulting tensor (toString) will contain the expected sub-string.","code":["c.toString().contains(expected)","c == a"]}, + + {"kind":"and","text":"The three tensors have the expected versions.","code":["a.getVersion() == version_of_a","b.getVersion() == version_of_b","c.getVersion() == version_of_c"]}, + + {"kind":"where","text":"","code":{"code":["' a.mut.plusAssign(b) '","' a.mut.minusAssign(b) '","' a.mut.timesAssign(b) '","' a.mut.divAssign(b) '","' a.mut.plusAssign(b) '","' a.mut.minusAssign(b) '","' a.mut.timesAssign(b) '","' a.mut.divAssign(b) '"],"safe_inline":["true","true","true","true","false","false","false","false"],"version_of_c":["1","1","1","1","0","0","0","0"],"version_of_a":["1","1","1","1","0","0","0","0"],"version_of_b":["0","0","0","0","0","0","0","0"],"expected":["\"(1):[2.0]\"","\"(1):[10.0]\"","\"(1):[-24.0]\"","\"(1):[-1.5]\"","\"(1):[2.0]\"","\"(1):[10.0]\"","\"(1):[-24.0]\"","\"(1):[-1.5]\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Non-inline operations do not cause version incrementation. 
[0]", + "result":"PASS", + "duration":"0.007 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Neureka.get().settings().autograd().setIsPreventingInlineOperations( no_inline )"]}, + + {"kind":"and","text":"Two tensors, one requiring gradients and the other one not.","code":["Tensor a = Tensor.of(6d).setRqsGradient(true)","Tensor b = Tensor.of(-4d)"]}, + + {"kind":"and","text":"A binding for both tensors as preparation for calling the Groovy shell.","code":["Binding binding = new Binding()","binding.setVariable('a', a)","binding.setVariable('b', b)"]}, + + {"kind":"expect","text":"The versions of both tensors are 0 initially.","code":["a.getVersion() == 0","b.getVersion() == 0"]}, + + {"kind":"when","text":"The Groovy code is being evaluated inside the Groovy shell.","code":["Tensor c = new GroovyShell(binding).evaluate((code))"]}, + + {"kind":"then","text":"The resulting tensor (toString) will contain the expected String.","code":["c.toString().contains(expected)","c != a"]}, + + {"kind":"and","text":"The three tensors have the expected versions.","code":["a.getVersion() == version_of_a","b.getVersion() == version_of_b","c.getVersion() == version_of_c"]}, + + {"kind":"where","text":"The following arguments are being used:","code":{"code":["' a + b '","' a - b '","' a * b '","' a / b '","' a % b '","' a + b '","' a - b '","' a * b '","' a / b '","' a % b '"],"no_inline":["false","false","false","false","false","true","true","true","true","true"],"version_of_c":["0","0","0","0","0","0","0","0","0","0"],"version_of_a":["0","0","0","0","0","0","0","0","0","0"],"version_of_b":["0","0","0","0","0","0","0","0","0","0"],"expected":["\"(1):[2.0]\"","\"(1):[10.0]\"","\"(1):[-24.0]\"","\"(1):[-1.5]\"","\"(1):[2.0]\"","\"(1):[2.0]\"","\"(1):[10.0]\"","\"(1):[-24.0]\"","\"(1):[-1.5]\"","\"(1):[2.0]\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Non-inline operations do not cause version incrementation. 
[1]", + "result":"PASS", + "duration":"0.004 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Neureka.get().settings().autograd().setIsPreventingInlineOperations( no_inline )"]}, + + {"kind":"and","text":"Two tensors, one requiring gradients and the other one not.","code":["Tensor a = Tensor.of(6d).setRqsGradient(true)","Tensor b = Tensor.of(-4d)"]}, + + {"kind":"and","text":"A binding for both tensors as preparation for calling the Groovy shell.","code":["Binding binding = new Binding()","binding.setVariable('a', a)","binding.setVariable('b', b)"]}, + + {"kind":"expect","text":"The versions of both tensors are 0 initially.","code":["a.getVersion() == 0","b.getVersion() == 0"]}, + + {"kind":"when","text":"The Groovy code is being evaluated inside the Groovy shell.","code":["Tensor c = new GroovyShell(binding).evaluate((code))"]}, + + {"kind":"then","text":"The resulting tensor (toString) will contain the expected String.","code":["c.toString().contains(expected)","c != a"]}, + + {"kind":"and","text":"The three tensors have the expected versions.","code":["a.getVersion() == version_of_a","b.getVersion() == version_of_b","c.getVersion() == version_of_c"]}, + + {"kind":"where","text":"The following arguments are being used:","code":{"code":["' a + b '","' a - b '","' a * b '","' a / b '","' a % b '","' a + b '","' a - b '","' a * b '","' a / b '","' a % b '"],"no_inline":["false","false","false","false","false","true","true","true","true","true"],"version_of_c":["0","0","0","0","0","0","0","0","0","0"],"version_of_a":["0","0","0","0","0","0","0","0","0","0"],"version_of_b":["0","0","0","0","0","0","0","0","0","0"],"expected":["\"(1):[2.0]\"","\"(1):[10.0]\"","\"(1):[-24.0]\"","\"(1):[-1.5]\"","\"(1):[2.0]\"","\"(1):[2.0]\"","\"(1):[10.0]\"","\"(1):[-24.0]\"","\"(1):[-1.5]\"","\"(1):[2.0]\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Non-inline operations do not cause version incrementation. 
[2]", + "result":"PASS", + "duration":"0.004 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Neureka.get().settings().autograd().setIsPreventingInlineOperations( no_inline )"]}, + + {"kind":"and","text":"Two tensors, one requiring gradients and the other one not.","code":["Tensor a = Tensor.of(6d).setRqsGradient(true)","Tensor b = Tensor.of(-4d)"]}, + + {"kind":"and","text":"A binding for both tensors as preparation for calling the Groovy shell.","code":["Binding binding = new Binding()","binding.setVariable('a', a)","binding.setVariable('b', b)"]}, + + {"kind":"expect","text":"The versions of both tensors are 0 initially.","code":["a.getVersion() == 0","b.getVersion() == 0"]}, + + {"kind":"when","text":"The Groovy code is being evaluated inside the Groovy shell.","code":["Tensor c = new GroovyShell(binding).evaluate((code))"]}, + + {"kind":"then","text":"The resulting tensor (toString) will contain the expected String.","code":["c.toString().contains(expected)","c != a"]}, + + {"kind":"and","text":"The three tensors have the expected versions.","code":["a.getVersion() == version_of_a","b.getVersion() == version_of_b","c.getVersion() == version_of_c"]}, + + {"kind":"where","text":"The following arguments are being used:","code":{"code":["' a + b '","' a - b '","' a * b '","' a / b '","' a % b '","' a + b '","' a - b '","' a * b '","' a / b '","' a % b '"],"no_inline":["false","false","false","false","false","true","true","true","true","true"],"version_of_c":["0","0","0","0","0","0","0","0","0","0"],"version_of_a":["0","0","0","0","0","0","0","0","0","0"],"version_of_b":["0","0","0","0","0","0","0","0","0","0"],"expected":["\"(1):[2.0]\"","\"(1):[10.0]\"","\"(1):[-24.0]\"","\"(1):[-1.5]\"","\"(1):[2.0]\"","\"(1):[2.0]\"","\"(1):[10.0]\"","\"(1):[-24.0]\"","\"(1):[-1.5]\"","\"(1):[2.0]\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Non-inline operations do not cause version incrementation. 
[3]", + "result":"PASS", + "duration":"0.005 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Neureka.get().settings().autograd().setIsPreventingInlineOperations( no_inline )"]}, + + {"kind":"and","text":"Two tensors, one requiring gradients and the other one not.","code":["Tensor a = Tensor.of(6d).setRqsGradient(true)","Tensor b = Tensor.of(-4d)"]}, + + {"kind":"and","text":"A binding for both tensors as preparation for calling the Groovy shell.","code":["Binding binding = new Binding()","binding.setVariable('a', a)","binding.setVariable('b', b)"]}, + + {"kind":"expect","text":"The versions of both tensors are 0 initially.","code":["a.getVersion() == 0","b.getVersion() == 0"]}, + + {"kind":"when","text":"The Groovy code is being evaluated inside the Groovy shell.","code":["Tensor c = new GroovyShell(binding).evaluate((code))"]}, + + {"kind":"then","text":"The resulting tensor (toString) will contain the expected String.","code":["c.toString().contains(expected)","c != a"]}, + + {"kind":"and","text":"The three tensors have the expected versions.","code":["a.getVersion() == version_of_a","b.getVersion() == version_of_b","c.getVersion() == version_of_c"]}, + + {"kind":"where","text":"The following arguments are being used:","code":{"code":["' a + b '","' a - b '","' a * b '","' a / b '","' a % b '","' a + b '","' a - b '","' a * b '","' a / b '","' a % b '"],"no_inline":["false","false","false","false","false","true","true","true","true","true"],"version_of_c":["0","0","0","0","0","0","0","0","0","0"],"version_of_a":["0","0","0","0","0","0","0","0","0","0"],"version_of_b":["0","0","0","0","0","0","0","0","0","0"],"expected":["\"(1):[2.0]\"","\"(1):[10.0]\"","\"(1):[-24.0]\"","\"(1):[-1.5]\"","\"(1):[2.0]\"","\"(1):[2.0]\"","\"(1):[10.0]\"","\"(1):[-24.0]\"","\"(1):[-1.5]\"","\"(1):[2.0]\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Non-inline operations do not cause version incrementation. 
[4]", + "result":"PASS", + "duration":"0.004 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Neureka.get().settings().autograd().setIsPreventingInlineOperations( no_inline )"]}, + + {"kind":"and","text":"Two tensors, one requiring gradients and the other one not.","code":["Tensor a = Tensor.of(6d).setRqsGradient(true)","Tensor b = Tensor.of(-4d)"]}, + + {"kind":"and","text":"A binding for both tensors as preparation for calling the Groovy shell.","code":["Binding binding = new Binding()","binding.setVariable('a', a)","binding.setVariable('b', b)"]}, + + {"kind":"expect","text":"The versions of both tensors are 0 initially.","code":["a.getVersion() == 0","b.getVersion() == 0"]}, + + {"kind":"when","text":"The Groovy code is being evaluated inside the Groovy shell.","code":["Tensor c = new GroovyShell(binding).evaluate((code))"]}, + + {"kind":"then","text":"The resulting tensor (toString) will contain the expected String.","code":["c.toString().contains(expected)","c != a"]}, + + {"kind":"and","text":"The three tensors have the expected versions.","code":["a.getVersion() == version_of_a","b.getVersion() == version_of_b","c.getVersion() == version_of_c"]}, + + {"kind":"where","text":"The following arguments are being used:","code":{"code":["' a + b '","' a - b '","' a * b '","' a / b '","' a % b '","' a + b '","' a - b '","' a * b '","' a / b '","' a % b '"],"no_inline":["false","false","false","false","false","true","true","true","true","true"],"version_of_c":["0","0","0","0","0","0","0","0","0","0"],"version_of_a":["0","0","0","0","0","0","0","0","0","0"],"version_of_b":["0","0","0","0","0","0","0","0","0","0"],"expected":["\"(1):[2.0]\"","\"(1):[10.0]\"","\"(1):[-24.0]\"","\"(1):[-1.5]\"","\"(1):[2.0]\"","\"(1):[2.0]\"","\"(1):[10.0]\"","\"(1):[-24.0]\"","\"(1):[-1.5]\"","\"(1):[2.0]\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Non-inline operations do not cause version incrementation. [5]", + "result":"PASS", + "duration":"0.004 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -64,12 +360,120 @@ }, { - "id":"Inline operations cause illegal state exceptions.", + "id":"Non-inline operations do not cause version incrementation. 
[6]", "result":"PASS", "duration":"0.005 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, + "blocks":[ + {"kind":"given","text":"","code":["Neureka.get().settings().autograd().setIsPreventingInlineOperations( no_inline )"]}, + + {"kind":"and","text":"Two tensors, one requiring gradients and the other one not.","code":["Tensor a = Tensor.of(6d).setRqsGradient(true)","Tensor b = Tensor.of(-4d)"]}, + + {"kind":"and","text":"A binding for both tensors as preparation for calling the Groovy shell.","code":["Binding binding = new Binding()","binding.setVariable('a', a)","binding.setVariable('b', b)"]}, + + {"kind":"expect","text":"The versions of both tensors are 0 initially.","code":["a.getVersion() == 0","b.getVersion() == 0"]}, + + {"kind":"when","text":"The Groovy code is being evaluated inside the Groovy shell.","code":["Tensor c = new GroovyShell(binding).evaluate((code))"]}, + + {"kind":"then","text":"The resulting tensor (toString) will contain the expected String.","code":["c.toString().contains(expected)","c != a"]}, + + {"kind":"and","text":"The three tensors have the expected versions.","code":["a.getVersion() == version_of_a","b.getVersion() == version_of_b","c.getVersion() == version_of_c"]}, + + {"kind":"where","text":"The following arguments are being used:","code":{"code":["' a + b '","' a - b '","' a * b '","' a / b '","' a % b '","' a + b '","' a - b '","' a * b '","' a / b '","' a % b '"],"no_inline":["false","false","false","false","false","true","true","true","true","true"],"version_of_c":["0","0","0","0","0","0","0","0","0","0"],"version_of_a":["0","0","0","0","0","0","0","0","0","0"],"version_of_b":["0","0","0","0","0","0","0","0","0","0"],"expected":["\"(1):[2.0]\"","\"(1):[10.0]\"","\"(1):[-24.0]\"","\"(1):[-1.5]\"","\"(1):[2.0]\"","\"(1):[2.0]\"","\"(1):[10.0]\"","\"(1):[-24.0]\"","\"(1):[-1.5]\"","\"(1):[2.0]\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Non-inline operations do not cause version incrementation. 
[7]", + "result":"PASS", + "duration":"0.004 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Neureka.get().settings().autograd().setIsPreventingInlineOperations( no_inline )"]}, + + {"kind":"and","text":"Two tensors, one requiring gradients and the other one not.","code":["Tensor a = Tensor.of(6d).setRqsGradient(true)","Tensor b = Tensor.of(-4d)"]}, + + {"kind":"and","text":"A binding for both tensors as preparation for calling the Groovy shell.","code":["Binding binding = new Binding()","binding.setVariable('a', a)","binding.setVariable('b', b)"]}, + + {"kind":"expect","text":"The versions of both tensors are 0 initially.","code":["a.getVersion() == 0","b.getVersion() == 0"]}, + + {"kind":"when","text":"The Groovy code is being evaluated inside the Groovy shell.","code":["Tensor c = new GroovyShell(binding).evaluate((code))"]}, + + {"kind":"then","text":"The resulting tensor (toString) will contain the expected String.","code":["c.toString().contains(expected)","c != a"]}, + + {"kind":"and","text":"The three tensors have the expected versions.","code":["a.getVersion() == version_of_a","b.getVersion() == version_of_b","c.getVersion() == version_of_c"]}, + + {"kind":"where","text":"The following arguments are being used:","code":{"code":["' a + b '","' a - b '","' a * b '","' a / b '","' a % b '","' a + b '","' a - b '","' a * b '","' a / b '","' a % b '"],"no_inline":["false","false","false","false","false","true","true","true","true","true"],"version_of_c":["0","0","0","0","0","0","0","0","0","0"],"version_of_a":["0","0","0","0","0","0","0","0","0","0"],"version_of_b":["0","0","0","0","0","0","0","0","0","0"],"expected":["\"(1):[2.0]\"","\"(1):[10.0]\"","\"(1):[-24.0]\"","\"(1):[-1.5]\"","\"(1):[2.0]\"","\"(1):[2.0]\"","\"(1):[10.0]\"","\"(1):[-24.0]\"","\"(1):[-1.5]\"","\"(1):[2.0]\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Non-inline operations do not cause version incrementation. 
[8]", + "result":"PASS", + "duration":"0.004 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Neureka.get().settings().autograd().setIsPreventingInlineOperations( no_inline )"]}, + + {"kind":"and","text":"Two tensors, one requiring gradients and the other one not.","code":["Tensor a = Tensor.of(6d).setRqsGradient(true)","Tensor b = Tensor.of(-4d)"]}, + + {"kind":"and","text":"A binding for both tensors as preparation for calling the Groovy shell.","code":["Binding binding = new Binding()","binding.setVariable('a', a)","binding.setVariable('b', b)"]}, + + {"kind":"expect","text":"The versions of both tensors are 0 initially.","code":["a.getVersion() == 0","b.getVersion() == 0"]}, + + {"kind":"when","text":"The Groovy code is being evaluated inside the Groovy shell.","code":["Tensor c = new GroovyShell(binding).evaluate((code))"]}, + + {"kind":"then","text":"The resulting tensor (toString) will contain the expected String.","code":["c.toString().contains(expected)","c != a"]}, + + {"kind":"and","text":"The three tensors have the expected versions.","code":["a.getVersion() == version_of_a","b.getVersion() == version_of_b","c.getVersion() == version_of_c"]}, + + {"kind":"where","text":"The following arguments are being used:","code":{"code":["' a + b '","' a - b '","' a * b '","' a / b '","' a % b '","' a + b '","' a - b '","' a * b '","' a / b '","' a % b '"],"no_inline":["false","false","false","false","false","true","true","true","true","true"],"version_of_c":["0","0","0","0","0","0","0","0","0","0"],"version_of_a":["0","0","0","0","0","0","0","0","0","0"],"version_of_b":["0","0","0","0","0","0","0","0","0","0"],"expected":["\"(1):[2.0]\"","\"(1):[10.0]\"","\"(1):[-24.0]\"","\"(1):[-1.5]\"","\"(1):[2.0]\"","\"(1):[2.0]\"","\"(1):[10.0]\"","\"(1):[-24.0]\"","\"(1):[-1.5]\"","\"(1):[2.0]\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Non-inline operations do not cause version incrementation. 
[9]", + "result":"PASS", + "duration":"0.005 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Neureka.get().settings().autograd().setIsPreventingInlineOperations( no_inline )"]}, + + {"kind":"and","text":"Two tensors, one requiring gradients and the other one not.","code":["Tensor a = Tensor.of(6d).setRqsGradient(true)","Tensor b = Tensor.of(-4d)"]}, + + {"kind":"and","text":"A binding for both tensors as preparation for calling the Groovy shell.","code":["Binding binding = new Binding()","binding.setVariable('a', a)","binding.setVariable('b', b)"]}, + + {"kind":"expect","text":"The versions of both tensors are 0 initially.","code":["a.getVersion() == 0","b.getVersion() == 0"]}, + + {"kind":"when","text":"The Groovy code is being evaluated inside the Groovy shell.","code":["Tensor c = new GroovyShell(binding).evaluate((code))"]}, + + {"kind":"then","text":"The resulting tensor (toString) will contain the expected String.","code":["c.toString().contains(expected)","c != a"]}, + + {"kind":"and","text":"The three tensors have the expected versions.","code":["a.getVersion() == version_of_a","b.getVersion() == version_of_b","c.getVersion() == version_of_c"]}, + + {"kind":"where","text":"The following arguments are being used:","code":{"code":["' a + b '","' a - b '","' a * b '","' a / b '","' a % b '","' a + b '","' a - b '","' a * b '","' a / b '","' a % b '"],"no_inline":["false","false","false","false","false","true","true","true","true","true"],"version_of_c":["0","0","0","0","0","0","0","0","0","0"],"version_of_a":["0","0","0","0","0","0","0","0","0","0"],"version_of_b":["0","0","0","0","0","0","0","0","0","0"],"expected":["\"(1):[2.0]\"","\"(1):[10.0]\"","\"(1):[-24.0]\"","\"(1):[-1.5]\"","\"(1):[2.0]\"","\"(1):[2.0]\"","\"(1):[10.0]\"","\"(1):[-24.0]\"","\"(1):[-1.5]\"","\"(1):[2.0]\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Inline operations cause illegal state exceptions. [0]", + "result":"PASS", + "duration":"0.008 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, "blocks":[ {"kind":"given","text":"","code":["Neureka.get().settings().autograd().setIsPreventingInlineOperations( true )","Tensor a = Tensor.of(4d) + Tensor.of(2d).setRqsGradient(true)","Tensor b = Tensor.of(-4d)","Binding binding = new Binding()","binding.setVariable('a', a)","binding.setVariable('b', b)"]}, @@ -87,9 +491,78 @@ }, { - "id":"Storing a tensor on a device should not change the version of a tensor (Even though its data changed technically).", + "id":"Inline operations cause illegal state exceptions. 
[1]", "result":"PASS", - "duration":"0.001 seconds", + "duration":"0.005 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Neureka.get().settings().autograd().setIsPreventingInlineOperations( true )","Tensor a = Tensor.of(4d) + Tensor.of(2d).setRqsGradient(true)","Tensor b = Tensor.of(-4d)","Binding binding = new Binding()","binding.setVariable('a', a)","binding.setVariable('b', b)"]}, + + {"kind":"expect","text":"","code":["a.getVersion() == 0","b.getVersion() == 0"]}, + + {"kind":"when","text":"The groovy code is being evaluated.","code":["Tensor c = new GroovyShell( binding ).evaluate( code )"]}, + + {"kind":"then","text":"An illegal state exception is being thrown.","code":["def exception = thrown(IllegalStateException)","exception.message == message"]}, + + {"kind":"and","text":"The variable \"c\" is null!","code":["c == null"]}, + + {"kind":"where","text":"","code":{"code":["'a.mut.plusAssign(b) '","'a.mut.minusAssign(b)'","'a.mut.timesAssign(b)'","'a.mut.divAssign(b) '"],"message":["\"Inline operation occurred on tensor which is part of a computation graph node with autograd support!\\nThe following OperationType caused an internal version mismatch: 'left_inline'\"","\"Inline operation occurred on tensor which is part of a computation graph node with autograd support!\\nThe following OperationType caused an internal version mismatch: 'left_inline'\"","\"Inline operation occurred on tensor which is part of a computation graph node with autograd support!\\nThe following OperationType caused an internal version mismatch: 'left_inline'\"","\"Inline operation occurred on tensor which is part of a computation graph node with autograd support!\\nThe following OperationType caused an internal version mismatch: 'left_inline'\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Inline operations cause illegal state exceptions. 
[2]", + "result":"PASS", + "duration":"0.005 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Neureka.get().settings().autograd().setIsPreventingInlineOperations( true )","Tensor a = Tensor.of(4d) + Tensor.of(2d).setRqsGradient(true)","Tensor b = Tensor.of(-4d)","Binding binding = new Binding()","binding.setVariable('a', a)","binding.setVariable('b', b)"]}, + + {"kind":"expect","text":"","code":["a.getVersion() == 0","b.getVersion() == 0"]}, + + {"kind":"when","text":"The groovy code is being evaluated.","code":["Tensor c = new GroovyShell( binding ).evaluate( code )"]}, + + {"kind":"then","text":"An illegal state exception is being thrown.","code":["def exception = thrown(IllegalStateException)","exception.message == message"]}, + + {"kind":"and","text":"The variable \"c\" is null!","code":["c == null"]}, + + {"kind":"where","text":"","code":{"code":["'a.mut.plusAssign(b) '","'a.mut.minusAssign(b)'","'a.mut.timesAssign(b)'","'a.mut.divAssign(b) '"],"message":["\"Inline operation occurred on tensor which is part of a computation graph node with autograd support!\\nThe following OperationType caused an internal version mismatch: 'left_inline'\"","\"Inline operation occurred on tensor which is part of a computation graph node with autograd support!\\nThe following OperationType caused an internal version mismatch: 'left_inline'\"","\"Inline operation occurred on tensor which is part of a computation graph node with autograd support!\\nThe following OperationType caused an internal version mismatch: 'left_inline'\"","\"Inline operation occurred on tensor which is part of a computation graph node with autograd support!\\nThe following OperationType caused an internal version mismatch: 'left_inline'\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Inline operations cause illegal state exceptions. 
[3]", + "result":"PASS", + "duration":"0.005 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Neureka.get().settings().autograd().setIsPreventingInlineOperations( true )","Tensor a = Tensor.of(4d) + Tensor.of(2d).setRqsGradient(true)","Tensor b = Tensor.of(-4d)","Binding binding = new Binding()","binding.setVariable('a', a)","binding.setVariable('b', b)"]}, + + {"kind":"expect","text":"","code":["a.getVersion() == 0","b.getVersion() == 0"]}, + + {"kind":"when","text":"The groovy code is being evaluated.","code":["Tensor c = new GroovyShell( binding ).evaluate( code )"]}, + + {"kind":"then","text":"An illegal state exception is being thrown.","code":["def exception = thrown(IllegalStateException)","exception.message == message"]}, + + {"kind":"and","text":"The variable \"c\" is null!","code":["c == null"]}, + + {"kind":"where","text":"","code":{"code":["'a.mut.plusAssign(b) '","'a.mut.minusAssign(b)'","'a.mut.timesAssign(b)'","'a.mut.divAssign(b) '"],"message":["\"Inline operation occurred on tensor which is part of a computation graph node with autograd support!\\nThe following OperationType caused an internal version mismatch: 'left_inline'\"","\"Inline operation occurred on tensor which is part of a computation graph node with autograd support!\\nThe following OperationType caused an internal version mismatch: 'left_inline'\"","\"Inline operation occurred on tensor which is part of a computation graph node with autograd support!\\nThe following OperationType caused an internal version mismatch: 'left_inline'\"","\"Inline operation occurred on tensor which is part of a computation graph node with autograd support!\\nThe following OperationType caused an internal version mismatch: 'left_inline'\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Storing a tensor on a device should not change the version of a tensor (Even though its data changed technically).", + "result":"IGNORED", + "duration":"0", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, diff --git a/docs/spock/reports/ut.tensors.exceptions.Tensor_Delete_Exception_Spec.json b/docs/spock/reports/ut.tensors.exceptions.Tensor_Delete_Exception_Spec.json index 8100599a6..d8636db26 100644 --- a/docs/spock/reports/ut.tensors.exceptions.Tensor_Delete_Exception_Spec.json +++ b/docs/spock/reports/ut.tensors.exceptions.Tensor_Delete_Exception_Spec.json @@ -9,14 +9,14 @@ "failures":"0", "errors":"0", "skipped":"0", - "duration":"0.003 seconds" + "duration":"0.014 seconds" }, "headers":[],"tags":{},"see":[], "features":[ { "id":"A deleted tensor will tell you that it has been deleted.", "result":"PASS", - "duration":"0", + "duration":"0.001 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -31,7 +31,7 @@ { "id":"A deleted tensor will throw an exception when accessing its configuration.", "result":"PASS", - "duration":"0", + "duration":"0.002 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -50,7 +50,7 @@ { "id":"A deleted tensor will throw an exception when trying to set its configuration.", "result":"PASS", - "duration":"0", + "duration":"0.001 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -69,7 +69,7 @@ { "id":"A deleted tensor will throw an exception when accessing its data.", "result":"PASS", - "duration":"0", + "duration":"0.001 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -88,7 +88,7 @@ { "id":"A deleted tensor will throw an exception when trying to modify its data.", 
"result":"PASS", - "duration":"0", + "duration":"0.001 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -107,7 +107,7 @@ { "id":"A deleted tensor will throw an exception when accessing its data type.", "result":"PASS", - "duration":"0", + "duration":"0.001 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -126,7 +126,7 @@ { "id":"A deleted tensor will throw an exception when modifying its data type.", "result":"PASS", - "duration":"0", + "duration":"0.001 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, diff --git a/docs/spock/reports/ut.tensors.exceptions.Tensor_Exception_Spec.json b/docs/spock/reports/ut.tensors.exceptions.Tensor_Exception_Spec.json index ae0e19a17..52ecd657a 100644 --- a/docs/spock/reports/ut.tensors.exceptions.Tensor_Exception_Spec.json +++ b/docs/spock/reports/ut.tensors.exceptions.Tensor_Exception_Spec.json @@ -4,19 +4,19 @@ "narrative":"This specification covers the behavior of the Tensor class in\n exceptional scenarios which are contrary to its intended use.\n The purpose of this is to assert that the Tensor class will provide\n useful feedback to a user to explain that a misuse of its API\n occurred so that the user can correct this misuse.", "subjects":[], "statistics":{ - "runs":"8", + "runs":"25", "successRate":"100.0%", "failures":"0", "errors":"0", "skipped":"0", - "duration":"0.014 seconds" + "duration":"0.047 seconds" }, "headers":["\n

        Tensors Exception Behavior

        \n
        \n

        \n This specification covers the behavior of tensors\n in exceptional situations. \n

        \n "],"tags":{},"see":[], "features":[ { "id":"Trying to inject an empty tensor into another causes fitting exception.", "result":"PASS", - "duration":"0", + "duration":"0.004 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -31,7 +31,28 @@ }, { - "id":"Passing null to various methods of the tensor API will throw exceptions.", + "id":"Passing null to various methods of the tensor API will throw exceptions. [0]", + "result":"PASS", + "duration":"0.003 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Tensor t = Tensor.of(1, 2, 3)"]}, + + {"kind":"when","text":"","code":["errorCode(t)"]}, + + {"kind":"then","text":"","code":["var exception = thrown(type)"]}, + + {"kind":"and","text":"","code":["exception.message != \"\" && exception.message.length() > 13"]}, + + {"kind":"where","text":"","code":{"type":["IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException"],"errorCode":["{ Tensor x -> x.times((Tensor)null) }","{ Tensor x -> x.div((Tensor)null) }","{ Tensor x -> x.plus((Tensor)null) }","{ Tensor x -> x.mod((Tensor)null) }","{ Tensor x -> x.mut.timesAssign((Tensor)null) }","{ Tensor x -> x.mut.divAssign((Tensor)null) }","{ Tensor x -> x.mut.plusAssign((Tensor)null) }","{ Tensor x -> x.mut.modAssign((Tensor)null) }","{ Tensor x -> x.mut.minusAssign((Tensor)null) }","{ Tensor x -> x.mut.labelAxes((String[][])null) }","{ Tensor x -> x.mut.labelAxes((String[][])null) }","{ Tensor x -> x.mut.labelAxes((Map)null) }","{ Tensor x -> x.mut.label(null) }","{ Tensor x -> x.withLabels((String[][])null) }","{ Tensor x -> x.withLabels(null, (String[])null) }","{ Tensor x -> x.withLabels((String[])null) }","{ Tensor x -> x.withLabels((Map)null) }","{ Tensor x -> x.withLabel(null) }"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Passing null to various methods of the tensor API will throw exceptions. [1]", "result":"PASS", "duration":"0", "iterations":{ @@ -52,12 +73,348 @@ }, { - "id":"Passing an invalid object into Tensor constructor causes descriptive exception.", + "id":"Passing null to various methods of the tensor API will throw exceptions. 
[2]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Tensor t = Tensor.of(1, 2, 3)"]}, + + {"kind":"when","text":"","code":["errorCode(t)"]}, + + {"kind":"then","text":"","code":["var exception = thrown(type)"]}, + + {"kind":"and","text":"","code":["exception.message != \"\" && exception.message.length() > 13"]}, + + {"kind":"where","text":"","code":{"type":["IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException"],"errorCode":["{ Tensor x -> x.times((Tensor)null) }","{ Tensor x -> x.div((Tensor)null) }","{ Tensor x -> x.plus((Tensor)null) }","{ Tensor x -> x.mod((Tensor)null) }","{ Tensor x -> x.mut.timesAssign((Tensor)null) }","{ Tensor x -> x.mut.divAssign((Tensor)null) }","{ Tensor x -> x.mut.plusAssign((Tensor)null) }","{ Tensor x -> x.mut.modAssign((Tensor)null) }","{ Tensor x -> x.mut.minusAssign((Tensor)null) }","{ Tensor x -> x.mut.labelAxes((String[][])null) }","{ Tensor x -> x.mut.labelAxes((String[][])null) }","{ Tensor x -> x.mut.labelAxes((Map)null) }","{ Tensor x -> x.mut.label(null) }","{ Tensor x -> x.withLabels((String[][])null) }","{ Tensor x -> x.withLabels(null, (String[])null) }","{ Tensor x -> x.withLabels((String[])null) }","{ Tensor x -> x.withLabels((Map)null) }","{ Tensor x -> x.withLabel(null) }"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Passing null to various methods of the tensor API will throw exceptions. 
[3]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Tensor t = Tensor.of(1, 2, 3)"]}, + + {"kind":"when","text":"","code":["errorCode(t)"]}, + + {"kind":"then","text":"","code":["var exception = thrown(type)"]}, + + {"kind":"and","text":"","code":["exception.message != \"\" && exception.message.length() > 13"]}, + + {"kind":"where","text":"","code":{"type":["IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException"],"errorCode":["{ Tensor x -> x.times((Tensor)null) }","{ Tensor x -> x.div((Tensor)null) }","{ Tensor x -> x.plus((Tensor)null) }","{ Tensor x -> x.mod((Tensor)null) }","{ Tensor x -> x.mut.timesAssign((Tensor)null) }","{ Tensor x -> x.mut.divAssign((Tensor)null) }","{ Tensor x -> x.mut.plusAssign((Tensor)null) }","{ Tensor x -> x.mut.modAssign((Tensor)null) }","{ Tensor x -> x.mut.minusAssign((Tensor)null) }","{ Tensor x -> x.mut.labelAxes((String[][])null) }","{ Tensor x -> x.mut.labelAxes((String[][])null) }","{ Tensor x -> x.mut.labelAxes((Map)null) }","{ Tensor x -> x.mut.label(null) }","{ Tensor x -> x.withLabels((String[][])null) }","{ Tensor x -> x.withLabels(null, (String[])null) }","{ Tensor x -> x.withLabels((String[])null) }","{ Tensor x -> x.withLabels((Map)null) }","{ Tensor x -> x.withLabel(null) }"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Passing null to various methods of the tensor API will throw exceptions. 
[4]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Tensor t = Tensor.of(1, 2, 3)"]}, + + {"kind":"when","text":"","code":["errorCode(t)"]}, + + {"kind":"then","text":"","code":["var exception = thrown(type)"]}, + + {"kind":"and","text":"","code":["exception.message != \"\" && exception.message.length() > 13"]}, + + {"kind":"where","text":"","code":{"type":["IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException"],"errorCode":["{ Tensor x -> x.times((Tensor)null) }","{ Tensor x -> x.div((Tensor)null) }","{ Tensor x -> x.plus((Tensor)null) }","{ Tensor x -> x.mod((Tensor)null) }","{ Tensor x -> x.mut.timesAssign((Tensor)null) }","{ Tensor x -> x.mut.divAssign((Tensor)null) }","{ Tensor x -> x.mut.plusAssign((Tensor)null) }","{ Tensor x -> x.mut.modAssign((Tensor)null) }","{ Tensor x -> x.mut.minusAssign((Tensor)null) }","{ Tensor x -> x.mut.labelAxes((String[][])null) }","{ Tensor x -> x.mut.labelAxes((String[][])null) }","{ Tensor x -> x.mut.labelAxes((Map)null) }","{ Tensor x -> x.mut.label(null) }","{ Tensor x -> x.withLabels((String[][])null) }","{ Tensor x -> x.withLabels(null, (String[])null) }","{ Tensor x -> x.withLabels((String[])null) }","{ Tensor x -> x.withLabels((Map)null) }","{ Tensor x -> x.withLabel(null) }"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Passing null to various methods of the tensor API will throw exceptions. 
[5]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Tensor t = Tensor.of(1, 2, 3)"]}, + + {"kind":"when","text":"","code":["errorCode(t)"]}, + + {"kind":"then","text":"","code":["var exception = thrown(type)"]}, + + {"kind":"and","text":"","code":["exception.message != \"\" && exception.message.length() > 13"]}, + + {"kind":"where","text":"","code":{"type":["IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException"],"errorCode":["{ Tensor x -> x.times((Tensor)null) }","{ Tensor x -> x.div((Tensor)null) }","{ Tensor x -> x.plus((Tensor)null) }","{ Tensor x -> x.mod((Tensor)null) }","{ Tensor x -> x.mut.timesAssign((Tensor)null) }","{ Tensor x -> x.mut.divAssign((Tensor)null) }","{ Tensor x -> x.mut.plusAssign((Tensor)null) }","{ Tensor x -> x.mut.modAssign((Tensor)null) }","{ Tensor x -> x.mut.minusAssign((Tensor)null) }","{ Tensor x -> x.mut.labelAxes((String[][])null) }","{ Tensor x -> x.mut.labelAxes((String[][])null) }","{ Tensor x -> x.mut.labelAxes((Map)null) }","{ Tensor x -> x.mut.label(null) }","{ Tensor x -> x.withLabels((String[][])null) }","{ Tensor x -> x.withLabels(null, (String[])null) }","{ Tensor x -> x.withLabels((String[])null) }","{ Tensor x -> x.withLabels((Map)null) }","{ Tensor x -> x.withLabel(null) }"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Passing null to various methods of the tensor API will throw exceptions. 
[6]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Tensor t = Tensor.of(1, 2, 3)"]}, + + {"kind":"when","text":"","code":["errorCode(t)"]}, + + {"kind":"then","text":"","code":["var exception = thrown(type)"]}, + + {"kind":"and","text":"","code":["exception.message != \"\" && exception.message.length() > 13"]}, + + {"kind":"where","text":"","code":{"type":["IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException"],"errorCode":["{ Tensor x -> x.times((Tensor)null) }","{ Tensor x -> x.div((Tensor)null) }","{ Tensor x -> x.plus((Tensor)null) }","{ Tensor x -> x.mod((Tensor)null) }","{ Tensor x -> x.mut.timesAssign((Tensor)null) }","{ Tensor x -> x.mut.divAssign((Tensor)null) }","{ Tensor x -> x.mut.plusAssign((Tensor)null) }","{ Tensor x -> x.mut.modAssign((Tensor)null) }","{ Tensor x -> x.mut.minusAssign((Tensor)null) }","{ Tensor x -> x.mut.labelAxes((String[][])null) }","{ Tensor x -> x.mut.labelAxes((String[][])null) }","{ Tensor x -> x.mut.labelAxes((Map)null) }","{ Tensor x -> x.mut.label(null) }","{ Tensor x -> x.withLabels((String[][])null) }","{ Tensor x -> x.withLabels(null, (String[])null) }","{ Tensor x -> x.withLabels((String[])null) }","{ Tensor x -> x.withLabels((Map)null) }","{ Tensor x -> x.withLabel(null) }"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Passing null to various methods of the tensor API will throw exceptions. 
[7]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Tensor t = Tensor.of(1, 2, 3)"]}, + + {"kind":"when","text":"","code":["errorCode(t)"]}, + + {"kind":"then","text":"","code":["var exception = thrown(type)"]}, + + {"kind":"and","text":"","code":["exception.message != \"\" && exception.message.length() > 13"]}, + + {"kind":"where","text":"","code":{"type":["IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException"],"errorCode":["{ Tensor x -> x.times((Tensor)null) }","{ Tensor x -> x.div((Tensor)null) }","{ Tensor x -> x.plus((Tensor)null) }","{ Tensor x -> x.mod((Tensor)null) }","{ Tensor x -> x.mut.timesAssign((Tensor)null) }","{ Tensor x -> x.mut.divAssign((Tensor)null) }","{ Tensor x -> x.mut.plusAssign((Tensor)null) }","{ Tensor x -> x.mut.modAssign((Tensor)null) }","{ Tensor x -> x.mut.minusAssign((Tensor)null) }","{ Tensor x -> x.mut.labelAxes((String[][])null) }","{ Tensor x -> x.mut.labelAxes((String[][])null) }","{ Tensor x -> x.mut.labelAxes((Map)null) }","{ Tensor x -> x.mut.label(null) }","{ Tensor x -> x.withLabels((String[][])null) }","{ Tensor x -> x.withLabels(null, (String[])null) }","{ Tensor x -> x.withLabels((String[])null) }","{ Tensor x -> x.withLabels((Map)null) }","{ Tensor x -> x.withLabel(null) }"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Passing null to various methods of the tensor API will throw exceptions. 
[8]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Tensor t = Tensor.of(1, 2, 3)"]}, + + {"kind":"when","text":"","code":["errorCode(t)"]}, + + {"kind":"then","text":"","code":["var exception = thrown(type)"]}, + + {"kind":"and","text":"","code":["exception.message != \"\" && exception.message.length() > 13"]}, + + {"kind":"where","text":"","code":{"type":["IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException"],"errorCode":["{ Tensor x -> x.times((Tensor)null) }","{ Tensor x -> x.div((Tensor)null) }","{ Tensor x -> x.plus((Tensor)null) }","{ Tensor x -> x.mod((Tensor)null) }","{ Tensor x -> x.mut.timesAssign((Tensor)null) }","{ Tensor x -> x.mut.divAssign((Tensor)null) }","{ Tensor x -> x.mut.plusAssign((Tensor)null) }","{ Tensor x -> x.mut.modAssign((Tensor)null) }","{ Tensor x -> x.mut.minusAssign((Tensor)null) }","{ Tensor x -> x.mut.labelAxes((String[][])null) }","{ Tensor x -> x.mut.labelAxes((String[][])null) }","{ Tensor x -> x.mut.labelAxes((Map)null) }","{ Tensor x -> x.mut.label(null) }","{ Tensor x -> x.withLabels((String[][])null) }","{ Tensor x -> x.withLabels(null, (String[])null) }","{ Tensor x -> x.withLabels((String[])null) }","{ Tensor x -> x.withLabels((Map)null) }","{ Tensor x -> x.withLabel(null) }"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Passing null to various methods of the tensor API will throw exceptions. 
[9]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Tensor t = Tensor.of(1, 2, 3)"]}, + + {"kind":"when","text":"","code":["errorCode(t)"]}, + + {"kind":"then","text":"","code":["var exception = thrown(type)"]}, + + {"kind":"and","text":"","code":["exception.message != \"\" && exception.message.length() > 13"]}, + + {"kind":"where","text":"","code":{"type":["IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException"],"errorCode":["{ Tensor x -> x.times((Tensor)null) }","{ Tensor x -> x.div((Tensor)null) }","{ Tensor x -> x.plus((Tensor)null) }","{ Tensor x -> x.mod((Tensor)null) }","{ Tensor x -> x.mut.timesAssign((Tensor)null) }","{ Tensor x -> x.mut.divAssign((Tensor)null) }","{ Tensor x -> x.mut.plusAssign((Tensor)null) }","{ Tensor x -> x.mut.modAssign((Tensor)null) }","{ Tensor x -> x.mut.minusAssign((Tensor)null) }","{ Tensor x -> x.mut.labelAxes((String[][])null) }","{ Tensor x -> x.mut.labelAxes((String[][])null) }","{ Tensor x -> x.mut.labelAxes((Map)null) }","{ Tensor x -> x.mut.label(null) }","{ Tensor x -> x.withLabels((String[][])null) }","{ Tensor x -> x.withLabels(null, (String[])null) }","{ Tensor x -> x.withLabels((String[])null) }","{ Tensor x -> x.withLabels((Map)null) }","{ Tensor x -> x.withLabel(null) }"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Passing null to various methods of the tensor API will throw exceptions. 
[10]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Tensor t = Tensor.of(1, 2, 3)"]}, + + {"kind":"when","text":"","code":["errorCode(t)"]}, + + {"kind":"then","text":"","code":["var exception = thrown(type)"]}, + + {"kind":"and","text":"","code":["exception.message != \"\" && exception.message.length() > 13"]}, + + {"kind":"where","text":"","code":{"type":["IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException"],"errorCode":["{ Tensor x -> x.times((Tensor)null) }","{ Tensor x -> x.div((Tensor)null) }","{ Tensor x -> x.plus((Tensor)null) }","{ Tensor x -> x.mod((Tensor)null) }","{ Tensor x -> x.mut.timesAssign((Tensor)null) }","{ Tensor x -> x.mut.divAssign((Tensor)null) }","{ Tensor x -> x.mut.plusAssign((Tensor)null) }","{ Tensor x -> x.mut.modAssign((Tensor)null) }","{ Tensor x -> x.mut.minusAssign((Tensor)null) }","{ Tensor x -> x.mut.labelAxes((String[][])null) }","{ Tensor x -> x.mut.labelAxes((String[][])null) }","{ Tensor x -> x.mut.labelAxes((Map)null) }","{ Tensor x -> x.mut.label(null) }","{ Tensor x -> x.withLabels((String[][])null) }","{ Tensor x -> x.withLabels(null, (String[])null) }","{ Tensor x -> x.withLabels((String[])null) }","{ Tensor x -> x.withLabels((Map)null) }","{ Tensor x -> x.withLabel(null) }"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Passing null to various methods of the tensor API will throw exceptions. 
[11]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Tensor t = Tensor.of(1, 2, 3)"]}, + + {"kind":"when","text":"","code":["errorCode(t)"]}, + + {"kind":"then","text":"","code":["var exception = thrown(type)"]}, + + {"kind":"and","text":"","code":["exception.message != \"\" && exception.message.length() > 13"]}, + + {"kind":"where","text":"","code":{"type":["IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException"],"errorCode":["{ Tensor x -> x.times((Tensor)null) }","{ Tensor x -> x.div((Tensor)null) }","{ Tensor x -> x.plus((Tensor)null) }","{ Tensor x -> x.mod((Tensor)null) }","{ Tensor x -> x.mut.timesAssign((Tensor)null) }","{ Tensor x -> x.mut.divAssign((Tensor)null) }","{ Tensor x -> x.mut.plusAssign((Tensor)null) }","{ Tensor x -> x.mut.modAssign((Tensor)null) }","{ Tensor x -> x.mut.minusAssign((Tensor)null) }","{ Tensor x -> x.mut.labelAxes((String[][])null) }","{ Tensor x -> x.mut.labelAxes((String[][])null) }","{ Tensor x -> x.mut.labelAxes((Map)null) }","{ Tensor x -> x.mut.label(null) }","{ Tensor x -> x.withLabels((String[][])null) }","{ Tensor x -> x.withLabels(null, (String[])null) }","{ Tensor x -> x.withLabels((String[])null) }","{ Tensor x -> x.withLabels((Map)null) }","{ Tensor x -> x.withLabel(null) }"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Passing null to various methods of the tensor API will throw exceptions. 
[12]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Tensor t = Tensor.of(1, 2, 3)"]}, + + {"kind":"when","text":"","code":["errorCode(t)"]}, + + {"kind":"then","text":"","code":["var exception = thrown(type)"]}, + + {"kind":"and","text":"","code":["exception.message != \"\" && exception.message.length() > 13"]}, + + {"kind":"where","text":"","code":{"type":["IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException"],"errorCode":["{ Tensor x -> x.times((Tensor)null) }","{ Tensor x -> x.div((Tensor)null) }","{ Tensor x -> x.plus((Tensor)null) }","{ Tensor x -> x.mod((Tensor)null) }","{ Tensor x -> x.mut.timesAssign((Tensor)null) }","{ Tensor x -> x.mut.divAssign((Tensor)null) }","{ Tensor x -> x.mut.plusAssign((Tensor)null) }","{ Tensor x -> x.mut.modAssign((Tensor)null) }","{ Tensor x -> x.mut.minusAssign((Tensor)null) }","{ Tensor x -> x.mut.labelAxes((String[][])null) }","{ Tensor x -> x.mut.labelAxes((String[][])null) }","{ Tensor x -> x.mut.labelAxes((Map)null) }","{ Tensor x -> x.mut.label(null) }","{ Tensor x -> x.withLabels((String[][])null) }","{ Tensor x -> x.withLabels(null, (String[])null) }","{ Tensor x -> x.withLabels((String[])null) }","{ Tensor x -> x.withLabels((Map)null) }","{ Tensor x -> x.withLabel(null) }"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Passing null to various methods of the tensor API will throw exceptions. 
[13]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Tensor t = Tensor.of(1, 2, 3)"]}, + + {"kind":"when","text":"","code":["errorCode(t)"]}, + + {"kind":"then","text":"","code":["var exception = thrown(type)"]}, + + {"kind":"and","text":"","code":["exception.message != \"\" && exception.message.length() > 13"]}, + + {"kind":"where","text":"","code":{"type":["IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException"],"errorCode":["{ Tensor x -> x.times((Tensor)null) }","{ Tensor x -> x.div((Tensor)null) }","{ Tensor x -> x.plus((Tensor)null) }","{ Tensor x -> x.mod((Tensor)null) }","{ Tensor x -> x.mut.timesAssign((Tensor)null) }","{ Tensor x -> x.mut.divAssign((Tensor)null) }","{ Tensor x -> x.mut.plusAssign((Tensor)null) }","{ Tensor x -> x.mut.modAssign((Tensor)null) }","{ Tensor x -> x.mut.minusAssign((Tensor)null) }","{ Tensor x -> x.mut.labelAxes((String[][])null) }","{ Tensor x -> x.mut.labelAxes((String[][])null) }","{ Tensor x -> x.mut.labelAxes((Map)null) }","{ Tensor x -> x.mut.label(null) }","{ Tensor x -> x.withLabels((String[][])null) }","{ Tensor x -> x.withLabels(null, (String[])null) }","{ Tensor x -> x.withLabels((String[])null) }","{ Tensor x -> x.withLabels((Map)null) }","{ Tensor x -> x.withLabel(null) }"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Passing null to various methods of the tensor API will throw exceptions. 
[14]", "result":"PASS", "duration":"0.001 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, + "blocks":[ + {"kind":"given","text":"","code":["Tensor t = Tensor.of(1, 2, 3)"]}, + + {"kind":"when","text":"","code":["errorCode(t)"]}, + + {"kind":"then","text":"","code":["var exception = thrown(type)"]}, + + {"kind":"and","text":"","code":["exception.message != \"\" && exception.message.length() > 13"]}, + + {"kind":"where","text":"","code":{"type":["IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException"],"errorCode":["{ Tensor x -> x.times((Tensor)null) }","{ Tensor x -> x.div((Tensor)null) }","{ Tensor x -> x.plus((Tensor)null) }","{ Tensor x -> x.mod((Tensor)null) }","{ Tensor x -> x.mut.timesAssign((Tensor)null) }","{ Tensor x -> x.mut.divAssign((Tensor)null) }","{ Tensor x -> x.mut.plusAssign((Tensor)null) }","{ Tensor x -> x.mut.modAssign((Tensor)null) }","{ Tensor x -> x.mut.minusAssign((Tensor)null) }","{ Tensor x -> x.mut.labelAxes((String[][])null) }","{ Tensor x -> x.mut.labelAxes((String[][])null) }","{ Tensor x -> x.mut.labelAxes((Map)null) }","{ Tensor x -> x.mut.label(null) }","{ Tensor x -> x.withLabels((String[][])null) }","{ Tensor x -> x.withLabels(null, (String[])null) }","{ Tensor x -> x.withLabels((String[])null) }","{ Tensor x -> x.withLabels((Map)null) }","{ Tensor x -> x.withLabel(null) }"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Passing null to various methods of the tensor API will throw exceptions. 
[15]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Tensor t = Tensor.of(1, 2, 3)"]}, + + {"kind":"when","text":"","code":["errorCode(t)"]}, + + {"kind":"then","text":"","code":["var exception = thrown(type)"]}, + + {"kind":"and","text":"","code":["exception.message != \"\" && exception.message.length() > 13"]}, + + {"kind":"where","text":"","code":{"type":["IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException"],"errorCode":["{ Tensor x -> x.times((Tensor)null) }","{ Tensor x -> x.div((Tensor)null) }","{ Tensor x -> x.plus((Tensor)null) }","{ Tensor x -> x.mod((Tensor)null) }","{ Tensor x -> x.mut.timesAssign((Tensor)null) }","{ Tensor x -> x.mut.divAssign((Tensor)null) }","{ Tensor x -> x.mut.plusAssign((Tensor)null) }","{ Tensor x -> x.mut.modAssign((Tensor)null) }","{ Tensor x -> x.mut.minusAssign((Tensor)null) }","{ Tensor x -> x.mut.labelAxes((String[][])null) }","{ Tensor x -> x.mut.labelAxes((String[][])null) }","{ Tensor x -> x.mut.labelAxes((Map)null) }","{ Tensor x -> x.mut.label(null) }","{ Tensor x -> x.withLabels((String[][])null) }","{ Tensor x -> x.withLabels(null, (String[])null) }","{ Tensor x -> x.withLabels((String[])null) }","{ Tensor x -> x.withLabels((Map)null) }","{ Tensor x -> x.withLabel(null) }"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Passing null to various methods of the tensor API will throw exceptions. 
[16]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Tensor t = Tensor.of(1, 2, 3)"]}, + + {"kind":"when","text":"","code":["errorCode(t)"]}, + + {"kind":"then","text":"","code":["var exception = thrown(type)"]}, + + {"kind":"and","text":"","code":["exception.message != \"\" && exception.message.length() > 13"]}, + + {"kind":"where","text":"","code":{"type":["IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException"],"errorCode":["{ Tensor x -> x.times((Tensor)null) }","{ Tensor x -> x.div((Tensor)null) }","{ Tensor x -> x.plus((Tensor)null) }","{ Tensor x -> x.mod((Tensor)null) }","{ Tensor x -> x.mut.timesAssign((Tensor)null) }","{ Tensor x -> x.mut.divAssign((Tensor)null) }","{ Tensor x -> x.mut.plusAssign((Tensor)null) }","{ Tensor x -> x.mut.modAssign((Tensor)null) }","{ Tensor x -> x.mut.minusAssign((Tensor)null) }","{ Tensor x -> x.mut.labelAxes((String[][])null) }","{ Tensor x -> x.mut.labelAxes((String[][])null) }","{ Tensor x -> x.mut.labelAxes((Map)null) }","{ Tensor x -> x.mut.label(null) }","{ Tensor x -> x.withLabels((String[][])null) }","{ Tensor x -> x.withLabels(null, (String[])null) }","{ Tensor x -> x.withLabels((String[])null) }","{ Tensor x -> x.withLabels((Map)null) }","{ Tensor x -> x.withLabel(null) }"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Passing null to various methods of the tensor API will throw exceptions. 
[17]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["Tensor t = Tensor.of(1, 2, 3)"]}, + + {"kind":"when","text":"","code":["errorCode(t)"]}, + + {"kind":"then","text":"","code":["var exception = thrown(type)"]}, + + {"kind":"and","text":"","code":["exception.message != \"\" && exception.message.length() > 13"]}, + + {"kind":"where","text":"","code":{"type":["IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException","IllegalArgumentException"],"errorCode":["{ Tensor x -> x.times((Tensor)null) }","{ Tensor x -> x.div((Tensor)null) }","{ Tensor x -> x.plus((Tensor)null) }","{ Tensor x -> x.mod((Tensor)null) }","{ Tensor x -> x.mut.timesAssign((Tensor)null) }","{ Tensor x -> x.mut.divAssign((Tensor)null) }","{ Tensor x -> x.mut.plusAssign((Tensor)null) }","{ Tensor x -> x.mut.modAssign((Tensor)null) }","{ Tensor x -> x.mut.minusAssign((Tensor)null) }","{ Tensor x -> x.mut.labelAxes((String[][])null) }","{ Tensor x -> x.mut.labelAxes((String[][])null) }","{ Tensor x -> x.mut.labelAxes((Map)null) }","{ Tensor x -> x.mut.label(null) }","{ Tensor x -> x.withLabels((String[][])null) }","{ Tensor x -> x.withLabels(null, (String[])null) }","{ Tensor x -> x.withLabels((String[])null) }","{ Tensor x -> x.withLabels((Map)null) }","{ Tensor x -> x.withLabel(null) }"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Passing an invalid object into Tensor constructor causes descriptive exception.", + "result":"PASS", + "duration":"0.003 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, "blocks":[ {"kind":"when","text":"A tensor is being instantiated with a nonsensical parameter.","code":["Tensor.ofRandom(Scanner.class, 2, 4)"]}, @@ -69,7 +426,7 @@ { "id":"Passing an invalid key object into the \"getAt\" method causes a descriptive exception.", "result":"PASS", - "duration":"0", + "duration":"0.001 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -86,7 +443,7 @@ { "id":"Out of dimension bound causes descriptive exception!", "result":"PASS", - "duration":"0", + "duration":"0.002 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -116,7 +473,7 @@ { "id":"Casting a tensor as something unusual will cuas an exception to be thrown.", "result":"PASS", - "duration":"0", + "duration":"0.001 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -133,7 +490,7 @@ { "id":"Building a tensor with \"null\" as shape argument throws an exception.", "result":"PASS", - "duration":"0", + "duration":"0.001 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, diff --git a/docs/spock/reports/ut.utility.Cleaner_Testing.json b/docs/spock/reports/ut.utility.Cleaner_Testing.json index ddcba964f..dd92a9631 100644 --- a/docs/spock/reports/ut.utility.Cleaner_Testing.json +++ b/docs/spock/reports/ut.utility.Cleaner_Testing.json @@ -1,7 +1,7 @@ { "className":"ut.utility.Cleaner_Testing", "title":"How Neureka Cleans Up", - "narrative":"Under the hood\n Neureka deals whith large arrays of\n data, which are often times\n native data arrays requiring explicit\n 
memory freeing!\n This freeing of memory can happen at any time\n during the livetime of a nd-array, however\n it should happen at least up until the nd-arra/tensor\n objects representing their referenced data arrays become\n eligible for garbage collection.\n This specification ensures that the custom garbage\n cleaner implementation used by Neureka fulfills this role", + "narrative":"Under the hood \n Neureka deals whith large arrays of\n data, which are often times \n native data arrays requiring explicit\n memory freeing!\n This freeing of memory can happen at any time\n during the livetime of a nd-array, however\n it should happen at least up until the nd-arra/tensor\n objects representing their referenced data arrays become\n eligible for garbage collection.\n This specification ensures that the custom garbage\n cleaner implementation used by Neureka fulfills this role", "subjects":[], "statistics":{ "runs":"1", @@ -9,14 +9,14 @@ "failures":"0", "errors":"0", "skipped":"0", - "duration":"0.260 seconds" + "duration":"1.304 seconds" }, "headers":[],"tags":{},"see":[], "features":[ { "id":"The DeviceCleaner triggers registered cleaner actions when things are eligible for GC.", "result":"PASS", - "duration":"0.259 seconds", + "duration":"1.302 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -25,11 +25,11 @@ {"kind":"when","text":"","code":["r1 = null","r3 = null","System.gc()"]}, - {"kind":"then","text":"","code":["Sleep.until(700, { refCount == 8 && cleaner._registered == 8 })","r1 == null","r2 != null","r3 == null","r4 != null","r5 != null","r6 != null","r7 != null","r8 != null","r9 != null","r10 != null"]}, + {"kind":"then","text":"","code":["Sleep.until(700, { refCount == 8 && cleaner._toBeCleaned.size() == 8 })","r1 == null","r2 != null","r3 == null","r4 != null","r5 != null","r6 != null","r7 != null","r8 != null","r9 != null","r10 != null"]}, {"kind":"when","text":"","code":["r2 = null","r4 = null","System.gc()"]}, - {"kind":"then","text":"","code":["Sleep.until(750, { refCount == 6 && cleaner._registered == 6 })","r1 == null","r2 == null","r3 == null","r4 == null","r5 != null","r6 != null","r7 != null","r8 != null","r9 != null","r10 != null"]} + {"kind":"then","text":"","code":["Sleep.until(750, { refCount == 6 && cleaner._toBeCleaned.size() == 6 })","r1 == null","r2 == null","r3 == null","r4 == null","r5 != null","r6 != null","r7 != null","r8 != null","r9 != null","r10 != null"]} ], "problems":{"dataValues":[], "errors":[]} } diff --git a/docs/spock/reports/ut.utility.DataConverter_Spec.json b/docs/spock/reports/ut.utility.DataConverter_Spec.json index 91fc62d8e..d0a7d0223 100644 --- a/docs/spock/reports/ut.utility.DataConverter_Spec.json +++ b/docs/spock/reports/ut.utility.DataConverter_Spec.json @@ -9,14 +9,14 @@ "failures":"0", "errors":"0", "skipped":"0", - "duration":"0.002 seconds" + "duration":"0.009 seconds" }, "headers":[],"tags":{},"see":[], "features":[ { "id":"The DataConverter can convert the given array data.", "result":"PASS", - "duration":"0", + "duration":"0.002 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -31,7 +31,7 @@ { "id":"An array of any type of object may be converted to a array of primitives.", "result":"PASS", - "duration":"0", + "duration":"0.004 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, diff --git a/docs/spock/reports/ut.utility.FileHandle_Spec.json b/docs/spock/reports/ut.utility.FileHandle_Spec.json index 07c23e016..999b5ca80 100644 --- a/docs/spock/reports/ut.utility.FileHandle_Spec.json +++ 
b/docs/spock/reports/ut.utility.FileHandle_Spec.json @@ -4,19 +4,226 @@ "narrative":"", "subjects":[], "statistics":{ - "runs":"6", + "runs":"24", "successRate":"100.0%", "failures":"0", "errors":"0", "skipped":"0", - "duration":"0.536 seconds" + "duration":"0.787 seconds" }, "headers":["\n This specification covers the expected functionality of\n various \"FileHandle\" implementations.\n Such implementations ought to be able to save tensors to\n a given directory in the file format that they represent.\n Functionalities like : \"store\", \"restore\" and \"free\" must\n behave as expected.\n (For more information take a look a the \"FileHandle\" & \"Storage\" interface)\n "],"tags":{},"see":[], "features":[ { - "id":"Test writing IDX file format.", + "id":"Test writing IDX file format. [0]", "result":"PASS", - "duration":"0.001 seconds", + "duration":"0.004 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":[" Neureka.get().settings().view().ndArrays({ NDPrintSettings it ->"," it.setIsScientific( true )"," it.setIsMultiline( false )"," it.setHasGradient( true )"," it.setCellSize( 1 )"," it.setHasValue( true )"," it.setHasRecursiveGraph( false )"," it.setHasDerivatives( false )"," it.setHasShape( true )"," it.setIsCellBound( false )"," it.setPostfix( \"\" )"," it.setPrefix( \"\" )"," it.setHasSlimNumbers( false )","})"]}, + + {"kind":"when","text":"A new IDX file handle for the given filename is being instantiated.","code":["IDXHandle idx = new IDXHandle(tensor, \"build/test-can/\"+filename)"]}, + + {"kind":"then","text":"The file will then exist at the following path: ","code":["new File(\"build/test-can/\"+filename).exists()"]}, + + {"kind":"when","text":"The \"load\" method is being called in order to load the tensor into memory.","code":["Tensor loaded = idx.load()"]}, + + {"kind":"then","text":"The loaded tensor is as expected...","code":["loaded != null","loaded.toString() == expected","loaded.getDataType().getRepresentativeType() == type"]}, + + {"kind":"where","text":"The following paths and file names are being used for testing : ","code":{"tensor":["Tensor.of([2, 4], -2d..4d)","Tensor.of([2, 4], 2d)","Tensor.of(Float, [8], -2f..4f)","Tensor.of([4, 2], 2f)","Tensor.of(Integer, [3], 2..4)","Tensor.of([2, 2], 2)","Tensor.of(Short, [2], 2..4)","Tensor.of([2, 2], 2 as short)","Tensor.of(Byte, [2], 2..4)","Tensor.of([1, 2], 2 as byte)","Tensor.of(Long, [6], -3..4)"],"type":["F64.class","F64.class","F32.class","F32.class","I32.class","I32.class","I16.class","I16.class","I8.class","I8.class","I64.class"],"filename":["\"test.idx3-ubyte\"","\"test2.idx\"","\"test_f32_1.idx\"","\"test_f32_2.idx\"","\"test_i32_1.idx\"","\"test_i32_2.idx\"","\"test_i16_1.idx\"","\"test_i16_2.idx\"","\"test_i8_1.idx\"","\"test_i8_2.idx\"","\"test_i64_1.idx\""],"expected":["\"(2x4):[-2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, -2.0]\"","\"(2x4):[2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0]\"","\"(8):[-2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, -2.0]\"","\"(4x2):[2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0]\"","\"(3):[2, 3, 4]\"","\"(2x2):[2, 2, 2, 2]\"","\"(2):[2, 3]\"","\"(2x2):[2, 2, 2, 2]\"","\"(2):[2, 3]\"","\"(1x2):[2, 2]\"","\"(6):[-3, -2, -1, 0, 1, 2]\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Test writing IDX file format. 
[1]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":[" Neureka.get().settings().view().ndArrays({ NDPrintSettings it ->"," it.setIsScientific( true )"," it.setIsMultiline( false )"," it.setHasGradient( true )"," it.setCellSize( 1 )"," it.setHasValue( true )"," it.setHasRecursiveGraph( false )"," it.setHasDerivatives( false )"," it.setHasShape( true )"," it.setIsCellBound( false )"," it.setPostfix( \"\" )"," it.setPrefix( \"\" )"," it.setHasSlimNumbers( false )","})"]}, + + {"kind":"when","text":"A new IDX file handle for the given filename is being instantiated.","code":["IDXHandle idx = new IDXHandle(tensor, \"build/test-can/\"+filename)"]}, + + {"kind":"then","text":"The file will then exist at the following path: ","code":["new File(\"build/test-can/\"+filename).exists()"]}, + + {"kind":"when","text":"The \"load\" method is being called in order to load the tensor into memory.","code":["Tensor loaded = idx.load()"]}, + + {"kind":"then","text":"The loaded tensor is as expected...","code":["loaded != null","loaded.toString() == expected","loaded.getDataType().getRepresentativeType() == type"]}, + + {"kind":"where","text":"The following paths and file names are being used for testing : ","code":{"tensor":["Tensor.of([2, 4], -2d..4d)","Tensor.of([2, 4], 2d)","Tensor.of(Float, [8], -2f..4f)","Tensor.of([4, 2], 2f)","Tensor.of(Integer, [3], 2..4)","Tensor.of([2, 2], 2)","Tensor.of(Short, [2], 2..4)","Tensor.of([2, 2], 2 as short)","Tensor.of(Byte, [2], 2..4)","Tensor.of([1, 2], 2 as byte)","Tensor.of(Long, [6], -3..4)"],"type":["F64.class","F64.class","F32.class","F32.class","I32.class","I32.class","I16.class","I16.class","I8.class","I8.class","I64.class"],"filename":["\"test.idx3-ubyte\"","\"test2.idx\"","\"test_f32_1.idx\"","\"test_f32_2.idx\"","\"test_i32_1.idx\"","\"test_i32_2.idx\"","\"test_i16_1.idx\"","\"test_i16_2.idx\"","\"test_i8_1.idx\"","\"test_i8_2.idx\"","\"test_i64_1.idx\""],"expected":["\"(2x4):[-2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, -2.0]\"","\"(2x4):[2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0]\"","\"(8):[-2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, -2.0]\"","\"(4x2):[2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0]\"","\"(3):[2, 3, 4]\"","\"(2x2):[2, 2, 2, 2]\"","\"(2):[2, 3]\"","\"(2x2):[2, 2, 2, 2]\"","\"(2):[2, 3]\"","\"(1x2):[2, 2]\"","\"(6):[-3, -2, -1, 0, 1, 2]\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Test writing IDX file format. 
[2]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":[" Neureka.get().settings().view().ndArrays({ NDPrintSettings it ->"," it.setIsScientific( true )"," it.setIsMultiline( false )"," it.setHasGradient( true )"," it.setCellSize( 1 )"," it.setHasValue( true )"," it.setHasRecursiveGraph( false )"," it.setHasDerivatives( false )"," it.setHasShape( true )"," it.setIsCellBound( false )"," it.setPostfix( \"\" )"," it.setPrefix( \"\" )"," it.setHasSlimNumbers( false )","})"]}, + + {"kind":"when","text":"A new IDX file handle for the given filename is being instantiated.","code":["IDXHandle idx = new IDXHandle(tensor, \"build/test-can/\"+filename)"]}, + + {"kind":"then","text":"The file will then exist at the following path: ","code":["new File(\"build/test-can/\"+filename).exists()"]}, + + {"kind":"when","text":"The \"load\" method is being called in order to load the tensor into memory.","code":["Tensor loaded = idx.load()"]}, + + {"kind":"then","text":"The loaded tensor is as expected...","code":["loaded != null","loaded.toString() == expected","loaded.getDataType().getRepresentativeType() == type"]}, + + {"kind":"where","text":"The following paths and file names are being used for testing : ","code":{"tensor":["Tensor.of([2, 4], -2d..4d)","Tensor.of([2, 4], 2d)","Tensor.of(Float, [8], -2f..4f)","Tensor.of([4, 2], 2f)","Tensor.of(Integer, [3], 2..4)","Tensor.of([2, 2], 2)","Tensor.of(Short, [2], 2..4)","Tensor.of([2, 2], 2 as short)","Tensor.of(Byte, [2], 2..4)","Tensor.of([1, 2], 2 as byte)","Tensor.of(Long, [6], -3..4)"],"type":["F64.class","F64.class","F32.class","F32.class","I32.class","I32.class","I16.class","I16.class","I8.class","I8.class","I64.class"],"filename":["\"test.idx3-ubyte\"","\"test2.idx\"","\"test_f32_1.idx\"","\"test_f32_2.idx\"","\"test_i32_1.idx\"","\"test_i32_2.idx\"","\"test_i16_1.idx\"","\"test_i16_2.idx\"","\"test_i8_1.idx\"","\"test_i8_2.idx\"","\"test_i64_1.idx\""],"expected":["\"(2x4):[-2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, -2.0]\"","\"(2x4):[2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0]\"","\"(8):[-2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, -2.0]\"","\"(4x2):[2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0]\"","\"(3):[2, 3, 4]\"","\"(2x2):[2, 2, 2, 2]\"","\"(2):[2, 3]\"","\"(2x2):[2, 2, 2, 2]\"","\"(2):[2, 3]\"","\"(1x2):[2, 2]\"","\"(6):[-3, -2, -1, 0, 1, 2]\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Test writing IDX file format. 
[3]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":[" Neureka.get().settings().view().ndArrays({ NDPrintSettings it ->"," it.setIsScientific( true )"," it.setIsMultiline( false )"," it.setHasGradient( true )"," it.setCellSize( 1 )"," it.setHasValue( true )"," it.setHasRecursiveGraph( false )"," it.setHasDerivatives( false )"," it.setHasShape( true )"," it.setIsCellBound( false )"," it.setPostfix( \"\" )"," it.setPrefix( \"\" )"," it.setHasSlimNumbers( false )","})"]}, + + {"kind":"when","text":"A new IDX file handle for the given filename is being instantiated.","code":["IDXHandle idx = new IDXHandle(tensor, \"build/test-can/\"+filename)"]}, + + {"kind":"then","text":"The file will then exist at the following path: ","code":["new File(\"build/test-can/\"+filename).exists()"]}, + + {"kind":"when","text":"The \"load\" method is being called in order to load the tensor into memory.","code":["Tensor loaded = idx.load()"]}, + + {"kind":"then","text":"The loaded tensor is as expected...","code":["loaded != null","loaded.toString() == expected","loaded.getDataType().getRepresentativeType() == type"]}, + + {"kind":"where","text":"The following paths and file names are being used for testing : ","code":{"tensor":["Tensor.of([2, 4], -2d..4d)","Tensor.of([2, 4], 2d)","Tensor.of(Float, [8], -2f..4f)","Tensor.of([4, 2], 2f)","Tensor.of(Integer, [3], 2..4)","Tensor.of([2, 2], 2)","Tensor.of(Short, [2], 2..4)","Tensor.of([2, 2], 2 as short)","Tensor.of(Byte, [2], 2..4)","Tensor.of([1, 2], 2 as byte)","Tensor.of(Long, [6], -3..4)"],"type":["F64.class","F64.class","F32.class","F32.class","I32.class","I32.class","I16.class","I16.class","I8.class","I8.class","I64.class"],"filename":["\"test.idx3-ubyte\"","\"test2.idx\"","\"test_f32_1.idx\"","\"test_f32_2.idx\"","\"test_i32_1.idx\"","\"test_i32_2.idx\"","\"test_i16_1.idx\"","\"test_i16_2.idx\"","\"test_i8_1.idx\"","\"test_i8_2.idx\"","\"test_i64_1.idx\""],"expected":["\"(2x4):[-2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, -2.0]\"","\"(2x4):[2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0]\"","\"(8):[-2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, -2.0]\"","\"(4x2):[2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0]\"","\"(3):[2, 3, 4]\"","\"(2x2):[2, 2, 2, 2]\"","\"(2):[2, 3]\"","\"(2x2):[2, 2, 2, 2]\"","\"(2):[2, 3]\"","\"(1x2):[2, 2]\"","\"(6):[-3, -2, -1, 0, 1, 2]\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Test writing IDX file format. 
[4]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":[" Neureka.get().settings().view().ndArrays({ NDPrintSettings it ->"," it.setIsScientific( true )"," it.setIsMultiline( false )"," it.setHasGradient( true )"," it.setCellSize( 1 )"," it.setHasValue( true )"," it.setHasRecursiveGraph( false )"," it.setHasDerivatives( false )"," it.setHasShape( true )"," it.setIsCellBound( false )"," it.setPostfix( \"\" )"," it.setPrefix( \"\" )"," it.setHasSlimNumbers( false )","})"]}, + + {"kind":"when","text":"A new IDX file handle for the given filename is being instantiated.","code":["IDXHandle idx = new IDXHandle(tensor, \"build/test-can/\"+filename)"]}, + + {"kind":"then","text":"The file will then exist at the following path: ","code":["new File(\"build/test-can/\"+filename).exists()"]}, + + {"kind":"when","text":"The \"load\" method is being called in order to load the tensor into memory.","code":["Tensor loaded = idx.load()"]}, + + {"kind":"then","text":"The loaded tensor is as expected...","code":["loaded != null","loaded.toString() == expected","loaded.getDataType().getRepresentativeType() == type"]}, + + {"kind":"where","text":"The following paths and file names are being used for testing : ","code":{"tensor":["Tensor.of([2, 4], -2d..4d)","Tensor.of([2, 4], 2d)","Tensor.of(Float, [8], -2f..4f)","Tensor.of([4, 2], 2f)","Tensor.of(Integer, [3], 2..4)","Tensor.of([2, 2], 2)","Tensor.of(Short, [2], 2..4)","Tensor.of([2, 2], 2 as short)","Tensor.of(Byte, [2], 2..4)","Tensor.of([1, 2], 2 as byte)","Tensor.of(Long, [6], -3..4)"],"type":["F64.class","F64.class","F32.class","F32.class","I32.class","I32.class","I16.class","I16.class","I8.class","I8.class","I64.class"],"filename":["\"test.idx3-ubyte\"","\"test2.idx\"","\"test_f32_1.idx\"","\"test_f32_2.idx\"","\"test_i32_1.idx\"","\"test_i32_2.idx\"","\"test_i16_1.idx\"","\"test_i16_2.idx\"","\"test_i8_1.idx\"","\"test_i8_2.idx\"","\"test_i64_1.idx\""],"expected":["\"(2x4):[-2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, -2.0]\"","\"(2x4):[2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0]\"","\"(8):[-2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, -2.0]\"","\"(4x2):[2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0]\"","\"(3):[2, 3, 4]\"","\"(2x2):[2, 2, 2, 2]\"","\"(2):[2, 3]\"","\"(2x2):[2, 2, 2, 2]\"","\"(2):[2, 3]\"","\"(1x2):[2, 2]\"","\"(6):[-3, -2, -1, 0, 1, 2]\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Test writing IDX file format. 
[5]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":[" Neureka.get().settings().view().ndArrays({ NDPrintSettings it ->"," it.setIsScientific( true )"," it.setIsMultiline( false )"," it.setHasGradient( true )"," it.setCellSize( 1 )"," it.setHasValue( true )"," it.setHasRecursiveGraph( false )"," it.setHasDerivatives( false )"," it.setHasShape( true )"," it.setIsCellBound( false )"," it.setPostfix( \"\" )"," it.setPrefix( \"\" )"," it.setHasSlimNumbers( false )","})"]}, + + {"kind":"when","text":"A new IDX file handle for the given filename is being instantiated.","code":["IDXHandle idx = new IDXHandle(tensor, \"build/test-can/\"+filename)"]}, + + {"kind":"then","text":"The file will then exist at the following path: ","code":["new File(\"build/test-can/\"+filename).exists()"]}, + + {"kind":"when","text":"The \"load\" method is being called in order to load the tensor into memory.","code":["Tensor loaded = idx.load()"]}, + + {"kind":"then","text":"The loaded tensor is as expected...","code":["loaded != null","loaded.toString() == expected","loaded.getDataType().getRepresentativeType() == type"]}, + + {"kind":"where","text":"The following paths and file names are being used for testing : ","code":{"tensor":["Tensor.of([2, 4], -2d..4d)","Tensor.of([2, 4], 2d)","Tensor.of(Float, [8], -2f..4f)","Tensor.of([4, 2], 2f)","Tensor.of(Integer, [3], 2..4)","Tensor.of([2, 2], 2)","Tensor.of(Short, [2], 2..4)","Tensor.of([2, 2], 2 as short)","Tensor.of(Byte, [2], 2..4)","Tensor.of([1, 2], 2 as byte)","Tensor.of(Long, [6], -3..4)"],"type":["F64.class","F64.class","F32.class","F32.class","I32.class","I32.class","I16.class","I16.class","I8.class","I8.class","I64.class"],"filename":["\"test.idx3-ubyte\"","\"test2.idx\"","\"test_f32_1.idx\"","\"test_f32_2.idx\"","\"test_i32_1.idx\"","\"test_i32_2.idx\"","\"test_i16_1.idx\"","\"test_i16_2.idx\"","\"test_i8_1.idx\"","\"test_i8_2.idx\"","\"test_i64_1.idx\""],"expected":["\"(2x4):[-2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, -2.0]\"","\"(2x4):[2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0]\"","\"(8):[-2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, -2.0]\"","\"(4x2):[2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0]\"","\"(3):[2, 3, 4]\"","\"(2x2):[2, 2, 2, 2]\"","\"(2):[2, 3]\"","\"(2x2):[2, 2, 2, 2]\"","\"(2):[2, 3]\"","\"(1x2):[2, 2]\"","\"(6):[-3, -2, -1, 0, 1, 2]\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Test writing IDX file format. 
[6]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":[" Neureka.get().settings().view().ndArrays({ NDPrintSettings it ->"," it.setIsScientific( true )"," it.setIsMultiline( false )"," it.setHasGradient( true )"," it.setCellSize( 1 )"," it.setHasValue( true )"," it.setHasRecursiveGraph( false )"," it.setHasDerivatives( false )"," it.setHasShape( true )"," it.setIsCellBound( false )"," it.setPostfix( \"\" )"," it.setPrefix( \"\" )"," it.setHasSlimNumbers( false )","})"]}, + + {"kind":"when","text":"A new IDX file handle for the given filename is being instantiated.","code":["IDXHandle idx = new IDXHandle(tensor, \"build/test-can/\"+filename)"]}, + + {"kind":"then","text":"The file will then exist at the following path: ","code":["new File(\"build/test-can/\"+filename).exists()"]}, + + {"kind":"when","text":"The \"load\" method is being called in order to load the tensor into memory.","code":["Tensor loaded = idx.load()"]}, + + {"kind":"then","text":"The loaded tensor is as expected...","code":["loaded != null","loaded.toString() == expected","loaded.getDataType().getRepresentativeType() == type"]}, + + {"kind":"where","text":"The following paths and file names are being used for testing : ","code":{"tensor":["Tensor.of([2, 4], -2d..4d)","Tensor.of([2, 4], 2d)","Tensor.of(Float, [8], -2f..4f)","Tensor.of([4, 2], 2f)","Tensor.of(Integer, [3], 2..4)","Tensor.of([2, 2], 2)","Tensor.of(Short, [2], 2..4)","Tensor.of([2, 2], 2 as short)","Tensor.of(Byte, [2], 2..4)","Tensor.of([1, 2], 2 as byte)","Tensor.of(Long, [6], -3..4)"],"type":["F64.class","F64.class","F32.class","F32.class","I32.class","I32.class","I16.class","I16.class","I8.class","I8.class","I64.class"],"filename":["\"test.idx3-ubyte\"","\"test2.idx\"","\"test_f32_1.idx\"","\"test_f32_2.idx\"","\"test_i32_1.idx\"","\"test_i32_2.idx\"","\"test_i16_1.idx\"","\"test_i16_2.idx\"","\"test_i8_1.idx\"","\"test_i8_2.idx\"","\"test_i64_1.idx\""],"expected":["\"(2x4):[-2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, -2.0]\"","\"(2x4):[2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0]\"","\"(8):[-2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, -2.0]\"","\"(4x2):[2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0]\"","\"(3):[2, 3, 4]\"","\"(2x2):[2, 2, 2, 2]\"","\"(2):[2, 3]\"","\"(2x2):[2, 2, 2, 2]\"","\"(2):[2, 3]\"","\"(1x2):[2, 2]\"","\"(6):[-3, -2, -1, 0, 1, 2]\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Test writing IDX file format. 
[7]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":[" Neureka.get().settings().view().ndArrays({ NDPrintSettings it ->"," it.setIsScientific( true )"," it.setIsMultiline( false )"," it.setHasGradient( true )"," it.setCellSize( 1 )"," it.setHasValue( true )"," it.setHasRecursiveGraph( false )"," it.setHasDerivatives( false )"," it.setHasShape( true )"," it.setIsCellBound( false )"," it.setPostfix( \"\" )"," it.setPrefix( \"\" )"," it.setHasSlimNumbers( false )","})"]}, + + {"kind":"when","text":"A new IDX file handle for the given filename is being instantiated.","code":["IDXHandle idx = new IDXHandle(tensor, \"build/test-can/\"+filename)"]}, + + {"kind":"then","text":"The file will then exist at the following path: ","code":["new File(\"build/test-can/\"+filename).exists()"]}, + + {"kind":"when","text":"The \"load\" method is being called in order to load the tensor into memory.","code":["Tensor loaded = idx.load()"]}, + + {"kind":"then","text":"The loaded tensor is as expected...","code":["loaded != null","loaded.toString() == expected","loaded.getDataType().getRepresentativeType() == type"]}, + + {"kind":"where","text":"The following paths and file names are being used for testing : ","code":{"tensor":["Tensor.of([2, 4], -2d..4d)","Tensor.of([2, 4], 2d)","Tensor.of(Float, [8], -2f..4f)","Tensor.of([4, 2], 2f)","Tensor.of(Integer, [3], 2..4)","Tensor.of([2, 2], 2)","Tensor.of(Short, [2], 2..4)","Tensor.of([2, 2], 2 as short)","Tensor.of(Byte, [2], 2..4)","Tensor.of([1, 2], 2 as byte)","Tensor.of(Long, [6], -3..4)"],"type":["F64.class","F64.class","F32.class","F32.class","I32.class","I32.class","I16.class","I16.class","I8.class","I8.class","I64.class"],"filename":["\"test.idx3-ubyte\"","\"test2.idx\"","\"test_f32_1.idx\"","\"test_f32_2.idx\"","\"test_i32_1.idx\"","\"test_i32_2.idx\"","\"test_i16_1.idx\"","\"test_i16_2.idx\"","\"test_i8_1.idx\"","\"test_i8_2.idx\"","\"test_i64_1.idx\""],"expected":["\"(2x4):[-2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, -2.0]\"","\"(2x4):[2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0]\"","\"(8):[-2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, -2.0]\"","\"(4x2):[2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0]\"","\"(3):[2, 3, 4]\"","\"(2x2):[2, 2, 2, 2]\"","\"(2):[2, 3]\"","\"(2x2):[2, 2, 2, 2]\"","\"(2):[2, 3]\"","\"(1x2):[2, 2]\"","\"(6):[-3, -2, -1, 0, 1, 2]\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Test writing IDX file format. 
[8]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":[" Neureka.get().settings().view().ndArrays({ NDPrintSettings it ->"," it.setIsScientific( true )"," it.setIsMultiline( false )"," it.setHasGradient( true )"," it.setCellSize( 1 )"," it.setHasValue( true )"," it.setHasRecursiveGraph( false )"," it.setHasDerivatives( false )"," it.setHasShape( true )"," it.setIsCellBound( false )"," it.setPostfix( \"\" )"," it.setPrefix( \"\" )"," it.setHasSlimNumbers( false )","})"]}, + + {"kind":"when","text":"A new IDX file handle for the given filename is being instantiated.","code":["IDXHandle idx = new IDXHandle(tensor, \"build/test-can/\"+filename)"]}, + + {"kind":"then","text":"The file will then exist at the following path: ","code":["new File(\"build/test-can/\"+filename).exists()"]}, + + {"kind":"when","text":"The \"load\" method is being called in order to load the tensor into memory.","code":["Tensor loaded = idx.load()"]}, + + {"kind":"then","text":"The loaded tensor is as expected...","code":["loaded != null","loaded.toString() == expected","loaded.getDataType().getRepresentativeType() == type"]}, + + {"kind":"where","text":"The following paths and file names are being used for testing : ","code":{"tensor":["Tensor.of([2, 4], -2d..4d)","Tensor.of([2, 4], 2d)","Tensor.of(Float, [8], -2f..4f)","Tensor.of([4, 2], 2f)","Tensor.of(Integer, [3], 2..4)","Tensor.of([2, 2], 2)","Tensor.of(Short, [2], 2..4)","Tensor.of([2, 2], 2 as short)","Tensor.of(Byte, [2], 2..4)","Tensor.of([1, 2], 2 as byte)","Tensor.of(Long, [6], -3..4)"],"type":["F64.class","F64.class","F32.class","F32.class","I32.class","I32.class","I16.class","I16.class","I8.class","I8.class","I64.class"],"filename":["\"test.idx3-ubyte\"","\"test2.idx\"","\"test_f32_1.idx\"","\"test_f32_2.idx\"","\"test_i32_1.idx\"","\"test_i32_2.idx\"","\"test_i16_1.idx\"","\"test_i16_2.idx\"","\"test_i8_1.idx\"","\"test_i8_2.idx\"","\"test_i64_1.idx\""],"expected":["\"(2x4):[-2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, -2.0]\"","\"(2x4):[2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0]\"","\"(8):[-2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, -2.0]\"","\"(4x2):[2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0]\"","\"(3):[2, 3, 4]\"","\"(2x2):[2, 2, 2, 2]\"","\"(2):[2, 3]\"","\"(2x2):[2, 2, 2, 2]\"","\"(2):[2, 3]\"","\"(1x2):[2, 2]\"","\"(6):[-3, -2, -1, 0, 1, 2]\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Test writing IDX file format. [9]", + "result":"PASS", + "duration":"0", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -37,9 +244,32 @@ }, { - "id":"Test reading IDX file format.", + "id":"Test writing IDX file format. 
[10]", "result":"PASS", - "duration":"0.006 seconds", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":[" Neureka.get().settings().view().ndArrays({ NDPrintSettings it ->"," it.setIsScientific( true )"," it.setIsMultiline( false )"," it.setHasGradient( true )"," it.setCellSize( 1 )"," it.setHasValue( true )"," it.setHasRecursiveGraph( false )"," it.setHasDerivatives( false )"," it.setHasShape( true )"," it.setIsCellBound( false )"," it.setPostfix( \"\" )"," it.setPrefix( \"\" )"," it.setHasSlimNumbers( false )","})"]}, + + {"kind":"when","text":"A new IDX file handle for the given filename is being instantiated.","code":["IDXHandle idx = new IDXHandle(tensor, \"build/test-can/\"+filename)"]}, + + {"kind":"then","text":"The file will then exist at the following path: ","code":["new File(\"build/test-can/\"+filename).exists()"]}, + + {"kind":"when","text":"The \"load\" method is being called in order to load the tensor into memory.","code":["Tensor loaded = idx.load()"]}, + + {"kind":"then","text":"The loaded tensor is as expected...","code":["loaded != null","loaded.toString() == expected","loaded.getDataType().getRepresentativeType() == type"]}, + + {"kind":"where","text":"The following paths and file names are being used for testing : ","code":{"tensor":["Tensor.of([2, 4], -2d..4d)","Tensor.of([2, 4], 2d)","Tensor.of(Float, [8], -2f..4f)","Tensor.of([4, 2], 2f)","Tensor.of(Integer, [3], 2..4)","Tensor.of([2, 2], 2)","Tensor.of(Short, [2], 2..4)","Tensor.of([2, 2], 2 as short)","Tensor.of(Byte, [2], 2..4)","Tensor.of([1, 2], 2 as byte)","Tensor.of(Long, [6], -3..4)"],"type":["F64.class","F64.class","F32.class","F32.class","I32.class","I32.class","I16.class","I16.class","I8.class","I8.class","I64.class"],"filename":["\"test.idx3-ubyte\"","\"test2.idx\"","\"test_f32_1.idx\"","\"test_f32_2.idx\"","\"test_i32_1.idx\"","\"test_i32_2.idx\"","\"test_i16_1.idx\"","\"test_i16_2.idx\"","\"test_i8_1.idx\"","\"test_i8_2.idx\"","\"test_i64_1.idx\""],"expected":["\"(2x4):[-2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, -2.0]\"","\"(2x4):[2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0]\"","\"(8):[-2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, -2.0]\"","\"(4x2):[2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0]\"","\"(3):[2, 3, 4]\"","\"(2x2):[2, 2, 2, 2]\"","\"(2):[2, 3]\"","\"(2x2):[2, 2, 2, 2]\"","\"(2):[2, 3]\"","\"(1x2):[2, 2]\"","\"(6):[-3, -2, -1, 0, 1, 2]\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Test reading IDX file format. [0]", + "result":"PASS", + "duration":"0.009 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -64,9 +294,63 @@ }, { - "id":"We can load image files as tensors.", + "id":"Test reading IDX file format. [1]", "result":"PASS", - "duration":"0.508 seconds", + "duration":"0.004 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A variable for storing a hash :","code":["def hash = \"\""]}, + + {"kind":"when","text":"The given idx file is being loaded by the \"IDXHead\" class into a new tensor...","code":["IDXHandle idx = new IDXHandle( \"build/resources/test/idx/\" + filename )","Tensor loaded = idx.load()"]}, + + {"kind":"and","text":"... 
this new tensor is then hashed ...","code":["loaded.forEach( e -> hash = ( hash + e ).digest(\"md5\") )"]}, + + {"kind":"then","text":"The hash is as expected.","code":["hash == expected"]}, + + {"kind":"and","text":"The loaded tensor has the expected data type.","code":["loaded.dataType.getRepresentativeType() == I16.class","loaded.dataType == DataType.of( I16.class )"]}, + + {"kind":"and","text":"It contains the correct array type.","code":["loaded.mut.data.get() instanceof short[]"]}, + + {"kind":"and","text":"The \"IDXHead\" instance has the expected state :","code":["idx.valueSize == 28 * 28","idx.valueSize == 28 * 28 * 1 // 1 := ubyte","idx.fileName == filename","idx.location.endsWith( filename )","idx.totalSize == 28 * 28 * 1 + 16","idx.dataType != loaded.dataType","idx.dataType == DataType.of( UI8.class ) // The underlying data is unsigned byte! (Not supported by JVM)"]}, + + {"kind":"where","text":"The following files and the expected hashes of their data were used :","code":{"filename":["\"MNIST-sample-1.idx\"","\"MNIST-sample-2.idx\"","\"MNIST-sample-3.idx\""],"expected":["\"c74e87c7a93605e7a1660ec9e17dcf9f\"","\"4a57297981456a467a302c8738b3ac50\"","\"87eade8bb5659d324030f4e84f6745e7\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Test reading IDX file format. [2]", + "result":"PASS", + "duration":"0.002 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"A variable for storing a hash :","code":["def hash = \"\""]}, + + {"kind":"when","text":"The given idx file is being loaded by the \"IDXHead\" class into a new tensor...","code":["IDXHandle idx = new IDXHandle( \"build/resources/test/idx/\" + filename )","Tensor loaded = idx.load()"]}, + + {"kind":"and","text":"... this new tensor is then hashed ...","code":["loaded.forEach( e -> hash = ( hash + e ).digest(\"md5\") )"]}, + + {"kind":"then","text":"The hash is as expected.","code":["hash == expected"]}, + + {"kind":"and","text":"The loaded tensor has the expected data type.","code":["loaded.dataType.getRepresentativeType() == I16.class","loaded.dataType == DataType.of( I16.class )"]}, + + {"kind":"and","text":"It contains the correct array type.","code":["loaded.mut.data.get() instanceof short[]"]}, + + {"kind":"and","text":"The \"IDXHead\" instance has the expected state :","code":["idx.valueSize == 28 * 28","idx.valueSize == 28 * 28 * 1 // 1 := ubyte","idx.fileName == filename","idx.location.endsWith( filename )","idx.totalSize == 28 * 28 * 1 + 16","idx.dataType != loaded.dataType","idx.dataType == DataType.of( UI8.class ) // The underlying data is unsigned byte! (Not supported by JVM)"]}, + + {"kind":"where","text":"The following files and the expected hashes of their data were used :","code":{"filename":["\"MNIST-sample-1.idx\"","\"MNIST-sample-2.idx\"","\"MNIST-sample-3.idx\""],"expected":["\"c74e87c7a93605e7a1660ec9e17dcf9f\"","\"4a57297981456a467a302c8738b3ac50\"","\"87eade8bb5659d324030f4e84f6745e7\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can load image files as tensors. [0]", + "result":"PASS", + "duration":"0.587 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -85,9 +369,129 @@ }, { - "id":"The FileDevice component \"CSVHead\" can read CSV file formats and load them as tensors.", + "id":"We can load image files as tensors. 
[1]", "result":"PASS", - "duration":"0.002 seconds", + "duration":"0.014 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["var hash = \"\"","var type = filename.split(\"\\\\.\")[1].toLowerCase()"]}, + + {"kind":"when","text":"","code":["FileHandle handle = FileHandle.FACTORY.getLoader(type).load(\"build/resources/test/$type/\" + filename, null)","Tensor loaded = handle.load()","loaded.forEach(e -> hash = ( hash + e ).digest('md5') )"]}, + + {"kind":"then","text":"","code":["loaded != null","!loaded.isVirtual()","loaded.size() == shape.inject( 1, {prod, value -> prod * value} )","loaded.getDataType().getRepresentativeType() == I16.class // Auto convert! (stored as I16)","hash == expected"]}, + + {"kind":"and","text":"","code":["handle.shape == shape","handle.valueSize == shape.inject( 1, {prod, value -> prod * value} )","handle.totalSize == shape.inject( 1, {prod, value -> prod * value} ) //28 * 28 * 1 + 16","handle.location.endsWith( filename )","handle.dataType == DataType.of( UI8.class )","loaded.dataType == DataType.of( I16.class )"]}, + + {"kind":"where","text":"The following jpg files with their expected shape and hash were used.","code":{"filename":["\"small.JPG\"","\"tiny.JPG\"","\"super-tiny.JPG\"","\"tiny.png\""],"shape":["[260, 410, 3]","[10, 46, 3]","[3, 4, 3]","[90, 183, 4]"],"expected":["\"b0e336b03f2ead7297e56b8ca050f34d\"","\"79bf5dd367b5ec05603e395c41dafaa7\"","\"a834038d8ddc53f170fa426c76d45df2\"","\"63bcd21a7580242a1b562bb49cb53e74\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can load image files as tensors. [2]", + "result":"PASS", + "duration":"0.011 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["var hash = \"\"","var type = filename.split(\"\\\\.\")[1].toLowerCase()"]}, + + {"kind":"when","text":"","code":["FileHandle handle = FileHandle.FACTORY.getLoader(type).load(\"build/resources/test/$type/\" + filename, null)","Tensor loaded = handle.load()","loaded.forEach(e -> hash = ( hash + e ).digest('md5') )"]}, + + {"kind":"then","text":"","code":["loaded != null","!loaded.isVirtual()","loaded.size() == shape.inject( 1, {prod, value -> prod * value} )","loaded.getDataType().getRepresentativeType() == I16.class // Auto convert! (stored as I16)","hash == expected"]}, + + {"kind":"and","text":"","code":["handle.shape == shape","handle.valueSize == shape.inject( 1, {prod, value -> prod * value} )","handle.totalSize == shape.inject( 1, {prod, value -> prod * value} ) //28 * 28 * 1 + 16","handle.location.endsWith( filename )","handle.dataType == DataType.of( UI8.class )","loaded.dataType == DataType.of( I16.class )"]}, + + {"kind":"where","text":"The following jpg files with their expected shape and hash were used.","code":{"filename":["\"small.JPG\"","\"tiny.JPG\"","\"super-tiny.JPG\"","\"tiny.png\""],"shape":["[260, 410, 3]","[10, 46, 3]","[3, 4, 3]","[90, 183, 4]"],"expected":["\"b0e336b03f2ead7297e56b8ca050f34d\"","\"79bf5dd367b5ec05603e395c41dafaa7\"","\"a834038d8ddc53f170fa426c76d45df2\"","\"63bcd21a7580242a1b562bb49cb53e74\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"We can load image files as tensors. 
[3]", + "result":"PASS", + "duration":"0.119 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"given","text":"","code":["var hash = \"\"","var type = filename.split(\"\\\\.\")[1].toLowerCase()"]}, + + {"kind":"when","text":"","code":["FileHandle handle = FileHandle.FACTORY.getLoader(type).load(\"build/resources/test/$type/\" + filename, null)","Tensor loaded = handle.load()","loaded.forEach(e -> hash = ( hash + e ).digest('md5') )"]}, + + {"kind":"then","text":"","code":["loaded != null","!loaded.isVirtual()","loaded.size() == shape.inject( 1, {prod, value -> prod * value} )","loaded.getDataType().getRepresentativeType() == I16.class // Auto convert! (stored as I16)","hash == expected"]}, + + {"kind":"and","text":"","code":["handle.shape == shape","handle.valueSize == shape.inject( 1, {prod, value -> prod * value} )","handle.totalSize == shape.inject( 1, {prod, value -> prod * value} ) //28 * 28 * 1 + 16","handle.location.endsWith( filename )","handle.dataType == DataType.of( UI8.class )","loaded.dataType == DataType.of( I16.class )"]}, + + {"kind":"where","text":"The following jpg files with their expected shape and hash were used.","code":{"filename":["\"small.JPG\"","\"tiny.JPG\"","\"super-tiny.JPG\"","\"tiny.png\""],"shape":["[260, 410, 3]","[10, 46, 3]","[3, 4, 3]","[90, 183, 4]"],"expected":["\"b0e336b03f2ead7297e56b8ca050f34d\"","\"79bf5dd367b5ec05603e395c41dafaa7\"","\"a834038d8ddc53f170fa426c76d45df2\"","\"63bcd21a7580242a1b562bb49cb53e74\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The FileDevice component \"CSVHead\" can read CSV file formats and load them as tensors. [0]", + "result":"PASS", + "duration":"0.004 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"when","text":"","code":["CSVHandle csv = new CSVHandle( \"build/resources/test/csv/\" + filename, params )","Tensor loaded = csv.load()","var hash = loaded.toString().digest('md5')//.forEach( e -> hash = ( hash + e ).digest('md5') )"]}, + + {"kind":"then","text":"","code":["loaded != null","!loaded.isVirtual()","loaded.size() == shape.inject( 1, {prod, value -> prod * value} )","loaded.getDataType().getItemTypeClass() == String.class // Auto convert! (stored as String)","hash == expected"]}, + + {"kind":"and","text":"","code":["csv.shape == shape","csv.valueSize == shape.inject( 1, {prod, value -> prod * value} )","csv.totalSize == byteSize","csv.dataSize == byteSize","csv.location.endsWith( filename )","csv.dataType == DataType.of( String.class )","loaded.dataType == DataType.of( String.class )"]}, + + {"kind":"where","text":"The following jpg files with their expected shape and hash were used.","code":{"filename":["\"biostats.csv\"","\"biostats.csv\"","\"biostats.csv\"","\"biostats.csv\""],"params":["[:]","[firstRowIsLabels:true]","[firstColIsIndex:true]","[firstColIsIndex:true,firstRowIsLabels:true]"],"byteSize":["753","702","639","594"],"shape":["[19, 5]","[18, 5]","[19, 4]","[18, 4]"],"expected":["\"a3dc4ede7814b5d35d20a8c9310cd63c\"","\"baac406a366a51cb6d69e97a90711050\"","\"90c5d3a4b1ea87901879993bb79e9bc1\"","\"61d75f3dccc8d6987e686d151a423310\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The FileDevice component \"CSVHead\" can read CSV file formats and load them as tensors. 
[1]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"when","text":"","code":["CSVHandle csv = new CSVHandle( \"build/resources/test/csv/\" + filename, params )","Tensor loaded = csv.load()","var hash = loaded.toString().digest('md5')//.forEach( e -> hash = ( hash + e ).digest('md5') )"]}, + + {"kind":"then","text":"","code":["loaded != null","!loaded.isVirtual()","loaded.size() == shape.inject( 1, {prod, value -> prod * value} )","loaded.getDataType().getItemTypeClass() == String.class // Auto convert! (stored as String)","hash == expected"]}, + + {"kind":"and","text":"","code":["csv.shape == shape","csv.valueSize == shape.inject( 1, {prod, value -> prod * value} )","csv.totalSize == byteSize","csv.dataSize == byteSize","csv.location.endsWith( filename )","csv.dataType == DataType.of( String.class )","loaded.dataType == DataType.of( String.class )"]}, + + {"kind":"where","text":"The following jpg files with their expected shape and hash were used.","code":{"filename":["\"biostats.csv\"","\"biostats.csv\"","\"biostats.csv\"","\"biostats.csv\""],"params":["[:]","[firstRowIsLabels:true]","[firstColIsIndex:true]","[firstColIsIndex:true,firstRowIsLabels:true]"],"byteSize":["753","702","639","594"],"shape":["[19, 5]","[18, 5]","[19, 4]","[18, 4]"],"expected":["\"a3dc4ede7814b5d35d20a8c9310cd63c\"","\"baac406a366a51cb6d69e97a90711050\"","\"90c5d3a4b1ea87901879993bb79e9bc1\"","\"61d75f3dccc8d6987e686d151a423310\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The FileDevice component \"CSVHead\" can read CSV file formats and load them as tensors. [2]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"when","text":"","code":["CSVHandle csv = new CSVHandle( \"build/resources/test/csv/\" + filename, params )","Tensor loaded = csv.load()","var hash = loaded.toString().digest('md5')//.forEach( e -> hash = ( hash + e ).digest('md5') )"]}, + + {"kind":"then","text":"","code":["loaded != null","!loaded.isVirtual()","loaded.size() == shape.inject( 1, {prod, value -> prod * value} )","loaded.getDataType().getItemTypeClass() == String.class // Auto convert! (stored as String)","hash == expected"]}, + + {"kind":"and","text":"","code":["csv.shape == shape","csv.valueSize == shape.inject( 1, {prod, value -> prod * value} )","csv.totalSize == byteSize","csv.dataSize == byteSize","csv.location.endsWith( filename )","csv.dataType == DataType.of( String.class )","loaded.dataType == DataType.of( String.class )"]}, + + {"kind":"where","text":"The following jpg files with their expected shape and hash were used.","code":{"filename":["\"biostats.csv\"","\"biostats.csv\"","\"biostats.csv\"","\"biostats.csv\""],"params":["[:]","[firstRowIsLabels:true]","[firstColIsIndex:true]","[firstColIsIndex:true,firstRowIsLabels:true]"],"byteSize":["753","702","639","594"],"shape":["[19, 5]","[18, 5]","[19, 4]","[18, 4]"],"expected":["\"a3dc4ede7814b5d35d20a8c9310cd63c\"","\"baac406a366a51cb6d69e97a90711050\"","\"90c5d3a4b1ea87901879993bb79e9bc1\"","\"61d75f3dccc8d6987e686d151a423310\""]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The FileDevice component \"CSVHead\" can read CSV file formats and load them as tensors. 
[3]", + "result":"PASS", + "duration":"0", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -106,7 +510,7 @@ { "id":"Fully labeled tenors will be stored with their labels included when saving them as CSV.", "result":"PASS", - "duration":"0.004 seconds", + "duration":"0.005 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -129,7 +533,7 @@ { "id":"Partially labeled tenors will be stored with their labels included when saving them as CSV.", "result":"PASS", - "duration":"0", + "duration":"0.003 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, diff --git a/docs/spock/reports/ut.utility.ListReader_Exception_Spec.json b/docs/spock/reports/ut.utility.ListReader_Exception_Spec.json index d098b1e32..2b890cdc4 100644 --- a/docs/spock/reports/ut.utility.ListReader_Exception_Spec.json +++ b/docs/spock/reports/ut.utility.ListReader_Exception_Spec.json @@ -9,14 +9,14 @@ "failures":"0", "errors":"0", "skipped":"0", - "duration":"0.003 seconds" + "duration":"0.006 seconds" }, "headers":[],"tags":{},"see":[], "features":[ { "id":"The ListReader will detect inconsistent types in the provided data.", "result":"PASS", - "duration":"0", + "duration":"0.002 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -33,7 +33,7 @@ { "id":"The ListReader will detect inconsistent degrees of nesting in the provided data.", "result":"PASS", - "duration":"0", + "duration":"0.001 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, diff --git a/docs/spock/reports/ut.utility.ListReader_Spec.json b/docs/spock/reports/ut.utility.ListReader_Spec.json index 81c3a6802..f84fec9d1 100644 --- a/docs/spock/reports/ut.utility.ListReader_Spec.json +++ b/docs/spock/reports/ut.utility.ListReader_Spec.json @@ -4,19 +4,19 @@ "narrative":"This specification covers an internal class which should not be used\n outside this library, namely the ListReader class.\n This class is simply a converter which turns nested lists\n into flat arrays alongside the type of the elements and the shape of this \"tensor\".", "subjects":[], "statistics":{ - "runs":"3", + "runs":"8", "successRate":"100.0%", "failures":"0", "errors":"0", "skipped":"0", - "duration":"0.003 seconds" + "duration":"0.007 seconds" }, "headers":[],"tags":{},"see":[], "features":[ { "id":"The ListReader can interpret nested lists resembling a matrix into a shape list and value list.", "result":"PASS", - "duration":"0", + "duration":"0.001 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -35,7 +35,7 @@ { "id":"The ListReader can interpret nested lists resembling a 3D tensor into a shape list and value list.", "result":"PASS", - "duration":"0", + "duration":"0.001 seconds", "iterations":{ "tags":{},"see":[],"extraInfo":[] }, @@ -52,7 +52,102 @@ }, { - "id":"The ListReader can interpret nested lists into a shape list and value list.", + "id":"The ListReader can interpret nested lists into a shape list and value list. 
[0]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"when","text":"We use the reader to internally fill 2 lists representing shape and data...","code":["var result = ListReader.read(data, (o)->o)"]}, + + {"kind":"then","text":"The shape list will have the shape of the \"matrix\".","code":["result.shape == expectedShape"]}, + + {"kind":"and","text":"The flattened data is as expected!","code":["result.data == expectedData"]}, + + {"kind":"where","text":"","code":{"data":["[42]","[[43]]","[[-1],[+1]]","[[24, 42]]","[[\"24\", \"42\"]]","[[true],[false]]"],"expectedShape":["[1]","[1, 1]","[2, 1]","[1, 2]","[1, 2]","[2, 1]"],"expectedData":["[42]","[43]","[-1, 1]","[24, 42]","[\"24\", \"42\"]","[true, false]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The ListReader can interpret nested lists into a shape list and value list. [1]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"when","text":"We use the reader to internally fill 2 lists representing shape and data...","code":["var result = ListReader.read(data, (o)->o)"]}, + + {"kind":"then","text":"The shape list will have the shape of the \"matrix\".","code":["result.shape == expectedShape"]}, + + {"kind":"and","text":"The flattened data is as expected!","code":["result.data == expectedData"]}, + + {"kind":"where","text":"","code":{"data":["[42]","[[43]]","[[-1],[+1]]","[[24, 42]]","[[\"24\", \"42\"]]","[[true],[false]]"],"expectedShape":["[1]","[1, 1]","[2, 1]","[1, 2]","[1, 2]","[2, 1]"],"expectedData":["[42]","[43]","[-1, 1]","[24, 42]","[\"24\", \"42\"]","[true, false]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The ListReader can interpret nested lists into a shape list and value list. [2]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"when","text":"We use the reader to internally fill 2 lists representing shape and data...","code":["var result = ListReader.read(data, (o)->o)"]}, + + {"kind":"then","text":"The shape list will have the shape of the \"matrix\".","code":["result.shape == expectedShape"]}, + + {"kind":"and","text":"The flattened data is as expected!","code":["result.data == expectedData"]}, + + {"kind":"where","text":"","code":{"data":["[42]","[[43]]","[[-1],[+1]]","[[24, 42]]","[[\"24\", \"42\"]]","[[true],[false]]"],"expectedShape":["[1]","[1, 1]","[2, 1]","[1, 2]","[1, 2]","[2, 1]"],"expectedData":["[42]","[43]","[-1, 1]","[24, 42]","[\"24\", \"42\"]","[true, false]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The ListReader can interpret nested lists into a shape list and value list. 
[3]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"when","text":"We use the reader to internally fill 2 lists representing shape and data...","code":["var result = ListReader.read(data, (o)->o)"]}, + + {"kind":"then","text":"The shape list will have the shape of the \"matrix\".","code":["result.shape == expectedShape"]}, + + {"kind":"and","text":"The flattened data is as expected!","code":["result.data == expectedData"]}, + + {"kind":"where","text":"","code":{"data":["[42]","[[43]]","[[-1],[+1]]","[[24, 42]]","[[\"24\", \"42\"]]","[[true],[false]]"],"expectedShape":["[1]","[1, 1]","[2, 1]","[1, 2]","[1, 2]","[2, 1]"],"expectedData":["[42]","[43]","[-1, 1]","[24, 42]","[\"24\", \"42\"]","[true, false]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The ListReader can interpret nested lists into a shape list and value list. [4]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"when","text":"We use the reader to internally fill 2 lists representing shape and data...","code":["var result = ListReader.read(data, (o)->o)"]}, + + {"kind":"then","text":"The shape list will have the shape of the \"matrix\".","code":["result.shape == expectedShape"]}, + + {"kind":"and","text":"The flattened data is as expected!","code":["result.data == expectedData"]}, + + {"kind":"where","text":"","code":{"data":["[42]","[[43]]","[[-1],[+1]]","[[24, 42]]","[[\"24\", \"42\"]]","[[true],[false]]"],"expectedShape":["[1]","[1, 1]","[2, 1]","[1, 2]","[1, 2]","[2, 1]"],"expectedData":["[42]","[43]","[-1, 1]","[24, 42]","[\"24\", \"42\"]","[true, false]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"The ListReader can interpret nested lists into a shape list and value list. [5]", "result":"PASS", "duration":"0", "iterations":{ diff --git a/docs/spock/reports/ut.utility.Utility_Spec.json b/docs/spock/reports/ut.utility.Utility_Spec.json index 05edddea7..8c53637fc 100644 --- a/docs/spock/reports/ut.utility.Utility_Spec.json +++ b/docs/spock/reports/ut.utility.Utility_Spec.json @@ -4,17 +4,164 @@ "narrative":"", "subjects":[], "statistics":{ - "runs":"1", + "runs":"8", "successRate":"100.0%", "failures":"0", "errors":"0", "skipped":"0", - "duration":"0.006 seconds" + "duration":"0.026 seconds" }, "headers":[],"tags":{},"see":[], "features":[ { - "id":"Object arrays can be converted to primitive arrays.", + "id":"Object arrays can be converted to primitive arrays. 
[0]", + "result":"PASS", + "duration":"0.003 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"when","text":"","code":["def result = code(input)"]}, + + {"kind":"then","text":"","code":["result == input"]}, + + {"kind":"and","text":"","code":["result.class != input.class"]}, + + {"kind":"and","text":"","code":["result.class == expectedType"]}, + + {"kind":"where","text":"","code":{"input":["[1, 2, 3] as Float[]","[1, 2, 3] as Double[]","[1, 2, 3] as Integer[]","[1, 2, 3] as Long[]","[1, 2, 3] as Short[]","[1, 2, 3] as Byte[]","[1, 2, 3] as Boolean[]","[1, 2, 3] as Character[]"],"code":["{DataConverter.Utility.objFloatsToPrimFloats(it)}","{DataConverter.Utility.objDoublesToPrimDoubles(it)}","{DataConverter.Utility.objIntsToPrimInts(it)}","{DataConverter.Utility.objLongsToPrimLongs(it)}","{DataConverter.Utility.objShortsToPrimShorts(it)}","{DataConverter.Utility.objBytesToPrimBytes(it)}","{DataConverter.Utility.objBooleansToPrimBooleans(it)}","{DataConverter.Utility.objCharsToPrimChars(it)}"],"expectedType":["float[]","double[]","int[]","long[]","short[]","byte[]","boolean[]","char[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Object arrays can be converted to primitive arrays. [1]", + "result":"PASS", + "duration":"0.001 seconds", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"when","text":"","code":["def result = code(input)"]}, + + {"kind":"then","text":"","code":["result == input"]}, + + {"kind":"and","text":"","code":["result.class != input.class"]}, + + {"kind":"and","text":"","code":["result.class == expectedType"]}, + + {"kind":"where","text":"","code":{"input":["[1, 2, 3] as Float[]","[1, 2, 3] as Double[]","[1, 2, 3] as Integer[]","[1, 2, 3] as Long[]","[1, 2, 3] as Short[]","[1, 2, 3] as Byte[]","[1, 2, 3] as Boolean[]","[1, 2, 3] as Character[]"],"code":["{DataConverter.Utility.objFloatsToPrimFloats(it)}","{DataConverter.Utility.objDoublesToPrimDoubles(it)}","{DataConverter.Utility.objIntsToPrimInts(it)}","{DataConverter.Utility.objLongsToPrimLongs(it)}","{DataConverter.Utility.objShortsToPrimShorts(it)}","{DataConverter.Utility.objBytesToPrimBytes(it)}","{DataConverter.Utility.objBooleansToPrimBooleans(it)}","{DataConverter.Utility.objCharsToPrimChars(it)}"],"expectedType":["float[]","double[]","int[]","long[]","short[]","byte[]","boolean[]","char[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Object arrays can be converted to primitive arrays. 
[2]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"when","text":"","code":["def result = code(input)"]}, + + {"kind":"then","text":"","code":["result == input"]}, + + {"kind":"and","text":"","code":["result.class != input.class"]}, + + {"kind":"and","text":"","code":["result.class == expectedType"]}, + + {"kind":"where","text":"","code":{"input":["[1, 2, 3] as Float[]","[1, 2, 3] as Double[]","[1, 2, 3] as Integer[]","[1, 2, 3] as Long[]","[1, 2, 3] as Short[]","[1, 2, 3] as Byte[]","[1, 2, 3] as Boolean[]","[1, 2, 3] as Character[]"],"code":["{DataConverter.Utility.objFloatsToPrimFloats(it)}","{DataConverter.Utility.objDoublesToPrimDoubles(it)}","{DataConverter.Utility.objIntsToPrimInts(it)}","{DataConverter.Utility.objLongsToPrimLongs(it)}","{DataConverter.Utility.objShortsToPrimShorts(it)}","{DataConverter.Utility.objBytesToPrimBytes(it)}","{DataConverter.Utility.objBooleansToPrimBooleans(it)}","{DataConverter.Utility.objCharsToPrimChars(it)}"],"expectedType":["float[]","double[]","int[]","long[]","short[]","byte[]","boolean[]","char[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Object arrays can be converted to primitive arrays. [3]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"when","text":"","code":["def result = code(input)"]}, + + {"kind":"then","text":"","code":["result == input"]}, + + {"kind":"and","text":"","code":["result.class != input.class"]}, + + {"kind":"and","text":"","code":["result.class == expectedType"]}, + + {"kind":"where","text":"","code":{"input":["[1, 2, 3] as Float[]","[1, 2, 3] as Double[]","[1, 2, 3] as Integer[]","[1, 2, 3] as Long[]","[1, 2, 3] as Short[]","[1, 2, 3] as Byte[]","[1, 2, 3] as Boolean[]","[1, 2, 3] as Character[]"],"code":["{DataConverter.Utility.objFloatsToPrimFloats(it)}","{DataConverter.Utility.objDoublesToPrimDoubles(it)}","{DataConverter.Utility.objIntsToPrimInts(it)}","{DataConverter.Utility.objLongsToPrimLongs(it)}","{DataConverter.Utility.objShortsToPrimShorts(it)}","{DataConverter.Utility.objBytesToPrimBytes(it)}","{DataConverter.Utility.objBooleansToPrimBooleans(it)}","{DataConverter.Utility.objCharsToPrimChars(it)}"],"expectedType":["float[]","double[]","int[]","long[]","short[]","byte[]","boolean[]","char[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Object arrays can be converted to primitive arrays. 
[4]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"when","text":"","code":["def result = code(input)"]}, + + {"kind":"then","text":"","code":["result == input"]}, + + {"kind":"and","text":"","code":["result.class != input.class"]}, + + {"kind":"and","text":"","code":["result.class == expectedType"]}, + + {"kind":"where","text":"","code":{"input":["[1, 2, 3] as Float[]","[1, 2, 3] as Double[]","[1, 2, 3] as Integer[]","[1, 2, 3] as Long[]","[1, 2, 3] as Short[]","[1, 2, 3] as Byte[]","[1, 2, 3] as Boolean[]","[1, 2, 3] as Character[]"],"code":["{DataConverter.Utility.objFloatsToPrimFloats(it)}","{DataConverter.Utility.objDoublesToPrimDoubles(it)}","{DataConverter.Utility.objIntsToPrimInts(it)}","{DataConverter.Utility.objLongsToPrimLongs(it)}","{DataConverter.Utility.objShortsToPrimShorts(it)}","{DataConverter.Utility.objBytesToPrimBytes(it)}","{DataConverter.Utility.objBooleansToPrimBooleans(it)}","{DataConverter.Utility.objCharsToPrimChars(it)}"],"expectedType":["float[]","double[]","int[]","long[]","short[]","byte[]","boolean[]","char[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Object arrays can be converted to primitive arrays. [5]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"when","text":"","code":["def result = code(input)"]}, + + {"kind":"then","text":"","code":["result == input"]}, + + {"kind":"and","text":"","code":["result.class != input.class"]}, + + {"kind":"and","text":"","code":["result.class == expectedType"]}, + + {"kind":"where","text":"","code":{"input":["[1, 2, 3] as Float[]","[1, 2, 3] as Double[]","[1, 2, 3] as Integer[]","[1, 2, 3] as Long[]","[1, 2, 3] as Short[]","[1, 2, 3] as Byte[]","[1, 2, 3] as Boolean[]","[1, 2, 3] as Character[]"],"code":["{DataConverter.Utility.objFloatsToPrimFloats(it)}","{DataConverter.Utility.objDoublesToPrimDoubles(it)}","{DataConverter.Utility.objIntsToPrimInts(it)}","{DataConverter.Utility.objLongsToPrimLongs(it)}","{DataConverter.Utility.objShortsToPrimShorts(it)}","{DataConverter.Utility.objBytesToPrimBytes(it)}","{DataConverter.Utility.objBooleansToPrimBooleans(it)}","{DataConverter.Utility.objCharsToPrimChars(it)}"],"expectedType":["float[]","double[]","int[]","long[]","short[]","byte[]","boolean[]","char[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Object arrays can be converted to primitive arrays. 
[6]", + "result":"PASS", + "duration":"0", + "iterations":{ + "tags":{},"see":[],"extraInfo":[] + }, + "blocks":[ + {"kind":"when","text":"","code":["def result = code(input)"]}, + + {"kind":"then","text":"","code":["result == input"]}, + + {"kind":"and","text":"","code":["result.class != input.class"]}, + + {"kind":"and","text":"","code":["result.class == expectedType"]}, + + {"kind":"where","text":"","code":{"input":["[1, 2, 3] as Float[]","[1, 2, 3] as Double[]","[1, 2, 3] as Integer[]","[1, 2, 3] as Long[]","[1, 2, 3] as Short[]","[1, 2, 3] as Byte[]","[1, 2, 3] as Boolean[]","[1, 2, 3] as Character[]"],"code":["{DataConverter.Utility.objFloatsToPrimFloats(it)}","{DataConverter.Utility.objDoublesToPrimDoubles(it)}","{DataConverter.Utility.objIntsToPrimInts(it)}","{DataConverter.Utility.objLongsToPrimLongs(it)}","{DataConverter.Utility.objShortsToPrimShorts(it)}","{DataConverter.Utility.objBytesToPrimBytes(it)}","{DataConverter.Utility.objBooleansToPrimBooleans(it)}","{DataConverter.Utility.objCharsToPrimChars(it)}"],"expectedType":["float[]","double[]","int[]","long[]","short[]","byte[]","boolean[]","char[]"]}} + ], + "problems":{"dataValues":[], "errors":[]} + }, + + { + "id":"Object arrays can be converted to primitive arrays. [7]", "result":"PASS", "duration":"0", "iterations":{ diff --git a/production/lib/neureka-1.0.1.jar b/production/lib/neureka-1.0.1.jar new file mode 100644 index 000000000..5cce86450 Binary files /dev/null and b/production/lib/neureka-1.0.1.jar differ